Ruby 3.5.0dev (2025-06-27 revision 8bba087ae567421cd574b5b5772fd3039ff39b4d)
thread.c (8bba087ae567421cd574b5b5772fd3039ff39b4d)
/**********************************************************************

  thread.c -

  $Author$

  Copyright (C) 2004-2007 Koichi Sasada

**********************************************************************/

/*
  YARV Thread Design

  model 1: Userlevel Thread
    Same as the traditional ruby thread.

  model 2: Native Thread with Global VM lock
    Using pthreads (or Windows threads); Ruby threads run concurrently.

  model 3: Native Thread with fine grain lock
    Using pthreads; Ruby threads run concurrently or in parallel.

  model 4: M:N User:Native threads with Global VM lock
    Combination of models 1 and 2

  model 5: M:N User:Native threads with fine grain lock
    Combination of models 1 and 3

------------------------------------------------------------------------

  model 2:
    A thread that holds the mutex (GVL: Global VM Lock or Giant VM Lock)
    can run. At thread scheduling points, the running thread releases the
    GVL. If the running thread attempts a blocking operation, it must
    release the GVL so that another thread can continue in its place.
    After the blocking operation, the thread must check for interrupts
    (RUBY_VM_CHECK_INTS).

    Every VM can run in parallel.

    Ruby threads are scheduled by the OS thread scheduler.

------------------------------------------------------------------------

  model 3:
    Every thread runs concurrently or in parallel, and exclusive access
    control is needed to access shared objects. For example, to access a
    String object or an Array object, a fine grain lock must be taken
    every time.
 */


/*
 * FD_SET, FD_CLR and FD_ISSET have a small sanity check when using glibc
 * 2.15 or later with _FORTIFY_SOURCE > 0.
 * However, the implementation is wrong. Even though Linux's select(2)
 * supports large fd sizes (> FD_SETSIZE), it wrongly assumes fd is always
 * less than FD_SETSIZE (i.e. 1024). And then, when HAVE_RB_FD_INIT is
 * enabled, it doesn't work correctly and makes the program abort.
 * Therefore we need to disable FORTIFY_SOURCE until glibc fixes it.
 */
#undef _FORTIFY_SOURCE
#undef __USE_FORTIFY_LEVEL
#define __USE_FORTIFY_LEVEL 0

/* for model 2 */

#include "ruby/internal/config.h"

#ifdef __linux__
// Normally, gcc(1) translates calls to alloca() with inlined code. This is
// not done when either the -ansi, -std=c89, -std=c99, or -std=c11 option is
// given and the header <alloca.h> is not included.
# include <alloca.h>
#endif

#define TH_SCHED(th) (&(th)->ractor->threads.sched)

#include "eval_intern.h"
#include "hrtime.h"
#include "internal.h"
#include "internal/class.h"
#include "internal/cont.h"
#include "internal/error.h"
#include "internal/gc.h"
#include "internal/hash.h"
#include "internal/io.h"
#include "internal/object.h"
#include "internal/proc.h"
#include "ruby/fiber/scheduler.h"
#include "internal/signal.h"
#include "internal/thread.h"
#include "internal/time.h"
#include "internal/warnings.h"
#include "iseq.h"
#include "ruby/debug.h"
#include "ruby/io.h"
#include "ruby/thread.h"
#include "ruby/thread_native.h"
#include "timev.h"
#include "vm_core.h"
#include "ractor_core.h"
#include "vm_debug.h"
#include "vm_sync.h"

#include "ccan/list/list.h"

#ifndef USE_NATIVE_THREAD_PRIORITY
#define USE_NATIVE_THREAD_PRIORITY 0
#define RUBY_THREAD_PRIORITY_MAX 3
#define RUBY_THREAD_PRIORITY_MIN -3
#endif

static VALUE rb_cThreadShield;
static VALUE cThGroup;

static VALUE sym_immediate;
static VALUE sym_on_blocking;
static VALUE sym_never;

static uint32_t thread_default_quantum_ms = 100;

#define THREAD_LOCAL_STORAGE_INITIALISED FL_USER13
#define THREAD_LOCAL_STORAGE_INITIALISED_P(th) RB_FL_TEST_RAW((th), THREAD_LOCAL_STORAGE_INITIALISED)

static inline VALUE
rb_thread_local_storage(VALUE thread)
{
    if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
        rb_ivar_set(thread, idLocals, rb_hash_new());
        RB_FL_SET_RAW(thread, THREAD_LOCAL_STORAGE_INITIALISED);
    }
    return rb_ivar_get(thread, idLocals);
}

enum SLEEP_FLAGS {
    SLEEP_DEADLOCKABLE   = 0x01,
    SLEEP_SPURIOUS_CHECK = 0x02,
    SLEEP_ALLOW_SPURIOUS = 0x04,
    SLEEP_NO_CHECKINTS   = 0x08,
};

static void sleep_forever(rb_thread_t *th, unsigned int fl);
static int sleep_hrtime(rb_thread_t *, rb_hrtime_t, unsigned int fl);

static void rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end);
static int rb_threadptr_dead(rb_thread_t *th);
static void rb_check_deadlock(rb_ractor_t *r);
static int rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th);
static const char *thread_status_name(rb_thread_t *th, int detail);
static int hrtime_update_expire(rb_hrtime_t *, const rb_hrtime_t);
NORETURN(static void async_bug_fd(const char *mesg, int errno_arg, int fd));
MAYBE_UNUSED(static int consume_communication_pipe(int fd));

static rb_atomic_t system_working = 1;
static rb_internal_thread_specific_key_t specific_key_count;

/********************************************************************************/

#define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

struct rb_blocking_region_buffer {
    enum rb_thread_status prev_status;
};

static int unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted);
static void unblock_function_clear(rb_thread_t *th);

static inline int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
                                        rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted);
static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);

#define THREAD_BLOCKING_BEGIN(th) do { \
    struct rb_thread_sched * const sched = TH_SCHED(th); \
    RB_VM_SAVE_MACHINE_CONTEXT(th); \
    thread_sched_to_waiting((sched), (th));

#define THREAD_BLOCKING_END(th) \
    thread_sched_to_running((sched), (th)); \
    rb_ractor_thread_switch(th->ractor, th, false); \
} while(0)

#ifdef __GNUC__
#ifdef HAVE_BUILTIN___BUILTIN_CHOOSE_EXPR_CONSTANT_P
#define only_if_constant(expr, notconst) __builtin_choose_expr(__builtin_constant_p(expr), (expr), (notconst))
#else
#define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
#endif
#else
#define only_if_constant(expr, notconst) notconst
#endif
#define BLOCKING_REGION(th, exec, ubf, ubfarg, fail_if_interrupted) do { \
    struct rb_blocking_region_buffer __region; \
    if (blocking_region_begin(th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
        /* always return true unless fail_if_interrupted */ \
        !only_if_constant(fail_if_interrupted, TRUE)) { \
        /* Important that this is inlined into the macro, and not part of \
         * blocking_region_begin - see bug #20493 */ \
        RB_VM_SAVE_MACHINE_CONTEXT(th); \
        thread_sched_to_waiting(TH_SCHED(th), th); \
        exec; \
        blocking_region_end(th, &__region); \
    }; \
} while(0)

/*
 * returns true if this thread was spuriously interrupted, false otherwise
 * (e.g. hit by Thread#run or ran a Ruby-level Signal.trap handler)
 */
#define RUBY_VM_CHECK_INTS_BLOCKING(ec) vm_check_ints_blocking(ec)
static inline int
vm_check_ints_blocking(rb_execution_context_t *ec)
{
#ifdef RUBY_ASSERT_CRITICAL_SECTION
    VM_ASSERT(ruby_assert_critical_section_entered == 0);
#endif

    rb_thread_t *th = rb_ec_thread_ptr(ec);

    if (LIKELY(rb_threadptr_pending_interrupt_empty_p(th))) {
        if (LIKELY(!RUBY_VM_INTERRUPTED_ANY(ec))) return FALSE;
    }
    else {
        th->pending_interrupt_queue_checked = 0;
        RUBY_VM_SET_INTERRUPT(ec);
    }
    return rb_threadptr_execute_interrupts(th, 1);
}

int
rb_vm_check_ints_blocking(rb_execution_context_t *ec)
{
    return vm_check_ints_blocking(ec);
}

/*
 * poll() is supported by many OSes, but so far Linux is the only
 * one we know of that supports using poll() in all places select()
 * would work.
 */
#if defined(HAVE_POLL)
# if defined(__linux__)
#   define USE_POLL
# endif
# if defined(__FreeBSD_version) && __FreeBSD_version >= 1100000
#   define USE_POLL
    /* FreeBSD does not set POLLOUT when POLLHUP happens */
#   define POLLERR_SET (POLLHUP | POLLERR)
# endif
#endif

static void
timeout_prepare(rb_hrtime_t **to, rb_hrtime_t *rel, rb_hrtime_t *end,
                const struct timeval *timeout)
{
    if (timeout) {
        *rel = rb_timeval2hrtime(timeout);
        *end = rb_hrtime_add(rb_hrtime_now(), *rel);
        *to = rel;
    }
    else {
        *to = 0;
    }
}

MAYBE_UNUSED(NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start)));
MAYBE_UNUSED(static bool th_has_dedicated_nt(const rb_thread_t *th));
MAYBE_UNUSED(static int waitfd_to_waiting_flag(int wfd_event));

#include THREAD_IMPL_SRC

/*
 * TODO: somebody with win32 knowledge should be able to get rid of
 * the timer-thread by busy-waiting on signals. And it should be possible
 * to make the GVL in thread_pthread.c platform-independent.
 */
#ifndef BUSY_WAIT_SIGNALS
# define BUSY_WAIT_SIGNALS (0)
#endif

#ifndef USE_EVENTFD
# define USE_EVENTFD (0)
#endif

#include "thread_sync.c"

void
rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
{
    rb_native_mutex_initialize(lock);
}

void
rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
{
    rb_native_mutex_destroy(lock);
}

void
rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
{
    rb_native_mutex_lock(lock);
}

void
rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
{
    rb_native_mutex_unlock(lock);
}

static int
unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted)
{
    do {
        if (fail_if_interrupted) {
            if (RUBY_VM_INTERRUPTED_ANY(th->ec)) {
                return FALSE;
            }
        }
        else {
            RUBY_VM_CHECK_INTS(th->ec);
        }

        rb_native_mutex_lock(&th->interrupt_lock);
    } while (!th->ec->raised_flag && RUBY_VM_INTERRUPTED_ANY(th->ec) &&
             (rb_native_mutex_unlock(&th->interrupt_lock), TRUE));

    VM_ASSERT(th->unblock.func == NULL);

    th->unblock.func = func;
    th->unblock.arg = arg;
    rb_native_mutex_unlock(&th->interrupt_lock);

    return TRUE;
}

static void
unblock_function_clear(rb_thread_t *th)
{
    rb_native_mutex_lock(&th->interrupt_lock);
    th->unblock.func = 0;
    rb_native_mutex_unlock(&th->interrupt_lock);
}

static void
threadptr_set_interrupt_locked(rb_thread_t *th, bool trap)
{
    // th->interrupt_lock should be acquired here

    RUBY_DEBUG_LOG("th:%u trap:%d", rb_th_serial(th), trap);

    if (trap) {
        RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
    }
    else {
        RUBY_VM_SET_INTERRUPT(th->ec);
    }

    if (th->unblock.func != NULL) {
        (th->unblock.func)(th->unblock.arg);
    }
    else {
        /* none */
    }
}

static void
threadptr_set_interrupt(rb_thread_t *th, int trap)
{
    rb_native_mutex_lock(&th->interrupt_lock);
    {
        threadptr_set_interrupt_locked(th, trap);
    }
    rb_native_mutex_unlock(&th->interrupt_lock);
}

/* Set interrupt flag on another thread or current thread, and call its UBF if it has one set */
void
rb_threadptr_interrupt(rb_thread_t *th)
{
    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
    threadptr_set_interrupt(th, false);
}

static void
threadptr_trap_interrupt(rb_thread_t *th)
{
    threadptr_set_interrupt(th, true);
}

static void
terminate_all(rb_ractor_t *r, const rb_thread_t *main_thread)
{
    rb_thread_t *th = 0;

    ccan_list_for_each(&r->threads.set, th, lt_node) {
        if (th != main_thread) {
            RUBY_DEBUG_LOG("terminate start th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));

            rb_threadptr_pending_interrupt_enque(th, RUBY_FATAL_THREAD_TERMINATED);
            rb_threadptr_interrupt(th);

            RUBY_DEBUG_LOG("terminate done th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));
        }
        else {
            RUBY_DEBUG_LOG("main thread th:%u", rb_th_serial(th));
        }
    }
}

static void
rb_threadptr_join_list_wakeup(rb_thread_t *thread)
{
    while (thread->join_list) {
        struct rb_waiting_list *join_list = thread->join_list;

        // Consume the entry from the join list:
        thread->join_list = join_list->next;

        rb_thread_t *target_thread = join_list->thread;

        if (target_thread->scheduler != Qnil && join_list->fiber) {
            rb_fiber_scheduler_unblock(target_thread->scheduler, target_thread->self, rb_fiberptr_self(join_list->fiber));
        }
        else {
            rb_threadptr_interrupt(target_thread);

            switch (target_thread->status) {
              case THREAD_STOPPED:
              case THREAD_STOPPED_FOREVER:
                target_thread->status = THREAD_RUNNABLE;
                break;
              default:
                break;
            }
        }
    }
}

void
rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
{
    while (th->keeping_mutexes) {
        rb_mutex_t *mutex = th->keeping_mutexes;
        th->keeping_mutexes = mutex->next_mutex;

        // rb_warn("mutex #<%p> was not unlocked by thread #<%p>", (void *)mutex, (void*)th);

        const char *error_message = rb_mutex_unlock_th(mutex, th, mutex->fiber);
        if (error_message) rb_bug("invalid keeping_mutexes: %s", error_message);
    }
}

void
rb_thread_terminate_all(rb_thread_t *th)
{
    rb_ractor_t *cr = th->ractor;
    rb_execution_context_t * volatile ec = th->ec;
    volatile int sleeping = 0;

    if (cr->threads.main != th) {
        rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
               (void *)cr->threads.main, (void *)th);
    }

    /* unlock all locking mutexes */
    rb_threadptr_unlock_all_locking_mutexes(th);

    EC_PUSH_TAG(ec);
    if (EC_EXEC_TAG() == TAG_NONE) {
      retry:
        RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));

        terminate_all(cr, th);

        while (rb_ractor_living_thread_num(cr) > 1) {
            rb_hrtime_t rel = RB_HRTIME_PER_SEC;
            /*
             * The thread-exiting routine in thread_start_func_2 notifies
             * me when the last sub-thread exits.
             */
            sleeping = 1;
            native_sleep(th, &rel);
            RUBY_VM_CHECK_INTS_BLOCKING(ec);
            sleeping = 0;
        }
    }
    else {
        /*
         * When an exception is caught (e.g. Ctrl+C), broadcast the kill
         * request again to ensure that all threads are killed even if
         * they are blocked on sleep, a mutex, etc.
         */
        if (sleeping) {
            sleeping = 0;
            goto retry;
        }
    }
    EC_POP_TAG();
}

void rb_threadptr_root_fiber_terminate(rb_thread_t *th);
static void threadptr_interrupt_exec_cleanup(rb_thread_t *th);

static void
thread_cleanup_func_before_exec(void *th_ptr)
{
    rb_thread_t *th = th_ptr;
    th->status = THREAD_KILLED;

    // The thread stack doesn't exist in the forked process:
    th->ec->machine.stack_start = th->ec->machine.stack_end = NULL;

    threadptr_interrupt_exec_cleanup(th);
    rb_threadptr_root_fiber_terminate(th);
}

static void
thread_cleanup_func(void *th_ptr, int atfork)
{
    rb_thread_t *th = th_ptr;

    th->locking_mutex = Qfalse;
    thread_cleanup_func_before_exec(th_ptr);

    if (atfork) {
        native_thread_destroy_atfork(th->nt);
        th->nt = NULL;
        return;
    }

    rb_native_mutex_destroy(&th->interrupt_lock);
}

static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
static VALUE rb_thread_to_s(VALUE thread);

void
ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame)
{
    native_thread_init_stack(th, local_in_parent_frame);
}

const VALUE *
rb_vm_proc_local_ep(VALUE proc)
{
    const VALUE *ep = vm_proc_ep(proc);

    if (ep) {
        return rb_vm_ep_local_ep(ep);
    }
    else {
        return NULL;
    }
}

// for ractor, defined in vm.c
VALUE rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
                                  int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler);

static VALUE
thread_do_start_proc(rb_thread_t *th)
{
    VALUE args = th->invoke_arg.proc.args;
    const VALUE *args_ptr;
    int args_len;
    VALUE procval = th->invoke_arg.proc.proc;
    rb_proc_t *proc;
    GetProcPtr(procval, proc);

    th->ec->errinfo = Qnil;
    th->ec->root_lep = rb_vm_proc_local_ep(procval);
    th->ec->root_svar = Qfalse;

    vm_check_ints_blocking(th->ec);

    if (th->invoke_type == thread_invoke_type_ractor_proc) {
        VALUE self = rb_ractor_self(th->ractor);
        th->thgroup = th->ractor->thgroup_default = rb_obj_alloc(cThGroup);

        VM_ASSERT(FIXNUM_P(args));
        args_len = FIX2INT(args);
        args_ptr = ALLOCA_N(VALUE, args_len);
        rb_ractor_receive_parameters(th->ec, th->ractor, args_len, (VALUE *)args_ptr);
        vm_check_ints_blocking(th->ec);

        return rb_vm_invoke_proc_with_self(
            th->ec, proc, self,
            args_len, args_ptr,
            th->invoke_arg.proc.kw_splat,
            VM_BLOCK_HANDLER_NONE
        );
    }
    else {
        args_len = RARRAY_LENINT(args);
        if (args_len < 8) {
            /* free proc.args if the length is small enough */
            args_ptr = ALLOCA_N(VALUE, args_len);
            MEMCPY((VALUE *)args_ptr, RARRAY_CONST_PTR(args), VALUE, args_len);
            th->invoke_arg.proc.args = Qnil;
        }
        else {
            args_ptr = RARRAY_CONST_PTR(args);
        }

        vm_check_ints_blocking(th->ec);

        return rb_vm_invoke_proc(
            th->ec, proc,
            args_len, args_ptr,
            th->invoke_arg.proc.kw_splat,
            VM_BLOCK_HANDLER_NONE
        );
    }
}

static VALUE
thread_do_start(rb_thread_t *th)
{
    native_set_thread_name(th);
    VALUE result = Qundef;

    switch (th->invoke_type) {
      case thread_invoke_type_proc:
        result = thread_do_start_proc(th);
        break;

      case thread_invoke_type_ractor_proc:
        result = thread_do_start_proc(th);
        rb_ractor_atexit(th->ec, result);
        break;

      case thread_invoke_type_func:
        result = (*th->invoke_arg.func.func)(th->invoke_arg.func.arg);
        break;

      case thread_invoke_type_none:
        rb_bug("unreachable");
    }

    return result;
}

void rb_ec_clear_current_thread_trace_func(const rb_execution_context_t *ec);

static int
thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
{
    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
    VM_ASSERT(th != th->vm->ractor.main_thread);

    enum ruby_tag_type state;
    VALUE errinfo = Qnil;
    rb_thread_t *ractor_main_th = th->ractor->threads.main;

    // setup ractor
    if (rb_ractor_status_p(th->ractor, ractor_blocking)) {
        RB_VM_LOCK();
        {
            rb_vm_ractor_blocking_cnt_dec(th->vm, th->ractor, __FILE__, __LINE__);
            rb_ractor_t *r = th->ractor;
            r->r_stdin = rb_io_prep_stdin();
            r->r_stdout = rb_io_prep_stdout();
            r->r_stderr = rb_io_prep_stderr();
        }
        RB_VM_UNLOCK();
    }

    // Ensure that we are not joinable.
    VM_ASSERT(UNDEF_P(th->value));

    int fiber_scheduler_closed = 0, event_thread_end_hooked = 0;
    VALUE result = Qundef;

    EC_PUSH_TAG(th->ec);

    if ((state = EC_EXEC_TAG()) == TAG_NONE) {
        EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_BEGIN, th->self, 0, 0, 0, Qundef);

        result = thread_do_start(th);
    }

    if (!fiber_scheduler_closed) {
        fiber_scheduler_closed = 1;
        rb_fiber_scheduler_set(Qnil);
    }

    if (!event_thread_end_hooked) {
        event_thread_end_hooked = 1;
        EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_END, th->self, 0, 0, 0, Qundef);
    }

    if (state == TAG_NONE) {
        // This must be set AFTER doing all user-level code. At this point, the thread is effectively finished and calls to `Thread#join` will succeed.
        th->value = result;
    }
    else {
        errinfo = th->ec->errinfo;

        VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef);
        if (!NIL_P(exc)) errinfo = exc;

        if (state == TAG_FATAL) {
            if (th->invoke_type == thread_invoke_type_ractor_proc) {
                rb_ractor_atexit(th->ec, Qnil);
            }
            /* fatal error within this thread, need to stop whole script */
        }
        else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
            /* exit on main_thread. */
        }
        else {
            if (th->report_on_exception) {
                VALUE mesg = rb_thread_to_s(th->self);
                rb_str_cat_cstr(mesg, " terminated with exception (report_on_exception is true):\n");
                rb_write_error_str(mesg);
                rb_ec_error_print(th->ec, errinfo);
            }

            if (th->invoke_type == thread_invoke_type_ractor_proc) {
                rb_ractor_atexit_exception(th->ec);
            }

            if (th->vm->thread_abort_on_exception ||
                th->abort_on_exception || RTEST(ruby_debug)) {
                /* exit on main_thread */
            }
            else {
                errinfo = Qnil;
            }
        }
        th->value = Qnil;
    }

    // The thread is effectively finished and can be joined.
    VM_ASSERT(!UNDEF_P(th->value));

    rb_threadptr_join_list_wakeup(th);
    rb_threadptr_unlock_all_locking_mutexes(th);

    if (th->invoke_type == thread_invoke_type_ractor_proc) {
        rb_thread_terminate_all(th);
        rb_ractor_teardown(th->ec);
    }

    th->status = THREAD_KILLED;
    RUBY_DEBUG_LOG("killed th:%u", rb_th_serial(th));

    if (th->vm->ractor.main_thread == th) {
        ruby_stop(0);
    }

    if (RB_TYPE_P(errinfo, T_OBJECT)) {
        /* treat with normal error object */
        rb_threadptr_raise(ractor_main_th, 1, &errinfo);
    }

    EC_POP_TAG();

    rb_ec_clear_current_thread_trace_func(th->ec);

    /* locking_mutex must be Qfalse */
    if (th->locking_mutex != Qfalse) {
        rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
               (void *)th, th->locking_mutex);
    }

    if (ractor_main_th->status == THREAD_KILLED &&
        th->ractor->threads.cnt <= 2 /* main thread and this thread */) {
        /* I'm the last thread. Wake up the main thread from rb_thread_terminate_all. */
        rb_threadptr_interrupt(ractor_main_th);
    }

    rb_check_deadlock(th->ractor);

    rb_fiber_close(th->ec->fiber_ptr);

    thread_cleanup_func(th, FALSE);
    VM_ASSERT(th->ec->vm_stack == NULL);

    if (th->invoke_type == thread_invoke_type_ractor_proc) {
        // after rb_ractor_living_threads_remove()
        // GC can happen at any time and this ractor can be collected (and the GVL destroyed),
        // so gvl_release() should come before it.
        thread_sched_to_dead(TH_SCHED(th), th);
        rb_ractor_living_threads_remove(th->ractor, th);
    }
    else {
        rb_ractor_living_threads_remove(th->ractor, th);
        thread_sched_to_dead(TH_SCHED(th), th);
    }

    return 0;
}

struct thread_create_params {
    enum thread_invoke_type type;

    // for normal proc thread
    VALUE args;
    VALUE proc;

    // for ractor
    rb_ractor_t *g;

    // for func
    VALUE (*fn)(void *);
};

static void thread_specific_storage_alloc(rb_thread_t *th);

static VALUE
thread_create_core(VALUE thval, struct thread_create_params *params)
{
    rb_execution_context_t *ec = GET_EC();
    rb_thread_t *th = rb_thread_ptr(thval), *current_th = rb_ec_thread_ptr(ec);
    int err;

    thread_specific_storage_alloc(th);

    if (OBJ_FROZEN(current_th->thgroup)) {
        rb_raise(rb_eThreadError,
                 "can't start a new thread (frozen ThreadGroup)");
    }

    rb_fiber_inherit_storage(ec, th->ec->fiber_ptr);

    switch (params->type) {
      case thread_invoke_type_proc:
        th->invoke_type = thread_invoke_type_proc;
        th->invoke_arg.proc.args = params->args;
        th->invoke_arg.proc.proc = params->proc;
        th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
        break;

      case thread_invoke_type_ractor_proc:
#if RACTOR_CHECK_MODE > 0
        rb_ractor_setup_belonging_to(thval, rb_ractor_id(params->g));
#endif
        th->invoke_type = thread_invoke_type_ractor_proc;
        th->ractor = params->g;
        th->ractor->threads.main = th;
        th->invoke_arg.proc.proc = rb_proc_isolate_bang(params->proc);
        th->invoke_arg.proc.args = INT2FIX(RARRAY_LENINT(params->args));
        th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
        rb_ractor_send_parameters(ec, params->g, params->args);
        break;

      case thread_invoke_type_func:
        th->invoke_type = thread_invoke_type_func;
        th->invoke_arg.func.func = params->fn;
        th->invoke_arg.func.arg = (void *)params->args;
        break;

      default:
        rb_bug("unreachable");
    }

    th->priority = current_th->priority;
    th->thgroup = current_th->thgroup;

    th->pending_interrupt_queue = rb_ary_hidden_new(0);
    th->pending_interrupt_queue_checked = 0;
    th->pending_interrupt_mask_stack = rb_ary_dup(current_th->pending_interrupt_mask_stack);
    RBASIC_CLEAR_CLASS(th->pending_interrupt_mask_stack);

    rb_native_mutex_initialize(&th->interrupt_lock);

    RUBY_DEBUG_LOG("r:%u th:%u", rb_ractor_id(th->ractor), rb_th_serial(th));

    rb_ractor_living_threads_insert(th->ractor, th);

    /* kick thread */
    err = native_thread_create(th);
    if (err) {
        th->status = THREAD_KILLED;
        rb_ractor_living_threads_remove(th->ractor, th);
        rb_raise(rb_eThreadError, "can't create Thread: %s", strerror(err));
    }
    return thval;
}

#define threadptr_initialized(th) ((th)->invoke_type != thread_invoke_type_none)

/*
 * call-seq:
 *  Thread.new { ... } -> thread
 *  Thread.new(*args, &proc) -> thread
 *  Thread.new(*args) { |args| ... } -> thread
 *
 *  Creates a new thread executing the given block.
 *
 *  Any +args+ given to ::new will be passed to the block:
 *
 *      arr = []
 *      a, b, c = 1, 2, 3
 *      Thread.new(a,b,c) { |d,e,f| arr << d << e << f }.join
 *      arr #=> [1, 2, 3]
 *
 *  A ThreadError exception is raised if ::new is called without a block.
 *
 *  If you're going to subclass Thread, be sure to call super in your
 *  +initialize+ method, otherwise a ThreadError will be raised.
 */
static VALUE
thread_s_new(int argc, VALUE *argv, VALUE klass)
{
    rb_thread_t *th;
    VALUE thread = rb_thread_alloc(klass);

    if (GET_RACTOR()->threads.main->status == THREAD_KILLED) {
        rb_raise(rb_eThreadError, "can't alloc thread");
    }

    rb_obj_call_init_kw(thread, argc, argv, RB_PASS_CALLED_KEYWORDS);
    th = rb_thread_ptr(thread);
    if (!threadptr_initialized(th)) {
        rb_raise(rb_eThreadError, "uninitialized thread - check '%"PRIsVALUE"#initialize'",
                 klass);
    }
    return thread;
}

/*
 * call-seq:
 *  Thread.start([args]*) {|args| block } -> thread
 *  Thread.fork([args]*) {|args| block } -> thread
 *
 *  Basically the same as ::new. However, if class Thread is subclassed, then
 *  calling +start+ in that subclass will not invoke the subclass's
 *  +initialize+ method.
 */

static VALUE
thread_start(VALUE klass, VALUE args)
{
    struct thread_create_params params = {
        .type = thread_invoke_type_proc,
        .args = args,
        .proc = rb_block_proc(),
    };
    return thread_create_core(rb_thread_alloc(klass), &params);
}

static VALUE
threadptr_invoke_proc_location(rb_thread_t *th)
{
    if (th->invoke_type == thread_invoke_type_proc) {
        return rb_proc_location(th->invoke_arg.proc.proc);
    }
    else {
        return Qnil;
    }
}

/* :nodoc: */
static VALUE
thread_initialize(VALUE thread, VALUE args)
{
    rb_thread_t *th = rb_thread_ptr(thread);

    if (!rb_block_given_p()) {
        rb_raise(rb_eThreadError, "must be called with a block");
    }
    else if (th->invoke_type != thread_invoke_type_none) {
        VALUE loc = threadptr_invoke_proc_location(th);
        if (!NIL_P(loc)) {
            rb_raise(rb_eThreadError,
                     "already initialized thread - %"PRIsVALUE":%"PRIsVALUE,
                     RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
        }
        else {
            rb_raise(rb_eThreadError, "already initialized thread");
        }
    }
    else {
        struct thread_create_params params = {
            .type = thread_invoke_type_proc,
            .args = args,
            .proc = rb_block_proc(),
        };
        return thread_create_core(thread, &params);
    }
}

VALUE
rb_thread_create(VALUE (*fn)(void *), void *arg)
{
    struct thread_create_params params = {
        .type = thread_invoke_type_func,
        .fn = fn,
        .args = (VALUE)arg,
    };
    return thread_create_core(rb_thread_alloc(rb_cThread), &params);
}
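
/*
 * Illustrative sketch (not part of the VM sources): rb_thread_create() is
 * the C-level counterpart of Thread.new. The names below
 * (example_thread_body, example_spawn) are hypothetical extension code.
 * The callback runs in the new Ruby thread with the GVL held, so it may
 * call Ruby APIs, and its return value becomes the thread's Thread#value
 * (see thread_do_start above).
 */
#if 0 /* usage sketch only */
static VALUE
example_thread_body(void *arg)
{
    /* Runs in the new Ruby thread, GVL held. */
    int n = (int)(VALUE)arg;
    return INT2FIX(n * 2); /* visible to callers via Thread#value */
}

static VALUE
example_spawn(VALUE self)
{
    VALUE thread = rb_thread_create(example_thread_body, (void *)(VALUE)21);
    return rb_funcall(thread, rb_intern("value"), 0); /* => 42 */
}
#endif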

VALUE
rb_thread_create_ractor(rb_ractor_t *r, VALUE args, VALUE proc)
{
    struct thread_create_params params = {
        .type = thread_invoke_type_ractor_proc,
        .g = r,
        .args = args,
        .proc = proc,
    };
    return thread_create_core(rb_thread_alloc(rb_cThread), &params);
}

struct join_arg {
    struct rb_waiting_list *waiter;
    rb_thread_t *target;
    VALUE timeout;
    rb_hrtime_t *limit;
};

static VALUE
remove_from_join_list(VALUE arg)
{
    struct join_arg *p = (struct join_arg *)arg;
    rb_thread_t *target_thread = p->target;

    if (target_thread->status != THREAD_KILLED) {
        struct rb_waiting_list **join_list = &target_thread->join_list;

        while (*join_list) {
            if (*join_list == p->waiter) {
                *join_list = (*join_list)->next;
                break;
            }

            join_list = &(*join_list)->next;
        }
    }

    return Qnil;
}

static int
thread_finished(rb_thread_t *th)
{
    return th->status == THREAD_KILLED || !UNDEF_P(th->value);
}

static VALUE
thread_join_sleep(VALUE arg)
{
    struct join_arg *p = (struct join_arg *)arg;
    rb_thread_t *target_th = p->target, *th = p->waiter->thread;
    rb_hrtime_t end = 0, *limit = p->limit;

    if (limit) {
        end = rb_hrtime_add(*limit, rb_hrtime_now());
    }

    while (!thread_finished(target_th)) {
        VALUE scheduler = rb_fiber_scheduler_current();

        if (!limit) {
            if (scheduler != Qnil) {
                rb_fiber_scheduler_block(scheduler, target_th->self, Qnil);
            }
            else {
                sleep_forever(th, SLEEP_DEADLOCKABLE | SLEEP_ALLOW_SPURIOUS | SLEEP_NO_CHECKINTS);
            }
        }
        else {
            if (hrtime_update_expire(limit, end)) {
                RUBY_DEBUG_LOG("timeout target_th:%u", rb_th_serial(target_th));
                return Qfalse;
            }

            if (scheduler != Qnil) {
                VALUE timeout = rb_float_new(hrtime2double(*limit));
                rb_fiber_scheduler_block(scheduler, target_th->self, timeout);
            }
            else {
                th->status = THREAD_STOPPED;
                native_sleep(th, limit);
            }
        }
        RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
        th->status = THREAD_RUNNABLE;

        RUBY_DEBUG_LOG("interrupted target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
    }

    return Qtrue;
}

static VALUE
thread_join(rb_thread_t *target_th, VALUE timeout, rb_hrtime_t *limit)
{
    rb_execution_context_t *ec = GET_EC();
    rb_thread_t *th = ec->thread_ptr;
    rb_fiber_t *fiber = ec->fiber_ptr;

    if (th == target_th) {
        rb_raise(rb_eThreadError, "Target thread must not be current thread");
    }

    if (th->ractor->threads.main == target_th) {
        rb_raise(rb_eThreadError, "Target thread must not be main thread");
    }

    RUBY_DEBUG_LOG("target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));

    if (target_th->status != THREAD_KILLED) {
        struct rb_waiting_list waiter;
        waiter.next = target_th->join_list;
        waiter.thread = th;
        waiter.fiber = rb_fiberptr_blocking(fiber) ? NULL : fiber;
        target_th->join_list = &waiter;

        struct join_arg arg;
        arg.waiter = &waiter;
        arg.target = target_th;
        arg.timeout = timeout;
        arg.limit = limit;

        if (!rb_ensure(thread_join_sleep, (VALUE)&arg, remove_from_join_list, (VALUE)&arg)) {
            return Qnil;
        }
    }

    RUBY_DEBUG_LOG("success target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));

    if (target_th->ec->errinfo != Qnil) {
        VALUE err = target_th->ec->errinfo;

        if (FIXNUM_P(err)) {
            switch (err) {
              case INT2FIX(TAG_FATAL):
                RUBY_DEBUG_LOG("terminated target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));

                /* OK. killed. */
                break;
              default:
                if (err == RUBY_FATAL_FIBER_KILLED) { // not integer constant so can't be a case expression
                    // root fiber killed in non-main thread
                    break;
                }
                rb_bug("thread_join: Fixnum (%d) should not reach here.", FIX2INT(err));
            }
        }
        else if (THROW_DATA_P(target_th->ec->errinfo)) {
            rb_bug("thread_join: THROW_DATA should not reach here.");
        }
        else {
            /* normal exception */
            rb_exc_raise(err);
        }
    }
    return target_th->self;
}

/*
 * call-seq:
 *   thr.join -> thr
 *   thr.join(limit) -> thr
 *
 *  The calling thread will suspend execution and run this +thr+.
 *
 *  Does not return until +thr+ exits or until the given +limit+ seconds have
 *  passed.
 *
 *  If the time limit expires, +nil+ will be returned, otherwise +thr+ is
 *  returned.
 *
 *  Any threads not joined will be killed when the main program exits.
 *
 *  If +thr+ had previously raised an exception and the ::abort_on_exception or
 *  $DEBUG flags are not set, (so the exception has not yet been processed), it
 *  will be processed at this time.
 *
 *     a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
 *     x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
 *     x.join # Let thread x finish, thread a will be killed on exit.
 *     #=> "axyz"
 *
 *  The following example illustrates the +limit+ parameter.
 *
 *     y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
 *     puts "Waiting" until y.join(0.15)
 *
 *  This will produce:
 *
 *     tick...
 *     Waiting
 *     tick...
 *     Waiting
 *     tick...
 *     tick...
 */

static VALUE
thread_join_m(int argc, VALUE *argv, VALUE self)
{
    VALUE timeout = Qnil;
    rb_hrtime_t rel = 0, *limit = 0;

    if (rb_check_arity(argc, 0, 1)) {
        timeout = argv[0];
    }

    // Convert the timeout eagerly, so it's always converted and deterministic
    /*
     * This supports INFINITY and negative values, so we can't use
     * rb_time_interval right now...
     */
    if (NIL_P(timeout)) {
        /* unlimited */
    }
    else if (FIXNUM_P(timeout)) {
        rel = rb_sec2hrtime(NUM2TIMET(timeout));
        limit = &rel;
    }
    else {
        limit = double2hrtime(&rel, rb_num2dbl(timeout));
    }

    return thread_join(rb_thread_ptr(self), timeout, limit);
}

/*
 * call-seq:
 *   thr.value -> obj
 *
 *  Waits for +thr+ to complete, using #join, and returns its value or raises
 *  the exception which terminated the thread.
 *
 *     a = Thread.new { 2 + 2 }
 *     a.value #=> 4
 *
 *     b = Thread.new { raise 'something went wrong' }
 *     b.value #=> RuntimeError: something went wrong
 */

static VALUE
thread_value(VALUE self)
{
    rb_thread_t *th = rb_thread_ptr(self);
    thread_join(th, Qnil, 0);
    if (UNDEF_P(th->value)) {
        // If the thread is dead because we forked, th->value is still Qundef.
        return Qnil;
    }
    return th->value;
}

/*
 * Thread Scheduling
 */

static void
getclockofday(struct timespec *ts)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    if (clock_gettime(CLOCK_MONOTONIC, ts) == 0)
        return;
#endif
    rb_timespec_now(ts);
}

/*
 * Don't inline this, since the library call is already time consuming
 * and we don't want "struct timespec" on the stack too long for GC
 */
NOINLINE(rb_hrtime_t rb_hrtime_now(void));
rb_hrtime_t
rb_hrtime_now(void)
{
    struct timespec ts;

    getclockofday(&ts);
    return rb_timespec2hrtime(&ts);
}

/*
 * at least gcc 7.2 and 7.3 complain about "rb_hrtime_t end"
 * being uninitialized, maybe other versions, too.
 */
COMPILER_WARNING_PUSH
#if defined(__GNUC__) && __GNUC__ == 7 && __GNUC_MINOR__ <= 3
COMPILER_WARNING_IGNORED(-Wmaybe-uninitialized)
#endif
#ifndef PRIu64
#define PRIu64 PRI_64_PREFIX "u"
#endif
/*
 * @end is the absolute time when @timeout is set to expire.
 * Returns true if @end has passed;
 * updates @timeout and returns false otherwise.
 */
static int
hrtime_update_expire(rb_hrtime_t *timeout, const rb_hrtime_t end)
{
    rb_hrtime_t now = rb_hrtime_now();

    if (now > end) return 1;

    RUBY_DEBUG_LOG("%"PRIu64" > %"PRIu64"", (uint64_t)end, (uint64_t)now);

    *timeout = end - now;
    return 0;
}
COMPILER_WARNING_POP

static int
sleep_hrtime(rb_thread_t *th, rb_hrtime_t rel, unsigned int fl)
{
    enum rb_thread_status prev_status = th->status;
    int woke;
    rb_hrtime_t end = rb_hrtime_add(rb_hrtime_now(), rel);

    th->status = THREAD_STOPPED;
    RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
    while (th->status == THREAD_STOPPED) {
        native_sleep(th, &rel);
        woke = vm_check_ints_blocking(th->ec);
        if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
            break;
        if (hrtime_update_expire(&rel, end))
            break;
        woke = 1;
    }
    th->status = prev_status;
    return woke;
}

static int
sleep_hrtime_until(rb_thread_t *th, rb_hrtime_t end, unsigned int fl)
{
    enum rb_thread_status prev_status = th->status;
    int woke;
    rb_hrtime_t rel = rb_hrtime_sub(end, rb_hrtime_now());

    th->status = THREAD_STOPPED;
    RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
    while (th->status == THREAD_STOPPED) {
        native_sleep(th, &rel);
        woke = vm_check_ints_blocking(th->ec);
        if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
            break;
        if (hrtime_update_expire(&rel, end))
            break;
        woke = 1;
    }
    th->status = prev_status;
    return woke;
}

static void
sleep_forever(rb_thread_t *th, unsigned int fl)
{
    enum rb_thread_status prev_status = th->status;
    enum rb_thread_status status;
    int woke;

    status = fl & SLEEP_DEADLOCKABLE ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
    th->status = status;

    if (!(fl & SLEEP_NO_CHECKINTS)) RUBY_VM_CHECK_INTS_BLOCKING(th->ec);

    while (th->status == status) {
        if (fl & SLEEP_DEADLOCKABLE) {
            rb_ractor_sleeper_threads_inc(th->ractor);
            rb_check_deadlock(th->ractor);
        }
        {
            native_sleep(th, 0);
        }
        if (fl & SLEEP_DEADLOCKABLE) {
            rb_ractor_sleeper_threads_dec(th->ractor);
        }
        if (fl & SLEEP_ALLOW_SPURIOUS) {
            break;
        }

        woke = vm_check_ints_blocking(th->ec);

        if (woke && !(fl & SLEEP_SPURIOUS_CHECK)) {
            break;
        }
    }
    th->status = prev_status;
}

void
rb_thread_sleep_forever(void)
{
    RUBY_DEBUG_LOG("forever");
    sleep_forever(GET_THREAD(), SLEEP_SPURIOUS_CHECK);
}

void
rb_thread_sleep_deadly(void)
{
    RUBY_DEBUG_LOG("deadly");
    sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE|SLEEP_SPURIOUS_CHECK);
}

static void
rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end)
{
    VALUE scheduler = rb_fiber_scheduler_current();
    if (scheduler != Qnil) {
        rb_fiber_scheduler_block(scheduler, blocker, timeout);
    }
    else {
        RUBY_DEBUG_LOG("...");
        if (end) {
            sleep_hrtime_until(GET_THREAD(), end, SLEEP_SPURIOUS_CHECK);
        }
        else {
            sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE);
        }
    }
}

void
rb_thread_wait_for(struct timeval time)
{
    rb_thread_t *th = GET_THREAD();

    sleep_hrtime(th, rb_timeval2hrtime(&time), SLEEP_SPURIOUS_CHECK);
}

void
rb_ec_check_ints(rb_execution_context_t *ec)
{
    RUBY_VM_CHECK_INTS_BLOCKING(ec);
}

/*
 * CAUTION: This function causes thread switching.
 * rb_thread_check_ints() checks ruby's interrupts.
 * Some interrupts need thread switching, invoking handlers,
 * and so on.
 */

void
rb_thread_check_ints(void)
{
    rb_ec_check_ints(GET_EC());
}

/*
 * Hidden API for the tcl/tk wrapper.
 * There is no guarantee that it will be perpetuated.
 */
int
rb_thread_check_trap_pending(void)
{
    return rb_signal_buff_size() != 0;
}

/* This function can be called in a blocking region. */
int
rb_thread_interrupted(VALUE thval)
{
    return (int)RUBY_VM_INTERRUPTED(rb_thread_ptr(thval)->ec);
}
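
/*
 * Usage sketch (hypothetical extension code, not part of this file):
 * rb_thread_interrupted() is listed among the few calls that are safe
 * without the GVL (see the notes before rb_thread_call_without_gvl below),
 * so a long-running no-GVL loop can poll it. `do_one_step' and the struct
 * are illustrative names only; args->thread would be rb_thread_current(),
 * captured while the GVL was still held.
 */
#if 0
struct work_args { VALUE thread; int done; };

static void *
interruptible_work(void *ptr) /* runs without the GVL */
{
    struct work_args *args = ptr;
    while (!args->done) {
        /* set by Thread#kill, signal delivery, VM shutdown, etc. */
        if (rb_thread_interrupted(args->thread)) break;
        do_one_step(args); /* hypothetical unit of work */
    }
    return NULL;
}
#endif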

void
rb_thread_sleep(int sec)
{
    rb_thread_wait_for(rb_time_interval(INT2FIX(sec)));
}

static void
rb_thread_schedule_limits(uint32_t limits_us)
{
    if (!rb_thread_alone()) {
        rb_thread_t *th = GET_THREAD();
        RUBY_DEBUG_LOG("us:%u", (unsigned int)limits_us);

        if (th->running_time_us >= limits_us) {
            RUBY_DEBUG_LOG("switch %s", "start");

            RB_VM_SAVE_MACHINE_CONTEXT(th);
            thread_sched_yield(TH_SCHED(th), th);
            rb_ractor_thread_switch(th->ractor, th, true);

            RUBY_DEBUG_LOG("switch %s", "done");
        }
    }
}

void
rb_thread_schedule(void)
{
    rb_thread_schedule_limits(0);
    RUBY_VM_CHECK_INTS(GET_EC());
}

/* blocking region */

static inline int
blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
                      rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
{
#ifdef RUBY_ASSERT_CRITICAL_SECTION
    VM_ASSERT(ruby_assert_critical_section_entered == 0);
#endif
    VM_ASSERT(th == GET_THREAD());

    region->prev_status = th->status;
    if (unblock_function_set(th, ubf, arg, fail_if_interrupted)) {
        th->blocking_region_buffer = region;
        th->status = THREAD_STOPPED;
        rb_ractor_blocking_threads_inc(th->ractor, __FILE__, __LINE__);

        RUBY_DEBUG_LOG("thread_id:%p", (void *)th->nt->thread_id);
        return TRUE;
    }
    else {
        return FALSE;
    }
}

static inline void
blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
{
    /* entry to ubf_list still permitted at this point, make it impossible: */
    unblock_function_clear(th);
    /* entry to ubf_list impossible at this point, so unregister is safe: */
    unregister_ubf_list(th);

    thread_sched_to_running(TH_SCHED(th), th);
    rb_ractor_thread_switch(th->ractor, th, false);

    th->blocking_region_buffer = 0;
    rb_ractor_blocking_threads_dec(th->ractor, __FILE__, __LINE__);
    if (th->status == THREAD_STOPPED) {
        th->status = region->prev_status;
    }

    RUBY_DEBUG_LOG("end");

#ifndef _WIN32
    // GET_THREAD() clears WSAGetLastError()
    VM_ASSERT(th == GET_THREAD());
#endif
}

/*
 * Resolve sentinel unblock function values to their actual function pointers
 * and appropriate data2 values. This centralizes the logic for handling
 * RUBY_UBF_IO and RUBY_UBF_PROCESS sentinel values.
 *
 * @param unblock_function Pointer to unblock function pointer (modified in place)
 * @param data2 Pointer to data2 pointer (modified in place)
 * @param thread Thread context for resolving data2 when needed
 * @return true if sentinel values were resolved, false otherwise
 */
bool
rb_thread_resolve_unblock_function(rb_unblock_function_t **unblock_function, void **data2, struct rb_thread_struct *thread)
{
    rb_unblock_function_t *ubf = *unblock_function;

    if ((ubf == RUBY_UBF_IO) || (ubf == RUBY_UBF_PROCESS)) {
        *unblock_function = ubf_select;
        *data2 = thread;
        return true;
    }
    return false;
}

void *
rb_nogvl(void *(*func)(void *), void *data1,
         rb_unblock_function_t *ubf, void *data2,
         int flags)
{
    if (flags & RB_NOGVL_OFFLOAD_SAFE) {
        VALUE scheduler = rb_fiber_scheduler_current();
        if (scheduler != Qnil) {
            struct rb_fiber_scheduler_blocking_operation_state state = {0};

            VALUE result = rb_fiber_scheduler_blocking_operation_wait(scheduler, func, data1, ubf, data2, flags, &state);

            if (!UNDEF_P(result)) {
                rb_errno_set(state.saved_errno);
                return state.result;
            }
        }
    }

    void *val = 0;
    rb_execution_context_t *ec = GET_EC();
    rb_thread_t *th = rb_ec_thread_ptr(ec);
    rb_vm_t *vm = rb_ec_vm_ptr(ec);
    bool is_main_thread = vm->ractor.main_thread == th;
    int saved_errno = 0;

    rb_thread_resolve_unblock_function(&ubf, &data2, th);

    if (ubf && rb_ractor_living_thread_num(th->ractor) == 1 && is_main_thread) {
        if (flags & RB_NOGVL_UBF_ASYNC_SAFE) {
            vm->ubf_async_safe = 1;
        }
    }

    rb_vm_t *volatile saved_vm = vm;
    BLOCKING_REGION(th, {
        val = func(data1);
        saved_errno = rb_errno();
    }, ubf, data2, flags & RB_NOGVL_INTR_FAIL);
    vm = saved_vm;

    if (is_main_thread) vm->ubf_async_safe = 0;

    if ((flags & RB_NOGVL_INTR_FAIL) == 0) {
        RUBY_VM_CHECK_INTS_BLOCKING(ec);
    }

    rb_errno_set(saved_errno);

    return val;
}

/*
 * rb_thread_call_without_gvl - permit concurrent/parallel execution.
 * rb_thread_call_without_gvl2 - permit concurrent/parallel execution
 *                               without interrupt processing.
 *
 * rb_thread_call_without_gvl() does:
 *   (1) Check interrupts.
 *   (2) Release the GVL.
 *       Other Ruby threads may run in parallel.
 *   (3) Call func with data1.
 *   (4) Acquire the GVL.
 *       Other Ruby threads can not run in parallel any more.
 *   (5) Check interrupts.
 *
 * rb_thread_call_without_gvl2() does:
 *   (1) Check interrupts and return if interrupted.
 *   (2) Release the GVL.
 *   (3) Call func with data1 and a pointer to the flags.
 *   (4) Acquire the GVL.
 *
 * If another thread interrupts this thread (Thread#kill, signal delivery,
 * VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
 * "un-blocking function"). `ubf()' should interrupt `func()' execution by
 * toggling a cancellation flag, canceling the invocation of a call inside
 * `func()' or similar. Note that `ubf()' may not be called with the GVL.
 *
 * There are built-in ubfs and you can specify these ubfs:
 *
 * * RUBY_UBF_IO: ubf for IO operations
 * * RUBY_UBF_PROCESS: ubf for process operations
 *
 * However, we can not guarantee that our built-in ubfs interrupt your
 * `func()' correctly. Be careful when using rb_thread_call_without_gvl().
 * If you don't provide a proper ubf(), your program will not stop for
 * Control+C or other shutdown events.
 *
 * "Check interrupts" in the above list means checking asynchronous
 * interrupt events (such as Thread#kill, signal delivery, VM-shutdown
 * request, and so on) and calling the corresponding procedures
 * (such as `trap' for signals, raising an exception for Thread#raise).
 * If `func()' finished and interrupts were received, you may want to skip
 * the interrupt check. For example, assume the following func(), which
 * reads data from a file.
 *
 *   read_func(...) {
 *                   // (a) before read
 *     read(buffer); // (b) reading
 *                   // (c) after read
 *   }
 *
 * If an interrupt occurs at (a) or (b), then `ubf()' cancels this
 * `read_func()' and interrupts are checked. However, if an interrupt occurs
 * at (c), after the *read* operation has completed, checking interrupts is
 * harmful because it causes an irrevocable side-effect: the read data will
 * vanish. To avoid this problem, `read_func()' should be used with
 * `rb_thread_call_without_gvl2()'.
 *
 * If `rb_thread_call_without_gvl2()' detects an interrupt, it returns
 * immediately. This function does not show when the execution was
 * interrupted. For example, there are 4 possible timings: (a), (b), (c) and
 * before calling read_func(). You need to record the progress of read_func()
 * and check that progress after `rb_thread_call_without_gvl2()'. You may
 * need to call `rb_thread_check_ints()' correctly, or your program can not
 * process events such as `trap' properly.
 *
 * NOTE: You can not execute most of the Ruby C API or touch Ruby
 *       objects in `func()' and `ubf()', including raising an
 *       exception, because the current thread doesn't hold the GVL
 *       (it causes synchronization problems). If you need to
 *       call Ruby functions, either use rb_thread_call_with_gvl()
 *       or read the source code of the C APIs and confirm their
 *       safety yourself.
 *
 * NOTE: In short, this API is difficult to use safely. I recommend you
 *       use other ways if you can. We lack experience using this API.
 *       Please report any problems related to it.
 *
 * NOTE: Releasing and re-acquiring the GVL may be expensive operations
 *       for a short-running `func()'. Be sure to benchmark, and use this
 *       mechanism only when `func()' consumes enough time.
 *
 * Safe C API:
 * * rb_thread_interrupted() - check interrupt flag
 * * ruby_xmalloc(), ruby_xrealloc(), ruby_xfree() -
 *   they will work without the GVL, and may acquire the GVL when GC is needed.
 */
void *
rb_thread_call_without_gvl2(void *(*func)(void *), void *data1,
                            rb_unblock_function_t *ubf, void *data2)
{
    return rb_nogvl(func, data1, ubf, data2, RB_NOGVL_INTR_FAIL);
}

void *
rb_thread_call_without_gvl(void *(*func)(void *data), void *data1,
                           rb_unblock_function_t *ubf, void *data2)
{
    return rb_nogvl(func, data1, ubf, data2, 0);
}
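
/*
 * Usage sketch (hypothetical extension code, not part of this file): a
 * blocking read() released from the GVL, cancellable via the built-in
 * RUBY_UBF_IO unblocking function described above. All names here
 * (struct read_args, blocking_read, example_read) are illustrative.
 */
#if 0
#include <unistd.h>

struct read_args {
    int fd;
    char *buf;
    size_t capacity;
    ssize_t result;
};

static void *
blocking_read(void *ptr) /* runs WITHOUT the GVL: no Ruby API calls here */
{
    struct read_args *args = ptr;
    args->result = read(args->fd, args->buf, args->capacity);
    return NULL;
}

static VALUE
example_read(VALUE self, VALUE io)
{
    char buffer[1024];
    struct read_args args = {
        NUM2INT(rb_funcall(io, rb_intern("fileno"), 0)),
        buffer, sizeof(buffer), -1
    };

    /* (1) checks interrupts, (2) releases the GVL, (3) calls blocking_read,
     * (4) re-acquires the GVL, (5) checks interrupts -- see the list above. */
    rb_thread_call_without_gvl(blocking_read, &args, RUBY_UBF_IO, NULL);

    if (args.result < 0) rb_sys_fail("read");
    return rb_str_new(buffer, args.result);
}
#endif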

static int
waitfd_to_waiting_flag(int wfd_event)
{
    return wfd_event << 1;
}

static struct ccan_list_head *
rb_io_blocking_operations(struct rb_io *io)
{
    rb_serial_t fork_generation = GET_VM()->fork_gen;

    // On fork, all existing entries in this list (which are stack allocated) become invalid.
    // Therefore, we re-initialize the list which clears it.
    if (io->fork_generation != fork_generation) {
        ccan_list_head_init(&io->blocking_operations);
        io->fork_generation = fork_generation;
    }

    return &io->blocking_operations;
}

/*
 * Registers a blocking operation for an IO object. This is used to track all threads and fibers
 * that are currently blocked on this IO for reading, writing or other operations.
 *
 * When the IO is closed, all blocking operations will be notified via rb_fiber_scheduler_fiber_interrupt
 * for fibers with a scheduler, or via rb_threadptr_interrupt for threads without a scheduler.
 *
 * @parameter io The IO object on which the operation will block
 * @parameter blocking_operation The operation details including the execution context that will be blocked
 */
static void
rb_io_blocking_operation_enter(struct rb_io *io, struct rb_io_blocking_operation *blocking_operation)
{
    ccan_list_add(rb_io_blocking_operations(io), &blocking_operation->list);
}

static void
rb_io_blocking_operation_pop(struct rb_io *io, struct rb_io_blocking_operation *blocking_operation)
{
    ccan_list_del(&blocking_operation->list);
}

struct io_blocking_operation_arguments {
    struct rb_io *io;
    struct rb_io_blocking_operation *blocking_operation;
};

static VALUE
io_blocking_operation_exit(VALUE _arguments)
{
    struct io_blocking_operation_arguments *arguments = (void*)_arguments;
    struct rb_io_blocking_operation *blocking_operation = arguments->blocking_operation;

    rb_io_blocking_operation_pop(arguments->io, blocking_operation);

    rb_io_t *io = arguments->io;
    rb_thread_t *thread = io->closing_ec->thread_ptr;
    rb_fiber_t *fiber = io->closing_ec->fiber_ptr;

    if (thread->scheduler != Qnil) {
        // This can cause spurious wakeups...
        rb_fiber_scheduler_unblock(thread->scheduler, io->self, rb_fiberptr_self(fiber));
    }
    else {
        rb_thread_wakeup(thread->self);
    }

    return Qnil;
}

/*
 * Called when a blocking operation completes or is interrupted. Removes the operation from
 * the IO's blocking_operations list and wakes up any waiting threads/fibers.
 *
 * If there's a wakeup_mutex (meaning an IO close is in progress), synchronizes the cleanup
 * through that mutex to ensure proper coordination with the closing thread.
 *
 * @parameter io The IO object the operation was performed on
 * @parameter blocking_operation The completed operation to clean up
 */
static void
rb_io_blocking_operation_exit(struct rb_io *io, struct rb_io_blocking_operation *blocking_operation)
{
    VALUE wakeup_mutex = io->wakeup_mutex;

    // Indicate that the blocking operation is no longer active:
    blocking_operation->ec = NULL;

    if (RB_TEST(wakeup_mutex)) {
        struct io_blocking_operation_arguments arguments = {
            .io = io,
            .blocking_operation = blocking_operation
        };

        rb_mutex_synchronize(wakeup_mutex, io_blocking_operation_exit, (VALUE)&arguments);
    }
    else {
        // If there's no wakeup_mutex, we can safely remove the operation directly:
        rb_io_blocking_operation_pop(io, blocking_operation);
    }
}

static VALUE
rb_thread_io_blocking_operation_ensure(VALUE _argument)
{
    struct io_blocking_operation_arguments *arguments = (void*)_argument;

    rb_io_blocking_operation_exit(arguments->io, arguments->blocking_operation);

    return Qnil;
}

/*
 * Executes a function that performs a blocking IO operation, while properly tracking
 * the operation in the IO's blocking_operations list. This ensures proper cleanup
 * and interruption handling if the IO is closed while blocked.
 *
 * The operation is automatically removed from the blocking_operations list when the function
 * returns, whether normally or due to an exception.
 *
 * @parameter self The IO object
 * @parameter function The function to execute that will perform the blocking operation
 * @parameter argument The argument to pass to the function
 * @returns The result of the blocking operation function
 */
VALUE
rb_thread_io_blocking_operation(VALUE self, VALUE(*function)(VALUE), VALUE argument)
{
    struct rb_io *io;
    RB_IO_POINTER(self, io);

    rb_execution_context_t *ec = GET_EC();
    struct rb_io_blocking_operation blocking_operation = {
        .ec = ec,
    };
    rb_io_blocking_operation_enter(io, &blocking_operation);

    struct io_blocking_operation_arguments io_blocking_operation_arguments = {
        .io = io,
        .blocking_operation = &blocking_operation
    };

    return rb_ensure(function, argument, rb_thread_io_blocking_operation_ensure, (VALUE)&io_blocking_operation_arguments);
}

static bool
thread_io_mn_schedulable(rb_thread_t *th, int events, const struct timeval *timeout)
{
#if defined(USE_MN_THREADS) && USE_MN_THREADS
    return !th_has_dedicated_nt(th) && (events || timeout) && th->blocking;
#else
    return false;
#endif
}

// true if a retry is needed
static bool
thread_io_wait_events(rb_thread_t *th, int fd, int events, const struct timeval *timeout)
{
#if defined(USE_MN_THREADS) && USE_MN_THREADS
    if (thread_io_mn_schedulable(th, events, timeout)) {
        rb_hrtime_t rel, *prel;

        if (timeout) {
            rel = rb_timeval2hrtime(timeout);
            prel = &rel;
        }
        else {
            prel = NULL;
        }

        VM_ASSERT(prel || (events & (RB_WAITFD_IN | RB_WAITFD_OUT)));

        if (thread_sched_wait_events(TH_SCHED(th), th, fd, waitfd_to_waiting_flag(events), prel)) {
            // timeout
            return false;
        }
        else {
            return true;
        }
    }
#endif // defined(USE_MN_THREADS) && USE_MN_THREADS
    return false;
}

// assume read/write
static bool
blocking_call_retryable_p(int r, int eno)
{
    if (r != -1) return false;

    switch (eno) {
      case EAGAIN:
#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
      case EWOULDBLOCK:
#endif
        return true;
      default:
        return false;
    }
}
1919
1920bool
1921rb_thread_mn_schedulable(VALUE thval)
1922{
1923 rb_thread_t *th = rb_thread_ptr(thval);
1924 return th->mn_schedulable;
1925}
1926
1927VALUE
1928rb_thread_io_blocking_call(struct rb_io* io, rb_blocking_function_t *func, void *data1, int events)
1929{
1930 rb_execution_context_t * volatile ec = GET_EC();
1931 rb_thread_t * volatile th = rb_ec_thread_ptr(ec);
1932
1933 RUBY_DEBUG_LOG("th:%u fd:%d ev:%d", rb_th_serial(th), io->fd, events);
1934
1935 volatile VALUE val = Qundef; /* shouldn't be used */
1936 volatile int saved_errno = 0;
1937 enum ruby_tag_type state;
1938 volatile bool prev_mn_schedulable = th->mn_schedulable;
1939 th->mn_schedulable = thread_io_mn_schedulable(th, events, NULL);
1940
1941 int fd = io->fd;
1942
1943 // `errno` is only valid when there is an actual error - but we can't
1944 // extract that from the return value of `func` alone, so we clear any
1945 // prior `errno` value here so that we can later check if it was set by
1946 // `func` or not (as opposed to some previously set value).
1947 errno = 0;
1948
1949 struct rb_io_blocking_operation blocking_operation = {
1950 .ec = ec,
1951 };
1952 rb_io_blocking_operation_enter(io, &blocking_operation);
1953
1954 {
1955 EC_PUSH_TAG(ec);
1956 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
1957 volatile enum ruby_tag_type saved_state = state; /* for BLOCKING_REGION */
1958 retry:
1959 BLOCKING_REGION(th, {
1960 val = func(data1);
1961 saved_errno = errno;
1962 }, ubf_select, th, FALSE);
1963
1964 RUBY_ASSERT(th == rb_ec_thread_ptr(ec));
1965 if (events &&
1966 blocking_call_retryable_p((int)val, saved_errno) &&
1967 thread_io_wait_events(th, fd, events, NULL)) {
1968 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1969 goto retry;
1970 }
1971
1972 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1973
1974 state = saved_state;
1975 }
1976 EC_POP_TAG();
1977
1978 th = rb_ec_thread_ptr(ec);
1979 th->mn_schedulable = prev_mn_schedulable;
1980 }
1981
1982 rb_io_blocking_operation_exit(io, &blocking_operation);
1983
1984 if (state) {
1985 EC_JUMP_TAG(ec, state);
1986 }
1987
1988 // If the error was a timeout, we raise a specific exception for that:
1989 if (saved_errno == ETIMEDOUT) {
1990 rb_raise(rb_eIOTimeoutError, "Blocking operation timed out!");
1991 }
1992
1993 errno = saved_errno;
1994
1995 return val;
1996}
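/*
 * Illustrative usage sketch (editorial example; the helper below is
 * hypothetical and not part of this file). A retryable read through
 * rb_thread_io_blocking_call(): passing RB_WAITFD_IN as `events` lets an
 * MN-scheduled thread wait for readability and retry on EAGAIN/EWOULDBLOCK
 * instead of occupying a native thread:
 *
 *     struct example_read_arguments { int fd; void *base; size_t size; };
 *
 *     static VALUE
 *     example_read(void *ptr)
 *     {
 *         struct example_read_arguments *arguments = ptr;
 *         return (VALUE)read(arguments->fd, arguments->base, arguments->size);
 *     }
 *
 *     // ... later, with the GVL held:
 *     struct example_read_arguments arguments = {io->fd, buffer, size};
 *     ssize_t result = (ssize_t)rb_thread_io_blocking_call(io, example_read, &arguments, RB_WAITFD_IN);
 */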
1997
1998VALUE
1999rb_thread_io_blocking_region(struct rb_io *io, rb_blocking_function_t *func, void *data1)
2000{
2001 return rb_thread_io_blocking_call(io, func, data1, 0);
2002}
2003
2004/*
2005 * rb_thread_call_with_gvl - re-enter the Ruby world after GVL release.
2006 *
2007 * After releasing the GVL using
2008 * rb_thread_call_without_gvl() you cannot access Ruby values or invoke
2009 * methods. If you need to access Ruby again, you must use this function,
2010 * rb_thread_call_with_gvl().
2011 *
2012 * This function rb_thread_call_with_gvl() does:
2013 * (1) acquire GVL.
2014 * (2) call passed function `func'.
2015 * (3) release GVL.
2016 * (4) return a value which is returned at (2).
2017 *
2018 * NOTE: You should not return a Ruby object from (2) because such an
2019 * object will not be marked (and so may be garbage collected).
2020 *
2021 * NOTE: If an exception is raised in `func', this function DOES NOT
2022 * protect (catch) the exception. If you have any resources
2023 * which should be freed before the exception propagates, you need to use
2024 * rb_protect() in `func' and return a value which indicates that an
2025 * exception was raised.
2026 *
2027 * NOTE: This function should not be called by a thread which was not
2028 * created as a Ruby thread (i.e. by Thread.new or similar). In other
2029 * words, this function *DOES NOT* associate or convert a NON-Ruby
2030 * thread to a Ruby thread.
2031 */
2032void *
2033rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
2034{
2035 rb_thread_t *th = ruby_thread_from_native();
2036 struct rb_blocking_region_buffer *brb;
2037 struct rb_unblock_callback prev_unblock;
2038 void *r;
2039
2040 if (th == 0) {
2041        /* An error has occurred, but we can't use rb_bug()
2042 * because this thread is not Ruby's thread.
2043 * What should we do?
2044 */
2045 bp();
2046 fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
2047 exit(EXIT_FAILURE);
2048 }
2049
2050 brb = (struct rb_blocking_region_buffer *)th->blocking_region_buffer;
2051 prev_unblock = th->unblock;
2052
2053 if (brb == 0) {
2054 rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
2055 }
2056
2057 blocking_region_end(th, brb);
2058 /* enter to Ruby world: You can access Ruby values, methods and so on. */
2059 r = (*func)(data1);
2060 /* leave from Ruby world: You can not access Ruby values, etc. */
2061 int released = blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg, FALSE);
2062 RUBY_ASSERT_ALWAYS(released);
2063 RB_VM_SAVE_MACHINE_CONTEXT(th);
2064 thread_sched_to_waiting(TH_SCHED(th), th);
2065 return r;
2066}
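/*
 * Illustrative usage sketch (editorial example; both callbacks below are
 * hypothetical and not part of this file). Native code running without the
 * GVL re-enters the Ruby world briefly via rb_thread_call_with_gvl():
 *
 *     static void *
 *     example_log_progress(void *message)
 *     {
 *         // The GVL is held here, so Ruby APIs are safe to call.
 *         rb_funcall(rb_stderr, rb_intern("puts"), 1, rb_str_new_cstr((const char *)message));
 *         return NULL;
 *     }
 *
 *     static void *
 *     example_heavy_computation(void *data)
 *     {
 *         // ... long-running native work; Ruby APIs must not be called here ...
 *         rb_thread_call_with_gvl(example_log_progress, (void *)"halfway done");
 *         // ... more native work ...
 *         return NULL;
 *     }
 *
 *     // ... from code holding the GVL:
 *     rb_thread_call_without_gvl(example_heavy_computation, data, RUBY_UBF_IO, NULL);
 */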
2067
2068/*
2069 * ruby_thread_has_gvl_p - check if current native thread has GVL.
2070 */
2071
2072int
2073ruby_thread_has_gvl_p(void)
2074{
2075 rb_thread_t *th = ruby_thread_from_native();
2076
2077 if (th && th->blocking_region_buffer == 0) {
2078 return 1;
2079 }
2080 else {
2081 return 0;
2082 }
2083}
2084
2085/*
2086 * call-seq:
2087 * Thread.pass -> nil
2088 *
2089 * Give the thread scheduler a hint to pass execution to another thread.
2090 * Whether the running thread actually switches depends on the OS and processor.
2091 */
2092
2093static VALUE
2094thread_s_pass(VALUE klass)
2095{
2096    rb_thread_schedule();
2097    return Qnil;
2098}
2099
2100/*****************************************************/
2101
2102/*
2103 * rb_threadptr_pending_interrupt_* - manage asynchronous error queue
2104 *
2105 * Async events such as an exception thrown by Thread#raise,
2106 * Thread#kill and thread termination (after main thread termination)
2107 * will be queued to th->pending_interrupt_queue.
2108 * - clear: clear the queue.
2109 * - enque: enqueue err object into queue.
2110 * - deque: dequeue err object from queue.
2111 * - active_p: return 1 if the queue should be checked.
2112 *
2113 * All rb_threadptr_pending_interrupt_* functions are, of course, called by
2114 * a thread that holds the GVL.
2115 * Note that all "rb_" prefixed APIs require the GVL to be held when called.
2116 */
2117
2118void
2119rb_threadptr_pending_interrupt_clear(rb_thread_t *th)
2120{
2121 rb_ary_clear(th->pending_interrupt_queue);
2122}
2123
2124void
2125rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
2126{
2127 rb_ary_push(th->pending_interrupt_queue, v);
2128 th->pending_interrupt_queue_checked = 0;
2129}
2130
2131static void
2132threadptr_check_pending_interrupt_queue(rb_thread_t *th)
2133{
2134 if (!th->pending_interrupt_queue) {
2135 rb_raise(rb_eThreadError, "uninitialized thread");
2136 }
2137}
2138
2139enum handle_interrupt_timing {
2140 INTERRUPT_NONE,
2141 INTERRUPT_IMMEDIATE,
2142 INTERRUPT_ON_BLOCKING,
2143 INTERRUPT_NEVER
2144};
2145
2146static enum handle_interrupt_timing
2147rb_threadptr_pending_interrupt_from_symbol(rb_thread_t *th, VALUE sym)
2148{
2149 if (sym == sym_immediate) {
2150 return INTERRUPT_IMMEDIATE;
2151 }
2152 else if (sym == sym_on_blocking) {
2153 return INTERRUPT_ON_BLOCKING;
2154 }
2155 else if (sym == sym_never) {
2156 return INTERRUPT_NEVER;
2157 }
2158 else {
2159 rb_raise(rb_eThreadError, "unknown mask signature");
2160 }
2161}
2162
2163static enum handle_interrupt_timing
2164rb_threadptr_pending_interrupt_check_mask(rb_thread_t *th, VALUE err)
2165{
2166 VALUE mask;
2167 long mask_stack_len = RARRAY_LEN(th->pending_interrupt_mask_stack);
2168 const VALUE *mask_stack = RARRAY_CONST_PTR(th->pending_interrupt_mask_stack);
2169 VALUE mod;
2170 long i;
2171
2172 for (i=0; i<mask_stack_len; i++) {
2173 mask = mask_stack[mask_stack_len-(i+1)];
2174
2175 if (SYMBOL_P(mask)) {
2176 /* do not match RUBY_FATAL_THREAD_KILLED etc */
2177 if (err != rb_cInteger) {
2178 return rb_threadptr_pending_interrupt_from_symbol(th, mask);
2179 }
2180 else {
2181 continue;
2182 }
2183 }
2184
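        /* Walk err's ancestor chain (handling iclasses for included modules,
         * and skipping origin classes) and check whether this mask registers
         * a timing symbol for any ancestor class/module. */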
2185 for (mod = err; mod; mod = RCLASS_SUPER(mod)) {
2186 VALUE klass = mod;
2187 VALUE sym;
2188
2189 if (BUILTIN_TYPE(mod) == T_ICLASS) {
2190 klass = RBASIC(mod)->klass;
2191 }
2192 else if (mod != RCLASS_ORIGIN(mod)) {
2193 continue;
2194 }
2195
2196 if ((sym = rb_hash_aref(mask, klass)) != Qnil) {
2197 return rb_threadptr_pending_interrupt_from_symbol(th, sym);
2198 }
2199 }
2200        /* try the next mask */
2201 }
2202 return INTERRUPT_NONE;
2203}
2204
2205static int
2206rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th)
2207{
2208 return RARRAY_LEN(th->pending_interrupt_queue) == 0;
2209}
2210
2211static int
2212rb_threadptr_pending_interrupt_include_p(rb_thread_t *th, VALUE err)
2213{
2214 int i;
2215 for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
2216 VALUE e = RARRAY_AREF(th->pending_interrupt_queue, i);
2217 if (rb_obj_is_kind_of(e, err)) {
2218 return TRUE;
2219 }
2220 }
2221 return FALSE;
2222}
2223
2224static VALUE
2225rb_threadptr_pending_interrupt_deque(rb_thread_t *th, enum handle_interrupt_timing timing)
2226{
2227#if 1 /* 1 to enable Thread#handle_interrupt, 0 to ignore it */
2228 int i;
2229
2230 for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
2231 VALUE err = RARRAY_AREF(th->pending_interrupt_queue, i);
2232
2233 enum handle_interrupt_timing mask_timing = rb_threadptr_pending_interrupt_check_mask(th, CLASS_OF(err));
2234
2235 switch (mask_timing) {
2236 case INTERRUPT_ON_BLOCKING:
2237 if (timing != INTERRUPT_ON_BLOCKING) {
2238 break;
2239 }
2240 /* fall through */
2241 case INTERRUPT_NONE: /* default: IMMEDIATE */
2242 case INTERRUPT_IMMEDIATE:
2243 rb_ary_delete_at(th->pending_interrupt_queue, i);
2244 return err;
2245 case INTERRUPT_NEVER:
2246 break;
2247 }
2248 }
2249
2250 th->pending_interrupt_queue_checked = 1;
2251 return Qundef;
2252#else
2253 VALUE err = rb_ary_shift(th->pending_interrupt_queue);
2254 if (rb_threadptr_pending_interrupt_empty_p(th)) {
2255 th->pending_interrupt_queue_checked = 1;
2256 }
2257 return err;
2258#endif
2259}
2260
2261static int
2262threadptr_pending_interrupt_active_p(rb_thread_t *th)
2263{
2264 /*
2265 * For optimization, we don't check async errinfo queue
2266 * if the queue and the thread interrupt mask were not changed
2267     * since the last check.
2268 */
2269 if (th->pending_interrupt_queue_checked) {
2270 return 0;
2271 }
2272
2273 if (rb_threadptr_pending_interrupt_empty_p(th)) {
2274 return 0;
2275 }
2276
2277 return 1;
2278}
2279
2280static int
2281handle_interrupt_arg_check_i(VALUE key, VALUE val, VALUE args)
2282{
2283 VALUE *maskp = (VALUE *)args;
2284
2285 if (val != sym_immediate && val != sym_on_blocking && val != sym_never) {
2286 rb_raise(rb_eArgError, "unknown mask signature");
2287 }
2288
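    /* Special case: a mask consisting solely of { Exception => timing } is
     * stored as the bare timing symbol instead of a hash. */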
2289 if (key == rb_eException && (UNDEF_P(*maskp) || NIL_P(*maskp))) {
2290 *maskp = val;
2291 return ST_CONTINUE;
2292 }
2293
2294 if (RTEST(*maskp)) {
2295 if (!RB_TYPE_P(*maskp, T_HASH)) {
2296 VALUE prev = *maskp;
2297 *maskp = rb_ident_hash_new();
2298 if (SYMBOL_P(prev)) {
2299 rb_hash_aset(*maskp, rb_eException, prev);
2300 }
2301 }
2302 rb_hash_aset(*maskp, key, val);
2303 }
2304 else {
2305 *maskp = Qfalse;
2306 }
2307
2308 return ST_CONTINUE;
2309}
2310
2311/*
2312 * call-seq:
2313 * Thread.handle_interrupt(hash) { ... } -> result of the block
2314 *
2315 * Changes asynchronous interrupt timing.
2316 *
2317 * _interrupt_ means an asynchronous event and its corresponding procedure,
2318 * triggered by Thread#raise, Thread#kill, signal trap (not supported yet)
2319 * and main thread termination (if the main thread terminates, then all
2320 * other threads will be killed).
2321 *
2322 * The given +hash+ has pairs like <code>ExceptionClass =>
2323 * :TimingSymbol</code>, where ExceptionClass is the interrupt handled by
2324 * the given block, and TimingSymbol can be one of the following symbols:
2325 *
2326 * [+:immediate+] Invoke interrupts immediately.
2327 * [+:on_blocking+] Invoke interrupts while _BlockingOperation_.
2328 * [+:never+] Never invoke interrupts.
2329 *
2330 * _BlockingOperation_ means that the operation will block the calling thread,
2331 * such as read and write. On the CRuby implementation, _BlockingOperation_ is any
2332 * operation executed without the GVL.
2333 *
2334 * Masked asynchronous interrupts are delayed until they are enabled.
2335 * This method is similar to sigprocmask(3).
2336 *
2337 * === NOTE
2338 *
2339 * Asynchronous interrupts are difficult to use.
2340 *
2341 * If you need to communicate between threads, please consider using another mechanism such as Queue.
2342 *
2343 * Or use asynchronous interrupts only with a deep understanding of this method.
2344 *
2345 * === Usage
2346 *
2347 * In this example, we can guard from Thread#raise exceptions.
2348 *
2349 * Using the +:never+ TimingSymbol the RuntimeError exception will always be
2350 * ignored in the first block of the main thread. In the second
2351 * ::handle_interrupt block we can purposefully handle RuntimeError exceptions.
2352 *
2353 * th = Thread.new do
2354 * Thread.handle_interrupt(RuntimeError => :never) {
2355 * begin
2356 * # You can write resource allocation code safely.
2357 * Thread.handle_interrupt(RuntimeError => :immediate) {
2358 * # ...
2359 * }
2360 * ensure
2361 * # You can write resource deallocation code safely.
2362 * end
2363 * }
2364 * end
2365 * Thread.pass
2366 * # ...
2367 * th.raise "stop"
2368 *
2369 * While we are ignoring the RuntimeError exception, it's safe to write our
2370 * resource allocation code. Then, the ensure block is where we can safely
2371 * deallocate our resources.
2372 *
2373 * ==== Stack control settings
2374 *
2375 * It's possible to stack multiple levels of ::handle_interrupt blocks in order
2376 * to control more than one ExceptionClass and TimingSymbol at a time.
2377 *
2378 * Thread.handle_interrupt(FooError => :never) {
2379 * Thread.handle_interrupt(BarError => :never) {
2380 * # FooError and BarError are prohibited.
2381 * }
2382 * }
2383 *
2384 * ==== Inheritance with ExceptionClass
2385 *
2386 * All exceptions inherited from the ExceptionClass parameter will be considered.
2387 *
2388 * Thread.handle_interrupt(Exception => :never) {
2389 * # all exceptions inherited from Exception are prohibited.
2390 * }
2391 *
2392 * For handling all interrupts, use +Object+ and not +Exception+
2393 * as the ExceptionClass, as kill/terminate interrupts are not handled by +Exception+.
2394 */
2395static VALUE
2396rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg)
2397{
2398 VALUE mask = Qundef;
2399 rb_execution_context_t * volatile ec = GET_EC();
2400 rb_thread_t * volatile th = rb_ec_thread_ptr(ec);
2401 volatile VALUE r = Qnil;
2402 enum ruby_tag_type state;
2403
2404 if (!rb_block_given_p()) {
2405 rb_raise(rb_eArgError, "block is needed.");
2406 }
2407
2408 mask_arg = rb_to_hash_type(mask_arg);
2409
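    /* A frozen identity hash can be reused as the mask directly: leaving
     * mask as Qnil (rather than Qundef) lets the validation pass below fall
     * through to pushing mask_arg itself instead of building a new hash. */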
2410 if (OBJ_FROZEN(mask_arg) && rb_hash_compare_by_id_p(mask_arg)) {
2411 mask = Qnil;
2412 }
2413
2414 rb_hash_foreach(mask_arg, handle_interrupt_arg_check_i, (VALUE)&mask);
2415
2416 if (UNDEF_P(mask)) {
2417 return rb_yield(Qnil);
2418 }
2419
2420 if (!RTEST(mask)) {
2421 mask = mask_arg;
2422 }
2423 else if (RB_TYPE_P(mask, T_HASH)) {
2424 OBJ_FREEZE(mask);
2425 }
2426
2427 rb_ary_push(th->pending_interrupt_mask_stack, mask);
2428 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2429 th->pending_interrupt_queue_checked = 0;
2430 RUBY_VM_SET_INTERRUPT(th->ec);
2431 }
2432
2433 EC_PUSH_TAG(th->ec);
2434 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
2435 r = rb_yield(Qnil);
2436 }
2437 EC_POP_TAG();
2438
2439 rb_ary_pop(th->pending_interrupt_mask_stack);
2440 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2441 th->pending_interrupt_queue_checked = 0;
2442 RUBY_VM_SET_INTERRUPT(th->ec);
2443 }
2444
2445 RUBY_VM_CHECK_INTS(th->ec);
2446
2447 if (state) {
2448 EC_JUMP_TAG(th->ec, state);
2449 }
2450
2451 return r;
2452}
2453
2454/*
2455 * call-seq:
2456 * target_thread.pending_interrupt?(error = nil) -> true/false
2457 *
2458 * Returns whether the target thread's asynchronous event queue is non-empty, that is, whether it has any pending interrupts.
2459 *
2460 * If +error+ is given, then check only for +error+ type deferred events.
2461 *
2462 * See ::pending_interrupt? for more information.
2463 */
2464static VALUE
2465rb_thread_pending_interrupt_p(int argc, VALUE *argv, VALUE target_thread)
2466{
2467 rb_thread_t *target_th = rb_thread_ptr(target_thread);
2468
2469 if (!target_th->pending_interrupt_queue) {
2470 return Qfalse;
2471 }
2472 if (rb_threadptr_pending_interrupt_empty_p(target_th)) {
2473 return Qfalse;
2474 }
2475 if (rb_check_arity(argc, 0, 1)) {
2476 VALUE err = argv[0];
2477 if (!rb_obj_is_kind_of(err, rb_cModule)) {
2478 rb_raise(rb_eTypeError, "class or module required for rescue clause");
2479 }
2480 return RBOOL(rb_threadptr_pending_interrupt_include_p(target_th, err));
2481 }
2482 else {
2483 return Qtrue;
2484 }
2485}
2486
2487/*
2488 * call-seq:
2489 * Thread.pending_interrupt?(error = nil) -> true/false
2490 *
2491 * Returns whether the asynchronous event queue of the current thread is non-empty.
2492 *
2493 * Since Thread::handle_interrupt can be used to defer asynchronous events,
2494 * this method can be used to determine if there are any deferred events.
2495 *
2496 * If this method returns true, you may want to finish the +:never+ block and process the deferred events.
2497 *
2498 * For example, the following method processes deferred asynchronous events
2499 * immediately.
2500 *
2501 * def Thread.kick_interrupt_immediately
2502 * Thread.handle_interrupt(Object => :immediate) {
2503 * Thread.pass
2504 * }
2505 * end
2506 *
2507 * If +error+ is given, then check only for +error+ type deferred events.
2508 *
2509 * === Usage
2510 *
2511 * th = Thread.new{
2512 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2513 * while true
2514 * ...
2515 * # reach safe point to invoke interrupt
2516 * if Thread.pending_interrupt?
2517 * Thread.handle_interrupt(Object => :immediate){}
2518 * end
2519 * ...
2520 * end
2521 * }
2522 * }
2523 * ...
2524 * th.raise # stop thread
2525 *
2526 * This example can also be written as follows, which you should prefer
2527 * in order to avoid asynchronous interrupts entirely.
2528 *
2529 * flag = true
2530 * th = Thread.new{
2531 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2532 * while true
2533 * ...
2534 * # reach safe point to invoke interrupt
2535 * break if flag == false
2536 * ...
2537 * end
2538 * }
2539 * }
2540 * ...
2541 * flag = false # stop thread
2542 */
2543
2544static VALUE
2545rb_thread_s_pending_interrupt_p(int argc, VALUE *argv, VALUE self)
2546{
2547 return rb_thread_pending_interrupt_p(argc, argv, GET_THREAD()->self);
2548}
2549
2550NORETURN(static void rb_threadptr_to_kill(rb_thread_t *th));
2551
2552static void
2553rb_threadptr_to_kill(rb_thread_t *th)
2554{
2555 VM_ASSERT(GET_THREAD() == th);
2556 rb_threadptr_pending_interrupt_clear(th);
2557 th->status = THREAD_RUNNABLE;
2558 th->to_kill = 1;
2559 th->ec->errinfo = INT2FIX(TAG_FATAL);
2560 EC_JUMP_TAG(th->ec, TAG_FATAL);
2561}
2562
2563static inline rb_atomic_t
2564threadptr_get_interrupts(rb_thread_t *th)
2565{
2566 rb_execution_context_t *ec = th->ec;
2567 rb_atomic_t interrupt;
2568 rb_atomic_t old;
2569
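    /* Atomically consume the deliverable interrupt bits: CAS the flag down to
     * just the masked-out bits (which stay pending), retrying if another
     * thread races with us, then return the bits that were cleared. */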
2570 old = ATOMIC_LOAD_RELAXED(ec->interrupt_flag);
2571 do {
2572 interrupt = old;
2573 old = ATOMIC_CAS(ec->interrupt_flag, interrupt, interrupt & ec->interrupt_mask);
2574 } while (old != interrupt);
2575 return interrupt & (rb_atomic_t)~ec->interrupt_mask;
2576}
2577
2578static void threadptr_interrupt_exec_exec(rb_thread_t *th);
2579
2580// Execute interrupts on currently running thread
2581// In certain situations, calling this function will raise an exception. Some examples are:
2582// * during VM shutdown (`rb_ractor_terminate_all`)
2583// * Call to Thread#exit for current thread (`rb_thread_kill`)
2584// * Call to Thread#raise for current thread
2585int
2586rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
2587{
2588 rb_atomic_t interrupt;
2589 int postponed_job_interrupt = 0;
2590 int ret = FALSE;
2591
2592 VM_ASSERT(GET_THREAD() == th);
2593
2594 if (th->ec->raised_flag) return ret;
2595
2596 while ((interrupt = threadptr_get_interrupts(th)) != 0) {
2597 int sig;
2598 int timer_interrupt;
2599 int pending_interrupt;
2600 int trap_interrupt;
2601 int terminate_interrupt;
2602
2603 timer_interrupt = interrupt & TIMER_INTERRUPT_MASK;
2604 pending_interrupt = interrupt & PENDING_INTERRUPT_MASK;
2605 postponed_job_interrupt = interrupt & POSTPONED_JOB_INTERRUPT_MASK;
2606 trap_interrupt = interrupt & TRAP_INTERRUPT_MASK;
2607 terminate_interrupt = interrupt & TERMINATE_INTERRUPT_MASK; // request from other ractors
2608
2609 if (interrupt & VM_BARRIER_INTERRUPT_MASK) {
2610 RB_VM_LOCKING();
2611 }
2612
2613 if (postponed_job_interrupt) {
2614 rb_postponed_job_flush(th->vm);
2615 }
2616
2617 if (trap_interrupt) {
2618 /* signal handling */
2619 if (th == th->vm->ractor.main_thread) {
2620 enum rb_thread_status prev_status = th->status;
2621
2622 th->status = THREAD_RUNNABLE;
2623 {
2624 while ((sig = rb_get_next_signal()) != 0) {
2625 ret |= rb_signal_exec(th, sig);
2626 }
2627 }
2628 th->status = prev_status;
2629 }
2630
2631 if (!ccan_list_empty(&th->interrupt_exec_tasks)) {
2632 enum rb_thread_status prev_status = th->status;
2633
2634 th->status = THREAD_RUNNABLE;
2635 {
2636 threadptr_interrupt_exec_exec(th);
2637 }
2638 th->status = prev_status;
2639 }
2640 }
2641
2642 /* exception from another thread */
2643 if (pending_interrupt && threadptr_pending_interrupt_active_p(th)) {
2644 VALUE err = rb_threadptr_pending_interrupt_deque(th, blocking_timing ? INTERRUPT_ON_BLOCKING : INTERRUPT_NONE);
2645 RUBY_DEBUG_LOG("err:%"PRIdVALUE, err);
2646 ret = TRUE;
2647
2648 if (UNDEF_P(err)) {
2649 /* no error */
2650 }
2651 else if (err == RUBY_FATAL_THREAD_KILLED /* Thread#kill received */ ||
2652 err == RUBY_FATAL_THREAD_TERMINATED /* Terminate thread */ ||
2653 err == INT2FIX(TAG_FATAL) /* Thread.exit etc. */ ) {
2654 terminate_interrupt = 1;
2655 }
2656 else {
2657 if (err == th->vm->special_exceptions[ruby_error_stream_closed]) {
2658 /* the only special exception to be queued across thread */
2659 err = ruby_vm_special_exception_copy(err);
2660 }
2661            /* set runnable if th was sleeping. */
2662 if (th->status == THREAD_STOPPED ||
2663 th->status == THREAD_STOPPED_FOREVER)
2664 th->status = THREAD_RUNNABLE;
2665 rb_exc_raise(err);
2666 }
2667 }
2668
2669 if (terminate_interrupt) {
2670 rb_threadptr_to_kill(th);
2671 }
2672
2673 if (timer_interrupt) {
2674 uint32_t limits_us = thread_default_quantum_ms * 1000;
2675
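            // Each positive priority level doubles the quantum; each
            // negative level halves it.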
2676 if (th->priority > 0)
2677 limits_us <<= th->priority;
2678 else
2679 limits_us >>= -th->priority;
2680
2681 if (th->status == THREAD_RUNNABLE)
2682 th->running_time_us += 10 * 1000; // 10ms = 10_000us // TODO: use macro
2683
2684 VM_ASSERT(th->ec->cfp);
2685 EXEC_EVENT_HOOK(th->ec, RUBY_INTERNAL_EVENT_SWITCH, th->ec->cfp->self,
2686 0, 0, 0, Qundef);
2687
2688 rb_thread_schedule_limits(limits_us);
2689 }
2690 }
2691 return ret;
2692}
2693
2694void
2695rb_thread_execute_interrupts(VALUE thval)
2696{
2697 rb_threadptr_execute_interrupts(rb_thread_ptr(thval), 1);
2698}
2699
2700static void
2701rb_threadptr_ready(rb_thread_t *th)
2702{
2703 rb_threadptr_interrupt(th);
2704}
2705
2706static VALUE
2707rb_threadptr_raise(rb_thread_t *target_th, int argc, VALUE *argv)
2708{
2709 VALUE exc;
2710
2711 if (rb_threadptr_dead(target_th)) {
2712 return Qnil;
2713 }
2714
2715 if (argc == 0) {
2716 exc = rb_exc_new(rb_eRuntimeError, 0, 0);
2717 }
2718 else {
2719 exc = rb_make_exception(argc, argv);
2720 }
2721
2722 /* making an exception object can switch thread,
2723 so we need to check thread deadness again */
2724 if (rb_threadptr_dead(target_th)) {
2725 return Qnil;
2726 }
2727
2728 rb_ec_setup_exception(GET_EC(), exc, Qundef);
2729 rb_threadptr_pending_interrupt_enque(target_th, exc);
2730 rb_threadptr_interrupt(target_th);
2731 return Qnil;
2732}
2733
2734void
2735rb_threadptr_signal_raise(rb_thread_t *th, int sig)
2736{
2737 VALUE argv[2];
2738
2739 argv[0] = rb_eSignal;
2740 argv[1] = INT2FIX(sig);
2741 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2742}
2743
2744void
2745rb_threadptr_signal_exit(rb_thread_t *th)
2746{
2747 VALUE argv[2];
2748
2749 argv[0] = rb_eSystemExit;
2750 argv[1] = rb_str_new2("exit");
2751
2752    // TODO: check signal raise delivery
2753 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2754}
2755
2756int
2757rb_ec_set_raised(rb_execution_context_t *ec)
2758{
2759 if (ec->raised_flag & RAISED_EXCEPTION) {
2760 return 1;
2761 }
2762 ec->raised_flag |= RAISED_EXCEPTION;
2763 return 0;
2764}
2765
2766int
2767rb_ec_reset_raised(rb_execution_context_t *ec)
2768{
2769 if (!(ec->raised_flag & RAISED_EXCEPTION)) {
2770 return 0;
2771 }
2772 ec->raised_flag &= ~RAISED_EXCEPTION;
2773 return 1;
2774}
2775
2776/*
2777 * Thread-safe IO closing mechanism.
2778 *
2779 * When an IO is closed while other threads or fibers are blocked on it, we need to:
2780 * 1. Track and notify all blocking operations through io->blocking_operations
2781 * 2. Ensure only one thread can close at a time using io->closing_ec
2782 * 3. Synchronize cleanup using wakeup_mutex
2783 *
2784 * The close process works as follows:
2785 * - First check if any thread is already closing (io->closing_ec)
2786 * - Set up wakeup_mutex for synchronization
2787 * - Iterate through all blocking operations in io->blocking_operations
2788 * - For each blocked fiber with a scheduler:
2789 * - Notify via rb_fiber_scheduler_fiber_interrupt
2790 * - For each blocked thread without a scheduler:
2791 * - Enqueue IOError via rb_threadptr_pending_interrupt_enque
2792 * - Wake via rb_threadptr_interrupt
2793 * - Wait on wakeup_mutex until all operations are cleaned up
2794 * - Only then clear closing state and allow actual close to proceed
2795 */
2796static VALUE
2797thread_io_close_notify_all(VALUE _io)
2798{
2799 struct rb_io *io = (struct rb_io *)_io;
2800
2801 size_t count = 0;
2802 rb_vm_t *vm = io->closing_ec->thread_ptr->vm;
2803 VALUE error = vm->special_exceptions[ruby_error_stream_closed];
2804
2805 struct rb_io_blocking_operation *blocking_operation;
2806 ccan_list_for_each(rb_io_blocking_operations(io), blocking_operation, list) {
2807 rb_execution_context_t *ec = blocking_operation->ec;
2808
2809 // If the operation is in progress, we need to interrupt it:
2810 if (ec) {
2811 rb_thread_t *thread = ec->thread_ptr;
2812
2813 VALUE result = RUBY_Qundef;
2814 if (thread->scheduler != Qnil) {
2815 result = rb_fiber_scheduler_fiber_interrupt(thread->scheduler, rb_fiberptr_self(ec->fiber_ptr), error);
2816 }
2817
2818 if (result == RUBY_Qundef) {
2819 // If the thread is not the current thread, we need to enqueue an error:
2820 rb_threadptr_pending_interrupt_enque(thread, error);
2821 rb_threadptr_interrupt(thread);
2822 }
2823 }
2824
2825 count += 1;
2826 }
2827
2828 return (VALUE)count;
2829}
2830
2831size_t
2832rb_thread_io_close_interrupt(struct rb_io *io)
2833{
2834    // We guard this operation with `io->closing_ec`: only one thread will ever enter this function.
2835 if (io->closing_ec) {
2836 return 0;
2837 }
2838
2839 // If there are no blocking operations, we are done:
2840 if (ccan_list_empty(rb_io_blocking_operations(io))) {
2841 return 0;
2842 }
2843
2844 // Otherwise, we are now closing the IO:
2845 rb_execution_context_t *ec = GET_EC();
2846 io->closing_ec = ec;
2847
2848 // This is used to ensure the correct execution context is woken up after the blocking operation is interrupted:
2849 io->wakeup_mutex = rb_mutex_new();
2850 rb_mutex_allow_trap(io->wakeup_mutex, 1);
2851
2852 // We need to use a mutex here as entering the fiber scheduler may cause a context switch:
2853 VALUE result = rb_mutex_synchronize(io->wakeup_mutex, thread_io_close_notify_all, (VALUE)io);
2854
2855 return (size_t)result;
2856}
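// Callers are expected to follow up with rb_thread_io_close_wait() below,
// which sleeps on wakeup_mutex until every notified blocking operation has
// been removed, then clears the closing state.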
2857
2858void
2859rb_thread_io_close_wait(struct rb_io* io)
2860{
2861 VALUE wakeup_mutex = io->wakeup_mutex;
2862
2863 if (!RB_TEST(wakeup_mutex)) {
2864 // There was nobody else using this file when we closed it, so we never bothered to allocate a mutex:
2865 return;
2866 }
2867
2868 rb_mutex_lock(wakeup_mutex);
2869 while (!ccan_list_empty(rb_io_blocking_operations(io))) {
2870 rb_mutex_sleep(wakeup_mutex, Qnil);
2871 }
2872 rb_mutex_unlock(wakeup_mutex);
2873
2874 // We are done closing:
2875 io->wakeup_mutex = Qnil;
2876 io->closing_ec = NULL;
2877}
2878
2879void
2880rb_thread_fd_close(int fd)
2881{
2882 rb_warn("rb_thread_fd_close is deprecated (and is now a no-op).");
2883}
2884
2885/*
2886 * call-seq:
2887 * thr.raise
2888 * thr.raise(string)
2889 * thr.raise(exception [, string [, array]])
2890 *
2891 * Raises an exception from the given thread. The caller does not have to be
2892 * +thr+. See Kernel#raise for more information.
2893 *
2894 * Thread.abort_on_exception = true
2895 * a = Thread.new { sleep(200) }
2896 * a.raise("Gotcha")
2897 *
2898 * This will produce:
2899 *
2900 * prog.rb:3: Gotcha (RuntimeError)
2901 * from prog.rb:2:in `initialize'
2902 * from prog.rb:2:in `new'
2903 * from prog.rb:2
2904 */
2905
2906static VALUE
2907thread_raise_m(int argc, VALUE *argv, VALUE self)
2908{
2909 rb_thread_t *target_th = rb_thread_ptr(self);
2910 const rb_thread_t *current_th = GET_THREAD();
2911
2912 threadptr_check_pending_interrupt_queue(target_th);
2913 rb_threadptr_raise(target_th, argc, argv);
2914
2915 /* To perform Thread.current.raise as Kernel.raise */
2916 if (current_th == target_th) {
2917 RUBY_VM_CHECK_INTS(target_th->ec);
2918 }
2919 return Qnil;
2920}
2921
2922
2923/*
2924 * call-seq:
2925 * thr.exit -> thr
2926 * thr.kill -> thr
2927 * thr.terminate -> thr
2928 *
2929 * Terminates +thr+ and schedules another thread to be run, returning
2930 * the terminated Thread. If this is the main thread, or the last
2931 * thread, exits the process.
2932 */
2933
2934VALUE
2935rb_thread_kill(VALUE thread)
2936{
2937 rb_thread_t *target_th = rb_thread_ptr(thread);
2938
2939 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2940 return thread;
2941 }
2942 if (target_th == target_th->vm->ractor.main_thread) {
2943 rb_exit(EXIT_SUCCESS);
2944 }
2945
2946 RUBY_DEBUG_LOG("target_th:%u", rb_th_serial(target_th));
2947
2948 if (target_th == GET_THREAD()) {
2949 /* kill myself immediately */
2950 rb_threadptr_to_kill(target_th);
2951 }
2952 else {
2953 threadptr_check_pending_interrupt_queue(target_th);
2954 rb_threadptr_pending_interrupt_enque(target_th, RUBY_FATAL_THREAD_KILLED);
2955 rb_threadptr_interrupt(target_th);
2956 }
2957
2958 return thread;
2959}
2960
2961int
2962rb_thread_to_be_killed(VALUE thread)
2963{
2964 rb_thread_t *target_th = rb_thread_ptr(thread);
2965
2966 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2967 return TRUE;
2968 }
2969 return FALSE;
2970}
2971
2972/*
2973 * call-seq:
2974 * Thread.kill(thread) -> thread
2975 *
2976 * Causes the given +thread+ to exit, see also Thread::exit.
2977 *
2978 * count = 0
2979 * a = Thread.new { loop { count += 1 } }
2980 * sleep(0.1) #=> 0
2981 * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
2982 * count #=> 93947
2983 * a.alive? #=> false
2984 */
2985
2986static VALUE
2987rb_thread_s_kill(VALUE obj, VALUE th)
2988{
2989 return rb_thread_kill(th);
2990}
2991
2992
2993/*
2994 * call-seq:
2995 * Thread.exit -> thread
2996 *
2997 * Terminates the currently running thread and schedules another thread to be
2998 * run.
2999 *
3000 * If this thread is already marked to be killed, ::exit returns the Thread.
3001 *
3002 * If this is the main thread, or the last thread, exit the process.
3003 */
3004
3005static VALUE
3006rb_thread_exit(VALUE _)
3007{
3008 rb_thread_t *th = GET_THREAD();
3009 return rb_thread_kill(th->self);
3010}
3011
3012
3013/*
3014 * call-seq:
3015 * thr.wakeup -> thr
3016 *
3017 * Marks a given thread as eligible for scheduling; it may still
3018 * remain blocked on I/O, however.
3019 *
3020 * *Note:* This does not invoke the scheduler, see #run for more information.
3021 *
3022 * c = Thread.new { Thread.stop; puts "hey!" }
3023 * sleep 0.1 while c.status!='sleep'
3024 * c.wakeup
3025 * c.join
3026 * #=> "hey!"
3027 */
3028
3029VALUE
3030rb_thread_wakeup(VALUE thread)
3031{
3032 if (!RTEST(rb_thread_wakeup_alive(thread))) {
3033 rb_raise(rb_eThreadError, "killed thread");
3034 }
3035 return thread;
3036}
3037
3038VALUE
3039rb_thread_wakeup_alive(VALUE thread)
3040{
3041 rb_thread_t *target_th = rb_thread_ptr(thread);
3042 if (target_th->status == THREAD_KILLED) return Qnil;
3043
3044 rb_threadptr_ready(target_th);
3045
3046 if (target_th->status == THREAD_STOPPED ||
3047 target_th->status == THREAD_STOPPED_FOREVER) {
3048 target_th->status = THREAD_RUNNABLE;
3049 }
3050
3051 return thread;
3052}
3053
3054
3055/*
3056 * call-seq:
3057 * thr.run -> thr
3058 *
3059 * Wakes up +thr+, making it eligible for scheduling.
3060 *
3061 * a = Thread.new { puts "a"; Thread.stop; puts "c" }
3062 * sleep 0.1 while a.status!='sleep'
3063 * puts "Got here"
3064 * a.run
3065 * a.join
3066 *
3067 * This will produce:
3068 *
3069 * a
3070 * Got here
3071 * c
3072 *
3073 * See also the instance method #wakeup.
3074 */
3075
3076VALUE
3077rb_thread_run(VALUE thread)
3078{
3079 rb_thread_wakeup(thread);
3080    rb_thread_schedule();
3081    return thread;
3082}
3083
3084
3085VALUE
3086rb_thread_stop(void)
3087{
3088 if (rb_thread_alone()) {
3089 rb_raise(rb_eThreadError,
3090 "stopping only thread\n\tnote: use sleep to stop forever");
3091 }
3092    rb_thread_sleep_deadly();
3093    return Qnil;
3094}
3095
3096/*
3097 * call-seq:
3098 * Thread.stop -> nil
3099 *
3100 * Stops execution of the current thread, putting it into a ``sleep'' state,
3101 * and schedules execution of another thread.
3102 *
3103 * a = Thread.new { print "a"; Thread.stop; print "c" }
3104 * sleep 0.1 while a.status!='sleep'
3105 * print "b"
3106 * a.run
3107 * a.join
3108 * #=> "abc"
3109 */
3110
3111static VALUE
3112thread_stop(VALUE _)
3113{
3114 return rb_thread_stop();
3115}
3116
3117/********************************************************************/
3118
3119VALUE
3120rb_thread_list(void)
3121{
3122 // TODO
3123 return rb_ractor_thread_list();
3124}
3125
3126/*
3127 * call-seq:
3128 * Thread.list -> array
3129 *
3130 * Returns an array of Thread objects for all threads that are either runnable
3131 * or stopped.
3132 *
3133 * Thread.new { sleep(200) }
3134 * Thread.new { 1000000.times {|i| i*i } }
3135 * Thread.new { Thread.stop }
3136 * Thread.list.each {|t| p t}
3137 *
3138 * This will produce:
3139 *
3140 * #<Thread:0x401b3e84 sleep>
3141 * #<Thread:0x401b3f38 run>
3142 * #<Thread:0x401b3fb0 sleep>
3143 * #<Thread:0x401bdf4c run>
3144 */
3145
3146static VALUE
3147thread_list(VALUE _)
3148{
3149 return rb_thread_list();
3150}
3151
3152VALUE
3153rb_thread_current(void)
3154{
3155 return GET_THREAD()->self;
3156}
3157
3158/*
3159 * call-seq:
3160 * Thread.current -> thread
3161 *
3162 * Returns the currently executing thread.
3163 *
3164 * Thread.current #=> #<Thread:0x401bdf4c run>
3165 */
3166
3167static VALUE
3168thread_s_current(VALUE klass)
3169{
3170 return rb_thread_current();
3171}
3172
3173VALUE
3174rb_thread_main(void)
3175{
3176 return GET_RACTOR()->threads.main->self;
3177}
3178
3179/*
3180 * call-seq:
3181 * Thread.main -> thread
3182 *
3183 * Returns the main thread.
3184 */
3185
3186static VALUE
3187rb_thread_s_main(VALUE klass)
3188{
3189 return rb_thread_main();
3190}
3191
3192
3193/*
3194 * call-seq:
3195 * Thread.abort_on_exception -> true or false
3196 *
3197 * Returns the status of the global ``abort on exception'' condition.
3198 *
3199 * The default is +false+.
3200 *
3201 * When set to +true+, if any thread is aborted by an exception, the
3202 * raised exception will be re-raised in the main thread.
3203 *
3204 * Can also be specified by the global $DEBUG flag or command line option
3205 * +-d+.
3206 *
3207 * See also ::abort_on_exception=.
3208 *
3209 * There is also an instance level method to set this for a specific thread,
3210 * see #abort_on_exception.
3211 */
3212
3213static VALUE
3214rb_thread_s_abort_exc(VALUE _)
3215{
3216 return RBOOL(GET_THREAD()->vm->thread_abort_on_exception);
3217}
3218
3219
3220/*
3221 * call-seq:
3222 * Thread.abort_on_exception= boolean -> true or false
3223 *
3224 * When set to +true+, if any thread is aborted by an exception, the
3225 * raised exception will be re-raised in the main thread.
3226 * Returns the new state.
3227 *
3228 * Thread.abort_on_exception = true
3229 * t1 = Thread.new do
3230 * puts "In new thread"
3231 * raise "Exception from thread"
3232 * end
3233 * sleep(1)
3234 * puts "not reached"
3235 *
3236 * This will produce:
3237 *
3238 * In new thread
3239 * prog.rb:4: Exception from thread (RuntimeError)
3240 * from prog.rb:2:in `initialize'
3241 * from prog.rb:2:in `new'
3242 * from prog.rb:2
3243 *
3244 * See also ::abort_on_exception.
3245 *
3246 * There is also an instance level method to set this for a specific thread,
3247 * see #abort_on_exception=.
3248 */
3249
3250static VALUE
3251rb_thread_s_abort_exc_set(VALUE self, VALUE val)
3252{
3253 GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
3254 return val;
3255}
3256
3257
3258/*
3259 * call-seq:
3260 * thr.abort_on_exception -> true or false
3261 *
3262 * Returns the status of the thread-local ``abort on exception'' condition for
3263 * this +thr+.
3264 *
3265 * The default is +false+.
3266 *
3267 * See also #abort_on_exception=.
3268 *
3269 * There is also a class level method to set this for all threads, see
3270 * ::abort_on_exception.
3271 */
3272
3273static VALUE
3274rb_thread_abort_exc(VALUE thread)
3275{
3276 return RBOOL(rb_thread_ptr(thread)->abort_on_exception);
3277}
3278
3279
3280/*
3281 * call-seq:
3282 * thr.abort_on_exception= boolean -> true or false
3283 *
3284 * When set to +true+, if this +thr+ is aborted by an exception, the
3285 * raised exception will be re-raised in the main thread.
3286 *
3287 * See also #abort_on_exception.
3288 *
3289 * There is also a class level method to set this for all threads, see
3290 * ::abort_on_exception=.
3291 */
3292
3293static VALUE
3294rb_thread_abort_exc_set(VALUE thread, VALUE val)
3295{
3296 rb_thread_ptr(thread)->abort_on_exception = RTEST(val);
3297 return val;
3298}
3299
3300
3301/*
3302 * call-seq:
3303 * Thread.report_on_exception -> true or false
3304 *
3305 * Returns the status of the global ``report on exception'' condition.
3306 *
3307 * The default is +true+ since Ruby 2.5.
3308 *
3309 * All threads created when this flag is true will report
3310 * a message on $stderr if an exception kills the thread.
3311 *
3312 * Thread.new { 1.times { raise } }
3313 *
3314 * will produce this output on $stderr:
3315 *
3316 * #<Thread:...> terminated with exception (report_on_exception is true):
3317 * Traceback (most recent call last):
3318 * 2: from -e:1:in `block in <main>'
3319 * 1: from -e:1:in `times'
3320 *
3321 * This is done to catch errors in threads early.
3322 * In some cases, you might not want this output.
3323 * There are multiple ways to avoid the extra output:
3324 *
3325 * * If the exception is not intended, the best is to fix the cause of
3326 * the exception so it does not happen anymore.
3327 * * If the exception is intended, it might be better to rescue it closer to
3328 * where it is raised rather than let it kill the Thread.
3329 * * If it is guaranteed the Thread will be joined with Thread#join or
3330 * Thread#value, then it is safe to disable this report with
3331 * <code>Thread.current.report_on_exception = false</code>
3332 * when starting the Thread.
3333 * However, this might handle the exception much later, or not at all
3334 * if the Thread is never joined due to the parent thread being blocked, etc.
3335 *
3336 * See also ::report_on_exception=.
3337 *
3338 * There is also an instance level method to set this for a specific thread,
3339 * see #report_on_exception=.
3340 *
3341 */
3342
3343static VALUE
3344rb_thread_s_report_exc(VALUE _)
3345{
3346 return RBOOL(GET_THREAD()->vm->thread_report_on_exception);
3347}
3348
3349
3350/*
3351 * call-seq:
3352 * Thread.report_on_exception= boolean -> true or false
3353 *
3354 * Returns the new state.
3355 * When set to +true+, all threads created afterwards will inherit the
3356 * condition and report a message on $stderr if an exception kills a thread:
3357 *
3358 * Thread.report_on_exception = true
3359 * t1 = Thread.new do
3360 * puts "In new thread"
3361 * raise "Exception from thread"
3362 * end
3363 * sleep(1)
3364 * puts "In the main thread"
3365 *
3366 * This will produce:
3367 *
3368 * In new thread
3369 * #<Thread:...prog.rb:2> terminated with exception (report_on_exception is true):
3370 * Traceback (most recent call last):
3371 * prog.rb:4:in `block in <main>': Exception from thread (RuntimeError)
3372 * In the main thread
3373 *
3374 * See also ::report_on_exception.
3375 *
3376 * There is also an instance level method to set this for a specific thread,
3377 * see #report_on_exception=.
3378 */
3379
3380static VALUE
3381rb_thread_s_report_exc_set(VALUE self, VALUE val)
3382{
3383 GET_THREAD()->vm->thread_report_on_exception = RTEST(val);
3384 return val;
3385}
3386
3387
3388/*
3389 * call-seq:
3390 * Thread.ignore_deadlock -> true or false
3391 *
3392 * Returns the status of the global ``ignore deadlock'' condition.
3393 * The default is +false+, so that deadlock conditions are not ignored.
3394 *
3395 * See also ::ignore_deadlock=.
3396 *
3397 */
3398
3399static VALUE
3400rb_thread_s_ignore_deadlock(VALUE _)
3401{
3402 return RBOOL(GET_THREAD()->vm->thread_ignore_deadlock);
3403}
3404
3405
3406/*
3407 * call-seq:
3408 * Thread.ignore_deadlock = boolean -> true or false
3409 *
3410 * Returns the new state.
3411 * When set to +true+, the VM will not check for deadlock conditions.
3412 * It is only useful to set this if your application can break a
3413 * deadlock condition via some other means, such as a signal.
3414 *
3415 * Thread.ignore_deadlock = true
3416 * queue = Thread::Queue.new
3417 *
3418 * trap(:SIGUSR1){queue.push "Received signal"}
3419 *
3420 * # raises fatal error unless ignoring deadlock
3421 * puts queue.pop
3422 *
3423 * See also ::ignore_deadlock.
3424 */
3425
3426static VALUE
3427rb_thread_s_ignore_deadlock_set(VALUE self, VALUE val)
3428{
3429 GET_THREAD()->vm->thread_ignore_deadlock = RTEST(val);
3430 return val;
3431}
3432
3433
3434/*
3435 * call-seq:
3436 * thr.report_on_exception -> true or false
3437 *
3438 * Returns the status of the thread-local ``report on exception'' condition for
3439 * this +thr+.
3440 *
3441 * The default value when creating a Thread is the value of
3442 * the global flag Thread.report_on_exception.
3443 *
3444 * See also #report_on_exception=.
3445 *
3446 * There is also a class level method to set this for all new threads, see
3447 * ::report_on_exception=.
3448 */
3449
3450static VALUE
3451rb_thread_report_exc(VALUE thread)
3452{
3453 return RBOOL(rb_thread_ptr(thread)->report_on_exception);
3454}
3455
3456
3457/*
3458 * call-seq:
3459 * thr.report_on_exception= boolean -> true or false
3460 *
3461 * When set to +true+, a message is printed on $stderr if an exception
3462 * kills this +thr+. See ::report_on_exception for details.
3463 *
3464 * See also #report_on_exception.
3465 *
3466 * There is also a class level method to set this for all new threads, see
3467 * ::report_on_exception=.
3468 */
3469
3470static VALUE
3471rb_thread_report_exc_set(VALUE thread, VALUE val)
3472{
3473 rb_thread_ptr(thread)->report_on_exception = RTEST(val);
3474 return val;
3475}
3476
3477
3478/*
3479 * call-seq:
3480 * thr.group -> thgrp or nil
3481 *
3482 * Returns the ThreadGroup which contains the given thread.
3483 *
3484 * Thread.main.group #=> #<ThreadGroup:0x4029d914>
3485 */
3486
3487VALUE
3488rb_thread_group(VALUE thread)
3489{
3490 return rb_thread_ptr(thread)->thgroup;
3491}
3492
3493static const char *
3494thread_status_name(rb_thread_t *th, int detail)
3495{
3496 switch (th->status) {
3497 case THREAD_RUNNABLE:
3498 return th->to_kill ? "aborting" : "run";
3499 case THREAD_STOPPED_FOREVER:
3500 if (detail) return "sleep_forever";
3501 case THREAD_STOPPED:
3502 return "sleep";
3503 case THREAD_KILLED:
3504 return "dead";
3505 default:
3506 return "unknown";
3507 }
3508}
3509
3510static int
3511rb_threadptr_dead(rb_thread_t *th)
3512{
3513 return th->status == THREAD_KILLED;
3514}
3515
3516
3517/*
3518 * call-seq:
3519 * thr.status -> string, false or nil
3520 *
3521 * Returns the status of +thr+.
3522 *
3523 * [<tt>"sleep"</tt>]
3524 * Returned if this thread is sleeping or waiting on I/O
3525 * [<tt>"run"</tt>]
3526 * When this thread is executing
3527 * [<tt>"aborting"</tt>]
3528 * If this thread is aborting
3529 * [+false+]
3530 * When this thread is terminated normally
3531 * [+nil+]
3532 * If terminated with an exception.
3533 *
3534 * a = Thread.new { raise("die now") }
3535 * b = Thread.new { Thread.stop }
3536 * c = Thread.new { Thread.exit }
3537 * d = Thread.new { sleep }
3538 * d.kill #=> #<Thread:0x401b3678 aborting>
3539 * a.status #=> nil
3540 * b.status #=> "sleep"
3541 * c.status #=> false
3542 * d.status #=> "aborting"
3543 * Thread.current.status #=> "run"
3544 *
3545 * See also the instance methods #alive? and #stop?
3546 */
3547
3548static VALUE
3549rb_thread_status(VALUE thread)
3550{
3551 rb_thread_t *target_th = rb_thread_ptr(thread);
3552
3553 if (rb_threadptr_dead(target_th)) {
3554 if (!NIL_P(target_th->ec->errinfo) &&
3555 !FIXNUM_P(target_th->ec->errinfo)) {
3556 return Qnil;
3557 }
3558 else {
3559 return Qfalse;
3560 }
3561 }
3562 else {
3563 return rb_str_new2(thread_status_name(target_th, FALSE));
3564 }
3565}
3566
3567
3568/*
3569 * call-seq:
3570 * thr.alive? -> true or false
3571 *
3572 * Returns +true+ if +thr+ is running or sleeping.
3573 *
3574 * thr = Thread.new { }
3575 * thr.join #=> #<Thread:0x401b3fb0 dead>
3576 * Thread.current.alive? #=> true
3577 * thr.alive? #=> false
3578 *
3579 * See also #stop? and #status.
3580 */
3581
3582static VALUE
3583rb_thread_alive_p(VALUE thread)
3584{
3585 return RBOOL(!thread_finished(rb_thread_ptr(thread)));
3586}
3587
3588/*
3589 * call-seq:
3590 * thr.stop? -> true or false
3591 *
3592 * Returns +true+ if +thr+ is dead or sleeping.
3593 *
3594 * a = Thread.new { Thread.stop }
3595 * b = Thread.current
3596 * a.stop? #=> true
3597 * b.stop? #=> false
3598 *
3599 * See also #alive? and #status.
3600 */
3601
3602static VALUE
3603rb_thread_stop_p(VALUE thread)
3604{
3605 rb_thread_t *th = rb_thread_ptr(thread);
3606
3607 if (rb_threadptr_dead(th)) {
3608 return Qtrue;
3609 }
3610 return RBOOL(th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER);
3611}
3612
3613/*
3614 * call-seq:
3615 * thr.name -> string
3616 *
3617 * Returns the name of the thread.
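 *
 * For example:
 *
 *    t = Thread.new { sleep }
 *    t.name = "worker"
 *    t.name #=> "worker"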
3618 */
3619
3620static VALUE
3621rb_thread_getname(VALUE thread)
3622{
3623 return rb_thread_ptr(thread)->name;
3624}
3625
3626/*
3627 * call-seq:
3628 * thr.name=(name) -> string
3629 *
3630 * Sets the given name on the Ruby thread.
3631 * On some platforms, the name may also be applied to the underlying pthread and/or kernel thread.
3632 */
3633
3634static VALUE
3635rb_thread_setname(VALUE thread, VALUE name)
3636{
3637 rb_thread_t *target_th = rb_thread_ptr(thread);
3638
3639 if (!NIL_P(name)) {
3640 rb_encoding *enc;
3641 StringValueCStr(name);
3642 enc = rb_enc_get(name);
3643 if (!rb_enc_asciicompat(enc)) {
3644 rb_raise(rb_eArgError, "ASCII incompatible encoding (%s)",
3645 rb_enc_name(enc));
3646 }
3647 name = rb_str_new_frozen(name);
3648 }
3649 target_th->name = name;
3650 if (threadptr_initialized(target_th) && target_th->has_dedicated_nt) {
3651 native_set_another_thread_name(target_th->nt->thread_id, name);
3652 }
3653 return name;
3654}
3655
3656#if USE_NATIVE_THREAD_NATIVE_THREAD_ID
3657/*
3658 * call-seq:
3659 * thr.native_thread_id -> integer
3660 *
3661 * Returns the native thread ID used by the Ruby thread.
3662 *
3663 * The ID depends on the OS (it is not the POSIX thread ID returned by pthread_self(3)):
3664 * * On Linux it is TID returned by gettid(2).
3665 * * On macOS it is the system-wide unique integral ID of thread returned
3666 * by pthread_threadid_np(3).
3667 * * On FreeBSD it is the unique integral ID of the thread returned by
3668 * pthread_getthreadid_np(3).
3669 * * On Windows it is the thread identifier returned by GetThreadId().
3670 * * On other platforms, it raises NotImplementedError.
3671 *
3672 * NOTE:
3673 * If the thread is not yet associated with, or has already been dissociated
3674 * from, a native thread, it returns _nil_.
3675 * If the Ruby implementation uses an M:N thread model, the ID may change
3676 * depending on timing.
3677 */
3678
3679static VALUE
3680rb_thread_native_thread_id(VALUE thread)
3681{
3682 rb_thread_t *target_th = rb_thread_ptr(thread);
3683 if (rb_threadptr_dead(target_th)) return Qnil;
3684 return native_thread_native_thread_id(target_th);
3685}
3686#else
3687# define rb_thread_native_thread_id rb_f_notimplement
3688#endif
3689
3690/*
3691 * call-seq:
3692 * thr.to_s -> string
3693 *
3694 * Dump the name, id, and status of _thr_ to a string.
3695 */
3696
3697static VALUE
3698rb_thread_to_s(VALUE thread)
3699{
3700 VALUE cname = rb_class_path(rb_obj_class(thread));
3701 rb_thread_t *target_th = rb_thread_ptr(thread);
3702 const char *status;
3703 VALUE str, loc;
3704
3705 status = thread_status_name(target_th, TRUE);
3706 str = rb_sprintf("#<%"PRIsVALUE":%p", cname, (void *)thread);
3707 if (!NIL_P(target_th->name)) {
3708 rb_str_catf(str, "@%"PRIsVALUE, target_th->name);
3709 }
3710 if ((loc = threadptr_invoke_proc_location(target_th)) != Qnil) {
3711 rb_str_catf(str, " %"PRIsVALUE":%"PRIsVALUE,
3712 RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
3713 }
3714 rb_str_catf(str, " %s>", status);
3715
3716 return str;
3717}
3718
3719/* variables for recursive traversals */
3720#define recursive_key id__recursive_key__
3721
3722static VALUE
3723threadptr_local_aref(rb_thread_t *th, ID id)
3724{
3725 if (id == recursive_key) {
3726 return th->ec->local_storage_recursive_hash;
3727 }
3728 else {
3729 VALUE val;
3730 struct rb_id_table *local_storage = th->ec->local_storage;
3731
3732 if (local_storage != NULL && rb_id_table_lookup(local_storage, id, &val)) {
3733 return val;
3734 }
3735 else {
3736 return Qnil;
3737 }
3738 }
3739}
3740
3741VALUE
3742rb_thread_local_aref(VALUE thread, ID id)
3743{
3744 return threadptr_local_aref(rb_thread_ptr(thread), id);
3745}
3746
3747/*
3748 * call-seq:
3749 * thr[sym] -> obj or nil
3750 *
3751 * Attribute Reference---Returns the value of a fiber-local variable (current thread's root fiber
3752 * if not explicitly inside a Fiber), using either a symbol or a string name.
3753 * If the specified variable does not exist, returns +nil+.
3754 *
3755 * [
3756 * Thread.new { Thread.current["name"] = "A" },
3757 * Thread.new { Thread.current[:name] = "B" },
3758 * Thread.new { Thread.current["name"] = "C" }
3759 * ].each do |th|
3760 * th.join
3761 * puts "#{th.inspect}: #{th[:name]}"
3762 * end
3763 *
3764 * This will produce:
3765 *
3766 * #<Thread:0x00000002a54220 dead>: A
3767 * #<Thread:0x00000002a541a8 dead>: B
3768 * #<Thread:0x00000002a54130 dead>: C
3769 *
3770 * Thread#[] and Thread#[]= are not thread-local but fiber-local.
3771 * This confusion did not exist in Ruby 1.8 because
3772 * fibers are only available since Ruby 1.9.
3773 * Ruby 1.9 made these methods fiber-local in order to preserve
3774 * the following idiom for dynamic scope:
3775 *
3776 * def meth(newvalue)
3777 * begin
3778 * oldvalue = Thread.current[:name]
3779 * Thread.current[:name] = newvalue
3780 * yield
3781 * ensure
3782 * Thread.current[:name] = oldvalue
3783 * end
3784 * end
3785 *
3786 * The idiom would not work as a dynamic scope if the methods were thread-local
3787 * and the given block switched fibers:
3788 *
3789 * f = Fiber.new {
3790 * meth(1) {
3791 * Fiber.yield
3792 * }
3793 * }
3794 * meth(2) {
3795 * f.resume
3796 * }
3797 * f.resume
3798 * p Thread.current[:name]
3799 * #=> nil if fiber-local
3800 * #=> 2 if thread-local (The value 2 is leaked to outside of meth method.)
3801 *
3802 * For thread-local variables, please see #thread_variable_get and
3803 * #thread_variable_set.
3804 *
3805 */
3806
3807static VALUE
3808rb_thread_aref(VALUE thread, VALUE key)
3809{
3810 ID id = rb_check_id(&key);
3811 if (!id) return Qnil;
3812 return rb_thread_local_aref(thread, id);
3813}
3814
3815/*
3816 * call-seq:
3817 * thr.fetch(sym) -> obj
3818 * thr.fetch(sym) { } -> obj
3819 * thr.fetch(sym, default) -> obj
3820 *
3821 * Returns a fiber-local for the given key. If the key can't be
3822 * found, there are several options: With no other arguments, it will
3823 * raise a KeyError exception; if <i>default</i> is given, then that
3824 * will be returned; if the optional code block is specified, then
3825 * that will be run and its result returned. See Thread#[] and
3826 * Hash#fetch.
3827 */
3828static VALUE
3829rb_thread_fetch(int argc, VALUE *argv, VALUE self)
3830{
3831 VALUE key, val;
3832 ID id;
3833 rb_thread_t *target_th = rb_thread_ptr(self);
3834 int block_given;
3835
3836 rb_check_arity(argc, 1, 2);
3837 key = argv[0];
3838
3839 block_given = rb_block_given_p();
3840 if (block_given && argc == 2) {
3841 rb_warn("block supersedes default value argument");
3842 }
3843
3844 id = rb_check_id(&key);
3845
3846 if (id == recursive_key) {
3847 return target_th->ec->local_storage_recursive_hash;
3848 }
3849 else if (id && target_th->ec->local_storage &&
3850 rb_id_table_lookup(target_th->ec->local_storage, id, &val)) {
3851 return val;
3852 }
3853 else if (block_given) {
3854 return rb_yield(key);
3855 }
3856 else if (argc == 1) {
3857 rb_key_err_raise(rb_sprintf("key not found: %+"PRIsVALUE, key), self, key);
3858 }
3859 else {
3860 return argv[1];
3861 }
3862}
3863
3864static VALUE
3865threadptr_local_aset(rb_thread_t *th, ID id, VALUE val)
3866{
3867 if (id == recursive_key) {
3868 th->ec->local_storage_recursive_hash = val;
3869 return val;
3870 }
3871 else {
3872 struct rb_id_table *local_storage = th->ec->local_storage;
3873
3874 if (NIL_P(val)) {
3875 if (!local_storage) return Qnil;
3876 rb_id_table_delete(local_storage, id);
3877 return Qnil;
3878 }
3879 else {
3880 if (local_storage == NULL) {
3881 th->ec->local_storage = local_storage = rb_id_table_create(0);
3882 }
3883 rb_id_table_insert(local_storage, id, val);
3884 return val;
3885 }
3886 }
3887}
3888
3889VALUE
3890rb_thread_local_aset(VALUE thread, ID id, VALUE val)
3891{
3892 if (OBJ_FROZEN(thread)) {
3893 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3894 }
3895
3896 return threadptr_local_aset(rb_thread_ptr(thread), id, val);
3897}
3898
3899/*
3900 * call-seq:
3901 * thr[sym] = obj -> obj
3902 *
3903 * Attribute Assignment---Sets or creates the value of a fiber-local variable,
3904 * using either a symbol or a string.
3905 *
3906 * See also Thread#[].
3907 *
3908 * For thread-local variables, please see #thread_variable_set and
3909 * #thread_variable_get.
3910 */
3911
3912static VALUE
3913rb_thread_aset(VALUE self, VALUE id, VALUE val)
3914{
3915 return rb_thread_local_aset(self, rb_to_id(id), val);
3916}
3917
3918/*
3919 * call-seq:
3920 * thr.thread_variable_get(key) -> obj or nil
3921 *
3922 * Returns the value of a thread local variable that has been set. Note that
3923 * these are different from fiber-local values. For fiber-local values,
3924 * please see Thread#[] and Thread#[]=.
3925 *
3926 * Thread local values are carried along with threads, and do not respect
3927 * fibers. For example:
3928 *
3929 * Thread.new {
3930 * Thread.current.thread_variable_set("foo", "bar") # set a thread local
3931 * Thread.current["foo"] = "bar" # set a fiber local
3932 *
3933 * Fiber.new {
3934 * Fiber.yield [
3935 * Thread.current.thread_variable_get("foo"), # get the thread local
3936 * Thread.current["foo"], # get the fiber local
3937 * ]
3938 * }.resume
3939 * }.join.value # => ['bar', nil]
3940 *
3941 * The value "bar" is returned for the thread local, whereas nil is returned
3942 * for the fiber local. The fiber is executed in the same thread, so the
3943 * thread local values are available.
3944 */
3945
3946static VALUE
3947rb_thread_variable_get(VALUE thread, VALUE key)
3948{
3949 VALUE locals;
3950 VALUE symbol = rb_to_symbol(key);
3951
3952 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3953 return Qnil;
3954 }
3955 locals = rb_thread_local_storage(thread);
3956 return rb_hash_aref(locals, symbol);
3957}
3958
3959/*
3960 * call-seq:
3961 * thr.thread_variable_set(key, value)
3962 *
3963 * Sets a thread local with +key+ to +value+. Note that these are local to
3964 * threads, and not to fibers. Please see Thread#thread_variable_get and
3965 * Thread#[] for more information.
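 *
 * For example:
 *
 *    Thread.current.thread_variable_set(:debug, true)
 *    Thread.current.thread_variable_get(:debug) #=> true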
3966 */
3967
3968static VALUE
3969rb_thread_variable_set(VALUE thread, VALUE key, VALUE val)
3970{
3971 VALUE locals;
3972
3973 if (OBJ_FROZEN(thread)) {
3974 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3975 }
3976
3977 locals = rb_thread_local_storage(thread);
3978 return rb_hash_aset(locals, rb_to_symbol(key), val);
3979}
3980
3981/*
3982 * call-seq:
3983 * thr.key?(sym) -> true or false
3984 *
3985 * Returns +true+ if the given string (or symbol) exists as a fiber-local
3986 * variable.
3987 *
3988 * me = Thread.current
3989 * me[:oliver] = "a"
3990 * me.key?(:oliver) #=> true
3991 * me.key?(:stanley) #=> false
3992 */
3993
3994static VALUE
3995rb_thread_key_p(VALUE self, VALUE key)
3996{
3997 VALUE val;
3998 ID id = rb_check_id(&key);
3999 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
4000
4001 if (!id || local_storage == NULL) {
4002 return Qfalse;
4003 }
4004 return RBOOL(rb_id_table_lookup(local_storage, id, &val));
4005}
4006
4007static enum rb_id_table_iterator_result
4008thread_keys_i(ID key, VALUE value, void *ary)
4009{
4010 rb_ary_push((VALUE)ary, ID2SYM(key));
4011 return ID_TABLE_CONTINUE;
4012}
4013
4014int
4015rb_thread_alone(void)
4016{
4017 // TODO
4018 return rb_ractor_living_thread_num(GET_RACTOR()) == 1;
4019}
4020
4021/*
4022 * call-seq:
4023 * thr.keys -> array
4024 *
4025 * Returns an array of the names of the fiber-local variables (as Symbols).
4026 *
4027 * thr = Thread.new do
4028 * Thread.current[:cat] = 'meow'
4029 * Thread.current["dog"] = 'woof'
4030 * end
4031 * thr.join #=> #<Thread:0x401b3f10 dead>
4032 * thr.keys #=> [:dog, :cat]
4033 */
4034
4035static VALUE
4036rb_thread_keys(VALUE self)
4037{
4038 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
4039 VALUE ary = rb_ary_new();
4040
4041 if (local_storage) {
4042 rb_id_table_foreach(local_storage, thread_keys_i, (void *)ary);
4043 }
4044 return ary;
4045}
4046
4047static int
4048keys_i(VALUE key, VALUE value, VALUE ary)
4049{
4050 rb_ary_push(ary, key);
4051 return ST_CONTINUE;
4052}
4053
4054/*
4055 * call-seq:
4056 * thr.thread_variables -> array
4057 *
4058 * Returns an array of the names of the thread-local variables (as Symbols).
4059 *
4060 * thr = Thread.new do
4061 * Thread.current.thread_variable_set(:cat, 'meow')
4062 * Thread.current.thread_variable_set("dog", 'woof')
4063 * end
4064 * thr.join #=> #<Thread:0x401b3f10 dead>
4065 * thr.thread_variables #=> [:dog, :cat]
4066 *
4067 * Note that these are not fiber local variables. Please see Thread#[] and
4068 * Thread#thread_variable_get for more details.
4069 */
4070
4071static VALUE
4072rb_thread_variables(VALUE thread)
4073{
4074 VALUE locals;
4075 VALUE ary;
4076
4077 ary = rb_ary_new();
4078 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
4079 return ary;
4080 }
4081 locals = rb_thread_local_storage(thread);
4082 rb_hash_foreach(locals, keys_i, ary);
4083
4084 return ary;
4085}
4086
4087/*
4088 * call-seq:
4089 * thr.thread_variable?(key) -> true or false
4090 *
4091 * Returns +true+ if the given string (or symbol) exists as a thread-local
4092 * variable.
4093 *
4094 * me = Thread.current
4095 * me.thread_variable_set(:oliver, "a")
4096 * me.thread_variable?(:oliver) #=> true
4097 * me.thread_variable?(:stanley) #=> false
4098 *
4099 * Note that these are not fiber local variables. Please see Thread#[] and
4100 * Thread#thread_variable_get for more details.
4101 */
4102
4103static VALUE
4104rb_thread_variable_p(VALUE thread, VALUE key)
4105{
4106 VALUE locals;
4107 VALUE symbol = rb_to_symbol(key);
4108
4109 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
4110 return Qfalse;
4111 }
4112 locals = rb_thread_local_storage(thread);
4113
4114 return RBOOL(rb_hash_lookup(locals, symbol) != Qnil);
4115}
4116
4117/*
4118 * call-seq:
4119 * thr.priority -> integer
4120 *
4121 * Returns the priority of <i>thr</i>. The default is inherited from the
4122 * thread that created the new thread, or zero for the
4123 * initial main thread; higher-priority threads will run more frequently
4124 * than lower-priority threads (but lower-priority threads can also run).
4125 *
4126 * This is just a hint for the Ruby thread scheduler. It may be ignored on
4127 * some platforms.
4128 *
4129 * Thread.current.priority #=> 0
4130 */
4131
4132static VALUE
4133rb_thread_priority(VALUE thread)
4134{
4135 return INT2NUM(rb_thread_ptr(thread)->priority);
4136}
4137
4138
4139/*
4140 * call-seq:
4141 * thr.priority= integer -> thr
4142 *
4143 * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
4144 * will run more frequently than lower-priority threads (but lower-priority
4145 * threads can also run).
4146 *
4147 * This is just a hint for the Ruby thread scheduler. It may be ignored on
4148 * some platforms.
4149 *
4150 * count1 = count2 = 0
4151 * a = Thread.new do
4152 * loop { count1 += 1 }
4153 * end
4154 * a.priority = -1
4155 *
4156 * b = Thread.new do
4157 * loop { count2 += 1 }
4158 * end
4159 * b.priority = -2
4160 * sleep 1 #=> 1
4161 * count1 #=> 622504
4162 * count2 #=> 5832
4163 */
4164
4165static VALUE
4166rb_thread_priority_set(VALUE thread, VALUE prio)
4167{
4168 rb_thread_t *target_th = rb_thread_ptr(thread);
4169 int priority;
4170
4171#if USE_NATIVE_THREAD_PRIORITY
4172 target_th->priority = NUM2INT(prio);
4173 native_thread_apply_priority(target_th);
4174#else
4175 priority = NUM2INT(prio);
4176 if (priority > RUBY_THREAD_PRIORITY_MAX) {
4177 priority = RUBY_THREAD_PRIORITY_MAX;
4178 }
4179 else if (priority < RUBY_THREAD_PRIORITY_MIN) {
4180 priority = RUBY_THREAD_PRIORITY_MIN;
4181 }
4182 target_th->priority = (int8_t)priority;
4183#endif
4184 return INT2NUM(target_th->priority);
4185}
4186
4187/* for IO */
4188
4189#if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
4190
4191/*
4192 * Several Unix platforms support file descriptors bigger than FD_SETSIZE
4193 * in the select(2) system call.
4194 *
4195 * - Linux 2.2.12 (?)
4196 * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
4197 * select(2) documents how to allocate fd_set dynamically.
4198 * http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
4199 * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
4200 * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
4201 * select(2) documents how to allocate fd_set dynamically.
4202 * http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
4203 * - Solaris 8 has select_large_fdset
4204 * - Mac OS X 10.7 (Lion)
4205 * select(2) returns EINVAL if nfds is greater than FD_SETSIZE and
4206 * _DARWIN_UNLIMITED_SELECT (or _DARWIN_C_SOURCE) isn't defined.
4207 * https://developer.apple.com/library/archive/releasenotes/Darwin/SymbolVariantsRelNotes/index.html
4208 *
4209 * When fd_set is not big enough to hold big file descriptors,
4210 * it should be allocated dynamically.
4211 * Note that this assumes fd_set is structured as a bitmap.
4212 *
4213 * rb_fd_init allocates the memory.
4214 * rb_fd_term frees the memory.
4215 * rb_fd_set may reallocate the bitmap.
4216 *
4217 * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
4218 */
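
/*
 * A minimal usage sketch of the lifecycle described above, assuming `fd'
 * is an open descriptor:
 *
 *   rb_fdset_t fds;
 *   struct timeval tv = { 1, 0 };      // one-second timeout
 *
 *   rb_fd_init(&fds);                  // allocate the bitmap
 *   rb_fd_set(fd, &fds);               // may grow it beyond FD_SETSIZE
 *   if (rb_fd_select(fd + 1, &fds, NULL, NULL, &tv) > 0 &&
 *       rb_fd_isset(fd, &fds)) {
 *       // fd is ready for reading
 *   }
 *   rb_fd_term(&fds);                  // free the bitmap
 */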
4219
4220void
4221rb_fd_init(rb_fdset_t *fds)
4222{
4223 fds->maxfd = 0;
4224 fds->fdset = ALLOC(fd_set);
4225 FD_ZERO(fds->fdset);
4226}
4227
4228void
4229rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
4230{
4231 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
4232
4233 if (size < sizeof(fd_set))
4234 size = sizeof(fd_set);
4235 dst->maxfd = src->maxfd;
4236 dst->fdset = xmalloc(size);
4237 memcpy(dst->fdset, src->fdset, size);
4238}
4239
4240void
4241rb_fd_term(rb_fdset_t *fds)
4242{
4243 xfree(fds->fdset);
4244 fds->maxfd = 0;
4245 fds->fdset = 0;
4246}
4247
4248void
4249rb_fd_zero(rb_fdset_t *fds)
4250{
4251 if (fds->fdset)
4252 MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
4253}
4254
4255static void
4256rb_fd_resize(int n, rb_fdset_t *fds)
4257{
4258 size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
4259 size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
4260
4261 if (m < sizeof(fd_set)) m = sizeof(fd_set);
4262 if (o < sizeof(fd_set)) o = sizeof(fd_set);
4263
4264 if (m > o) {
4265 fds->fdset = xrealloc(fds->fdset, m);
4266 memset((char *)fds->fdset + o, 0, m - o);
4267 }
4268 if (n >= fds->maxfd) fds->maxfd = n + 1;
4269}
4270
4271void
4272rb_fd_set(int n, rb_fdset_t *fds)
4273{
4274 rb_fd_resize(n, fds);
4275 FD_SET(n, fds->fdset);
4276}
4277
4278void
4279rb_fd_clr(int n, rb_fdset_t *fds)
4280{
4281 if (n >= fds->maxfd) return;
4282 FD_CLR(n, fds->fdset);
4283}
4284
4285int
4286rb_fd_isset(int n, const rb_fdset_t *fds)
4287{
4288 if (n >= fds->maxfd) return 0;
4289 return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
4290}
4291
4292void
4293rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
4294{
4295 size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);
4296
4297 if (size < sizeof(fd_set)) size = sizeof(fd_set);
4298 dst->maxfd = max;
4299 dst->fdset = xrealloc(dst->fdset, size);
4300 memcpy(dst->fdset, src, size);
4301}
4302
4303void
4304rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
4305{
4306 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
4307
4308 if (size < sizeof(fd_set))
4309 size = sizeof(fd_set);
4310 dst->maxfd = src->maxfd;
4311 dst->fdset = xrealloc(dst->fdset, size);
4312 memcpy(dst->fdset, src->fdset, size);
4313}
4314
4315int
4316rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
4317{
4318 fd_set *r = NULL, *w = NULL, *e = NULL;
4319 if (readfds) {
4320 rb_fd_resize(n - 1, readfds);
4321 r = rb_fd_ptr(readfds);
4322 }
4323 if (writefds) {
4324 rb_fd_resize(n - 1, writefds);
4325 w = rb_fd_ptr(writefds);
4326 }
4327 if (exceptfds) {
4328 rb_fd_resize(n - 1, exceptfds);
4329 e = rb_fd_ptr(exceptfds);
4330 }
4331 return select(n, r, w, e, timeout);
4332}
4333
4334#define rb_fd_no_init(fds) ((void)((fds)->fdset = 0), (void)((fds)->maxfd = 0))
4335
4336#undef FD_ZERO
4337#undef FD_SET
4338#undef FD_CLR
4339#undef FD_ISSET
4340
4341#define FD_ZERO(f) rb_fd_zero(f)
4342#define FD_SET(i, f) rb_fd_set((i), (f))
4343#define FD_CLR(i, f) rb_fd_clr((i), (f))
4344#define FD_ISSET(i, f) rb_fd_isset((i), (f))
4345
4346#elif defined(_WIN32)
4347
4348void
4349rb_fd_init(rb_fdset_t *set)
4350{
4351 set->capa = FD_SETSIZE;
4352 set->fdset = ALLOC(fd_set);
4353 FD_ZERO(set->fdset);
4354}
4355
4356void
4357rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
4358{
4359 rb_fd_init(dst);
4360 rb_fd_dup(dst, src);
4361}
4362
4363void
4364rb_fd_term(rb_fdset_t *set)
4365{
4366 xfree(set->fdset);
4367 set->fdset = NULL;
4368 set->capa = 0;
4369}
4370
4371void
4372rb_fd_set(int fd, rb_fdset_t *set)
4373{
4374 unsigned int i;
4375 SOCKET s = rb_w32_get_osfhandle(fd);
4376
4377 for (i = 0; i < set->fdset->fd_count; i++) {
4378 if (set->fdset->fd_array[i] == s) {
4379 return;
4380 }
4381 }
4382 if (set->fdset->fd_count >= (unsigned)set->capa) {
4383 set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
4384 set->fdset =
4385 rb_xrealloc_mul_add(
4386 set->fdset, set->capa, sizeof(SOCKET), sizeof(unsigned int));
4387 }
4388 set->fdset->fd_array[set->fdset->fd_count++] = s;
4389}
4390
4391#undef FD_ZERO
4392#undef FD_SET
4393#undef FD_CLR
4394#undef FD_ISSET
4395
4396#define FD_ZERO(f) rb_fd_zero(f)
4397#define FD_SET(i, f) rb_fd_set((i), (f))
4398#define FD_CLR(i, f) rb_fd_clr((i), (f))
4399#define FD_ISSET(i, f) rb_fd_isset((i), (f))
4400
4401#define rb_fd_no_init(fds) (void)((fds)->fdset = 0)
4402
4403#endif
4404
4405#ifndef rb_fd_no_init
4406#define rb_fd_no_init(fds) (void)(fds)
4407#endif
4408
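/*
 * Decides whether an interrupted or early-returning wait should be retried:
 * on EINTR/ERESTART the result is reset and the remaining timeout in *rel
 * is updated (zeroed once the deadline has passed); a result of 0 before
 * the deadline is treated as a spurious wakeup and also retried.
 */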
4409static int
4410wait_retryable(volatile int *result, int errnum, rb_hrtime_t *rel, rb_hrtime_t end)
4411{
4412 int r = *result;
4413 if (r < 0) {
4414 switch (errnum) {
4415 case EINTR:
4416#ifdef ERESTART
4417 case ERESTART:
4418#endif
4419 *result = 0;
4420 if (rel && hrtime_update_expire(rel, end)) {
4421 *rel = 0;
4422 }
4423 return TRUE;
4424 }
4425 return FALSE;
4426 }
4427 else if (r == 0) {
4428 /* check for spurious wakeup */
4429 if (rel) {
4430 return !hrtime_update_expire(rel, end);
4431 }
4432 return TRUE;
4433 }
4434 return FALSE;
4435}
4437struct select_set {
4438 int max;
4439 rb_thread_t *th;
4440 rb_fdset_t *rset;
4441 rb_fdset_t *wset;
4442 rb_fdset_t *eset;
4443 rb_fdset_t orig_rset;
4444 rb_fdset_t orig_wset;
4445 rb_fdset_t orig_eset;
4446 struct timeval *timeout;
4447};
4448
4449static VALUE
4450select_set_free(VALUE p)
4451{
4452 struct select_set *set = (struct select_set *)p;
4453
4454 rb_fd_term(&set->orig_rset);
4455 rb_fd_term(&set->orig_wset);
4456 rb_fd_term(&set->orig_eset);
4457
4458 return Qfalse;
4459}
4460
4461static VALUE
4462do_select(VALUE p)
4463{
4464 struct select_set *set = (struct select_set *)p;
4465 volatile int result = 0;
4466 int lerrno;
4467 rb_hrtime_t *to, rel, end = 0;
4468
4469 timeout_prepare(&to, &rel, &end, set->timeout);
4470 volatile rb_hrtime_t endtime = end;
4471#define restore_fdset(dst, src) \
4472 ((dst) ? rb_fd_dup(dst, src) : (void)0)
4473#define do_select_update() \
4474 (restore_fdset(set->rset, &set->orig_rset), \
4475 restore_fdset(set->wset, &set->orig_wset), \
4476 restore_fdset(set->eset, &set->orig_eset), \
4477 TRUE)
4478
4479 do {
4480 lerrno = 0;
4481
4482 BLOCKING_REGION(set->th, {
4483 struct timeval tv;
4484
4485 if (!RUBY_VM_INTERRUPTED(set->th->ec)) {
4486 result = native_fd_select(set->max,
4487 set->rset, set->wset, set->eset,
4488 rb_hrtime2timeval(&tv, to), set->th);
4489 if (result < 0) lerrno = errno;
4490 }
4491 }, ubf_select, set->th, TRUE);
4492
4493 RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec); /* may raise */
4494 } while (wait_retryable(&result, lerrno, to, endtime) && do_select_update());
4495
4496 RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec);
4497
4498 if (result < 0) {
4499 errno = lerrno;
4500 }
4501
4502 return (VALUE)result;
4503}
4504
4505int
4506rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
4507 struct timeval *timeout)
4508{
4509 struct select_set set;
4510
4511 set.th = GET_THREAD();
4512 RUBY_VM_CHECK_INTS_BLOCKING(set.th->ec);
4513 set.max = max;
4514 set.rset = read;
4515 set.wset = write;
4516 set.eset = except;
4517 set.timeout = timeout;
4518
4519 if (!set.rset && !set.wset && !set.eset) {
4520 if (!timeout) {
4521 rb_thread_sleep_forever();
4522 return 0;
4523 }
4524 rb_thread_wait_for(*timeout);
4525 return 0;
4526 }
4527
4528#define fd_init_copy(f) do { \
4529 if (set.f) { \
4530 rb_fd_resize(set.max - 1, set.f); \
4531 if (&set.orig_##f != set.f) { /* sigwait_fd */ \
4532 rb_fd_init_copy(&set.orig_##f, set.f); \
4533 } \
4534 } \
4535 else { \
4536 rb_fd_no_init(&set.orig_##f); \
4537 } \
4538 } while (0)
4539 fd_init_copy(rset);
4540 fd_init_copy(wset);
4541 fd_init_copy(eset);
4542#undef fd_init_copy
4543
4544 return (int)rb_ensure(do_select, (VALUE)&set, select_set_free, (VALUE)&set);
4545}
4546
4547#ifdef USE_POLL
4548
4549/* Same as the Linux kernel. TODO: make a platform-independent definition. */
4550#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
4551#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
4552#define POLLEX_SET (POLLPRI)
4553
4554#ifndef POLLERR_SET /* defined for FreeBSD for now */
4555# define POLLERR_SET (0)
4556#endif
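
/*
 * POLLHUP and POLLERR are folded into POLLIN_SET (and POLLERR into
 * POLLOUT_SET) so that hung-up or errored descriptors report as ready,
 * matching select(2), which marks such descriptors readable/writable so
 * that the following read(2)/write(2) returns EOF or an error instead
 * of blocking.
 */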
4557
4558static int
4559wait_for_single_fd_blocking_region(rb_thread_t *th, struct pollfd *fds, nfds_t nfds,
4560 rb_hrtime_t *const to, volatile int *lerrno)
4561{
4562 struct timespec ts;
4563 volatile int result = 0;
4564
4565 *lerrno = 0;
4566 BLOCKING_REGION(th, {
4567 if (!RUBY_VM_INTERRUPTED(th->ec)) {
4568 result = ppoll(fds, nfds, rb_hrtime2timespec(&ts, to), 0);
4569 if (result < 0) *lerrno = errno;
4570 }
4571 }, ubf_select, th, TRUE);
4572 return result;
4573}
4574
4575/*
4576 * returns a mask of events
4577 */
4578static int
4579thread_io_wait(struct rb_io *io, int fd, int events, struct timeval *timeout)
4580{
4581 struct pollfd fds[1] = {{
4582 .fd = fd,
4583 .events = (short)events,
4584 .revents = 0,
4585 }};
4586 volatile int result = 0;
4587 nfds_t nfds;
4588 struct rb_io_blocking_operation blocking_operation;
4589 enum ruby_tag_type state;
4590 volatile int lerrno;
4591
4592 rb_execution_context_t *ec = GET_EC();
4593 rb_thread_t *th = rb_ec_thread_ptr(ec);
4594
4595 if (io) {
4596 blocking_operation.ec = ec;
4597 rb_io_blocking_operation_enter(io, &blocking_operation);
4598 }
4599
4600 if (timeout == NULL && thread_io_wait_events(th, fd, events, NULL)) {
4601 // fd is already ready; no need to wait
4602 state = 0;
4603 fds[0].revents = events;
4604 errno = 0;
4605 }
4606 else {
4607 EC_PUSH_TAG(ec);
4608 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
4609 rb_hrtime_t *to, rel, end = 0;
4610 RUBY_VM_CHECK_INTS_BLOCKING(ec);
4611 timeout_prepare(&to, &rel, &end, timeout);
4612 do {
4613 nfds = numberof(fds);
4614 result = wait_for_single_fd_blocking_region(th, fds, nfds, to, &lerrno);
4615
4616 RUBY_VM_CHECK_INTS_BLOCKING(ec);
4617 } while (wait_retryable(&result, lerrno, to, end));
4618
4619 RUBY_VM_CHECK_INTS_BLOCKING(ec);
4620 }
4621
4622 EC_POP_TAG();
4623 }
4624
4625 if (io) {
4626 rb_io_blocking_operation_exit(io, &blocking_operation);
4627 }
4628
4629 if (state) {
4630 EC_JUMP_TAG(ec, state);
4631 }
4632
4633 if (result < 0) {
4634 errno = lerrno;
4635 return -1;
4636 }
4637
4638 if (fds[0].revents & POLLNVAL) {
4639 errno = EBADF;
4640 return -1;
4641 }
4642
4643 /*
4644 * POLLIN and POLLOUT have different meanings from select(2)'s read/write bits.
4645 * Therefore we need to map them back.
4646 */
4647 result = 0;
4648 if (fds[0].revents & POLLIN_SET)
4649 result |= RB_WAITFD_IN;
4650 if (fds[0].revents & POLLOUT_SET)
4651 result |= RB_WAITFD_OUT;
4652 if (fds[0].revents & POLLEX_SET)
4653 result |= RB_WAITFD_PRI;
4654
4655 /* all requested events are ready if there is an error */
4656 if (fds[0].revents & POLLERR_SET)
4657 result |= events;
4658
4659 return result;
4660}
4661#else /* ! USE_POLL - implement rb_io_poll_fd() using select() */
4662struct select_args {
4663 struct rb_io *io;
4664 struct rb_io_blocking_operation *blocking_operation;
4665
4666 union {
4667 int fd;
4668 int error;
4669 } as;
4670 rb_fdset_t *read;
4671 rb_fdset_t *write;
4672 rb_fdset_t *except;
4673 struct timeval *tv;
4674};
4675
4676static VALUE
4677select_single(VALUE ptr)
4678{
4679 struct select_args *args = (struct select_args *)ptr;
4680 int r;
4681
4682 r = rb_thread_fd_select(args->as.fd + 1,
4683 args->read, args->write, args->except, args->tv);
4684 if (r == -1)
4685 args->as.error = errno;
4686 if (r > 0) {
4687 r = 0;
4688 if (args->read && rb_fd_isset(args->as.fd, args->read))
4689 r |= RB_WAITFD_IN;
4690 if (args->write && rb_fd_isset(args->as.fd, args->write))
4691 r |= RB_WAITFD_OUT;
4692 if (args->except && rb_fd_isset(args->as.fd, args->except))
4693 r |= RB_WAITFD_PRI;
4694 }
4695 return (VALUE)r;
4696}
4697
4698static VALUE
4699select_single_cleanup(VALUE ptr)
4700{
4701 struct select_args *args = (struct select_args *)ptr;
4702
4703 if (args->blocking_operation) {
4704 rb_io_blocking_operation_exit(args->io, args->blocking_operation);
4705 }
4706
4707 if (args->read) rb_fd_term(args->read);
4708 if (args->write) rb_fd_term(args->write);
4709 if (args->except) rb_fd_term(args->except);
4710
4711 return (VALUE)-1;
4712}
4713
4714static rb_fdset_t *
4715init_set_fd(int fd, rb_fdset_t *fds)
4716{
4717 if (fd < 0) {
4718 return 0;
4719 }
4720 rb_fd_init(fds);
4721 rb_fd_set(fd, fds);
4722
4723 return fds;
4724}
4725
4726static int
4727thread_io_wait(struct rb_io *io, int fd, int events, struct timeval *timeout)
4728{
4729 rb_fdset_t rfds, wfds, efds;
4730 struct select_args args;
4731 VALUE ptr = (VALUE)&args;
4732
4733 struct rb_io_blocking_operation blocking_operation;
4734 if (io) {
4735 args.io = io;
4736 blocking_operation.ec = GET_EC();
4737 rb_io_blocking_operation_enter(io, &blocking_operation);
4738 args.blocking_operation = &blocking_operation;
4739 }
4740 else {
4741 args.io = NULL;
4742 blocking_operation.ec = NULL;
4743 args.blocking_operation = NULL;
4744 }
4745
4746 args.as.fd = fd;
4747 args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
4748 args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
4749 args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
4750 args.tv = timeout;
4751
4752 int result = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
4753 if (result == -1)
4754 errno = args.as.error;
4755
4756 return result;
4757}
4758#endif /* ! USE_POLL */
4759
4760int
4761rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
4762{
4763 return thread_io_wait(NULL, fd, events, timeout);
4764}
4765
4766int
4767rb_thread_io_wait(struct rb_io *io, int events, struct timeval * timeout)
4768{
4769 return thread_io_wait(io, io->fd, events, timeout);
4770}
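
/*
 * A minimal sketch of waiting on a single descriptor from C, assuming
 * `fd' is open; other Ruby threads keep running while this one blocks:
 *
 *   struct timeval tv = { 5, 0 };      // five-second timeout
 *   int ready = rb_thread_wait_for_single_fd(fd, RB_WAITFD_IN, &tv);
 *
 *   if (ready < 0) rb_sys_fail("fd wait");   // errno is set on failure
 *   else if (ready == 0) { ... }             // timed out
 *   else if (ready & RB_WAITFD_IN) { ... }   // readable
 */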
4771
4772/*
4773 * for GC
4774 */
4775
4776#ifdef USE_CONSERVATIVE_STACK_END
4777void
4778rb_gc_set_stack_end(VALUE **stack_end_p)
4779{
4780 VALUE stack_end;
4781COMPILER_WARNING_PUSH
4782#ifdef __GNUC__
4783COMPILER_WARNING_IGNORED(-Wdangling-pointer);
4784#endif
4785 *stack_end_p = &stack_end;
4786COMPILER_WARNING_POP
4787}
4788#endif
4789
4790/*
4791 *
4792 */
4793
4794void
4795rb_threadptr_check_signal(rb_thread_t *mth)
4796{
4797 /* mth must be main_thread */
4798 if (rb_signal_buff_size() > 0) {
4799 /* wakeup main thread */
4800 threadptr_trap_interrupt(mth);
4801 }
4802}
4803
4804static void
4805async_bug_fd(const char *mesg, int errno_arg, int fd)
4806{
4807 char buff[64];
4808 size_t n = strlcpy(buff, mesg, sizeof(buff));
4809 if (n < sizeof(buff)-3) {
4810 ruby_snprintf(buff+n, sizeof(buff)-n, "(%d)", fd);
4811 }
4812 rb_async_bug_errno(buff, errno_arg);
4813}
4814
4815/* VM-dependent API is not available for this function */
4816static int
4817consume_communication_pipe(int fd)
4818{
4819#if USE_EVENTFD
4820 uint64_t buff[1];
4821#else
4822 /* the buffer can be shared because no one else refers to it. */
4823 static char buff[1024];
4824#endif
4825 ssize_t result;
4826 int ret = FALSE; /* for rb_sigwait_sleep */
4827
4828 while (1) {
4829 result = read(fd, buff, sizeof(buff));
4830#if USE_EVENTFD
4831 RUBY_DEBUG_LOG("resultf:%d buff:%lu", (int)result, (unsigned long)buff[0]);
4832#else
4833 RUBY_DEBUG_LOG("result:%d", (int)result);
4834#endif
4835 if (result > 0) {
4836 ret = TRUE;
4837 if (USE_EVENTFD || result < (ssize_t)sizeof(buff)) {
4838 return ret;
4839 }
4840 }
4841 else if (result == 0) {
4842 return ret;
4843 }
4844 else if (result < 0) {
4845 int e = errno;
4846 switch (e) {
4847 case EINTR:
4848 continue; /* retry */
4849 case EAGAIN:
4850#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
4851 case EWOULDBLOCK:
4852#endif
4853 return ret;
4854 default:
4855 async_bug_fd("consume_communication_pipe: read", e, fd);
4856 }
4857 }
4858 }
4859}
4860
4861void
4862rb_thread_stop_timer_thread(void)
4863{
4864 if (TIMER_THREAD_CREATED_P() && native_stop_timer_thread()) {
4865 native_reset_timer_thread();
4866 }
4867}
4868
4869void
4870rb_thread_reset_timer_thread(void)
4871{
4872 native_reset_timer_thread();
4873}
4874
4875void
4876rb_thread_start_timer_thread(void)
4877{
4878 system_working = 1;
4879 rb_thread_create_timer_thread();
4880}
4881
4882static int
4883clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
4884{
4885 int i;
4886 VALUE coverage = (VALUE)val;
4887 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
4888 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
4889
4890 if (lines) {
4891 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
4892 rb_ary_clear(lines);
4893 }
4894 else {
4895 int i;
4896 for (i = 0; i < RARRAY_LEN(lines); i++) {
4897 if (RARRAY_AREF(lines, i) != Qnil)
4898 RARRAY_ASET(lines, i, INT2FIX(0));
4899 }
4900 }
4901 }
4902 if (branches) {
4903 VALUE counters = RARRAY_AREF(branches, 1);
4904 for (i = 0; i < RARRAY_LEN(counters); i++) {
4905 RARRAY_ASET(counters, i, INT2FIX(0));
4906 }
4907 }
4908
4909 return ST_CONTINUE;
4910}
4911
4912void
4913rb_clear_coverages(void)
4914{
4915 VALUE coverages = rb_get_coverages();
4916 if (RTEST(coverages)) {
4917 rb_hash_foreach(coverages, clear_coverage_i, 0);
4918 }
4919}
4920
4921#if defined(HAVE_WORKING_FORK)
4922
4923static void
4924rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const rb_thread_t *))
4925{
4926 rb_thread_t *i = 0;
4927 rb_vm_t *vm = th->vm;
4928 rb_ractor_t *r = th->ractor;
4929 vm->ractor.main_ractor = r;
4930 vm->ractor.main_thread = th;
4931 r->threads.main = th;
4932 r->status_ = ractor_created;
4933
4934 thread_sched_atfork(TH_SCHED(th));
4935 ubf_list_atfork();
4936
4937 // OK. Only this thread accesses the ractor and thread sets now:
4938 ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
4939 if (r != vm->ractor.main_ractor) {
4940 rb_ractor_terminate_atfork(vm, r);
4941 }
4942 ccan_list_for_each(&r->threads.set, i, lt_node) {
4943 atfork(i, th);
4944 }
4945 }
4946 rb_vm_living_threads_init(vm);
4947
4948 rb_ractor_atfork(vm, th);
4949 rb_vm_postponed_job_atfork();
4950
4951 /* may be held by any thread in parent */
4952 rb_native_mutex_initialize(&th->interrupt_lock);
4953 ccan_list_head_init(&th->interrupt_exec_tasks);
4954
4955 vm->fork_gen++;
4956 rb_ractor_sleeper_threads_clear(th->ractor);
4957 rb_clear_coverages();
4958
4959 // restart timer thread (timer threads access `vm->waitpid_lock` and so on).
4960 rb_thread_reset_timer_thread();
4961 rb_thread_start_timer_thread();
4962
4963 VM_ASSERT(vm->ractor.blocking_cnt == 0);
4964 VM_ASSERT(vm->ractor.cnt == 1);
4965}
4966
4967static void
4968terminate_atfork_i(rb_thread_t *th, const rb_thread_t *current_th)
4969{
4970 if (th != current_th) {
4971 rb_native_mutex_initialize(&th->interrupt_lock);
4972 rb_mutex_abandon_keeping_mutexes(th);
4973 rb_mutex_abandon_locking_mutex(th);
4974 thread_cleanup_func(th, TRUE);
4975 }
4976}
4977
4978void rb_fiber_atfork(rb_thread_t *);
4979void
4980rb_thread_atfork(void)
4981{
4982 rb_thread_t *th = GET_THREAD();
4983 rb_threadptr_pending_interrupt_clear(th);
4984 rb_thread_atfork_internal(th, terminate_atfork_i);
4985 th->join_list = NULL;
4986 rb_fiber_atfork(th);
4987
4988 /* We don't want to reproduce CVE-2003-0900. */
4989 rb_reset_random_seed();
4990}
4991
4992static void
4993terminate_atfork_before_exec_i(rb_thread_t *th, const rb_thread_t *current_th)
4994{
4995 if (th != current_th) {
4996 thread_cleanup_func_before_exec(th);
4997 }
4998}
4999
5000void
5001rb_thread_atfork_before_exec(void)
5002{
5003 rb_thread_t *th = GET_THREAD();
5004 rb_thread_atfork_internal(th, terminate_atfork_before_exec_i);
5005}
5006#else
5007void
5008rb_thread_atfork(void)
5009{
5010}
5011
5012void
5013rb_thread_atfork_before_exec(void)
5014{
5015}
5016#endif
5018struct thgroup {
5019 int enclosed;
5020};
5021
5022static const rb_data_type_t thgroup_data_type = {
5023 "thgroup",
5024 {
5025 0,
5026 RUBY_TYPED_DEFAULT_FREE,
5027 NULL, // No external memory to report
5028 },
5029 0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
5030};
5031
5032/*
5033 * Document-class: ThreadGroup
5034 *
5035 * ThreadGroup provides a means of keeping track of a number of threads as a
5036 * group.
5037 *
5038 * A given Thread object can only belong to one ThreadGroup at a time; adding
5039 * a thread to a new group will remove it from any previous group.
5040 *
5041 * Newly created threads belong to the same group as the thread from which they
5042 * were created.
5043 */
5044
5045/*
5046 * Document-const: Default
5047 *
5048 * The default ThreadGroup created when Ruby starts; all Threads belong to it
5049 * by default.
5050 */
5051static VALUE
5052thgroup_s_alloc(VALUE klass)
5053{
5054 VALUE group;
5055 struct thgroup *data;
5056
5057 group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
5058 data->enclosed = 0;
5059
5060 return group;
5061}
5062
5063/*
5064 * call-seq:
5065 * thgrp.list -> array
5066 *
5067 * Returns an array of all existing Thread objects that belong to this group.
5068 *
5069 * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
5070 */
5071
5072static VALUE
5073thgroup_list(VALUE group)
5074{
5075 VALUE ary = rb_ary_new();
5076 rb_thread_t *th = 0;
5077 rb_ractor_t *r = GET_RACTOR();
5078
5079 ccan_list_for_each(&r->threads.set, th, lt_node) {
5080 if (th->thgroup == group) {
5081 rb_ary_push(ary, th->self);
5082 }
5083 }
5084 return ary;
5085}
5086
5087
5088/*
5089 * call-seq:
5090 * thgrp.enclose -> thgrp
5091 *
5092 * Prevents threads from being added to or removed from the receiving
5093 * ThreadGroup.
5094 *
5095 * New threads can still be started in an enclosed ThreadGroup.
5096 *
5097 * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
5098 * thr = Thread.new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
5099 * tg = ThreadGroup.new #=> #<ThreadGroup:0x402752d4>
5100 * tg.add thr
5101 * #=> ThreadError: can't move from the enclosed thread group
5102 */
5103
5104static VALUE
5105thgroup_enclose(VALUE group)
5106{
5107 struct thgroup *data;
5108
5109 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
5110 data->enclosed = 1;
5111
5112 return group;
5113}
5114
5115
5116/*
5117 * call-seq:
5118 * thgrp.enclosed? -> true or false
5119 *
5120 * Returns +true+ if the +thgrp+ is enclosed. See also ThreadGroup#enclose.
5121 */
5122
5123static VALUE
5124thgroup_enclosed_p(VALUE group)
5125{
5126 struct thgroup *data;
5127
5128 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
5129 return RBOOL(data->enclosed);
5130}
5131
5132
5133/*
5134 * call-seq:
5135 * thgrp.add(thread) -> thgrp
5136 *
5137 * Adds the given +thread+ to this group, removing it from any other
5138 * group to which it may have previously been a member.
5139 *
5140 * puts "Initial group is #{ThreadGroup::Default.list}"
5141 * tg = ThreadGroup.new
5142 * t1 = Thread.new { sleep }
5143 * t2 = Thread.new { sleep }
5144 * puts "t1 is #{t1}"
5145 * puts "t2 is #{t2}"
5146 * tg.add(t1)
5147 * puts "Initial group now #{ThreadGroup::Default.list}"
5148 * puts "tg group now #{tg.list}"
5149 *
5150 * This will produce:
5151 *
5152 * Initial group is #<Thread:0x401bdf4c>
5153 * t1 is #<Thread:0x401b3c90>
5154 * t2 is #<Thread:0x401b3c18>
5155 * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
5156 * tg group now #<Thread:0x401b3c90>
5157 */
5158
5159static VALUE
5160thgroup_add(VALUE group, VALUE thread)
5161{
5162 rb_thread_t *target_th = rb_thread_ptr(thread);
5163 struct thgroup *data;
5164
5165 if (OBJ_FROZEN(group)) {
5166 rb_raise(rb_eThreadError, "can't move to the frozen thread group");
5167 }
5168 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
5169 if (data->enclosed) {
5170 rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
5171 }
5172
5173 if (OBJ_FROZEN(target_th->thgroup)) {
5174 rb_raise(rb_eThreadError, "can't move from the frozen thread group");
5175 }
5176 TypedData_Get_Struct(target_th->thgroup, struct thgroup, &thgroup_data_type, data);
5177 if (data->enclosed) {
5178 rb_raise(rb_eThreadError,
5179 "can't move from the enclosed thread group");
5180 }
5181
5182 target_th->thgroup = group;
5183 return group;
5184}
5185
5186/*
5187 * Document-class: ThreadShield
5188 */
5189static void
5190thread_shield_mark(void *ptr)
5191{
5192 rb_gc_mark((VALUE)ptr);
5193}
5194
5195static const rb_data_type_t thread_shield_data_type = {
5196 "thread_shield",
5197 {thread_shield_mark, 0, 0,},
5198 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
5199};
5200
5201static VALUE
5202thread_shield_alloc(VALUE klass)
5203{
5204 return TypedData_Wrap_Struct(klass, &thread_shield_data_type, (void *)mutex_alloc(0));
5205}
5206
5207#define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
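
/*
 * The number of threads waiting on a thread shield is kept directly in
 * the shield object's flag bits (FL_USER0..FL_USER19), so no separate
 * counter field is needed.
 */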
5208#define THREAD_SHIELD_WAITING_MASK (((FL_USER19-1)&~(FL_USER0-1))|FL_USER19)
5209#define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
5210#define THREAD_SHIELD_WAITING_MAX (THREAD_SHIELD_WAITING_MASK>>THREAD_SHIELD_WAITING_SHIFT)
5211STATIC_ASSERT(THREAD_SHIELD_WAITING_MAX, THREAD_SHIELD_WAITING_MAX <= UINT_MAX);
5212static inline unsigned int
5213rb_thread_shield_waiting(VALUE b)
5214{
5215 return ((RBASIC(b)->flags&THREAD_SHIELD_WAITING_MASK)>>THREAD_SHIELD_WAITING_SHIFT);
5216}
5217
5218static inline void
5219rb_thread_shield_waiting_inc(VALUE b)
5220{
5221 unsigned int w = rb_thread_shield_waiting(b);
5222 w++;
5223 if (w > THREAD_SHIELD_WAITING_MAX)
5224 rb_raise(rb_eRuntimeError, "waiting count overflow");
5225 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
5226 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
5227}
5228
5229static inline void
5230rb_thread_shield_waiting_dec(VALUE b)
5231{
5232 unsigned int w = rb_thread_shield_waiting(b);
5233 if (!w) rb_raise(rb_eRuntimeError, "waiting count underflow");
5234 w--;
5235 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
5236 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
5237}
5238
5239VALUE
5240rb_thread_shield_new(void)
5241{
5242 VALUE thread_shield = thread_shield_alloc(rb_cThreadShield);
5243 rb_mutex_lock((VALUE)DATA_PTR(thread_shield));
5244 return thread_shield;
5245}
5246
5247bool
5248rb_thread_shield_owned(VALUE self)
5249{
5250 VALUE mutex = GetThreadShieldPtr(self);
5251 if (!mutex) return false;
5252
5253 rb_mutex_t *m = mutex_ptr(mutex);
5254
5255 return m->fiber == GET_EC()->fiber_ptr;
5256}
5257
5258/*
5259 * Wait on a thread shield.
5260 *
5261 * Returns
5262 * true: acquired the thread shield
5263 * false: the thread shield was destroyed and no other threads are waiting
5264 * nil: the thread shield was destroyed but it is still in use
5265 */
5266VALUE
5267rb_thread_shield_wait(VALUE self)
5268{
5269 VALUE mutex = GetThreadShieldPtr(self);
5270 rb_mutex_t *m;
5271
5272 if (!mutex) return Qfalse;
5273 m = mutex_ptr(mutex);
5274 if (m->fiber == GET_EC()->fiber_ptr) return Qnil;
5275 rb_thread_shield_waiting_inc(self);
5276 rb_mutex_lock(mutex);
5277 rb_thread_shield_waiting_dec(self);
5278 if (DATA_PTR(self)) return Qtrue;
5279 rb_mutex_unlock(mutex);
5280 return rb_thread_shield_waiting(self) > 0 ? Qnil : Qfalse;
5281}
5282
5283static VALUE
5284thread_shield_get_mutex(VALUE self)
5285{
5286 VALUE mutex = GetThreadShieldPtr(self);
5287 if (!mutex)
5288 rb_raise(rb_eThreadError, "destroyed thread shield - %p", (void *)self);
5289 return mutex;
5290}
5291
5292/*
5293 * Release a thread shield, and return true if it has waiting threads.
5294 */
5295VALUE
5296rb_thread_shield_release(VALUE self)
5297{
5298 VALUE mutex = thread_shield_get_mutex(self);
5299 rb_mutex_unlock(mutex);
5300 return RBOOL(rb_thread_shield_waiting(self) > 0);
5301}
5302
5303/*
5304 * Release and destroy a thread shield, and return true if it has waiting threads.
5305 */
5306VALUE
5307rb_thread_shield_destroy(VALUE self)
5308{
5309 VALUE mutex = thread_shield_get_mutex(self);
5310 DATA_PTR(self) = 0;
5311 rb_mutex_unlock(mutex);
5312 return RBOOL(rb_thread_shield_waiting(self) > 0);
5313}
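
/*
 * A minimal sketch of the thread-shield protocol, assuming the shield
 * object is shared with other threads (the VM uses this pattern while
 * loading features):
 *
 *   VALUE shield = rb_thread_shield_new();   // locked by the creator
 *   // ... other threads block in rb_thread_shield_wait(shield) ...
 *   if (rb_thread_shield_destroy(shield) == Qtrue) {
 *       // at least one thread was waiting and has been released
 *   }
 */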
5314
5315static VALUE
5316threadptr_recursive_hash(rb_thread_t *th)
5317{
5318 return th->ec->local_storage_recursive_hash;
5319}
5320
5321static void
5322threadptr_recursive_hash_set(rb_thread_t *th, VALUE hash)
5323{
5324 th->ec->local_storage_recursive_hash = hash;
5325}
5326
5328
5329/*
5330 * Returns the current "recursive list" used to detect recursion.
5331 * This list is a hash table, unique for the current thread and for
5332 * the current __callee__.
5333 */
5334
5335static VALUE
5336recursive_list_access(VALUE sym)
5337{
5338 rb_thread_t *th = GET_THREAD();
5339 VALUE hash = threadptr_recursive_hash(th);
5340 VALUE list;
5341 if (NIL_P(hash) || !RB_TYPE_P(hash, T_HASH)) {
5342 hash = rb_ident_hash_new();
5343 threadptr_recursive_hash_set(th, hash);
5344 list = Qnil;
5345 }
5346 else {
5347 list = rb_hash_aref(hash, sym);
5348 }
5349 if (NIL_P(list) || !RB_TYPE_P(list, T_HASH)) {
5350 list = rb_ident_hash_new();
5351 rb_hash_aset(hash, sym, list);
5352 }
5353 return list;
5354}
5355
5356/*
5357 * Returns true if and only if obj (or the pair <obj, paired_obj>) is already
5358 * in the recursion list.
5359 * Assumes the recursion list is valid.
5360 */
5361
5362static bool
5363recursive_check(VALUE list, VALUE obj, VALUE paired_obj_id)
5364{
5365#if SIZEOF_LONG == SIZEOF_VOIDP
5366 #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
5367#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
5368 #define OBJ_ID_EQL(obj_id, other) (RB_BIGNUM_TYPE_P((obj_id)) ? \
5369 rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
5370#endif
5371
5372 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5373 if (UNDEF_P(pair_list))
5374 return false;
5375 if (paired_obj_id) {
5376 if (!RB_TYPE_P(pair_list, T_HASH)) {
5377 if (!OBJ_ID_EQL(paired_obj_id, pair_list))
5378 return false;
5379 }
5380 else {
5381 if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
5382 return false;
5383 }
5384 }
5385 return true;
5386}
5387
5388/*
5389 * Pushes obj (or the pair <obj, paired_obj>) in the recursion list.
5390 * For a single obj, it sets list[obj] to Qtrue.
5391 * For a pair, it sets list[obj] to paired_obj_id if possible,
5392 * otherwise list[obj] becomes a hash like:
5393 * {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
5394 * Assumes the recursion list is valid.
5395 */
5396
5397static void
5398recursive_push(VALUE list, VALUE obj, VALUE paired_obj)
5399{
5400 VALUE pair_list;
5401
5402 if (!paired_obj) {
5403 rb_hash_aset(list, obj, Qtrue);
5404 }
5405 else if (UNDEF_P(pair_list = rb_hash_lookup2(list, obj, Qundef))) {
5406 rb_hash_aset(list, obj, paired_obj);
5407 }
5408 else {
5409 if (!RB_TYPE_P(pair_list, T_HASH)){
5410 VALUE other_paired_obj = pair_list;
5411 pair_list = rb_hash_new();
5412 rb_hash_aset(pair_list, other_paired_obj, Qtrue);
5413 rb_hash_aset(list, obj, pair_list);
5414 }
5415 rb_hash_aset(pair_list, paired_obj, Qtrue);
5416 }
5417}
5418
5419/*
5420 * Pops obj (or the pair <obj, paired_obj>) from the recursion list.
5421 * For a pair, if list[obj] is a hash, then paired_obj_id is
5422 * removed from the hash and no attempt is made to simplify
5423 * list[obj] from {only_one_paired_id => true} to only_one_paired_id.
5424 * Assumes the recursion list is valid.
5425 */
5426
5427static int
5428recursive_pop(VALUE list, VALUE obj, VALUE paired_obj)
5429{
5430 if (paired_obj) {
5431 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5432 if (UNDEF_P(pair_list)) {
5433 return 0;
5434 }
5435 if (RB_TYPE_P(pair_list, T_HASH)) {
5436 rb_hash_delete_entry(pair_list, paired_obj);
5437 if (!RHASH_EMPTY_P(pair_list)) {
5438 return 1; /* keep the hash until it is empty */
5439 }
5440 }
5441 }
5442 rb_hash_delete_entry(list, obj);
5443 return 1;
5444}
5446struct exec_recursive_params {
5447 VALUE (*func) (VALUE, VALUE, int);
5448 VALUE list;
5449 VALUE obj;
5450 VALUE pairid;
5451 VALUE arg;
5452};
5453
5454static VALUE
5455exec_recursive_i(RB_BLOCK_CALL_FUNC_ARGLIST(tag, data))
5456{
5457 struct exec_recursive_params *p = (void *)data;
5458 return (*p->func)(p->obj, p->arg, FALSE);
5459}
5460
5461/*
5462 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5463 * current method is called recursively on obj, or on the pair <obj, pairid>
5464 * If outer is 0, then the innermost func will be called with recursive set
5465 * to true, otherwise the outermost func will be called. In the latter case,
5466 * all inner func are short-circuited by throw.
5467 * Implementation details: the value thrown is the recursive list which is
5468 * proper to the current method and unlikely to be caught anywhere else.
5469 * list[recursive_key] is used as a flag for the outermost call.
5470 */
5471
5472static VALUE
5473exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer, ID mid)
5474{
5475 VALUE result = Qundef;
5476 const VALUE sym = mid ? ID2SYM(mid) : ID2SYM(idNULL);
5477 struct exec_recursive_params p;
5478 int outermost;
5479 p.list = recursive_list_access(sym);
5480 p.obj = obj;
5481 p.pairid = pairid;
5482 p.arg = arg;
5483 outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);
5484
5485 if (recursive_check(p.list, p.obj, pairid)) {
5486 if (outer && !outermost) {
5487 rb_throw_obj(p.list, p.list);
5488 }
5489 return (*func)(obj, arg, TRUE);
5490 }
5491 else {
5492 enum ruby_tag_type state;
5493
5494 p.func = func;
5495
5496 if (outermost) {
5497 recursive_push(p.list, ID2SYM(recursive_key), 0);
5498 recursive_push(p.list, p.obj, p.pairid);
5499 result = rb_catch_protect(p.list, exec_recursive_i, (VALUE)&p, &state);
5500 if (!recursive_pop(p.list, p.obj, p.pairid)) goto invalid;
5501 if (!recursive_pop(p.list, ID2SYM(recursive_key), 0)) goto invalid;
5502 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5503 if (result == p.list) {
5504 result = (*func)(obj, arg, TRUE);
5505 }
5506 }
5507 else {
5508 volatile VALUE ret = Qundef;
5509 recursive_push(p.list, p.obj, p.pairid);
5510 EC_PUSH_TAG(GET_EC());
5511 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
5512 ret = (*func)(obj, arg, FALSE);
5513 }
5514 EC_POP_TAG();
5515 if (!recursive_pop(p.list, p.obj, p.pairid)) {
5516 goto invalid;
5517 }
5518 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5519 result = ret;
5520 }
5521 }
5522 *(volatile struct exec_recursive_params *)&p;
5523 return result;
5524
5525 invalid:
5526 rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list "
5527 "for %+"PRIsVALUE" in %+"PRIsVALUE,
5528 sym, rb_thread_current());
5529 UNREACHABLE_RETURN(Qundef);
5530}
5531
5532/*
5533 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5534 * current method is called recursively on obj
5535 */
5536
5537VALUE
5538rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5539{
5540 return exec_recursive(func, obj, 0, arg, 0, rb_frame_last_func());
5541}
5542
5543/*
5544 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5545 * current method is called recursively on the ordered pair <obj, paired_obj>
5546 */
5547
5548VALUE
5549rb_exec_recursive_paired(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5550{
5551 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 0, rb_frame_last_func());
5552}
5553
5554/*
5555 * If recursion is detected on the current method and obj, the outermost
5556 * func will be called with (obj, arg, true). All inner func will be
5557 * short-circuited using throw.
5558 */
5559
5560VALUE
5561rb_exec_recursive_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5562{
5563 return exec_recursive(func, obj, 0, arg, 1, rb_frame_last_func());
5564}
5565
5566VALUE
5567rb_exec_recursive_outer_mid(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg, ID mid)
5568{
5569 return exec_recursive(func, obj, 0, arg, 1, mid);
5570}
5571
5572/*
5573 * If recursion is detected on the current method, obj and paired_obj,
5574 * the outermost func will be called with (obj, arg, true). All inner
5575 * func will be short-circuited using throw.
5576 */
5577
5578VALUE
5579rb_exec_recursive_paired_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5580{
5581 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 1, rb_frame_last_func());
5582}
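
/*
 * A minimal sketch of recursion-safe traversal with rb_exec_recursive;
 * my_inspect_body is a hypothetical helper:
 *
 *   static VALUE
 *   my_inspect_i(VALUE obj, VALUE arg, int recursive)
 *   {
 *       if (recursive) return rb_str_new_cstr("[...]");
 *       return my_inspect_body(obj);   // visit children, may re-enter
 *   }
 *
 *   static VALUE
 *   my_inspect(VALUE obj)
 *   {
 *       return rb_exec_recursive(my_inspect_i, obj, 0);
 *   }
 */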
5583
5584/*
5585 * call-seq:
5586 * thread.backtrace -> array or nil
5587 *
5588 * Returns the current backtrace of the target thread.
5589 *
5590 */
5591
5592static VALUE
5593rb_thread_backtrace_m(int argc, VALUE *argv, VALUE thval)
5594{
5595 return rb_vm_thread_backtrace(argc, argv, thval);
5596}
5597
5598/* call-seq:
5599 * thread.backtrace_locations(*args) -> array or nil
5600 *
5601 * Returns the execution stack for the target thread---an array containing
5602 * backtrace location objects.
5603 *
5604 * See Thread::Backtrace::Location for more information.
5605 *
5606 * This method behaves similarly to Kernel#caller_locations except it applies
5607 * to a specific thread.
5608 */
5609static VALUE
5610rb_thread_backtrace_locations_m(int argc, VALUE *argv, VALUE thval)
5611{
5612 return rb_vm_thread_backtrace_locations(argc, argv, thval);
5613}
5614
5615void
5616Init_Thread_Mutex(void)
5617{
5618 rb_thread_t *th = GET_THREAD();
5619
5620 rb_native_mutex_initialize(&th->vm->workqueue_lock);
5621 rb_native_mutex_initialize(&th->interrupt_lock);
5622}
5623
5624/*
5625 * Document-class: ThreadError
5626 *
5627 * Raised when an invalid operation is attempted on a thread.
5628 *
5629 * For example, when no other thread has been started:
5630 *
5631 * Thread.stop
5632 *
5633 * This will raise the following exception:
5634 *
5635 * ThreadError: stopping only thread
5636 * note: use sleep to stop forever
5637 */
5638
5639void
5640Init_Thread(void)
5641{
5642 rb_thread_t *th = GET_THREAD();
5643
5644 sym_never = ID2SYM(rb_intern_const("never"));
5645 sym_immediate = ID2SYM(rb_intern_const("immediate"));
5646 sym_on_blocking = ID2SYM(rb_intern_const("on_blocking"));
5647
5648 rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
5649 rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
5650 rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
5651 rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
5652 rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
5653 rb_define_singleton_method(rb_cThread, "stop", thread_stop, 0);
5654 rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
5655 rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
5656 rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
5657 rb_define_singleton_method(rb_cThread, "list", thread_list, 0);
5658 rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
5659 rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
5660 rb_define_singleton_method(rb_cThread, "report_on_exception", rb_thread_s_report_exc, 0);
5661 rb_define_singleton_method(rb_cThread, "report_on_exception=", rb_thread_s_report_exc_set, 1);
5662 rb_define_singleton_method(rb_cThread, "ignore_deadlock", rb_thread_s_ignore_deadlock, 0);
5663 rb_define_singleton_method(rb_cThread, "ignore_deadlock=", rb_thread_s_ignore_deadlock_set, 1);
5664 rb_define_singleton_method(rb_cThread, "handle_interrupt", rb_thread_s_handle_interrupt, 1);
5665 rb_define_singleton_method(rb_cThread, "pending_interrupt?", rb_thread_s_pending_interrupt_p, -1);
5666 rb_define_method(rb_cThread, "pending_interrupt?", rb_thread_pending_interrupt_p, -1);
5667
5668 rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
5669 rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
5670 rb_define_method(rb_cThread, "join", thread_join_m, -1);
5671 rb_define_method(rb_cThread, "value", thread_value, 0);
5672 rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
5673 rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
5674 rb_define_method(rb_cThread, "exit", rb_thread_kill, 0);
5675 rb_define_method(rb_cThread, "run", rb_thread_run, 0);
5676 rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
5677 rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
5678 rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
5679 rb_define_method(rb_cThread, "fetch", rb_thread_fetch, -1);
5680 rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
5681 rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
5682 rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
5683 rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
5684 rb_define_method(rb_cThread, "status", rb_thread_status, 0);
5685 rb_define_method(rb_cThread, "thread_variable_get", rb_thread_variable_get, 1);
5686 rb_define_method(rb_cThread, "thread_variable_set", rb_thread_variable_set, 2);
5687 rb_define_method(rb_cThread, "thread_variables", rb_thread_variables, 0);
5688 rb_define_method(rb_cThread, "thread_variable?", rb_thread_variable_p, 1);
5689 rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
5690 rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
5691 rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
5692 rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
5693 rb_define_method(rb_cThread, "report_on_exception", rb_thread_report_exc, 0);
5694 rb_define_method(rb_cThread, "report_on_exception=", rb_thread_report_exc_set, 1);
5695 rb_define_method(rb_cThread, "group", rb_thread_group, 0);
5696 rb_define_method(rb_cThread, "backtrace", rb_thread_backtrace_m, -1);
5697 rb_define_method(rb_cThread, "backtrace_locations", rb_thread_backtrace_locations_m, -1);
5698
5699 rb_define_method(rb_cThread, "name", rb_thread_getname, 0);
5700 rb_define_method(rb_cThread, "name=", rb_thread_setname, 1);
5701 rb_define_method(rb_cThread, "native_thread_id", rb_thread_native_thread_id, 0);
5702 rb_define_method(rb_cThread, "to_s", rb_thread_to_s, 0);
5703 rb_define_alias(rb_cThread, "inspect", "to_s");
5704
5705 rb_vm_register_special_exception(ruby_error_stream_closed, rb_eIOError,
5706 "stream closed in another thread");
5707
5708 cThGroup = rb_define_class("ThreadGroup", rb_cObject);
5709 rb_define_alloc_func(cThGroup, thgroup_s_alloc);
5710 rb_define_method(cThGroup, "list", thgroup_list, 0);
5711 rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
5712 rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
5713 rb_define_method(cThGroup, "add", thgroup_add, 1);
5714
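    /* RUBY_THREAD_TIMESLICE overrides the default thread quantum (in milliseconds) */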
5715 const char * ptr = getenv("RUBY_THREAD_TIMESLICE");
5716
5717 if (ptr) {
5718 long quantum = strtol(ptr, NULL, 0);
5719 if (quantum > 0 && !(SIZEOF_LONG > 4 && quantum > UINT32_MAX)) {
5720 thread_default_quantum_ms = (uint32_t)quantum;
5721 }
5722 else if (0) {
5723 fprintf(stderr, "Ignored RUBY_THREAD_TIMESLICE=%s\n", ptr);
5724 }
5725 }
5726
5727 {
5728 th->thgroup = th->ractor->thgroup_default = rb_obj_alloc(cThGroup);
5729 rb_define_const(cThGroup, "Default", th->thgroup);
5730 }
5731
5733
5734 /* init thread core */
5735 {
5736 /* main thread setting */
5737 {
5738 /* acquire global vm lock */
5739#ifdef HAVE_PTHREAD_NP_H
5740 VM_ASSERT(TH_SCHED(th)->running == th);
5741#endif
5742 // thread_sched_to_running() should not be called because
5743 // it assumes blocked by thread_sched_to_waiting().
5744 // thread_sched_to_running(sched, th);
5745
5746 th->pending_interrupt_queue = rb_ary_hidden_new(0);
5747 th->pending_interrupt_queue_checked = 0;
5748 th->pending_interrupt_mask_stack = rb_ary_hidden_new(0);
5749 }
5750 }
5751
5752 rb_thread_create_timer_thread();
5753
5754 Init_thread_sync();
5755
5756 // TODO: Suppress unused function warning for now
5757 // if (0) rb_thread_sched_destroy(NULL);
5758}
5759
5760int
5761ruby_native_thread_p(void)
5762{
5763 rb_thread_t *th = ruby_thread_from_native();
5764
5765 return th != 0;
5766}
5767
5768#ifdef NON_SCALAR_THREAD_ID
5769 #define thread_id_str(th) (NULL)
5770#else
5771 #define thread_id_str(th) ((void *)(uintptr_t)(th)->nt->thread_id)
5772#endif
5773
5774static void
5775debug_deadlock_check(rb_ractor_t *r, VALUE msg)
5776{
5777 rb_thread_t *th = 0;
5778 VALUE sep = rb_str_new_cstr("\n ");
5779
5780 rb_str_catf(msg, "\n%d threads, %d sleeps current:%p main thread:%p\n",
5781 rb_ractor_living_thread_num(r), rb_ractor_sleeper_thread_num(r),
5782 (void *)GET_THREAD(), (void *)r->threads.main);
5783
5784 ccan_list_for_each(&r->threads.set, th, lt_node) {
5785 rb_str_catf(msg, "* %+"PRIsVALUE"\n rb_thread_t:%p "
5786 "native:%p int:%u",
5787 th->self, (void *)th, th->nt ? thread_id_str(th) : "N/A", th->ec->interrupt_flag);
5788
5789 if (th->locking_mutex) {
5790 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5791 rb_str_catf(msg, " mutex:%p cond:%"PRIuSIZE,
5792 (void *)mutex->fiber, rb_mutex_num_waiting(mutex));
5793 }
5794
5795 {
5796 struct rb_waiting_list *list = th->join_list;
5797 while (list) {
5798 rb_str_catf(msg, "\n depended by: rb_thread_t:%p", (void *)list->thread);
5799 list = list->next;
5800 }
5801 }
5802 rb_str_catf(msg, "\n ");
5803 rb_str_concat(msg, rb_ary_join(rb_ec_backtrace_str_ary(th->ec, RUBY_BACKTRACE_START, RUBY_ALL_BACKTRACE_LINES), sep));
5804 rb_str_catf(msg, "\n");
5805 }
5806}
5807
5808static void
5809rb_check_deadlock(rb_ractor_t *r)
5810{
5811 if (GET_THREAD()->vm->thread_ignore_deadlock) return;
5812
5813#ifdef RUBY_THREAD_PTHREAD_H
5814 if (r->threads.sched.readyq_cnt > 0) return;
5815#endif
5816
5817 int sleeper_num = rb_ractor_sleeper_thread_num(r);
5818 int ltnum = rb_ractor_living_thread_num(r);
5819
5820 if (ltnum > sleeper_num) return;
5821 if (ltnum < sleeper_num) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
5822
5823 int found = 0;
5824 rb_thread_t *th = NULL;
5825
5826 ccan_list_for_each(&r->threads.set, th, lt_node) {
5827 if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th->ec)) {
5828 found = 1;
5829 }
5830 else if (th->locking_mutex) {
5831 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5832 if (mutex->fiber == th->ec->fiber_ptr || (!mutex->fiber && !ccan_list_empty(&mutex->waitq))) {
5833 found = 1;
5834 }
5835 }
5836 if (found)
5837 break;
5838 }
5839
5840 if (!found) {
5841 VALUE argv[2];
5842 argv[0] = rb_eFatal;
5843 argv[1] = rb_str_new2("No live threads left. Deadlock?");
5844 debug_deadlock_check(r, argv[1]);
5845 rb_ractor_sleeper_threads_dec(GET_RACTOR());
5846 rb_threadptr_raise(r->threads.main, 2, argv);
5847 }
5848}
5849
5850static void
5851update_line_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5852{
5853 const rb_control_frame_t *cfp = GET_EC()->cfp;
5854 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5855 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5856 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
5857 if (lines) {
5858 long line = rb_sourceline() - 1;
5859 VM_ASSERT(line >= 0);
5860 long count;
5861 VALUE num;
5862 void rb_iseq_clear_event_flags(const rb_iseq_t *iseq, size_t pos, rb_event_flag_t reset);
5863 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
5864 rb_iseq_clear_event_flags(cfp->iseq, cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1, RUBY_EVENT_COVERAGE_LINE);
5865 rb_ary_push(lines, LONG2FIX(line + 1));
5866 return;
5867 }
5868 if (line >= RARRAY_LEN(lines)) { /* no longer tracked */
5869 return;
5870 }
5871 num = RARRAY_AREF(lines, line);
5872 if (!FIXNUM_P(num)) return;
5873 count = FIX2LONG(num) + 1;
5874 if (POSFIXABLE(count)) {
5875 RARRAY_ASET(lines, line, LONG2FIX(count));
5876 }
5877 }
5878 }
5879}
5880
5881static void
5882update_branch_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5883{
5884 const rb_control_frame_t *cfp = GET_EC()->cfp;
5885 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5886 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5887 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
5888 if (branches) {
5889 long pc = cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1;
5890 long idx = FIX2INT(RARRAY_AREF(ISEQ_PC2BRANCHINDEX(cfp->iseq), pc)), count;
5891 VALUE counters = RARRAY_AREF(branches, 1);
5892 VALUE num = RARRAY_AREF(counters, idx);
5893 count = FIX2LONG(num) + 1;
5894 if (POSFIXABLE(count)) {
5895 RARRAY_ASET(counters, idx, LONG2FIX(count));
5896 }
5897 }
5898 }
5899}
5900
5901const rb_method_entry_t *
5902rb_resolve_me_location(const rb_method_entry_t *me, VALUE resolved_location[5])
5903{
5904 VALUE path, beg_pos_lineno, beg_pos_column, end_pos_lineno, end_pos_column;
5905
5906 if (!me->def) return NULL; // negative cme
5907
5908 retry:
5909 switch (me->def->type) {
5910 case VM_METHOD_TYPE_ISEQ: {
5911 const rb_iseq_t *iseq = me->def->body.iseq.iseqptr;
5912 rb_iseq_location_t *loc = &ISEQ_BODY(iseq)->location;
5913 path = rb_iseq_path(iseq);
5914 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5915 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5916 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5917 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5918 break;
5919 }
5920 case VM_METHOD_TYPE_BMETHOD: {
5921 const rb_iseq_t *iseq = rb_proc_get_iseq(me->def->body.bmethod.proc, 0);
5922 if (iseq) {
5923 rb_iseq_location_t *loc;
5924 rb_iseq_check(iseq);
5925 path = rb_iseq_path(iseq);
5926 loc = &ISEQ_BODY(iseq)->location;
5927 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5928 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5929 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5930 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5931 break;
5932 }
5933 return NULL;
5934 }
5935 case VM_METHOD_TYPE_ALIAS:
5936 me = me->def->body.alias.original_me;
5937 goto retry;
5938 case VM_METHOD_TYPE_REFINED:
5939 me = me->def->body.refined.orig_me;
5940 if (!me) return NULL;
5941 goto retry;
5942 default:
5943 return NULL;
5944 }
5945
5946 /* found */
5947 if (RB_TYPE_P(path, T_ARRAY)) {
5948 path = rb_ary_entry(path, 1);
5949 if (!RB_TYPE_P(path, T_STRING)) return NULL; /* just in case */
5950 }
5951 if (resolved_location) {
5952 resolved_location[0] = path;
5953 resolved_location[1] = beg_pos_lineno;
5954 resolved_location[2] = beg_pos_column;
5955 resolved_location[3] = end_pos_lineno;
5956 resolved_location[4] = end_pos_column;
5957 }
5958 return me;
5959}
5960
5961static void
5962update_method_coverage(VALUE me2counter, rb_trace_arg_t *trace_arg)
5963{
5964 const rb_control_frame_t *cfp = GET_EC()->cfp;
5965 const rb_callable_method_entry_t *cme = rb_vm_frame_method_entry(cfp);
5966 const rb_method_entry_t *me = (const rb_method_entry_t *)cme;
5967 VALUE rcount;
5968 long count;
5969
5970 me = rb_resolve_me_location(me, 0);
5971 if (!me) return;
5972
5973 rcount = rb_hash_aref(me2counter, (VALUE) me);
5974 count = FIXNUM_P(rcount) ? FIX2LONG(rcount) + 1 : 1;
5975 if (POSFIXABLE(count)) {
5976 rb_hash_aset(me2counter, (VALUE) me, LONG2FIX(count));
5977 }
5978}
5979
5980VALUE
5981rb_get_coverages(void)
5982{
5983 return GET_VM()->coverages;
5984}
5985
5986int
5987rb_get_coverage_mode(void)
5988{
5989 return GET_VM()->coverage_mode;
5990}
5991
5992void
5993rb_set_coverages(VALUE coverages, int mode, VALUE me2counter)
5994{
5995 GET_VM()->coverages = coverages;
5996 GET_VM()->me2counter = me2counter;
5997 GET_VM()->coverage_mode = mode;
5998}
5999
6000void
6001rb_resume_coverages(void)
6002{
6003 int mode = GET_VM()->coverage_mode;
6004 VALUE me2counter = GET_VM()->me2counter;
6005 rb_add_event_hook2((rb_event_hook_func_t) update_line_coverage, RUBY_EVENT_COVERAGE_LINE, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
6006 if (mode & COVERAGE_TARGET_BRANCHES) {
6007 rb_add_event_hook2((rb_event_hook_func_t) update_branch_coverage, RUBY_EVENT_COVERAGE_BRANCH, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
6008 }
6009 if (mode & COVERAGE_TARGET_METHODS) {
6010 rb_add_event_hook2((rb_event_hook_func_t) update_method_coverage, RUBY_EVENT_CALL, me2counter, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
6011 }
6012}
6013
6014void
6015rb_suspend_coverages(void)
6016{
6017 rb_remove_event_hook((rb_event_hook_func_t) update_line_coverage);
6018 if (GET_VM()->coverage_mode & COVERAGE_TARGET_BRANCHES) {
6019 rb_remove_event_hook((rb_event_hook_func_t) update_branch_coverage);
6020 }
6021 if (GET_VM()->coverage_mode & COVERAGE_TARGET_METHODS) {
6022 rb_remove_event_hook((rb_event_hook_func_t) update_method_coverage);
6023 }
6024}
6025
6026/* Make coverage arrays empty so old covered files are no longer tracked. */
6027void
6028rb_reset_coverages(void)
6029{
6030 rb_clear_coverages();
6031 rb_iseq_remove_coverage_all();
6032 GET_VM()->coverages = Qfalse;
6033}
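
/* Editorial sketch (kept out of the build with #if 0): the coverage life
 * cycle as a driver such as ext/coverage/coverage.c uses it. The container
 * objects and mode bits below are illustrative assumptions. */
#if 0
static void
example_coverage_session(void)
{
    VALUE coverages = rb_hash_new();        /* path => per-file coverage */
    VALUE me2counter = rb_ident_hash_new(); /* method entry => call count */

    rb_set_coverages(coverages, COVERAGE_TARGET_LINES | COVERAGE_TARGET_METHODS, me2counter);
    rb_resume_coverages();  /* installs the line/branch/method event hooks */
    /* ... load and run the Ruby code to be measured ... */
    rb_suspend_coverages(); /* removes the hooks; data stays in coverages */
    rb_reset_coverages();   /* drops the collected data entirely */
}
#endif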
6034
6035VALUE
6036rb_default_coverage(int n)
6037{
6038 VALUE coverage = rb_ary_hidden_new_fill(3);
6039 VALUE lines = Qfalse, branches = Qfalse;
6040 int mode = GET_VM()->coverage_mode;
6041
6042 if (mode & COVERAGE_TARGET_LINES) {
6043 lines = n > 0 ? rb_ary_hidden_new_fill(n) : rb_ary_hidden_new(0);
6044 }
6045 RARRAY_ASET(coverage, COVERAGE_INDEX_LINES, lines);
6046
6047 if (mode & COVERAGE_TARGET_BRANCHES) {
6048 branches = rb_ary_hidden_new_fill(2);
6049 /* internal data structures for branch coverage:
6050 *
6051 * { branch base node =>
6052 * [base_type, base_first_lineno, base_first_column, base_last_lineno, base_last_column, {
6053 * branch target id =>
6054 * [target_type, target_first_lineno, target_first_column, target_last_lineno, target_last_column, target_counter_index],
6055 * ...
6056 * }],
6057 * ...
6058 * }
6059 *
6060 * Example:
6061 * { NODE_CASE =>
6062 * [:case, 1, 0, 4, 3, {
6063 * NODE_WHEN => [:when, 2, 8, 2, 9, 0],
6064 * NODE_WHEN => [:when, 3, 8, 3, 9, 1],
6065 * ...
6066 * }],
6067 * ...
6068 * }
6069 */
6070 VALUE structure = rb_hash_new();
6071 rb_obj_hide(structure);
6072 RARRAY_ASET(branches, 0, structure);
6073 /* branch execution counters */
6074 RARRAY_ASET(branches, 1, rb_ary_hidden_new(0));
6075 }
6076 RARRAY_ASET(coverage, COVERAGE_INDEX_BRANCHES, branches);
6077
6078 return coverage;
6079}
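
/* Editorial sketch (kept out of the build with #if 0): the shape of the
 * array built above, read back with the same COVERAGE_INDEX_* constants. */
#if 0
static void
example_inspect_coverage(VALUE coverage)
{
    VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES); /* per-line counters or Qfalse */
    VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES); /* [structure, counters] or Qfalse */

    if (RTEST(branches)) {
        VALUE counters = RARRAY_AREF(branches, 1); /* indexed by target_counter_index */
        (void)counters;
    }
    (void)lines;
}
#endif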
6080
6081static VALUE
6082uninterruptible_exit(VALUE v)
6083{
6084 rb_thread_t *cur_th = GET_THREAD();
6085 rb_ary_pop(cur_th->pending_interrupt_mask_stack);
6086
6087 cur_th->pending_interrupt_queue_checked = 0;
6088 if (!rb_threadptr_pending_interrupt_empty_p(cur_th)) {
6089 RUBY_VM_SET_INTERRUPT(cur_th->ec);
6090 }
6091 return Qnil;
6092}
6093
6094VALUE
6095rb_uninterruptible(VALUE (*b_proc)(VALUE), VALUE data)
6096{
6097 VALUE interrupt_mask = rb_ident_hash_new();
6098 rb_thread_t *cur_th = GET_THREAD();
6099
6100 rb_hash_aset(interrupt_mask, rb_cObject, sym_never);
6101 OBJ_FREEZE(interrupt_mask);
6102 rb_ary_push(cur_th->pending_interrupt_mask_stack, interrupt_mask);
6103
6104 VALUE ret = rb_ensure(b_proc, data, uninterruptible_exit, Qnil);
6105
6106 RUBY_VM_CHECK_INTS(cur_th->ec);
6107 return ret;
6108}
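
/* Editorial sketch (kept out of the build with #if 0): deferring asynchronous
 * interrupts (Thread#raise, Thread#kill, signals) around a critical callback.
 * Both function names are hypothetical. */
#if 0
static VALUE
example_critical_section(VALUE arg)
{
    /* pending interrupts are masked to :never while this runs */
    return arg;
}

static VALUE
example_run_uninterruptible(void)
{
    /* deferred interrupts are re-checked as rb_uninterruptible() returns */
    return rb_uninterruptible(example_critical_section, Qnil);
}
#endif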
6109
6110static void
6111thread_specific_storage_alloc(rb_thread_t *th)
6112{
6113 VM_ASSERT(th->specific_storage == NULL);
6114
6115 if (UNLIKELY(specific_key_count > 0)) {
6116 th->specific_storage = ZALLOC_N(void *, RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
6117 }
6118}
6119
6120rb_internal_thread_specific_key_t
6121rb_internal_thread_specific_key_create(void)
6122{
6123 rb_vm_t *vm = GET_VM();
6124
6125 if (specific_key_count == 0 && vm->ractor.cnt > 1) {
6126 rb_raise(rb_eThreadError, "the first rb_internal_thread_specific_key_create() must be called before additional ractors are created");
6127 }
6128 else if (specific_key_count >= RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX) {
6129 rb_raise(rb_eThreadError, "rb_internal_thread_specific_key_create() can be called at most %d times", RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
6130 }
6131 else {
6132 rb_internal_thread_specific_key_t key = specific_key_count++;
6133
6134 if (key == 0) {
6135 // first key: allocate storage for this ractor's existing threads
6136 rb_ractor_t *cr = GET_RACTOR();
6137 rb_thread_t *th;
6138
6139 ccan_list_for_each(&cr->threads.set, th, lt_node) {
6140 thread_specific_storage_alloc(th);
6141 }
6142 }
6143 return key;
6144 }
6145}
6146
6147// async and native thread safe.
6148void *
6149rb_internal_thread_specific_get(VALUE thread_val, rb_internal_thread_specific_key_t key)
6150{
6151 rb_thread_t *th = DATA_PTR(thread_val);
6152
6153 VM_ASSERT(rb_thread_ptr(thread_val) == th);
6154 VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
6155 VM_ASSERT(th->specific_storage);
6156
6157 return th->specific_storage[key];
6158}
6159
6160// async and native thread safe.
6161void
6162rb_internal_thread_specific_set(VALUE thread_val, rb_internal_thread_specific_key_t key, void *data)
6163{
6164 rb_thread_t *th = DATA_PTR(thread_val);
6165
6166 VM_ASSERT(rb_thread_ptr(thread_val) == th);
6167 VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
6168 VM_ASSERT(th->specific_storage);
6169
6170 th->specific_storage[key] = data;
6171}
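
/* Editorial sketch (kept out of the build with #if 0): how a tool such as a
 * profiler might use the thread-specific-storage API above. The key variable
 * and payload pointer are illustrative assumptions. */
#if 0
static rb_internal_thread_specific_key_t example_key;

static void
example_tool_init(void)
{
    /* must run before additional ractors exist; see the check above */
    example_key = rb_internal_thread_specific_key_create();
}

static void
example_tool_attach(VALUE thread, void *tool_data)
{
    rb_internal_thread_specific_set(thread, example_key, tool_data);
    /* later, e.g. from an event hook (async and native thread safe): */
    void *data = rb_internal_thread_specific_get(thread, example_key);
    (void)data;
}
#endif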
6172
6173// interrupt_exec
6174
6175struct rb_interrupt_exec_task {
6176 struct ccan_list_node node;
6177
6178 rb_interrupt_exec_func_t *func;
6179 void *data;
6180 enum rb_interrupt_exec_flag flags;
6181};
6182
6183void
6184rb_threadptr_interrupt_exec_task_mark(rb_thread_t *th)
6185{
6186 struct rb_interrupt_exec_task *task;
6187
6188 ccan_list_for_each(&th->interrupt_exec_tasks, task, node) {
6189 if (task->flags & rb_interrupt_exec_flag_value_data) {
6190 rb_gc_mark((VALUE)task->data);
6191 }
6192 }
6193}
6194
6195// native thread safe
6196// th must be a valid, live thread
6197void
6198rb_threadptr_interrupt_exec(rb_thread_t *th, rb_interrupt_exec_func_t *func, void *data, enum rb_interrupt_exec_flag flags)
6199{
6200 // should not use ALLOC
6201 struct rb_interrupt_exec_task *task = ALLOC(struct rb_interrupt_exec_task);
6202 *task = (struct rb_interrupt_exec_task) {
6203 .flags = flags,
6204 .func = func,
6205 .data = data,
6206 };
6207
6208 rb_native_mutex_lock(&th->interrupt_lock);
6209 {
6210 ccan_list_add_tail(&th->interrupt_exec_tasks, &task->node);
6211 threadptr_set_interrupt_locked(th, true);
6212 }
6213 rb_native_mutex_unlock(&th->interrupt_lock);
6214}
6215
6216static void
6217threadptr_interrupt_exec_exec(rb_thread_t *th)
6218{
6219 while (1) {
6220 struct rb_interrupt_exec_task *task;
6221
6222 rb_native_mutex_lock(&th->interrupt_lock);
6223 {
6224 task = ccan_list_pop(&th->interrupt_exec_tasks, struct rb_interrupt_exec_task, node);
6225 }
6226 rb_native_mutex_unlock(&th->interrupt_lock);
6227
6228 RUBY_DEBUG_LOG("task:%p", task);
6229
6230 if (task) {
6231 if (task->flags & rb_interrupt_exec_flag_new_thread) {
6232 rb_thread_create(task->func, task->data);
6233 }
6234 else {
6235 (*task->func)(task->data);
6236 }
6237 ruby_xfree(task);
6238 }
6239 else {
6240 break;
6241 }
6242 }
6243}
6244
6245static void
6246threadptr_interrupt_exec_cleanup(rb_thread_t *th)
6247{
6248 rb_native_mutex_lock(&th->interrupt_lock);
6249 {
6250 struct rb_interrupt_exec_task *task;
6251
6252 while ((task = ccan_list_pop(&th->interrupt_exec_tasks, struct rb_interrupt_exec_task, node)) != NULL) {
6253 ruby_xfree(task);
6254 }
6255 }
6256 rb_native_mutex_unlock(&th->interrupt_lock);
6257}
6258
6259// native thread safe
6260// func/data should be native thread safe
6261void
6262rb_ractor_interrupt_exec(struct rb_ractor_struct *target_r,
6263 rb_interrupt_exec_func_t *func, void *data, enum rb_interrupt_exec_flag flags)
6264{
6265 RUBY_DEBUG_LOG("flags:%d", (int)flags);
6266
6267 rb_thread_t *main_th = target_r->threads.main;
6268 rb_threadptr_interrupt_exec(main_th, func, data, flags | rb_interrupt_exec_flag_new_thread);
6269}
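
/* Editorial sketch (kept out of the build with #if 0): queueing work onto
 * another ractor. rb_ractor_interrupt_exec() always adds
 * rb_interrupt_exec_flag_new_thread, so the task runs as the body of a fresh
 * thread in the target ractor. The task function is hypothetical, and no
 * flag names beyond those used above are assumed. */
#if 0
static VALUE
example_task(void *data)
{
    /* runs inside the target ractor */
    return Qnil;
}

static void
example_interrupt_ractor(rb_ractor_t *target_r)
{
    rb_ractor_interrupt_exec(target_r, example_task, NULL,
                             (enum rb_interrupt_exec_flag)0);
}
#endif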
6270