Ruby 3.5.0dev (2025-06-06 revision dde9fca63bf4bf56050c734adca3eaae70506179)
thread.c (dde9fca63bf4bf56050c734adca3eaae70506179)
1/**********************************************************************
2
3 thread.c -
4
5 $Author$
6
7 Copyright (C) 2004-2007 Koichi Sasada
8
9**********************************************************************/
10
11/*
12 YARV Thread Design
13
14 model 1: Userlevel Thread
15 Same as traditional ruby thread.
16
17 model 2: Native Thread with Global VM lock
18 Using pthread (or Windows threads); Ruby threads run concurrently.
19
20 model 3: Native Thread with fine grain lock
21 Using pthread; Ruby threads run concurrently or in parallel.
22
23 model 4: M:N User:Native threads with Global VM lock
24 Combination of model 1 and 2
25
26 model 5: M:N User:Native threads with fine grain lock
27 Combination of model 1 and 3
28
29------------------------------------------------------------------------
30
31 model 2:
32 Only a thread that has acquired the mutex (GVL: Global VM Lock or Giant VM Lock) can run.
33 When scheduling, the running thread releases the GVL. If the running
34 thread performs a blocking operation, it must release the GVL so that
35 another thread can continue. After the blocking operation, the thread
36 must check for interrupts (RUBY_VM_CHECK_INTS).
37
38 Every VM can run in parallel.
39
40 Ruby threads are scheduled by the OS thread scheduler.
41
42------------------------------------------------------------------------
43
44 model 3:
45 All threads run concurrently or in parallel, and exclusive access control
46 is needed to access shared objects. For example, to access a String
47 or Array object, a fine-grained lock must be taken every time.
48 */
49
50
51/*
52 * FD_SET, FD_CLR and FD_ISSET have a small sanity check when using glibc
53 * 2.15 or later and set _FORTIFY_SOURCE > 0.
54 * However, the implementation is wrong. Even though Linux's select(2)
55 * supports large fd size (>FD_SETSIZE), it wrongly assumes fd is always
56 * less than FD_SETSIZE (i.e. 1024). And then when enabling HAVE_RB_FD_INIT,
57 * it doesn't work correctly and makes program abort. Therefore we need to
58 * disable FORTIFY_SOURCE until glibc fixes it.
59 */
60#undef _FORTIFY_SOURCE
61#undef __USE_FORTIFY_LEVEL
62#define __USE_FORTIFY_LEVEL 0
63
64/* for model 2 */
65
66#include "ruby/internal/config.h"
67
68#ifdef __linux__
69// Normally, gcc(1) translates calls to alloca() with inlined code. This is not done when either the -ansi, -std=c89, -std=c99, or the -std=c11 option is given and the header <alloca.h> is not included.
70# include <alloca.h>
71#endif
72
73#define TH_SCHED(th) (&(th)->ractor->threads.sched)
74
75#include "eval_intern.h"
76#include "hrtime.h"
77#include "internal.h"
78#include "internal/class.h"
79#include "internal/cont.h"
80#include "internal/error.h"
81#include "internal/gc.h"
82#include "internal/hash.h"
83#include "internal/io.h"
84#include "internal/object.h"
85#include "internal/proc.h"
86#include "ruby/fiber/scheduler.h"
87#include "internal/signal.h"
88#include "internal/thread.h"
89#include "internal/time.h"
90#include "internal/warnings.h"
91#include "iseq.h"
92#include "ruby/debug.h"
93#include "ruby/io.h"
94#include "ruby/thread.h"
95#include "ruby/thread_native.h"
96#include "timev.h"
97#include "vm_core.h"
98#include "ractor_core.h"
99#include "vm_debug.h"
100#include "vm_sync.h"
101
102#include "ccan/list/list.h"
103
104#ifndef USE_NATIVE_THREAD_PRIORITY
105#define USE_NATIVE_THREAD_PRIORITY 0
106#define RUBY_THREAD_PRIORITY_MAX 3
107#define RUBY_THREAD_PRIORITY_MIN -3
108#endif
109
110static VALUE rb_cThreadShield;
111static VALUE cThGroup;
112
113static VALUE sym_immediate;
114static VALUE sym_on_blocking;
115static VALUE sym_never;
116
117static uint32_t thread_default_quantum_ms = 100;
118
119#define THREAD_LOCAL_STORAGE_INITIALISED FL_USER13
120#define THREAD_LOCAL_STORAGE_INITIALISED_P(th) RB_FL_TEST_RAW((th), THREAD_LOCAL_STORAGE_INITIALISED)
121
122static inline VALUE
123rb_thread_local_storage(VALUE thread)
124{
125 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
126 rb_ivar_set(thread, idLocals, rb_hash_new());
127 RB_FL_SET_RAW(thread, THREAD_LOCAL_STORAGE_INITIALISED);
128 }
129 return rb_ivar_get(thread, idLocals);
130}
131
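/*
 * Editor's note: a summary of how sleep_forever() (defined later in this
 * file) interprets these flags:
 *
 *   SLEEP_DEADLOCKABLE   - count this thread as a sleeper and run the
 *                          deadlock check while it sleeps
 *   SLEEP_SPURIOUS_CHECK - go back to sleep after a spurious wakeup
 *   SLEEP_ALLOW_SPURIOUS - return after the first wakeup, spurious or not
 *   SLEEP_NO_CHECKINTS   - skip the interrupt check before sleeping
 */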
132enum SLEEP_FLAGS {
133 SLEEP_DEADLOCKABLE = 0x01,
134 SLEEP_SPURIOUS_CHECK = 0x02,
135 SLEEP_ALLOW_SPURIOUS = 0x04,
136 SLEEP_NO_CHECKINTS = 0x08,
137};
138
139static void sleep_forever(rb_thread_t *th, unsigned int fl);
140static int sleep_hrtime(rb_thread_t *, rb_hrtime_t, unsigned int fl);
141
142static void rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end);
143static int rb_threadptr_dead(rb_thread_t *th);
144static void rb_check_deadlock(rb_ractor_t *r);
145static int rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th);
146static const char *thread_status_name(rb_thread_t *th, int detail);
147static int hrtime_update_expire(rb_hrtime_t *, const rb_hrtime_t);
148NORETURN(static void async_bug_fd(const char *mesg, int errno_arg, int fd));
149MAYBE_UNUSED(static int consume_communication_pipe(int fd));
150
151static rb_atomic_t system_working = 1;
152static rb_internal_thread_specific_key_t specific_key_count;
153
154/********************************************************************************/
155
156#define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
157
158struct rb_blocking_region_buffer {
159 enum rb_thread_status prev_status;
160};
161
162static int unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted);
163static void unblock_function_clear(rb_thread_t *th);
164
165static inline int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
166 rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted);
167static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);
168
169#define THREAD_BLOCKING_BEGIN(th) do { \
170 struct rb_thread_sched * const sched = TH_SCHED(th); \
171 RB_VM_SAVE_MACHINE_CONTEXT(th); \
172 thread_sched_to_waiting((sched), (th));
173
174#define THREAD_BLOCKING_END(th) \
175 thread_sched_to_running((sched), (th)); \
176 rb_ractor_thread_switch(th->ractor, th, false); \
177} while(0)
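/*
 * Editor's note: THREAD_BLOCKING_BEGIN opens a `do {` block and
 * THREAD_BLOCKING_END closes it, so the two must be used as a pair in the
 * same scope; they share the hidden `sched` local. A minimal sketch:
 *
 *     THREAD_BLOCKING_BEGIN(th);
 *     {
 *         // wait on some native primitive here; the scheduler treats
 *         // th as waiting until THREAD_BLOCKING_END resumes it
 *     }
 *     THREAD_BLOCKING_END(th);
 */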
178
179#ifdef __GNUC__
180#ifdef HAVE_BUILTIN___BUILTIN_CHOOSE_EXPR_CONSTANT_P
181#define only_if_constant(expr, notconst) __builtin_choose_expr(__builtin_constant_p(expr), (expr), (notconst))
182#else
183#define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
184#endif
185#else
186#define only_if_constant(expr, notconst) notconst
187#endif
188#define BLOCKING_REGION(th, exec, ubf, ubfarg, fail_if_interrupted) do { \
189 struct rb_blocking_region_buffer __region; \
190 if (blocking_region_begin(th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
191 /* always return true unless fail_if_interrupted */ \
192 !only_if_constant(fail_if_interrupted, TRUE)) { \
193 /* Important that this is inlined into the macro, and not part of \
194 * blocking_region_begin - see bug #20493 */ \
195 RB_VM_SAVE_MACHINE_CONTEXT(th); \
196 thread_sched_to_waiting(TH_SCHED(th), th); \
197 exec; \
198 blocking_region_end(th, &__region); \
199 }; \
200} while(0)
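/*
 * Editor's note: a minimal sketch of how BLOCKING_REGION is used in this
 * file (compare rb_nogvl() below); func, data1, ubf and data2 stand for a
 * caller-supplied blocking function and its unblocking function:
 *
 *     void *val = NULL;
 *     int saved_errno = 0;
 *     BLOCKING_REGION(th, {
 *         val = func(data1);     // runs with the GVL released
 *         saved_errno = errno;
 *     }, ubf, data2, FALSE);
 *     // the GVL is held again here; interrupts are usually checked next
 */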
201
202/*
203 * returns true if this thread was spuriously interrupted, false otherwise
204 * (e.g. hit by Thread#run or ran a Ruby-level Signal.trap handler)
205 */
206#define RUBY_VM_CHECK_INTS_BLOCKING(ec) vm_check_ints_blocking(ec)
207static inline int
208vm_check_ints_blocking(rb_execution_context_t *ec)
209{
210#ifdef RUBY_ASSERT_CRITICAL_SECTION
211 VM_ASSERT(ruby_assert_critical_section_entered == 0);
212#endif
213
214 rb_thread_t *th = rb_ec_thread_ptr(ec);
215
216 if (LIKELY(rb_threadptr_pending_interrupt_empty_p(th))) {
217 if (LIKELY(!RUBY_VM_INTERRUPTED_ANY(ec))) return FALSE;
218 }
219 else {
220 th->pending_interrupt_queue_checked = 0;
221 RUBY_VM_SET_INTERRUPT(ec);
222 }
223 return rb_threadptr_execute_interrupts(th, 1);
224}
225
226int
227rb_vm_check_ints_blocking(rb_execution_context_t *ec)
228{
229 return vm_check_ints_blocking(ec);
230}
231
232/*
233 * poll() is supported by many OSes, but so far Linux is the only
234 * one we know of that supports using poll() in all places select()
235 * would work.
236 */
237#if defined(HAVE_POLL)
238# if defined(__linux__)
239# define USE_POLL
240# endif
241# if defined(__FreeBSD_version) && __FreeBSD_version >= 1100000
242# define USE_POLL
243 /* FreeBSD does not set POLLOUT when POLLHUP happens */
244# define POLLERR_SET (POLLHUP | POLLERR)
245# endif
246#endif
247
248static void
249timeout_prepare(rb_hrtime_t **to, rb_hrtime_t *rel, rb_hrtime_t *end,
250 const struct timeval *timeout)
251{
252 if (timeout) {
253 *rel = rb_timeval2hrtime(timeout);
254 *end = rb_hrtime_add(rb_hrtime_now(), *rel);
255 *to = rel;
256 }
257 else {
258 *to = 0;
259 }
260}
261
262MAYBE_UNUSED(NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start)));
263MAYBE_UNUSED(static bool th_has_dedicated_nt(const rb_thread_t *th));
264MAYBE_UNUSED(static int waitfd_to_waiting_flag(int wfd_event));
265
266#include THREAD_IMPL_SRC
267
268/*
269 * TODO: somebody with win32 knowledge should be able to get rid of
270 * timer-thread by busy-waiting on signals. And it should be possible
271 * to make the GVL in thread_pthread.c be platform-independent.
272 */
273#ifndef BUSY_WAIT_SIGNALS
274# define BUSY_WAIT_SIGNALS (0)
275#endif
276
277#ifndef USE_EVENTFD
278# define USE_EVENTFD (0)
279#endif
280
281#include "thread_sync.c"
282
283void
284rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
285{
286 rb_native_mutex_initialize(lock);
287}
288
289void
290rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
291{
292 rb_native_mutex_destroy(lock);
293}
294
295void
296rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
297{
298 rb_native_mutex_lock(lock);
299}
300
301void
302rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
303{
304 rb_native_mutex_unlock(lock);
305}
306
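/*
 * Editor's note: unblock_function_set() installs `func`/`arg` as th's
 * unblock function. If `fail_if_interrupted` is set and an interrupt is
 * already pending, it returns FALSE without installing anything; otherwise
 * it services pending interrupts and retries until it holds
 * th->interrupt_lock with no interrupt flagged, then installs the unblock
 * function and returns TRUE.
 */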
307static int
308unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted)
309{
310 do {
311 if (fail_if_interrupted) {
312 if (RUBY_VM_INTERRUPTED_ANY(th->ec)) {
313 return FALSE;
314 }
315 }
316 else {
317 RUBY_VM_CHECK_INTS(th->ec);
318 }
319
320 rb_native_mutex_lock(&th->interrupt_lock);
321 } while (!th->ec->raised_flag && RUBY_VM_INTERRUPTED_ANY(th->ec) &&
322 (rb_native_mutex_unlock(&th->interrupt_lock), TRUE));
323
324 VM_ASSERT(th->unblock.func == NULL);
325
326 th->unblock.func = func;
327 th->unblock.arg = arg;
328 rb_native_mutex_unlock(&th->interrupt_lock);
329
330 return TRUE;
331}
332
333static void
334unblock_function_clear(rb_thread_t *th)
335{
336 rb_native_mutex_lock(&th->interrupt_lock);
337 th->unblock.func = 0;
338 rb_native_mutex_unlock(&th->interrupt_lock);
339}
340
341static void
342threadptr_set_interrupt_locked(rb_thread_t *th, bool trap)
343{
344 // th->interrupt_lock should be acquired here
345
346 RUBY_DEBUG_LOG("th:%u trap:%d", rb_th_serial(th), trap);
347
348 if (trap) {
349 RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
350 }
351 else {
352 RUBY_VM_SET_INTERRUPT(th->ec);
353 }
354
355 if (th->unblock.func != NULL) {
356 (th->unblock.func)(th->unblock.arg);
357 }
358 else {
359 /* none */
360 }
361}
362
363static void
364threadptr_set_interrupt(rb_thread_t *th, int trap)
365{
366 rb_native_mutex_lock(&th->interrupt_lock);
367 {
368 threadptr_set_interrupt_locked(th, trap);
369 }
370 rb_native_mutex_unlock(&th->interrupt_lock);
371}
372
373/* Set interrupt flag on another thread or current thread, and call its UBF if it has one set */
374void
375rb_threadptr_interrupt(rb_thread_t *th)
376{
377 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
378 threadptr_set_interrupt(th, false);
379}
380
381static void
382threadptr_trap_interrupt(rb_thread_t *th)
383{
384 threadptr_set_interrupt(th, true);
385}
386
387static void
388terminate_all(rb_ractor_t *r, const rb_thread_t *main_thread)
389{
390 rb_thread_t *th = 0;
391
392 ccan_list_for_each(&r->threads.set, th, lt_node) {
393 if (th != main_thread) {
394 RUBY_DEBUG_LOG("terminate start th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));
395
396 rb_threadptr_pending_interrupt_enque(th, RUBY_FATAL_THREAD_TERMINATED);
397 rb_threadptr_interrupt(th);
398
399 RUBY_DEBUG_LOG("terminate done th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));
400 }
401 else {
402 RUBY_DEBUG_LOG("main thread th:%u", rb_th_serial(th));
403 }
404 }
405}
406
407static void
408rb_threadptr_join_list_wakeup(rb_thread_t *thread)
409{
410 while (thread->join_list) {
411 struct rb_waiting_list *join_list = thread->join_list;
412
413 // Consume the entry from the join list:
414 thread->join_list = join_list->next;
415
416 rb_thread_t *target_thread = join_list->thread;
417
418 if (target_thread->scheduler != Qnil && join_list->fiber) {
419 rb_fiber_scheduler_unblock(target_thread->scheduler, target_thread->self, rb_fiberptr_self(join_list->fiber));
420 }
421 else {
422 rb_threadptr_interrupt(target_thread);
423
424 switch (target_thread->status) {
425 case THREAD_STOPPED:
426 case THREAD_STOPPED_FOREVER:
427 target_thread->status = THREAD_RUNNABLE;
428 break;
429 default:
430 break;
431 }
432 }
433 }
434}
435
436void
437rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
438{
439 while (th->keeping_mutexes) {
440 rb_mutex_t *mutex = th->keeping_mutexes;
441 th->keeping_mutexes = mutex->next_mutex;
442
443 // rb_warn("mutex #<%p> was not unlocked by thread #<%p>", (void *)mutex, (void*)th);
444
445 const char *error_message = rb_mutex_unlock_th(mutex, th, mutex->fiber);
446 if (error_message) rb_bug("invalid keeping_mutexes: %s", error_message);
447 }
448}
449
450void
451rb_thread_terminate_all(rb_thread_t *th)
452{
453 rb_ractor_t *cr = th->ractor;
454 rb_execution_context_t * volatile ec = th->ec;
455 volatile int sleeping = 0;
456
457 if (cr->threads.main != th) {
458 rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
459 (void *)cr->threads.main, (void *)th);
460 }
461
462 /* unlock all locking mutexes */
463 rb_threadptr_unlock_all_locking_mutexes(th);
464
465 EC_PUSH_TAG(ec);
466 if (EC_EXEC_TAG() == TAG_NONE) {
467 retry:
468 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
469
470 terminate_all(cr, th);
471
472 while (rb_ractor_living_thread_num(cr) > 1) {
473 rb_hrtime_t rel = RB_HRTIME_PER_SEC;
474 /*
475 * The thread exiting routine in thread_start_func_2 notifies
476 * me when the last sub-thread exits.
477 */
478 sleeping = 1;
479 native_sleep(th, &rel);
480 RUBY_VM_CHECK_INTS_BLOCKING(ec);
481 sleeping = 0;
482 }
483 }
484 else {
485 /*
486 * When an exception is caught (e.g. Ctrl+C), broadcast the
487 * kill request again to ensure killing all threads, even
488 * if they are blocked on sleep, mutex, etc.
489 */
490 if (sleeping) {
491 sleeping = 0;
492 goto retry;
493 }
494 }
495 EC_POP_TAG();
496}
497
498void rb_threadptr_root_fiber_terminate(rb_thread_t *th);
499static void threadptr_interrupt_exec_cleanup(rb_thread_t *th);
500
501static void
502thread_cleanup_func_before_exec(void *th_ptr)
503{
504 rb_thread_t *th = th_ptr;
505 th->status = THREAD_KILLED;
506
507 // The thread stack doesn't exist in the forked process:
508 th->ec->machine.stack_start = th->ec->machine.stack_end = NULL;
509
510 threadptr_interrupt_exec_cleanup(th);
511 rb_threadptr_root_fiber_terminate(th);
512}
513
514static void
515thread_cleanup_func(void *th_ptr, int atfork)
516{
517 rb_thread_t *th = th_ptr;
518
519 th->locking_mutex = Qfalse;
520 thread_cleanup_func_before_exec(th_ptr);
521
522 /*
523 * Unfortunately, we can't release native threading resources at fork,
524 * because libc may have an unstable locking state; therefore touching
525 * a threading resource may cause a deadlock.
526 */
527 if (atfork) {
528 th->nt = NULL;
529 return;
530 }
531
532 rb_native_mutex_destroy(&th->interrupt_lock);
533}
534
535static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
536static VALUE rb_thread_to_s(VALUE thread);
537
538void
539ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame)
540{
541 native_thread_init_stack(th, local_in_parent_frame);
542}
543
544const VALUE *
545rb_vm_proc_local_ep(VALUE proc)
546{
547 const VALUE *ep = vm_proc_ep(proc);
548
549 if (ep) {
550 return rb_vm_ep_local_ep(ep);
551 }
552 else {
553 return NULL;
554 }
555}
556
557// for ractor, defined in vm.c
558VALUE rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
559 int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler);
560
561static VALUE
562thread_do_start_proc(rb_thread_t *th)
563{
564 VALUE args = th->invoke_arg.proc.args;
565 const VALUE *args_ptr;
566 int args_len;
567 VALUE procval = th->invoke_arg.proc.proc;
568 rb_proc_t *proc;
569 GetProcPtr(procval, proc);
570
571 th->ec->errinfo = Qnil;
572 th->ec->root_lep = rb_vm_proc_local_ep(procval);
573 th->ec->root_svar = Qfalse;
574
575 vm_check_ints_blocking(th->ec);
576
577 if (th->invoke_type == thread_invoke_type_ractor_proc) {
578 VALUE self = rb_ractor_self(th->ractor);
579 th->thgroup = th->ractor->thgroup_default = rb_obj_alloc(cThGroup);
580
581 VM_ASSERT(FIXNUM_P(args));
582 args_len = FIX2INT(args);
583 args_ptr = ALLOCA_N(VALUE, args_len);
584 rb_ractor_receive_parameters(th->ec, th->ractor, args_len, (VALUE *)args_ptr);
585 vm_check_ints_blocking(th->ec);
586
587 return rb_vm_invoke_proc_with_self(
588 th->ec, proc, self,
589 args_len, args_ptr,
590 th->invoke_arg.proc.kw_splat,
591 VM_BLOCK_HANDLER_NONE
592 );
593 }
594 else {
595 args_len = RARRAY_LENINT(args);
596 if (args_len < 8) {
597 /* free proc.args if the length is small enough */
598 args_ptr = ALLOCA_N(VALUE, args_len);
599 MEMCPY((VALUE *)args_ptr, RARRAY_CONST_PTR(args), VALUE, args_len);
600 th->invoke_arg.proc.args = Qnil;
601 }
602 else {
603 args_ptr = RARRAY_CONST_PTR(args);
604 }
605
606 vm_check_ints_blocking(th->ec);
607
608 return rb_vm_invoke_proc(
609 th->ec, proc,
610 args_len, args_ptr,
611 th->invoke_arg.proc.kw_splat,
612 VM_BLOCK_HANDLER_NONE
613 );
614 }
615}
616
617static VALUE
618thread_do_start(rb_thread_t *th)
619{
620 native_set_thread_name(th);
621 VALUE result = Qundef;
622
623 switch (th->invoke_type) {
624 case thread_invoke_type_proc:
625 result = thread_do_start_proc(th);
626 break;
627
628 case thread_invoke_type_ractor_proc:
629 result = thread_do_start_proc(th);
630 rb_ractor_atexit(th->ec, result);
631 break;
632
633 case thread_invoke_type_func:
634 result = (*th->invoke_arg.func.func)(th->invoke_arg.func.arg);
635 break;
636
637 case thread_invoke_type_none:
638 rb_bug("unreachable");
639 }
640
641 return result;
642}
643
644void rb_ec_clear_current_thread_trace_func(const rb_execution_context_t *ec);
645
646static int
647thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
648{
649 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
650 VM_ASSERT(th != th->vm->ractor.main_thread);
651
652 enum ruby_tag_type state;
653 VALUE errinfo = Qnil;
654 rb_thread_t *ractor_main_th = th->ractor->threads.main;
655
656 // setup ractor
657 if (rb_ractor_status_p(th->ractor, ractor_blocking)) {
658 RB_VM_LOCK();
659 {
660 rb_vm_ractor_blocking_cnt_dec(th->vm, th->ractor, __FILE__, __LINE__);
661 rb_ractor_t *r = th->ractor;
662 r->r_stdin = rb_io_prep_stdin();
663 r->r_stdout = rb_io_prep_stdout();
664 r->r_stderr = rb_io_prep_stderr();
665 }
666 RB_VM_UNLOCK();
667 }
668
669 // Ensure that we are not joinable.
670 VM_ASSERT(UNDEF_P(th->value));
671
672 int fiber_scheduler_closed = 0, event_thread_end_hooked = 0;
673 VALUE result = Qundef;
674
675 EC_PUSH_TAG(th->ec);
676
677 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
678 EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_BEGIN, th->self, 0, 0, 0, Qundef);
679
680 result = thread_do_start(th);
681 }
682
683 if (!fiber_scheduler_closed) {
684 fiber_scheduler_closed = 1;
685 rb_fiber_scheduler_set(Qnil);
686 }
687
688 if (!event_thread_end_hooked) {
689 event_thread_end_hooked = 1;
690 EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_END, th->self, 0, 0, 0, Qundef);
691 }
692
693 if (state == TAG_NONE) {
694 // This must be set AFTER doing all user-level code. At this point, the thread is effectively finished and calls to `Thread#join` will succeed.
695 th->value = result;
696 }
697 else {
698 errinfo = th->ec->errinfo;
699
700 VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef);
701 if (!NIL_P(exc)) errinfo = exc;
702
703 if (state == TAG_FATAL) {
704 if (th->invoke_type == thread_invoke_type_ractor_proc) {
705 rb_ractor_atexit(th->ec, Qnil);
706 }
707 /* fatal error within this thread, need to stop whole script */
708 }
709 else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
710 /* exit on main_thread. */
711 }
712 else {
713 if (th->report_on_exception) {
714 VALUE mesg = rb_thread_to_s(th->self);
715 rb_str_cat_cstr(mesg, " terminated with exception (report_on_exception is true):\n");
716 rb_write_error_str(mesg);
717 rb_ec_error_print(th->ec, errinfo);
718 }
719
720 if (th->invoke_type == thread_invoke_type_ractor_proc) {
721 rb_ractor_atexit_exception(th->ec);
722 }
723
724 if (th->vm->thread_abort_on_exception ||
725 th->abort_on_exception || RTEST(ruby_debug)) {
726 /* exit on main_thread */
727 }
728 else {
729 errinfo = Qnil;
730 }
731 }
732 th->value = Qnil;
733 }
734
735 // The thread is effectively finished and can be joined.
736 VM_ASSERT(!UNDEF_P(th->value));
737
738 rb_threadptr_join_list_wakeup(th);
739 rb_threadptr_unlock_all_locking_mutexes(th);
740
741 if (th->invoke_type == thread_invoke_type_ractor_proc) {
742 rb_thread_terminate_all(th);
743 rb_ractor_teardown(th->ec);
744 }
745
746 th->status = THREAD_KILLED;
747 RUBY_DEBUG_LOG("killed th:%u", rb_th_serial(th));
748
749 if (th->vm->ractor.main_thread == th) {
750 ruby_stop(0);
751 }
752
753 if (RB_TYPE_P(errinfo, T_OBJECT)) {
754 /* handle a normal error object */
755 rb_threadptr_raise(ractor_main_th, 1, &errinfo);
756 }
757
758 EC_POP_TAG();
759
760 rb_ec_clear_current_thread_trace_func(th->ec);
761
762 /* locking_mutex must be Qfalse */
763 if (th->locking_mutex != Qfalse) {
764 rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
765 (void *)th, th->locking_mutex);
766 }
767
768 if (ractor_main_th->status == THREAD_KILLED &&
769 th->ractor->threads.cnt <= 2 /* main thread and this thread */) {
770 /* I'm last thread. wake up main thread from rb_thread_terminate_all */
771 rb_threadptr_interrupt(ractor_main_th);
772 }
773
774 rb_check_deadlock(th->ractor);
775
776 rb_fiber_close(th->ec->fiber_ptr);
777
778 thread_cleanup_func(th, FALSE);
779 VM_ASSERT(th->ec->vm_stack == NULL);
780
781 if (th->invoke_type == thread_invoke_type_ractor_proc) {
782 // after rb_ractor_living_threads_remove()
783 // GC will happen anytime and this ractor can be collected (and destroy GVL).
784 // So gvl_release() should be before it.
785 thread_sched_to_dead(TH_SCHED(th), th);
786 rb_ractor_living_threads_remove(th->ractor, th);
787 }
788 else {
789 rb_ractor_living_threads_remove(th->ractor, th);
790 thread_sched_to_dead(TH_SCHED(th), th);
791 }
792
793 return 0;
794}
795
796struct thread_create_params {
797 enum thread_invoke_type type;
798
799 // for normal proc thread
800 VALUE args;
801 VALUE proc;
802
803 // for ractor
804 rb_ractor_t *g;
805
806 // for func
807 VALUE (*fn)(void *);
808};
809
810static void thread_specific_storage_alloc(rb_thread_t *th);
811
812static VALUE
813thread_create_core(VALUE thval, struct thread_create_params *params)
814{
815 rb_execution_context_t *ec = GET_EC();
816 rb_thread_t *th = rb_thread_ptr(thval), *current_th = rb_ec_thread_ptr(ec);
817 int err;
818
819 thread_specific_storage_alloc(th);
820
821 if (OBJ_FROZEN(current_th->thgroup)) {
822 rb_raise(rb_eThreadError,
823 "can't start a new thread (frozen ThreadGroup)");
824 }
825
826 rb_fiber_inherit_storage(ec, th->ec->fiber_ptr);
827
828 switch (params->type) {
829 case thread_invoke_type_proc:
830 th->invoke_type = thread_invoke_type_proc;
831 th->invoke_arg.proc.args = params->args;
832 th->invoke_arg.proc.proc = params->proc;
833 th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
834 break;
835
836 case thread_invoke_type_ractor_proc:
837#if RACTOR_CHECK_MODE > 0
838 rb_ractor_setup_belonging_to(thval, rb_ractor_id(params->g));
839#endif
840 th->invoke_type = thread_invoke_type_ractor_proc;
841 th->ractor = params->g;
842 th->ractor->threads.main = th;
843 th->invoke_arg.proc.proc = rb_proc_isolate_bang(params->proc);
844 th->invoke_arg.proc.args = INT2FIX(RARRAY_LENINT(params->args));
845 th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
846 rb_ractor_send_parameters(ec, params->g, params->args);
847 break;
848
849 case thread_invoke_type_func:
850 th->invoke_type = thread_invoke_type_func;
851 th->invoke_arg.func.func = params->fn;
852 th->invoke_arg.func.arg = (void *)params->args;
853 break;
854
855 default:
856 rb_bug("unreachable");
857 }
858
859 th->priority = current_th->priority;
860 th->thgroup = current_th->thgroup;
861
862 th->pending_interrupt_queue = rb_ary_hidden_new(0);
863 th->pending_interrupt_queue_checked = 0;
864 th->pending_interrupt_mask_stack = rb_ary_dup(current_th->pending_interrupt_mask_stack);
865 RBASIC_CLEAR_CLASS(th->pending_interrupt_mask_stack);
866
867 rb_native_mutex_initialize(&th->interrupt_lock);
868
869 RUBY_DEBUG_LOG("r:%u th:%u", rb_ractor_id(th->ractor), rb_th_serial(th));
870
871 rb_ractor_living_threads_insert(th->ractor, th);
872
873 /* kick thread */
874 err = native_thread_create(th);
875 if (err) {
876 th->status = THREAD_KILLED;
877 rb_ractor_living_threads_remove(th->ractor, th);
878 rb_raise(rb_eThreadError, "can't create Thread: %s", strerror(err));
879 }
880 return thval;
881}
882
883#define threadptr_initialized(th) ((th)->invoke_type != thread_invoke_type_none)
884
885/*
886 * call-seq:
887 * Thread.new { ... } -> thread
888 * Thread.new(*args, &proc) -> thread
889 * Thread.new(*args) { |args| ... } -> thread
890 *
891 * Creates a new thread executing the given block.
892 *
893 * Any +args+ given to ::new will be passed to the block:
894 *
895 * arr = []
896 * a, b, c = 1, 2, 3
897 * Thread.new(a,b,c) { |d,e,f| arr << d << e << f }.join
898 * arr #=> [1, 2, 3]
899 *
900 * A ThreadError exception is raised if ::new is called without a block.
901 *
902 * If you're going to subclass Thread, be sure to call super in your
903 * +initialize+ method, otherwise a ThreadError will be raised.
904 */
905static VALUE
906thread_s_new(int argc, VALUE *argv, VALUE klass)
907{
908 rb_thread_t *th;
909 VALUE thread = rb_thread_alloc(klass);
910
911 if (GET_RACTOR()->threads.main->status == THREAD_KILLED) {
912 rb_raise(rb_eThreadError, "can't alloc thread");
913 }
914
915 rb_obj_call_init_kw(thread, argc, argv, RB_PASS_CALLED_KEYWORDS);
916 th = rb_thread_ptr(thread);
917 if (!threadptr_initialized(th)) {
918 rb_raise(rb_eThreadError, "uninitialized thread - check '%"PRIsVALUE"#initialize'",
919 klass);
920 }
921 return thread;
922}
923
924/*
925 * call-seq:
926 * Thread.start([args]*) {|args| block } -> thread
927 * Thread.fork([args]*) {|args| block } -> thread
928 *
929 * Basically the same as ::new. However, if class Thread is subclassed, then
930 * calling +start+ in that subclass will not invoke the subclass's
931 * +initialize+ method.
932 */
933
934static VALUE
935thread_start(VALUE klass, VALUE args)
936{
937 struct thread_create_params params = {
938 .type = thread_invoke_type_proc,
939 .args = args,
940 .proc = rb_block_proc(),
941 };
942 return thread_create_core(rb_thread_alloc(klass), &params);
943}
944
945static VALUE
946threadptr_invoke_proc_location(rb_thread_t *th)
947{
948 if (th->invoke_type == thread_invoke_type_proc) {
949 return rb_proc_location(th->invoke_arg.proc.proc);
950 }
951 else {
952 return Qnil;
953 }
954}
955
956/* :nodoc: */
957static VALUE
958thread_initialize(VALUE thread, VALUE args)
959{
960 rb_thread_t *th = rb_thread_ptr(thread);
961
962 if (!rb_block_given_p()) {
963 rb_raise(rb_eThreadError, "must be called with a block");
964 }
965 else if (th->invoke_type != thread_invoke_type_none) {
966 VALUE loc = threadptr_invoke_proc_location(th);
967 if (!NIL_P(loc)) {
968 rb_raise(rb_eThreadError,
969 "already initialized thread - %"PRIsVALUE":%"PRIsVALUE,
970 RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
971 }
972 else {
973 rb_raise(rb_eThreadError, "already initialized thread");
974 }
975 }
976 else {
977 struct thread_create_params params = {
978 .type = thread_invoke_type_proc,
979 .args = args,
980 .proc = rb_block_proc(),
981 };
982 return thread_create_core(thread, &params);
983 }
984}
985
986VALUE
987rb_thread_create(VALUE (*fn)(void *), void *arg)
988{
989 struct thread_create_params params = {
990 .type = thread_invoke_type_func,
991 .fn = fn,
992 .args = (VALUE)arg,
993 };
994 return thread_create_core(rb_thread_alloc(rb_cThread), &params);
995}
996
997VALUE
998rb_thread_create_ractor(rb_ractor_t *r, VALUE args, VALUE proc)
999{
1000 struct thread_create_params params = {
1001 .type = thread_invoke_type_ractor_proc,
1002 .g = r,
1003 .args = args,
1004 .proc = proc,
1005 };
1006 return thread_create_core(rb_thread_alloc(rb_cThread), &params);
1007}
1008
1010struct join_arg {
1011 struct rb_waiting_list *waiter;
1012 rb_thread_t *target;
1013 VALUE timeout;
1014 rb_hrtime_t *limit;
1015};
1016
1017static VALUE
1018remove_from_join_list(VALUE arg)
1019{
1020 struct join_arg *p = (struct join_arg *)arg;
1021 rb_thread_t *target_thread = p->target;
1022
1023 if (target_thread->status != THREAD_KILLED) {
1024 struct rb_waiting_list **join_list = &target_thread->join_list;
1025
1026 while (*join_list) {
1027 if (*join_list == p->waiter) {
1028 *join_list = (*join_list)->next;
1029 break;
1030 }
1031
1032 join_list = &(*join_list)->next;
1033 }
1034 }
1035
1036 return Qnil;
1037}
1038
1039static int
1040thread_finished(rb_thread_t *th)
1041{
1042 return th->status == THREAD_KILLED || !UNDEF_P(th->value);
1043}
1044
1045static VALUE
1046thread_join_sleep(VALUE arg)
1047{
1048 struct join_arg *p = (struct join_arg *)arg;
1049 rb_thread_t *target_th = p->target, *th = p->waiter->thread;
1050 rb_hrtime_t end = 0, *limit = p->limit;
1051
1052 if (limit) {
1053 end = rb_hrtime_add(*limit, rb_hrtime_now());
1054 }
1055
1056 while (!thread_finished(target_th)) {
1057 VALUE scheduler = rb_fiber_scheduler_current();
1058
1059 if (!limit) {
1060 if (scheduler != Qnil) {
1061 rb_fiber_scheduler_block(scheduler, target_th->self, Qnil);
1062 }
1063 else {
1064 sleep_forever(th, SLEEP_DEADLOCKABLE | SLEEP_ALLOW_SPURIOUS | SLEEP_NO_CHECKINTS);
1065 }
1066 }
1067 else {
1068 if (hrtime_update_expire(limit, end)) {
1069 RUBY_DEBUG_LOG("timeout target_th:%u", rb_th_serial(target_th));
1070 return Qfalse;
1071 }
1072
1073 if (scheduler != Qnil) {
1074 VALUE timeout = rb_float_new(hrtime2double(*limit));
1075 rb_fiber_scheduler_block(scheduler, target_th->self, timeout);
1076 }
1077 else {
1078 th->status = THREAD_STOPPED;
1079 native_sleep(th, limit);
1080 }
1081 }
1082 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1083 th->status = THREAD_RUNNABLE;
1084
1085 RUBY_DEBUG_LOG("interrupted target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1086 }
1087
1088 return Qtrue;
1089}
1090
1091static VALUE
1092thread_join(rb_thread_t *target_th, VALUE timeout, rb_hrtime_t *limit)
1093{
1094 rb_execution_context_t *ec = GET_EC();
1095 rb_thread_t *th = ec->thread_ptr;
1096 rb_fiber_t *fiber = ec->fiber_ptr;
1097
1098 if (th == target_th) {
1099 rb_raise(rb_eThreadError, "Target thread must not be current thread");
1100 }
1101
1102 if (th->ractor->threads.main == target_th) {
1103 rb_raise(rb_eThreadError, "Target thread must not be main thread");
1104 }
1105
1106 RUBY_DEBUG_LOG("target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1107
1108 if (target_th->status != THREAD_KILLED) {
1109 struct rb_waiting_list waiter;
1110 waiter.next = target_th->join_list;
1111 waiter.thread = th;
1112 waiter.fiber = rb_fiberptr_blocking(fiber) ? NULL : fiber;
1113 target_th->join_list = &waiter;
1114
1115 struct join_arg arg;
1116 arg.waiter = &waiter;
1117 arg.target = target_th;
1118 arg.timeout = timeout;
1119 arg.limit = limit;
1120
1121 if (!rb_ensure(thread_join_sleep, (VALUE)&arg, remove_from_join_list, (VALUE)&arg)) {
1122 return Qnil;
1123 }
1124 }
1125
1126 RUBY_DEBUG_LOG("success target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1127
1128 if (target_th->ec->errinfo != Qnil) {
1129 VALUE err = target_th->ec->errinfo;
1130
1131 if (FIXNUM_P(err)) {
1132 switch (err) {
1133 case INT2FIX(TAG_FATAL):
1134 RUBY_DEBUG_LOG("terminated target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1135
1136 /* OK. killed. */
1137 break;
1138 default:
1139 if (err == RUBY_FATAL_FIBER_KILLED) { // not integer constant so can't be a case expression
1140 // root fiber killed in non-main thread
1141 break;
1142 }
1143 rb_bug("thread_join: Fixnum (%d) should not reach here.", FIX2INT(err));
1144 }
1145 }
1146 else if (THROW_DATA_P(target_th->ec->errinfo)) {
1147 rb_bug("thread_join: THROW_DATA should not reach here.");
1148 }
1149 else {
1150 /* normal exception */
1151 rb_exc_raise(err);
1152 }
1153 }
1154 return target_th->self;
1155}
1156
1157/*
1158 * call-seq:
1159 * thr.join -> thr
1160 * thr.join(limit) -> thr
1161 *
1162 * The calling thread will suspend execution and run this +thr+.
1163 *
1164 * Does not return until +thr+ exits or until the given +limit+ seconds have
1165 * passed.
1166 *
1167 * If the time limit expires, +nil+ will be returned, otherwise +thr+ is
1168 * returned.
1169 *
1170 * Any threads not joined will be killed when the main program exits.
1171 *
1172 * If +thr+ had previously raised an exception and the ::abort_on_exception or
1173 * $DEBUG flags are not set, (so the exception has not yet been processed), it
1174 * will be processed at this time.
1175 *
1176 * a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
1177 * x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
1178 * x.join # Let thread x finish, thread a will be killed on exit.
1179 * #=> "axyz"
1180 *
1181 * The following example illustrates the +limit+ parameter.
1182 *
1183 * y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
1184 * puts "Waiting" until y.join(0.15)
1185 *
1186 * This will produce:
1187 *
1188 * tick...
1189 * Waiting
1190 * tick...
1191 * Waiting
1192 * tick...
1193 * tick...
1194 */
1195
1196static VALUE
1197thread_join_m(int argc, VALUE *argv, VALUE self)
1198{
1199 VALUE timeout = Qnil;
1200 rb_hrtime_t rel = 0, *limit = 0;
1201
1202 if (rb_check_arity(argc, 0, 1)) {
1203 timeout = argv[0];
1204 }
1205
1206 // Convert the timeout eagerly, so it's always converted and deterministic
1207 /*
1208 * This supports INFINITY and negative values, so we can't use
1209 * rb_time_interval right now...
1210 */
1211 if (NIL_P(timeout)) {
1212 /* unlimited */
1213 }
1214 else if (FIXNUM_P(timeout)) {
1215 rel = rb_sec2hrtime(NUM2TIMET(timeout));
1216 limit = &rel;
1217 }
1218 else {
1219 limit = double2hrtime(&rel, rb_num2dbl(timeout));
1220 }
1221
1222 return thread_join(rb_thread_ptr(self), timeout, limit);
1223}
1224
1225/*
1226 * call-seq:
1227 * thr.value -> obj
1228 *
1229 * Waits for +thr+ to complete, using #join, and returns its value or raises
1230 * the exception which terminated the thread.
1231 *
1232 * a = Thread.new { 2 + 2 }
1233 * a.value #=> 4
1234 *
1235 * b = Thread.new { raise 'something went wrong' }
1236 * b.value #=> RuntimeError: something went wrong
1237 */
1238
1239static VALUE
1240thread_value(VALUE self)
1241{
1242 rb_thread_t *th = rb_thread_ptr(self);
1243 thread_join(th, Qnil, 0);
1244 if (UNDEF_P(th->value)) {
1245 // If the thread is dead because we forked, th->value is still Qundef.
1246 return Qnil;
1247 }
1248 return th->value;
1249}
1250
1251/*
1252 * Thread Scheduling
1253 */
1254
1255static void
1256getclockofday(struct timespec *ts)
1257{
1258#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
1259 if (clock_gettime(CLOCK_MONOTONIC, ts) == 0)
1260 return;
1261#endif
1262 rb_timespec_now(ts);
1263}
1264
1265/*
1266 * Don't inline this, since the library call is already time-consuming
1267 * and we don't want "struct timespec" on the stack too long for GC
1268 */
1269NOINLINE(rb_hrtime_t rb_hrtime_now(void));
1270rb_hrtime_t
1271rb_hrtime_now(void)
1272{
1273 struct timespec ts;
1274
1275 getclockofday(&ts);
1276 return rb_timespec2hrtime(&ts);
1277}
1278
1279/*
1280 * at least gcc 7.2 and 7.3 complain about "rb_hrtime_t end"
1281 * being uninitialized, maybe other versions, too.
1282 */
1283COMPILER_WARNING_PUSH
1284#if defined(__GNUC__) && __GNUC__ == 7 && __GNUC_MINOR__ <= 3
1285COMPILER_WARNING_IGNORED(-Wmaybe-uninitialized)
1286#endif
1287#ifndef PRIu64
1288#define PRIu64 PRI_64_PREFIX "u"
1289#endif
1290/*
1291 * @end is the absolute time when @timeout is set to expire.
1292 * Returns true if @end has passed;
1293 * otherwise updates @timeout and returns false.
1294 */
1295static int
1296hrtime_update_expire(rb_hrtime_t *timeout, const rb_hrtime_t end)
1297{
1298 rb_hrtime_t now = rb_hrtime_now();
1299
1300 if (now > end) return 1;
1301
1302 RUBY_DEBUG_LOG("%"PRIu64" > %"PRIu64"", (uint64_t)end, (uint64_t)now);
1303
1304 *timeout = end - now;
1305 return 0;
1306}
1307COMPILER_WARNING_POP
1308
1309static int
1310sleep_hrtime(rb_thread_t *th, rb_hrtime_t rel, unsigned int fl)
1311{
1312 enum rb_thread_status prev_status = th->status;
1313 int woke;
1314 rb_hrtime_t end = rb_hrtime_add(rb_hrtime_now(), rel);
1315
1316 th->status = THREAD_STOPPED;
1317 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1318 while (th->status == THREAD_STOPPED) {
1319 native_sleep(th, &rel);
1320 woke = vm_check_ints_blocking(th->ec);
1321 if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
1322 break;
1323 if (hrtime_update_expire(&rel, end))
1324 break;
1325 woke = 1;
1326 }
1327 th->status = prev_status;
1328 return woke;
1329}
1330
1331static int
1332sleep_hrtime_until(rb_thread_t *th, rb_hrtime_t end, unsigned int fl)
1333{
1334 enum rb_thread_status prev_status = th->status;
1335 int woke;
1336 rb_hrtime_t rel = rb_hrtime_sub(end, rb_hrtime_now());
1337
1338 th->status = THREAD_STOPPED;
1339 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1340 while (th->status == THREAD_STOPPED) {
1341 native_sleep(th, &rel);
1342 woke = vm_check_ints_blocking(th->ec);
1343 if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
1344 break;
1345 if (hrtime_update_expire(&rel, end))
1346 break;
1347 woke = 1;
1348 }
1349 th->status = prev_status;
1350 return woke;
1351}
1352
1353static void
1354sleep_forever(rb_thread_t *th, unsigned int fl)
1355{
1356 enum rb_thread_status prev_status = th->status;
1357 enum rb_thread_status status;
1358 int woke;
1359
1360 status = fl & SLEEP_DEADLOCKABLE ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
1361 th->status = status;
1362
1363 if (!(fl & SLEEP_NO_CHECKINTS)) RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1364
1365 while (th->status == status) {
1366 if (fl & SLEEP_DEADLOCKABLE) {
1367 rb_ractor_sleeper_threads_inc(th->ractor);
1368 rb_check_deadlock(th->ractor);
1369 }
1370 {
1371 native_sleep(th, 0);
1372 }
1373 if (fl & SLEEP_DEADLOCKABLE) {
1374 rb_ractor_sleeper_threads_dec(th->ractor);
1375 }
1376 if (fl & SLEEP_ALLOW_SPURIOUS) {
1377 break;
1378 }
1379
1380 woke = vm_check_ints_blocking(th->ec);
1381
1382 if (woke && !(fl & SLEEP_SPURIOUS_CHECK)) {
1383 break;
1384 }
1385 }
1386 th->status = prev_status;
1387}
1388
1389void
1390rb_thread_sleep_forever(void)
1391{
1392 RUBY_DEBUG_LOG("forever");
1393 sleep_forever(GET_THREAD(), SLEEP_SPURIOUS_CHECK);
1394}
1395
1396void
1397rb_thread_sleep_deadly(void)
1398{
1399 RUBY_DEBUG_LOG("deadly");
1400 sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE|SLEEP_SPURIOUS_CHECK);
1401}
1402
1403static void
1404rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end)
1405{
1406 VALUE scheduler = rb_fiber_scheduler_current();
1407 if (scheduler != Qnil) {
1408 rb_fiber_scheduler_block(scheduler, blocker, timeout);
1409 }
1410 else {
1411 RUBY_DEBUG_LOG("...");
1412 if (end) {
1413 sleep_hrtime_until(GET_THREAD(), end, SLEEP_SPURIOUS_CHECK);
1414 }
1415 else {
1416 sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE);
1417 }
1418 }
1419}
1420
1421void
1422rb_thread_wait_for(struct timeval time)
1423{
1424 rb_thread_t *th = GET_THREAD();
1425
1426 sleep_hrtime(th, rb_timeval2hrtime(&time), SLEEP_SPURIOUS_CHECK);
1427}
1428
1429void
1430rb_ec_check_ints(rb_execution_context_t *ec)
1431{
1432 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1433}
1434
1435/*
1436 * CAUTION: This function causes thread switching.
1437 * rb_thread_check_ints() checks Ruby's interrupts.
1438 * Some interrupts need thread switching or invoking handlers,
1439 * and so on.
1440 */
1441
1442void
1443rb_thread_check_ints(void)
1444{
1445 rb_ec_check_ints(GET_EC());
1446}
1447
1448/*
1449 * Hidden API for tcl/tk wrapper.
1450 * There is no guarantee that it will be maintained.
1451 */
1452int
1453rb_thread_check_trap_pending(void)
1454{
1455 return rb_signal_buff_size() != 0;
1456}
1457
1458/* This function can be called in a blocking region. */
1459int
1460rb_thread_interrupted(VALUE thval)
1461{
1462 return (int)RUBY_VM_INTERRUPTED(rb_thread_ptr(thval)->ec);
1463}
1464
1465void
1466rb_thread_sleep(int sec)
1467{
1468 rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
1469}
1470
1471static void
1472rb_thread_schedule_limits(uint32_t limits_us)
1473{
1474 if (!rb_thread_alone()) {
1475 rb_thread_t *th = GET_THREAD();
1476 RUBY_DEBUG_LOG("us:%u", (unsigned int)limits_us);
1477
1478 if (th->running_time_us >= limits_us) {
1479 RUBY_DEBUG_LOG("switch %s", "start");
1480
1481 RB_VM_SAVE_MACHINE_CONTEXT(th);
1482 thread_sched_yield(TH_SCHED(th), th);
1483 rb_ractor_thread_switch(th->ractor, th, true);
1484
1485 RUBY_DEBUG_LOG("switch %s", "done");
1486 }
1487 }
1488}
1489
1490void
1491rb_thread_schedule(void)
1492{
1493 rb_thread_schedule_limits(0);
1494 RUBY_VM_CHECK_INTS(GET_EC());
1495}
1496
1497/* blocking region */
1498
1499static inline int
1500blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
1501 rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
1502{
1503#ifdef RUBY_ASSERT_CRITICAL_SECTION
1504 VM_ASSERT(ruby_assert_critical_section_entered == 0);
1505#endif
1506 VM_ASSERT(th == GET_THREAD());
1507
1508 region->prev_status = th->status;
1509 if (unblock_function_set(th, ubf, arg, fail_if_interrupted)) {
1510 th->blocking_region_buffer = region;
1511 th->status = THREAD_STOPPED;
1512 rb_ractor_blocking_threads_inc(th->ractor, __FILE__, __LINE__);
1513
1514 RUBY_DEBUG_LOG("thread_id:%p", (void *)th->nt->thread_id);
1515 return TRUE;
1516 }
1517 else {
1518 return FALSE;
1519 }
1520}
1521
1522static inline void
1523blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
1524{
1525 /* entry to ubf_list still permitted at this point, make it impossible: */
1526 unblock_function_clear(th);
1527 /* entry to ubf_list impossible at this point, so unregister is safe: */
1528 unregister_ubf_list(th);
1529
1530 thread_sched_to_running(TH_SCHED(th), th);
1531 rb_ractor_thread_switch(th->ractor, th, false);
1532
1533 th->blocking_region_buffer = 0;
1534 rb_ractor_blocking_threads_dec(th->ractor, __FILE__, __LINE__);
1535 if (th->status == THREAD_STOPPED) {
1536 th->status = region->prev_status;
1537 }
1538
1539 RUBY_DEBUG_LOG("end");
1540
1541#ifndef _WIN32
1542 // GET_THREAD() clears WSAGetLastError()
1543 VM_ASSERT(th == GET_THREAD());
1544#endif
1545}
1546
1547void *
1548rb_nogvl(void *(*func)(void *), void *data1,
1549 rb_unblock_function_t *ubf, void *data2,
1550 int flags)
1551{
1552 if (flags & RB_NOGVL_OFFLOAD_SAFE) {
1553 VALUE scheduler = rb_fiber_scheduler_current();
1554 if (scheduler != Qnil) {
1555 struct rb_fiber_scheduler_blocking_operation_state state = {0};
1556
1557 VALUE result = rb_fiber_scheduler_blocking_operation_wait(scheduler, func, data1, ubf, data2, flags, &state);
1558
1559 if (!UNDEF_P(result)) {
1560 rb_errno_set(state.saved_errno);
1561 return state.result;
1562 }
1563 }
1564 }
1565
1566 void *val = 0;
1567 rb_execution_context_t *ec = GET_EC();
1568 rb_thread_t *th = rb_ec_thread_ptr(ec);
1569 rb_vm_t *vm = rb_ec_vm_ptr(ec);
1570 bool is_main_thread = vm->ractor.main_thread == th;
1571 int saved_errno = 0;
1572
1573 if ((ubf == RUBY_UBF_IO) || (ubf == RUBY_UBF_PROCESS)) {
1574 ubf = ubf_select;
1575 data2 = th;
1576 }
1577 else if (ubf && rb_ractor_living_thread_num(th->ractor) == 1 && is_main_thread) {
1578 if (flags & RB_NOGVL_UBF_ASYNC_SAFE) {
1579 vm->ubf_async_safe = 1;
1580 }
1581 }
1582
1583 rb_vm_t *volatile saved_vm = vm;
1584 BLOCKING_REGION(th, {
1585 val = func(data1);
1586 saved_errno = rb_errno();
1587 }, ubf, data2, flags & RB_NOGVL_INTR_FAIL);
1588 vm = saved_vm;
1589
1590 if (is_main_thread) vm->ubf_async_safe = 0;
1591
1592 if ((flags & RB_NOGVL_INTR_FAIL) == 0) {
1593 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1594 }
1595
1596 rb_errno_set(saved_errno);
1597
1598 return val;
1599}
1600
1601/*
1602 * rb_thread_call_without_gvl - permit concurrent/parallel execution.
1603 * rb_thread_call_without_gvl2 - permit concurrent/parallel execution
1604 * without interrupt process.
1605 *
1606 * rb_thread_call_without_gvl() does:
1607 * (1) Check interrupts.
1608 * (2) release GVL.
1609 * Other Ruby threads may run in parallel.
1610 * (3) call func with data1
1611 * (4) acquire GVL.
1612 * Other Ruby threads can not run in parallel any more.
1613 * (5) Check interrupts.
1614 *
1615 * rb_thread_call_without_gvl2() does:
1616 * (1) Check interrupt and return if interrupted.
1617 * (2) release GVL.
1618 * (3) call func with data1 and a pointer to the flags.
1619 * (4) acquire GVL.
1620 *
1621 * If another thread interrupts this thread (Thread#kill, signal delivery,
1622 * VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
1623 * "un-blocking function"). `ubf()' should interrupt `func()' execution by
1624 * toggling a cancellation flag, canceling the invocation of a call inside
1625 * `func()' or similar. Note that `ubf()' may not be called with the GVL.
1626 *
1627 * There are built-in ubfs and you can specify these ubfs:
1628 *
1629 * * RUBY_UBF_IO: ubf for IO operation
1630 * * RUBY_UBF_PROCESS: ubf for process operation
1631 *
1632 * However, we can not guarantee our built-in ubfs interrupt your `func()'
1633 * correctly. Be careful when using rb_thread_call_without_gvl(). If you don't
1634 * provide proper ubf(), your program will not stop for Control+C or other
1635 * shutdown events.
1636 *
1637 * "Check interrupts" in the list above means checking for asynchronous
1638 * interrupt events (such as Thread#kill, signal delivery, VM-shutdown
1639 * requests, and so on) and calling the corresponding procedures
1640 * (such as `trap' for signals, or raising an exception for Thread#raise).
1641 * If `func()' has already finished when an interrupt arrives, you may skip
1642 * the interrupt check. For example, assume the following func() that reads data from a file.
1643 *
1644 * read_func(...) {
1645 * // (a) before read
1646 * read(buffer); // (b) reading
1647 * // (c) after read
1648 * }
1649 *
1650 * If an interrupt occurs at (a) or (b), then `ubf()' cancels this
1651 * `read_func()' and interrupts are checked. However, if an interrupt occurs
1652 * at (c), after the *read* operation is completed, checking interrupts is
1653 * harmful because it causes an irrevocable side effect: the read data will
1654 * vanish. To avoid such a problem, `read_func()' should be used with
1655 * `rb_thread_call_without_gvl2()'.
1656 *
1657 * If `rb_thread_call_without_gvl2()' detects an interrupt, it returns
1658 * immediately. This function does not show when the execution was interrupted.
1659 * For example, there are 4 possible timings: (a), (b), (c), and before calling
1660 * read_func(). You need to record the progress of read_func() and check
1661 * the progress after `rb_thread_call_without_gvl2()'. You may need to call
1662 * `rb_thread_check_ints()' correctly, or your program will not be able to
1663 * handle events such as `trap' in a timely manner.
1664 *
1665 * NOTE: You can not execute most of the Ruby C API or touch Ruby
1666 * objects in `func()' and `ubf()', including raising an
1667 * exception, because the current thread doesn't hold the GVL
1668 * (it causes synchronization problems). If you need to
1669 * call ruby functions either use rb_thread_call_with_gvl()
1670 * or read source code of C APIs and confirm safety by
1671 * yourself.
1672 *
1673 * NOTE: In short, this API is difficult to use safely. I recommend you
1674 * use other ways if you have them. We lack experience using this API.
1675 * Please report your problem related on it.
1676 *
1677 * NOTE: Releasing GVL and re-acquiring GVL may be expensive operations
1678 * for a short running `func()'. Be sure to benchmark and use this
1679 * mechanism when `func()' consumes enough time.
1680 *
1681 * Safe C API:
1682 * * rb_thread_interrupted() - check interrupt flag
1683 * * ruby_xmalloc(), ruby_xrealloc(), ruby_xfree() -
1684 * they will work without GVL, and may acquire GVL when GC is needed.
1685 */
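/*
 * Editor's illustrative sketch of the API described above. The names
 * blocking_read, unblock_read, read_args and rb_read_without_gvl are
 * hypothetical; only rb_thread_call_without_gvl() itself is the real API:
 *
 *     struct read_args { int fd; char *buf; size_t len; ssize_t result; };
 *
 *     static void *
 *     blocking_read(void *ptr)
 *     {
 *         struct read_args *args = ptr;
 *         args->result = read(args->fd, args->buf, args->len); // GVL not held
 *         return NULL;
 *     }
 *
 *     static void
 *     unblock_read(void *ptr)
 *     {
 *         // called on Thread#kill, signal delivery, VM shutdown and so on;
 *         // force the blocked read(2) to return, e.g. by closing the fd
 *         struct read_args *args = ptr;
 *         close(args->fd);
 *     }
 *
 *     static VALUE
 *     rb_read_without_gvl(struct read_args *args)
 *     {
 *         rb_thread_call_without_gvl(blocking_read, args, unblock_read, args);
 *         return SSIZET2NUM(args->result);
 *     }
 */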
1686void *
1687rb_thread_call_without_gvl2(void *(*func)(void *), void *data1,
1688 rb_unblock_function_t *ubf, void *data2)
1689{
1690 return rb_nogvl(func, data1, ubf, data2, RB_NOGVL_INTR_FAIL);
1691}
1692
1693void *
1694rb_thread_call_without_gvl(void *(*func)(void *data), void *data1,
1695 rb_unblock_function_t *ubf, void *data2)
1696{
1697 return rb_nogvl(func, data1, ubf, data2, 0);
1698}
1699
1700static int
1701waitfd_to_waiting_flag(int wfd_event)
1702{
1703 return wfd_event << 1;
1704}
1705
1706static struct ccan_list_head *
1707rb_io_blocking_operations(struct rb_io *io)
1708{
1709 rb_serial_t fork_generation = GET_VM()->fork_gen;
1710
1711 // On fork, all existing entries in this list (which are stack allocated) become invalid.
1712 // Therefore, we re-initialize the list which clears it.
1713 if (io->fork_generation != fork_generation) {
1714 ccan_list_head_init(&io->blocking_operations);
1715 io->fork_generation = fork_generation;
1716 }
1717
1718 return &io->blocking_operations;
1719}
1720
1721/*
1722 * Registers a blocking operation for an IO object. This is used to track all threads and fibers
1723 * that are currently blocked on this IO for reading, writing or other operations.
1724 *
1725 * When the IO is closed, all blocking operations will be notified via rb_fiber_scheduler_fiber_interrupt
1726 * for fibers with a scheduler, or via rb_threadptr_interrupt for threads without a scheduler.
1727 *
1728 * @parameter io The IO object on which the operation will block
1729 * @parameter blocking_operation The operation details including the execution context that will be blocked
1730 */
1731static void
1732rb_io_blocking_operation_enter(struct rb_io *io, struct rb_io_blocking_operation *blocking_operation)
1733{
1734 ccan_list_add(rb_io_blocking_operations(io), &blocking_operation->list);
1735}
1736
1737static void
1738rb_io_blocking_operation_pop(struct rb_io *io, struct rb_io_blocking_operation *blocking_operation)
1739{
1740 ccan_list_del(&blocking_operation->list);
1741}
1742
1743struct io_blocking_operation_arguments {
1744 struct rb_io *io;
1745 struct rb_io_blocking_operation *blocking_operation;
1746};
1747
1748static VALUE
1749io_blocking_operation_exit(VALUE _arguments)
1750{
1751 struct io_blocking_operation_arguments *arguments = (void*)_arguments;
1752 struct rb_io_blocking_operation *blocking_operation = arguments->blocking_operation;
1753
1754 rb_io_blocking_operation_pop(arguments->io, blocking_operation);
1755
1756 rb_io_t *io = arguments->io;
1757 rb_thread_t *thread = io->closing_ec->thread_ptr;
1758 rb_fiber_t *fiber = io->closing_ec->fiber_ptr;
1759
1760 if (thread->scheduler != Qnil) {
1761 // This can cause spurious wakeups...
1762 rb_fiber_scheduler_unblock(thread->scheduler, io->self, rb_fiberptr_self(fiber));
1763 }
1764 else {
1765 rb_thread_wakeup(thread->self);
1766 }
1767
1768 return Qnil;
1769}
1770
1771/*
1772 * Called when a blocking operation completes or is interrupted. Removes the operation from
1773 * the IO's blocking_operations list and wakes up any waiting threads/fibers.
1774 *
1775 * If there's a wakeup_mutex (meaning an IO close is in progress), synchronizes the cleanup
1776 * through that mutex to ensure proper coordination with the closing thread.
1777 *
1778 * @parameter io The IO object the operation was performed on
1779 * @parameter blocking_operation The completed operation to clean up
1780 */
1781static void
1782rb_io_blocking_operation_exit(struct rb_io *io, struct rb_io_blocking_operation *blocking_operation)
1783{
1784 VALUE wakeup_mutex = io->wakeup_mutex;
1785
1786 // Indicate that the blocking operation is no longer active:
1787 blocking_operation->ec = NULL;
1788
1789 if (RB_TEST(wakeup_mutex)) {
1790 struct io_blocking_operation_arguments arguments = {
1791 .io = io,
1792 .blocking_operation = blocking_operation
1793 };
1794
1795 rb_mutex_synchronize(wakeup_mutex, io_blocking_operation_exit, (VALUE)&arguments);
1796 }
1797 else {
1798 // If there's no wakeup_mutex, we can safely remove the operation directly:
1799 rb_io_blocking_operation_pop(io, blocking_operation);
1800 }
1801}
1802
1803static VALUE
1804rb_thread_io_blocking_operation_ensure(VALUE _argument)
1805{
1806 struct io_blocking_operation_arguments *arguments = (void*)_argument;
1807
1808 rb_io_blocking_operation_exit(arguments->io, arguments->blocking_operation);
1809
1810 return Qnil;
1811}
1812
1813/*
1814 * Executes a function that performs a blocking IO operation, while properly tracking
1815 * the operation in the IO's blocking_operations list. This ensures proper cleanup
1816 * and interruption handling if the IO is closed while blocked.
1817 *
1818 * The operation is automatically removed from the blocking_operations list when the function
1819 * returns, whether normally or due to an exception.
1820 *
1821 * @parameter self The IO object
1822 * @parameter function The function to execute that will perform the blocking operation
1823 * @parameter argument The argument to pass to the function
1824 * @returns The result of the blocking operation function
1825 */
1826VALUE
1827rb_thread_io_blocking_operation(VALUE self, VALUE(*function)(VALUE), VALUE argument)
1828{
1829 struct rb_io *io;
1830 RB_IO_POINTER(self, io);
1831
1832 rb_execution_context_t *ec = GET_EC();
1833 struct rb_io_blocking_operation blocking_operation = {
1834 .ec = ec,
1835 };
1836 rb_io_blocking_operation_enter(io, &blocking_operation);
1837
1838 struct io_blocking_operation_arguments io_blocking_operation_arguments = {
1839 .io = io,
1840 .blocking_operation = &blocking_operation
1841 };
1842
1843 return rb_ensure(function, argument, rb_thread_io_blocking_operation_ensure, (VALUE)&io_blocking_operation_arguments);
1844}
1845
1846static bool
1847thread_io_mn_schedulable(rb_thread_t *th, int events, const struct timeval *timeout)
1848{
1849#if defined(USE_MN_THREADS) && USE_MN_THREADS
1850 return !th_has_dedicated_nt(th) && (events || timeout) && th->blocking;
1851#else
1852 return false;
1853#endif
1854}
1855
1856// true if need retry
1857static bool
1858thread_io_wait_events(rb_thread_t *th, int fd, int events, const struct timeval *timeout)
1859{
1860#if defined(USE_MN_THREADS) && USE_MN_THREADS
1861 if (thread_io_mn_schedulable(th, events, timeout)) {
1862 rb_hrtime_t rel, *prel;
1863
1864 if (timeout) {
1865 rel = rb_timeval2hrtime(timeout);
1866 prel = &rel;
1867 }
1868 else {
1869 prel = NULL;
1870 }
1871
1872 VM_ASSERT(prel || (events & (RB_WAITFD_IN | RB_WAITFD_OUT)));
1873
1874 if (thread_sched_wait_events(TH_SCHED(th), th, fd, waitfd_to_waiting_flag(events), prel)) {
1875 // timeout
1876 return false;
1877 }
1878 else {
1879 return true;
1880 }
1881 }
1882#endif // defined(USE_MN_THREADS) && USE_MN_THREADS
1883 return false;
1884}
1885
1886// assume read/write
1887static bool
1888blocking_call_retryable_p(int r, int eno)
1889{
1890 if (r != -1) return false;
1891
1892 switch (eno) {
1893 case EAGAIN:
1894#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
1895 case EWOULDBLOCK:
1896#endif
1897 return true;
1898 default:
1899 return false;
1900 }
1901}
1902
1903bool
1904rb_thread_mn_schedulable(VALUE thval)
1905{
1906 rb_thread_t *th = rb_thread_ptr(thval);
1907 return th->mn_schedulable;
1908}
1909
1910VALUE
1911rb_thread_io_blocking_call(struct rb_io* io, rb_blocking_function_t *func, void *data1, int events)
1912{
1913 rb_execution_context_t * volatile ec = GET_EC();
1914 rb_thread_t * volatile th = rb_ec_thread_ptr(ec);
1915
1916 RUBY_DEBUG_LOG("th:%u fd:%d ev:%d", rb_th_serial(th), io->fd, events);
1917
1918 volatile VALUE val = Qundef; /* shouldn't be used */
1919 volatile int saved_errno = 0;
1920 enum ruby_tag_type state;
1921 volatile bool prev_mn_schedulable = th->mn_schedulable;
1922 th->mn_schedulable = thread_io_mn_schedulable(th, events, NULL);
1923
1924 int fd = io->fd;
1925
1926 // `errno` is only valid when there is an actual error - but we can't
1927 // extract that from the return value of `func` alone, so we clear any
1928 // prior `errno` value here so that we can later check if it was set by
1929 // `func` or not (as opposed to some previously set value).
1930 errno = 0;
1931
1932 struct rb_io_blocking_operation blocking_operation = {
1933 .ec = ec,
1934 };
1935 rb_io_blocking_operation_enter(io, &blocking_operation);
1936
1937 {
1938 EC_PUSH_TAG(ec);
1939 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
1940 volatile enum ruby_tag_type saved_state = state; /* for BLOCKING_REGION */
1941 retry:
1942 BLOCKING_REGION(th, {
1943 val = func(data1);
1944 saved_errno = errno;
1945 }, ubf_select, th, FALSE);
1946
1947 RUBY_ASSERT(th == rb_ec_thread_ptr(ec));
1948 if (events &&
1949 blocking_call_retryable_p((int)val, saved_errno) &&
1950 thread_io_wait_events(th, fd, events, NULL)) {
1951 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1952 goto retry;
1953 }
1954
1955 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1956
1957 state = saved_state;
1958 }
1959 EC_POP_TAG();
1960
1961 th = rb_ec_thread_ptr(ec);
1962 th->mn_schedulable = prev_mn_schedulable;
1963 }
1964
1965 rb_io_blocking_operation_exit(io, &blocking_operation);
1966
1967 if (state) {
1968 EC_JUMP_TAG(ec, state);
1969 }
1970
1971 // If the error was a timeout, we raise a specific exception for that:
1972 if (saved_errno == ETIMEDOUT) {
1973 rb_raise(rb_eIOTimeoutError, "Blocking operation timed out!");
1974 }
1975
1976 errno = saved_errno;
1977
1978 return val;
1979}
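/*
 * A sketch of how a caller might use rb_thread_io_blocking_call() for a
 * retryable read; `io_read_func` and `io_read_arguments` are hypothetical,
 * not part of this file:
 *
 *     struct io_read_arguments { int fd; void *buffer; size_t size; };
 *
 *     static VALUE
 *     io_read_func(void *argument)
 *     {
 *         struct io_read_arguments *arguments = argument;
 *         return (VALUE)read(arguments->fd, arguments->buffer, arguments->size);
 *     }
 *
 *     // With RB_WAITFD_IN, EAGAIN/EWOULDBLOCK results are retried after
 *     // waiting for the fd to become readable:
 *     VALUE result = rb_thread_io_blocking_call(io, io_read_func, &arguments, RB_WAITFD_IN);
 */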
1980
1981VALUE
1982rb_thread_io_blocking_region(struct rb_io *io, rb_blocking_function_t *func, void *data1)
1983{
1984 return rb_thread_io_blocking_call(io, func, data1, 0);
1985}
1986
1987/*
1988 * rb_thread_call_with_gvl - re-enter the Ruby world after GVL release.
1989 *
1990 * After releasing the GVL using
1991 * rb_thread_call_without_gvl() you cannot access Ruby values or invoke
1992 * methods. If you need to access Ruby again, you must use this function,
1993 * rb_thread_call_with_gvl().
1994 *
1995 * This function rb_thread_call_with_gvl() does the following:
1996 * (1) acquires the GVL.
1997 * (2) calls the passed function `func'.
1998 * (3) releases the GVL.
1999 * (4) returns the value returned at (2).
2000 *
2001 * NOTE: You should not return a Ruby object from (2) because such an
2002 * object will not be marked.
2003 *
2004 * NOTE: If an exception is raised in `func', this function DOES NOT
2005 * protect (catch) the exception. If you have any resources
2006 * that should be freed before the exception propagates, use
2007 * rb_protect() in `func' and return a value indicating that
2008 * an exception was raised.
2009 *
2010 * NOTE: This function should not be called by a thread which was not
2011 * created as a Ruby thread (by Thread.new or similar). In other
2012 * words, this function *DOES NOT* associate or convert a non-Ruby
2013 * thread to a Ruby thread.
2014 */
2015void *
2016rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
2017{
2018 rb_thread_t *th = ruby_thread_from_native();
2019 struct rb_blocking_region_buffer *brb;
2020 struct rb_unblock_callback prev_unblock;
2021 void *r;
2022
2023 if (th == 0) {
2024 /* An error has occurred, but we can't use rb_bug()
2025 * because this thread is not a Ruby thread.
2026 * What should we do?
2027 */
2028 bp();
2029 fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
2030 exit(EXIT_FAILURE);
2031 }
2032
2033 brb = (struct rb_blocking_region_buffer *)th->blocking_region_buffer;
2034 prev_unblock = th->unblock;
2035
2036 if (brb == 0) {
2037 rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
2038 }
2039
2040 blocking_region_end(th, brb);
2041 /* enter the Ruby world: you can access Ruby values, methods and so on. */
2042 r = (*func)(data1);
2043 /* leave the Ruby world: you can no longer access Ruby values, etc. */
2044 int released = blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg, FALSE);
2045 RUBY_ASSERT_ALWAYS(released);
2046 RB_VM_SAVE_MACHINE_CONTEXT(th);
2047 thread_sched_to_waiting(TH_SCHED(th), th);
2048 return r;
2049}
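/*
 * A sketch of typical usage from code running under
 * rb_thread_call_without_gvl(); the callback `add_warning` and the function
 * `heavy_computation` are hypothetical, not part of this file:
 *
 *     static void *
 *     add_warning(void *message)
 *     {
 *         rb_warn("%s", (const char *)message);
 *         return NULL;
 *     }
 *
 *     static void *
 *     heavy_computation(void *data)
 *     {
 *         // ... no Ruby API calls allowed here ...
 *         rb_thread_call_with_gvl(add_warning, (void *)"halfway done");
 *         // ... continue running without the GVL ...
 *         return NULL;
 *     }
 */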
2050
2051/*
2052 * ruby_thread_has_gvl_p - check if current native thread has GVL.
2053 */
2054
2055int
2056ruby_thread_has_gvl_p(void)
2057{
2058 rb_thread_t *th = ruby_thread_from_native();
2059
2060 if (th && th->blocking_region_buffer == 0) {
2061 return 1;
2062 }
2063 else {
2064 return 0;
2065 }
2066}
2067
2068/*
2069 * call-seq:
2070 * Thread.pass -> nil
2071 *
2072 * Give the thread scheduler a hint to pass execution to another thread.
2073 * A running thread may or may not switch; it depends on the OS and processor.
2074 */
2075
2076static VALUE
2077thread_s_pass(VALUE klass)
2078{
2079 rb_thread_schedule();
2080 return Qnil;
2081}
2082
2083/*****************************************************/
2084
2085/*
2086 * rb_threadptr_pending_interrupt_* - manage asynchronous error queue
2087 *
2088 * Async events such as an exception thrown by Thread#raise,
2089 * Thread#kill and thread termination (after main thread termination)
2090 * will be queued to th->pending_interrupt_queue.
2091 * - clear: clear the queue.
2092 * - enque: enqueue err object into queue.
2093 * - deque: dequeue err object from queue.
2094 * - active_p: return 1 if the queue should be checked.
2095 *
2096 * All rb_threadptr_pending_interrupt_* functions must be called by
2097 * a thread that holds the GVL.
2098 * Note that all "rb_"-prefixed APIs require the GVL.
2099 */
2100
2101void
2102rb_threadptr_pending_interrupt_clear(rb_thread_t *th)
2103{
2104 rb_ary_clear(th->pending_interrupt_queue);
2105}
2106
2107void
2108rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
2109{
2110 rb_ary_push(th->pending_interrupt_queue, v);
2111 th->pending_interrupt_queue_checked = 0;
2112}
2113
2114static void
2115threadptr_check_pending_interrupt_queue(rb_thread_t *th)
2116{
2117 if (!th->pending_interrupt_queue) {
2118 rb_raise(rb_eThreadError, "uninitialized thread");
2119 }
2120}
2121
2122enum handle_interrupt_timing {
2123 INTERRUPT_NONE,
2124 INTERRUPT_IMMEDIATE,
2125 INTERRUPT_ON_BLOCKING,
2126 INTERRUPT_NEVER
2127};
2128
2129static enum handle_interrupt_timing
2130rb_threadptr_pending_interrupt_from_symbol(rb_thread_t *th, VALUE sym)
2131{
2132 if (sym == sym_immediate) {
2133 return INTERRUPT_IMMEDIATE;
2134 }
2135 else if (sym == sym_on_blocking) {
2136 return INTERRUPT_ON_BLOCKING;
2137 }
2138 else if (sym == sym_never) {
2139 return INTERRUPT_NEVER;
2140 }
2141 else {
2142 rb_raise(rb_eThreadError, "unknown mask signature");
2143 }
2144}
2145
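// Walk the handle_interrupt mask stack from the innermost mask outwards and
// return the first timing that applies to err (an exception class);
// INTERRUPT_NONE means no mask matched, which callers treat as immediate.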
2146static enum handle_interrupt_timing
2147rb_threadptr_pending_interrupt_check_mask(rb_thread_t *th, VALUE err)
2148{
2149 VALUE mask;
2150 long mask_stack_len = RARRAY_LEN(th->pending_interrupt_mask_stack);
2151 const VALUE *mask_stack = RARRAY_CONST_PTR(th->pending_interrupt_mask_stack);
2152 VALUE mod;
2153 long i;
2154
2155 for (i=0; i<mask_stack_len; i++) {
2156 mask = mask_stack[mask_stack_len-(i+1)];
2157
2158 if (SYMBOL_P(mask)) {
2159 /* do not match RUBY_FATAL_THREAD_KILLED etc */
2160 if (err != rb_cInteger) {
2161 return rb_threadptr_pending_interrupt_from_symbol(th, mask);
2162 }
2163 else {
2164 continue;
2165 }
2166 }
2167
2168 for (mod = err; mod; mod = RCLASS_SUPER(mod)) {
2169 VALUE klass = mod;
2170 VALUE sym;
2171
2172 if (BUILTIN_TYPE(mod) == T_ICLASS) {
2173 klass = RBASIC(mod)->klass;
2174 }
2175 else if (mod != RCLASS_ORIGIN(mod)) {
2176 continue;
2177 }
2178
2179 if ((sym = rb_hash_aref(mask, klass)) != Qnil) {
2180 return rb_threadptr_pending_interrupt_from_symbol(th, sym);
2181 }
2182 }
2183 /* try the next mask */
2184 }
2185 return INTERRUPT_NONE;
2186}
2187
2188static int
2189rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th)
2190{
2191 return RARRAY_LEN(th->pending_interrupt_queue) == 0;
2192}
2193
2194static int
2195rb_threadptr_pending_interrupt_include_p(rb_thread_t *th, VALUE err)
2196{
2197 int i;
2198 for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
2199 VALUE e = RARRAY_AREF(th->pending_interrupt_queue, i);
2200 if (rb_obj_is_kind_of(e, err)) {
2201 return TRUE;
2202 }
2203 }
2204 return FALSE;
2205}
2206
2207static VALUE
2208rb_threadptr_pending_interrupt_deque(rb_thread_t *th, enum handle_interrupt_timing timing)
2209{
2210#if 1 /* 1 to enable Thread#handle_interrupt, 0 to ignore it */
2211 int i;
2212
2213 for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
2214 VALUE err = RARRAY_AREF(th->pending_interrupt_queue, i);
2215
2216 enum handle_interrupt_timing mask_timing = rb_threadptr_pending_interrupt_check_mask(th, CLASS_OF(err));
2217
2218 switch (mask_timing) {
2219 case INTERRUPT_ON_BLOCKING:
2220 if (timing != INTERRUPT_ON_BLOCKING) {
2221 break;
2222 }
2223 /* fall through */
2224 case INTERRUPT_NONE: /* default: IMMEDIATE */
2225 case INTERRUPT_IMMEDIATE:
2226 rb_ary_delete_at(th->pending_interrupt_queue, i);
2227 return err;
2228 case INTERRUPT_NEVER:
2229 break;
2230 }
2231 }
2232
2233 th->pending_interrupt_queue_checked = 1;
2234 return Qundef;
2235#else
2236 VALUE err = rb_ary_shift(th->pending_interrupt_queue);
2237 if (rb_threadptr_pending_interrupt_empty_p(th)) {
2238 th->pending_interrupt_queue_checked = 1;
2239 }
2240 return err;
2241#endif
2242}
2243
2244static int
2245threadptr_pending_interrupt_active_p(rb_thread_t *th)
2246{
2247 /*
2248 * For optimization, we don't check async errinfo queue
2249 * if the queue and the thread interrupt mask were not changed
2250 * since the last check.
2251 */
2252 if (th->pending_interrupt_queue_checked) {
2253 return 0;
2254 }
2255
2256 if (rb_threadptr_pending_interrupt_empty_p(th)) {
2257 return 0;
2258 }
2259
2260 return 1;
2261}
2262
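// Validates each ExceptionClass => :TimingSymbol pair of the argument hash.
// As an optimization, a mask consisting only of Exception => symbol is
// represented by the bare symbol; otherwise the pairs are collected into an
// identity hash.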
2263static int
2264handle_interrupt_arg_check_i(VALUE key, VALUE val, VALUE args)
2265{
2266 VALUE *maskp = (VALUE *)args;
2267
2268 if (val != sym_immediate && val != sym_on_blocking && val != sym_never) {
2269 rb_raise(rb_eArgError, "unknown mask signature");
2270 }
2271
2272 if (key == rb_eException && (UNDEF_P(*maskp) || NIL_P(*maskp))) {
2273 *maskp = val;
2274 return ST_CONTINUE;
2275 }
2276
2277 if (RTEST(*maskp)) {
2278 if (!RB_TYPE_P(*maskp, T_HASH)) {
2279 VALUE prev = *maskp;
2280 *maskp = rb_ident_hash_new();
2281 if (SYMBOL_P(prev)) {
2282 rb_hash_aset(*maskp, rb_eException, prev);
2283 }
2284 }
2285 rb_hash_aset(*maskp, key, val);
2286 }
2287 else {
2288 *maskp = Qfalse;
2289 }
2290
2291 return ST_CONTINUE;
2292}
2293
2294/*
2295 * call-seq:
2296 * Thread.handle_interrupt(hash) { ... } -> result of the block
2297 *
2298 * Changes asynchronous interrupt timing.
2299 *
2300 * _interrupt_ means an asynchronous event and its corresponding procedure,
2301 * triggered by Thread#raise, Thread#kill, signal trap (not supported yet)
2302 * and main thread termination (if the main thread terminates, then all
2303 * other threads will be killed).
2304 *
2305 * The given +hash+ has pairs like <code>ExceptionClass =>
2306 * :TimingSymbol</code>, where the ExceptionClass is the interrupt handled by
2307 * the given block. The TimingSymbol can be one of the following symbols:
2308 *
2309 * [+:immediate+] Invoke interrupts immediately.
2310 * [+:on_blocking+] Invoke interrupts while in a _BlockingOperation_.
2311 * [+:never+] Never invoke interrupts.
2312 *
2313 * _BlockingOperation_ means that the operation will block the calling thread,
2314 * such as read and write. On the CRuby implementation, a _BlockingOperation_ is any
2315 * operation executed without the GVL.
2316 *
2317 * Masked asynchronous interrupts are delayed until they are enabled.
2318 * This method is similar to sigprocmask(3).
2319 *
2320 * === NOTE
2321 *
2322 * Asynchronous interrupts are difficult to use.
2323 *
2324 * If you need to communicate between threads, please consider using another mechanism such as Queue.
2325 *
2326 * Otherwise, use asynchronous interrupts only with a deep understanding of this method.
2327 *
2328 * === Usage
2329 *
2330 * In this example, we can guard against Thread#raise exceptions.
2331 *
2332 * Using the +:never+ TimingSymbol, the RuntimeError exception will always be
2333 * deferred in the outer block of the new thread. In the inner
2334 * ::handle_interrupt block we can purposefully handle RuntimeError exceptions.
2335 *
2336 * th = Thread.new do
2337 * Thread.handle_interrupt(RuntimeError => :never) {
2338 * begin
2339 * # You can write resource allocation code safely.
2340 * Thread.handle_interrupt(RuntimeError => :immediate) {
2341 * # ...
2342 * }
2343 * ensure
2344 * # You can write resource deallocation code safely.
2345 * end
2346 * }
2347 * end
2348 * Thread.pass
2349 * # ...
2350 * th.raise "stop"
2351 *
2352 * While we are ignoring the RuntimeError exception, it's safe to write our
2353 * resource allocation code. Then, the ensure block is where we can safely
2354 * deallocate our resources.
2355 *
2356 * ==== Stack control settings
2357 *
2358 * It's possible to stack multiple levels of ::handle_interrupt blocks in order
2359 * to control more than one ExceptionClass and TimingSymbol at a time.
2360 *
2361 * Thread.handle_interrupt(FooError => :never) {
2362 * Thread.handle_interrupt(BarError => :never) {
2363 * # FooError and BarError are prohibited.
2364 * }
2365 * }
2366 *
2367 * ==== Inheritance with ExceptionClass
2368 *
2369 * All exceptions inherited from the ExceptionClass parameter will be considered.
2370 *
2371 * Thread.handle_interrupt(Exception => :never) {
2372 * # all exceptions inherited from Exception are prohibited.
2373 * }
2374 *
2375 * For handling all interrupts, use +Object+ and not +Exception+
2376 * as the ExceptionClass, as kill/terminate interrupts are not handled by +Exception+.
2377 */
2378static VALUE
2379rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg)
2380{
2381 VALUE mask = Qundef;
2382 rb_execution_context_t * volatile ec = GET_EC();
2383 rb_thread_t * volatile th = rb_ec_thread_ptr(ec);
2384 volatile VALUE r = Qnil;
2385 enum ruby_tag_type state;
2386
2387 if (!rb_block_given_p()) {
2388 rb_raise(rb_eArgError, "block is needed.");
2389 }
2390
2391 mask_arg = rb_to_hash_type(mask_arg);
2392
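// A frozen compare-by-identity hash can be used as the mask directly,
// avoiding building a new mask on every call: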
2393 if (OBJ_FROZEN(mask_arg) && rb_hash_compare_by_id_p(mask_arg)) {
2394 mask = Qnil;
2395 }
2396
2397 rb_hash_foreach(mask_arg, handle_interrupt_arg_check_i, (VALUE)&mask);
2398
2399 if (UNDEF_P(mask)) {
2400 return rb_yield(Qnil);
2401 }
2402
2403 if (!RTEST(mask)) {
2404 mask = mask_arg;
2405 }
2406 else if (RB_TYPE_P(mask, T_HASH)) {
2407 OBJ_FREEZE(mask);
2408 }
2409
2410 rb_ary_push(th->pending_interrupt_mask_stack, mask);
2411 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2412 th->pending_interrupt_queue_checked = 0;
2413 RUBY_VM_SET_INTERRUPT(th->ec);
2414 }
2415
2416 EC_PUSH_TAG(th->ec);
2417 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
2418 r = rb_yield(Qnil);
2419 }
2420 EC_POP_TAG();
2421
2422 rb_ary_pop(th->pending_interrupt_mask_stack);
2423 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2424 th->pending_interrupt_queue_checked = 0;
2425 RUBY_VM_SET_INTERRUPT(th->ec);
2426 }
2427
2428 RUBY_VM_CHECK_INTS(th->ec);
2429
2430 if (state) {
2431 EC_JUMP_TAG(th->ec, state);
2432 }
2433
2434 return r;
2435}
2436
2437/*
2438 * call-seq:
2439 * target_thread.pending_interrupt?(error = nil) -> true/false
2440 *
2441 * Returns whether the target thread has any deferred asynchronous events queued.
2442 *
2443 * If +error+ is given, then only deferred events of type +error+ are checked.
2444 *
2445 * See ::pending_interrupt? for more information.
2446 */
2447static VALUE
2448rb_thread_pending_interrupt_p(int argc, VALUE *argv, VALUE target_thread)
2449{
2450 rb_thread_t *target_th = rb_thread_ptr(target_thread);
2451
2452 if (!target_th->pending_interrupt_queue) {
2453 return Qfalse;
2454 }
2455 if (rb_threadptr_pending_interrupt_empty_p(target_th)) {
2456 return Qfalse;
2457 }
2458 if (rb_check_arity(argc, 0, 1)) {
2459 VALUE err = argv[0];
2460 if (!rb_obj_is_kind_of(err, rb_cModule)) {
2461 rb_raise(rb_eTypeError, "class or module required for rescue clause");
2462 }
2463 return RBOOL(rb_threadptr_pending_interrupt_include_p(target_th, err));
2464 }
2465 else {
2466 return Qtrue;
2467 }
2468}
2469
2470/*
2471 * call-seq:
2472 * Thread.pending_interrupt?(error = nil) -> true/false
2473 *
2474 * Returns whether the current thread has any deferred asynchronous events queued.
2475 *
2476 * Since Thread::handle_interrupt can be used to defer asynchronous events,
2477 * this method can be used to determine if there are any deferred events.
2478 *
2479 * If this method returns true, you may want to finish the enclosing +:never+ block so the deferred events can be handled.
2480 *
2481 * For example, the following method processes deferred asynchronous events
2482 * immediately.
2483 *
2484 * def Thread.kick_interrupt_immediately
2485 * Thread.handle_interrupt(Object => :immediate) {
2486 * Thread.pass
2487 * }
2488 * end
2489 *
2490 * If +error+ is given, then only deferred events of type +error+ are checked.
2491 *
2492 * === Usage
2493 *
2494 * th = Thread.new{
2495 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2496 * while true
2497 * ...
2498 * # reach safe point to invoke interrupt
2499 * if Thread.pending_interrupt?
2500 * Thread.handle_interrupt(Object => :immediate){}
2501 * end
2502 * ...
2503 * end
2504 * }
2505 * }
2506 * ...
2507 * th.raise # stop thread
2508 *
2509 * This example can also be written as follows, which you should prefer
2510 * in order to avoid asynchronous interrupts entirely.
2511 *
2512 * flag = true
2513 * th = Thread.new{
2514 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2515 * while true
2516 * ...
2517 * # reach safe point to invoke interrupt
2518 * break if flag == false
2519 * ...
2520 * end
2521 * }
2522 * }
2523 * ...
2524 * flag = false # stop thread
2525 */
2526
2527static VALUE
2528rb_thread_s_pending_interrupt_p(int argc, VALUE *argv, VALUE self)
2529{
2530 return rb_thread_pending_interrupt_p(argc, argv, GET_THREAD()->self);
2531}
2532
2533NORETURN(static void rb_threadptr_to_kill(rb_thread_t *th));
2534
2535static void
2536rb_threadptr_to_kill(rb_thread_t *th)
2537{
2538 VM_ASSERT(GET_THREAD() == th);
2539 rb_threadptr_pending_interrupt_clear(th);
2540 th->status = THREAD_RUNNABLE;
2541 th->to_kill = 1;
2542 th->ec->errinfo = INT2FIX(TAG_FATAL);
2543 EC_JUMP_TAG(th->ec, TAG_FATAL);
2544}
2545
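// Atomically fetch the unmasked bits of ec->interrupt_flag and clear them,
// leaving masked bits pending; the CAS loop retries if another thread sets
// new bits concurrently.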
2546static inline rb_atomic_t
2547threadptr_get_interrupts(rb_thread_t *th)
2548{
2549 rb_execution_context_t *ec = th->ec;
2550 rb_atomic_t interrupt;
2551 rb_atomic_t old;
2552
2553 old = ATOMIC_LOAD_RELAXED(ec->interrupt_flag);
2554 do {
2555 interrupt = old;
2556 old = ATOMIC_CAS(ec->interrupt_flag, interrupt, interrupt & ec->interrupt_mask);
2557 } while (old != interrupt);
2558 return interrupt & (rb_atomic_t)~ec->interrupt_mask;
2559}
2560
2561static void threadptr_interrupt_exec_exec(rb_thread_t *th);
2562
2563// Execute interrupts on the currently running thread.
2564// In certain situations, calling this function will raise an exception. Some examples are:
2565// * during VM shutdown (`rb_ractor_terminate_all`)
2566// * a call to Thread#exit for the current thread (`rb_thread_kill`)
2567// * a call to Thread#raise for the current thread
2568int
2569rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
2570{
2571 rb_atomic_t interrupt;
2572 int postponed_job_interrupt = 0;
2573 int ret = FALSE;
2574
2575 VM_ASSERT(GET_THREAD() == th);
2576
2577 if (th->ec->raised_flag) return ret;
2578
2579 while ((interrupt = threadptr_get_interrupts(th)) != 0) {
2580 int sig;
2581 int timer_interrupt;
2582 int pending_interrupt;
2583 int trap_interrupt;
2584 int terminate_interrupt;
2585
2586 timer_interrupt = interrupt & TIMER_INTERRUPT_MASK;
2587 pending_interrupt = interrupt & PENDING_INTERRUPT_MASK;
2588 postponed_job_interrupt = interrupt & POSTPONED_JOB_INTERRUPT_MASK;
2589 trap_interrupt = interrupt & TRAP_INTERRUPT_MASK;
2590 terminate_interrupt = interrupt & TERMINATE_INTERRUPT_MASK; // request from other ractors
2591
2592 if (interrupt & VM_BARRIER_INTERRUPT_MASK) {
2593 RB_VM_LOCKING();
2594 }
2595
2596 if (postponed_job_interrupt) {
2597 rb_postponed_job_flush(th->vm);
2598 }
2599
2600 if (trap_interrupt) {
2601 /* signal handling */
2602 if (th == th->vm->ractor.main_thread) {
2603 enum rb_thread_status prev_status = th->status;
2604
2605 th->status = THREAD_RUNNABLE;
2606 {
2607 while ((sig = rb_get_next_signal()) != 0) {
2608 ret |= rb_signal_exec(th, sig);
2609 }
2610 }
2611 th->status = prev_status;
2612 }
2613
2614 if (!ccan_list_empty(&th->interrupt_exec_tasks)) {
2615 enum rb_thread_status prev_status = th->status;
2616
2617 th->status = THREAD_RUNNABLE;
2618 {
2619 threadptr_interrupt_exec_exec(th);
2620 }
2621 th->status = prev_status;
2622 }
2623 }
2624
2625 /* exception from another thread */
2626 if (pending_interrupt && threadptr_pending_interrupt_active_p(th)) {
2627 VALUE err = rb_threadptr_pending_interrupt_deque(th, blocking_timing ? INTERRUPT_ON_BLOCKING : INTERRUPT_NONE);
2628 RUBY_DEBUG_LOG("err:%"PRIdVALUE, err);
2629 ret = TRUE;
2630
2631 if (UNDEF_P(err)) {
2632 /* no error */
2633 }
2634 else if (err == RUBY_FATAL_THREAD_KILLED /* Thread#kill received */ ||
2635 err == RUBY_FATAL_THREAD_TERMINATED /* Terminate thread */ ||
2636 err == INT2FIX(TAG_FATAL) /* Thread.exit etc. */ ) {
2637 terminate_interrupt = 1;
2638 }
2639 else {
2640 if (err == th->vm->special_exceptions[ruby_error_stream_closed]) {
2641 /* the only special exception that may be queued across threads */
2642 err = ruby_vm_special_exception_copy(err);
2643 }
2644 /* set runnable if th was sleeping. */
2645 if (th->status == THREAD_STOPPED ||
2646 th->status == THREAD_STOPPED_FOREVER)
2647 th->status = THREAD_RUNNABLE;
2648 rb_exc_raise(err);
2649 }
2650 }
2651
2652 if (terminate_interrupt) {
2653 rb_threadptr_to_kill(th);
2654 }
2655
2656 if (timer_interrupt) {
2657 uint32_t limits_us = thread_default_quantum_ms * 1000;
2658
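// The thread's priority scales its time quantum: each positive priority
// level doubles it and each negative level halves it.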
2659 if (th->priority > 0)
2660 limits_us <<= th->priority;
2661 else
2662 limits_us >>= -th->priority;
2663
2664 if (th->status == THREAD_RUNNABLE)
2665 th->running_time_us += 10 * 1000; // 10ms = 10_000us // TODO: use macro
2666
2667 VM_ASSERT(th->ec->cfp);
2668 EXEC_EVENT_HOOK(th->ec, RUBY_INTERNAL_EVENT_SWITCH, th->ec->cfp->self,
2669 0, 0, 0, Qundef);
2670
2671 rb_thread_schedule_limits(limits_us);
2672 }
2673 }
2674 return ret;
2675}
2676
2677void
2678rb_thread_execute_interrupts(VALUE thval)
2679{
2680 rb_threadptr_execute_interrupts(rb_thread_ptr(thval), 1);
2681}
2682
2683static void
2684rb_threadptr_ready(rb_thread_t *th)
2685{
2686 rb_threadptr_interrupt(th);
2687}
2688
2689static VALUE
2690rb_threadptr_raise(rb_thread_t *target_th, int argc, VALUE *argv)
2691{
2692 VALUE exc;
2693
2694 if (rb_threadptr_dead(target_th)) {
2695 return Qnil;
2696 }
2697
2698 if (argc == 0) {
2699 exc = rb_exc_new(rb_eRuntimeError, 0, 0);
2700 }
2701 else {
2702 exc = rb_make_exception(argc, argv);
2703 }
2704
2705 /* making an exception object can switch threads,
2706 so we need to check thread deadness again */
2707 if (rb_threadptr_dead(target_th)) {
2708 return Qnil;
2709 }
2710
2711 rb_ec_setup_exception(GET_EC(), exc, Qundef);
2712 rb_threadptr_pending_interrupt_enque(target_th, exc);
2713 rb_threadptr_interrupt(target_th);
2714 return Qnil;
2715}
2716
2717void
2718rb_threadptr_signal_raise(rb_thread_t *th, int sig)
2719{
2720 VALUE argv[2];
2721
2722 argv[0] = rb_eSignal;
2723 argv[1] = INT2FIX(sig);
2724 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2725}
2726
2727void
2728rb_threadptr_signal_exit(rb_thread_t *th)
2729{
2730 VALUE argv[2];
2731
2732 argv[0] = rb_eSystemExit;
2733 argv[1] = rb_str_new2("exit");
2734
2735 // TODO: check signal raise delivery
2736 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2737}
2738
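// Mark the execution context as having raised an exception; returns 1 if the
// flag was already set, 0 if this call set it.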
2739int
2740rb_ec_set_raised(rb_execution_context_t *ec)
2741{
2742 if (ec->raised_flag & RAISED_EXCEPTION) {
2743 return 1;
2744 }
2745 ec->raised_flag |= RAISED_EXCEPTION;
2746 return 0;
2747}
2748
2749int
2750rb_ec_reset_raised(rb_execution_context_t *ec)
2751{
2752 if (!(ec->raised_flag & RAISED_EXCEPTION)) {
2753 return 0;
2754 }
2755 ec->raised_flag &= ~RAISED_EXCEPTION;
2756 return 1;
2757}
2758
2759/*
2760 * Thread-safe IO closing mechanism.
2761 *
2762 * When an IO is closed while other threads or fibers are blocked on it, we need to:
2763 * 1. Track and notify all blocking operations through io->blocking_operations
2764 * 2. Ensure only one thread can close at a time using io->closing_ec
2765 * 3. Synchronize cleanup using wakeup_mutex
2766 *
2767 * The close process works as follows:
2768 * - First check if any thread is already closing (io->closing_ec)
2769 * - Set up wakeup_mutex for synchronization
2770 * - Iterate through all blocking operations in io->blocking_operations
2771 * - For each blocked fiber with a scheduler:
2772 * - Notify via rb_fiber_scheduler_fiber_interrupt
2773 * - For each blocked thread without a scheduler:
2774 * - Enqueue IOError via rb_threadptr_pending_interrupt_enque
2775 * - Wake via rb_threadptr_interrupt
2776 * - Wait on wakeup_mutex until all operations are cleaned up
2777 * - Only then clear closing state and allow actual close to proceed
2778 */
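/*
 * A sketch of the expected pairing on the close path (assumed from the
 * functions below, not a verbatim excerpt of the caller):
 *
 *     rb_thread_io_close_interrupt(io); // interrupt all blocked operations
 *     rb_thread_io_close_wait(io);      // wait until they have all unwound
 */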
2779static VALUE
2780thread_io_close_notify_all(VALUE _io)
2781{
2782 struct rb_io *io = (struct rb_io *)_io;
2783
2784 size_t count = 0;
2785 rb_vm_t *vm = io->closing_ec->thread_ptr->vm;
2786 VALUE error = vm->special_exceptions[ruby_error_stream_closed];
2787
2788 struct rb_io_blocking_operation *blocking_operation;
2789 ccan_list_for_each(rb_io_blocking_operations(io), blocking_operation, list) {
2790 rb_execution_context_t *ec = blocking_operation->ec;
2791
2792 // If the operation is in progress, we need to interrupt it:
2793 if (ec) {
2794 rb_thread_t *thread = ec->thread_ptr;
2795
2796 VALUE result = RUBY_Qundef;
2797 if (thread->scheduler != Qnil) {
2798 result = rb_fiber_scheduler_fiber_interrupt(thread->scheduler, rb_fiberptr_self(ec->fiber_ptr), error);
2799 }
2800
2801 if (result == RUBY_Qundef) {
2802 // Otherwise (no scheduler, or the scheduler did not handle it), enqueue an error and interrupt the thread:
2803 rb_threadptr_pending_interrupt_enque(thread, error);
2804 rb_threadptr_interrupt(thread);
2805 }
2806 }
2807
2808 count += 1;
2809 }
2810
2811 return (VALUE)count;
2812}
2813
2814size_t
2815rb_thread_io_close_interrupt(struct rb_io *io)
2816{
2817 // We guard this operation with `io->closing_ec`, so only one thread will ever enter this function.
2818 if (io->closing_ec) {
2819 return 0;
2820 }
2821
2822 // If there are no blocking operations, we are done:
2823 if (ccan_list_empty(rb_io_blocking_operations(io))) {
2824 return 0;
2825 }
2826
2827 // Otherwise, we are now closing the IO:
2828 rb_execution_context_t *ec = GET_EC();
2829 io->closing_ec = ec;
2830
2831 // This is used to ensure the correct execution context is woken up after the blocking operation is interrupted:
2832 io->wakeup_mutex = rb_mutex_new();
2833
2834 // We need to use a mutex here as entering the fiber scheduler may cause a context switch:
2835 VALUE result = rb_mutex_synchronize(io->wakeup_mutex, thread_io_close_notify_all, (VALUE)io);
2836
2837 return (size_t)result;
2838}
2839
2840void
2841rb_thread_io_close_wait(struct rb_io* io)
2842{
2843 VALUE wakeup_mutex = io->wakeup_mutex;
2844
2845 if (!RB_TEST(wakeup_mutex)) {
2846 // There was nobody else using this file when we closed it, so we never bothered to allocate a mutex:
2847 return;
2848 }
2849
2850 rb_mutex_lock(wakeup_mutex);
2851 while (!ccan_list_empty(rb_io_blocking_operations(io))) {
2852 rb_mutex_sleep(wakeup_mutex, Qnil);
2853 }
2854 rb_mutex_unlock(wakeup_mutex);
2855
2856 // We are done closing:
2857 io->wakeup_mutex = Qnil;
2858 io->closing_ec = NULL;
2859}
2860
2861void
2862rb_thread_fd_close(int fd)
2863{
2864 rb_warn("rb_thread_fd_close is deprecated (and is now a no-op).");
2865}
2866
2867/*
2868 * call-seq:
2869 * thr.raise
2870 * thr.raise(string)
2871 * thr.raise(exception [, string [, array]])
2872 *
2873 * Raises an exception from the given thread. The caller does not have to be
2874 * +thr+. See Kernel#raise for more information.
2875 *
2876 * Thread.abort_on_exception = true
2877 * a = Thread.new { sleep(200) }
2878 * a.raise("Gotcha")
2879 *
2880 * This will produce:
2881 *
2882 * prog.rb:3: Gotcha (RuntimeError)
2883 * from prog.rb:2:in `initialize'
2884 * from prog.rb:2:in `new'
2885 * from prog.rb:2
2886 */
2887
2888static VALUE
2889thread_raise_m(int argc, VALUE *argv, VALUE self)
2890{
2891 rb_thread_t *target_th = rb_thread_ptr(self);
2892 const rb_thread_t *current_th = GET_THREAD();
2893
2894 threadptr_check_pending_interrupt_queue(target_th);
2895 rb_threadptr_raise(target_th, argc, argv);
2896
2897 /* To perform Thread.current.raise as Kernel.raise */
2898 if (current_th == target_th) {
2899 RUBY_VM_CHECK_INTS(target_th->ec);
2900 }
2901 return Qnil;
2902}
2903
2904
2905/*
2906 * call-seq:
2907 * thr.exit -> thr
2908 * thr.kill -> thr
2909 * thr.terminate -> thr
2910 *
2911 * Terminates +thr+ and schedules another thread to be run, returning
2912 * the terminated Thread. If this is the main thread, or the last
2913 * thread, exits the process.
2914 */
2915
2916VALUE
2917rb_thread_kill(VALUE thread)
2918{
2919 rb_thread_t *target_th = rb_thread_ptr(thread);
2920
2921 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2922 return thread;
2923 }
2924 if (target_th == target_th->vm->ractor.main_thread) {
2925 rb_exit(EXIT_SUCCESS);
2926 }
2927
2928 RUBY_DEBUG_LOG("target_th:%u", rb_th_serial(target_th));
2929
2930 if (target_th == GET_THREAD()) {
2931 /* kill myself immediately */
2932 rb_threadptr_to_kill(target_th);
2933 }
2934 else {
2935 threadptr_check_pending_interrupt_queue(target_th);
2936 rb_threadptr_pending_interrupt_enque(target_th, RUBY_FATAL_THREAD_KILLED);
2937 rb_threadptr_interrupt(target_th);
2938 }
2939
2940 return thread;
2941}
2942
2943int
2944rb_thread_to_be_killed(VALUE thread)
2945{
2946 rb_thread_t *target_th = rb_thread_ptr(thread);
2947
2948 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2949 return TRUE;
2950 }
2951 return FALSE;
2952}
2953
2954/*
2955 * call-seq:
2956 * Thread.kill(thread) -> thread
2957 *
2958 * Causes the given +thread+ to exit; see also Thread::exit.
2959 *
2960 * count = 0
2961 * a = Thread.new { loop { count += 1 } }
2962 * sleep(0.1) #=> 0
2963 * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
2964 * count #=> 93947
2965 * a.alive? #=> false
2966 */
2967
2968static VALUE
2969rb_thread_s_kill(VALUE obj, VALUE th)
2970{
2971 return rb_thread_kill(th);
2972}
2973
2974
2975/*
2976 * call-seq:
2977 * Thread.exit -> thread
2978 *
2979 * Terminates the currently running thread and schedules another thread to be
2980 * run.
2981 *
2982 * If this thread is already marked to be killed, ::exit returns the Thread.
2983 *
2984 * If this is the main thread, or the last thread, exits the process.
2985 */
2986
2987static VALUE
2988rb_thread_exit(VALUE _)
2989{
2990 rb_thread_t *th = GET_THREAD();
2991 return rb_thread_kill(th->self);
2992}
2993
2994
2995/*
2996 * call-seq:
2997 * thr.wakeup -> thr
2998 *
2999 * Marks a given thread as eligible for scheduling; however, it may still
3000 * remain blocked on I/O.
3001 *
3002 * *Note:* This does not invoke the scheduler, see #run for more information.
3003 *
3004 * c = Thread.new { Thread.stop; puts "hey!" }
3005 * sleep 0.1 while c.status!='sleep'
3006 * c.wakeup
3007 * c.join
3008 * #=> "hey!"
3009 */
3010
3011VALUE
3012rb_thread_wakeup(VALUE thread)
3013{
3014 if (!RTEST(rb_thread_wakeup_alive(thread))) {
3015 rb_raise(rb_eThreadError, "killed thread");
3016 }
3017 return thread;
3018}
3019
3020VALUE
3021rb_thread_wakeup_alive(VALUE thread)
3022{
3023 rb_thread_t *target_th = rb_thread_ptr(thread);
3024 if (target_th->status == THREAD_KILLED) return Qnil;
3025
3026 rb_threadptr_ready(target_th);
3027
3028 if (target_th->status == THREAD_STOPPED ||
3029 target_th->status == THREAD_STOPPED_FOREVER) {
3030 target_th->status = THREAD_RUNNABLE;
3031 }
3032
3033 return thread;
3034}
3035
3036
3037/*
3038 * call-seq:
3039 * thr.run -> thr
3040 *
3041 * Wakes up +thr+, making it eligible for scheduling.
3042 *
3043 * a = Thread.new { puts "a"; Thread.stop; puts "c" }
3044 * sleep 0.1 while a.status!='sleep'
3045 * puts "Got here"
3046 * a.run
3047 * a.join
3048 *
3049 * This will produce:
3050 *
3051 * a
3052 * Got here
3053 * c
3054 *
3055 * See also the instance method #wakeup.
3056 */
3057
3058VALUE
3059rb_thread_run(VALUE thread)
3060{
3061 rb_thread_wakeup(thread);
3062 rb_thread_schedule();
3063 return thread;
3064}
3065
3066
3067VALUE
3068rb_thread_stop(void)
3069{
3070 if (rb_thread_alone()) {
3071 rb_raise(rb_eThreadError,
3072 "stopping only thread\n\tnote: use sleep to stop forever");
3073 }
3074 rb_thread_sleep_deadly();
3075 return Qnil;
3076}
3077
3078/*
3079 * call-seq:
3080 * Thread.stop -> nil
3081 *
3082 * Stops execution of the current thread, putting it into a ``sleep'' state,
3083 * and schedules execution of another thread.
3084 *
3085 * a = Thread.new { print "a"; Thread.stop; print "c" }
3086 * sleep 0.1 while a.status!='sleep'
3087 * print "b"
3088 * a.run
3089 * a.join
3090 * #=> "abc"
3091 */
3092
3093static VALUE
3094thread_stop(VALUE _)
3095{
3096 return rb_thread_stop();
3097}
3098
3099/********************************************************************/
3100
3101VALUE
3102rb_thread_list(void)
3103{
3104 // TODO
3105 return rb_ractor_thread_list();
3106}
3107
3108/*
3109 * call-seq:
3110 * Thread.list -> array
3111 *
3112 * Returns an array of Thread objects for all threads that are either runnable
3113 * or stopped.
3114 *
3115 * Thread.new { sleep(200) }
3116 * Thread.new { 1000000.times {|i| i*i } }
3117 * Thread.new { Thread.stop }
3118 * Thread.list.each {|t| p t}
3119 *
3120 * This will produce:
3121 *
3122 * #<Thread:0x401b3e84 sleep>
3123 * #<Thread:0x401b3f38 run>
3124 * #<Thread:0x401b3fb0 sleep>
3125 * #<Thread:0x401bdf4c run>
3126 */
3127
3128static VALUE
3129thread_list(VALUE _)
3130{
3131 return rb_thread_list();
3132}
3133
3134VALUE
3135rb_thread_current(void)
3136{
3137 return GET_THREAD()->self;
3138}
3139
3140/*
3141 * call-seq:
3142 * Thread.current -> thread
3143 *
3144 * Returns the currently executing thread.
3145 *
3146 * Thread.current #=> #<Thread:0x401bdf4c run>
3147 */
3148
3149static VALUE
3150thread_s_current(VALUE klass)
3151{
3152 return rb_thread_current();
3153}
3154
3155VALUE
3156rb_thread_main(void)
3157{
3158 return GET_RACTOR()->threads.main->self;
3159}
3160
3161/*
3162 * call-seq:
3163 * Thread.main -> thread
3164 *
3165 * Returns the main thread.
3166 */
3167
3168static VALUE
3169rb_thread_s_main(VALUE klass)
3170{
3171 return rb_thread_main();
3172}
3173
3174
3175/*
3176 * call-seq:
3177 * Thread.abort_on_exception -> true or false
3178 *
3179 * Returns the status of the global ``abort on exception'' condition.
3180 *
3181 * The default is +false+.
3182 *
3183 * When set to +true+, if any thread is aborted by an exception, the
3184 * raised exception will be re-raised in the main thread.
3185 *
3186 * Can also be specified by the global $DEBUG flag or command line option
3187 * +-d+.
3188 *
3189 * See also ::abort_on_exception=.
3190 *
3191 * There is also an instance level method to set this for a specific thread,
3192 * see #abort_on_exception.
3193 */
3194
3195static VALUE
3196rb_thread_s_abort_exc(VALUE _)
3197{
3198 return RBOOL(GET_THREAD()->vm->thread_abort_on_exception);
3199}
3200
3201
3202/*
3203 * call-seq:
3204 * Thread.abort_on_exception= boolean -> true or false
3205 *
3206 * When set to +true+, if any thread is aborted by an exception, the
3207 * raised exception will be re-raised in the main thread.
3208 * Returns the new state.
3209 *
3210 * Thread.abort_on_exception = true
3211 * t1 = Thread.new do
3212 * puts "In new thread"
3213 * raise "Exception from thread"
3214 * end
3215 * sleep(1)
3216 * puts "not reached"
3217 *
3218 * This will produce:
3219 *
3220 * In new thread
3221 * prog.rb:4: Exception from thread (RuntimeError)
3222 * from prog.rb:2:in `initialize'
3223 * from prog.rb:2:in `new'
3224 * from prog.rb:2
3225 *
3226 * See also ::abort_on_exception.
3227 *
3228 * There is also an instance level method to set this for a specific thread,
3229 * see #abort_on_exception=.
3230 */
3231
3232static VALUE
3233rb_thread_s_abort_exc_set(VALUE self, VALUE val)
3234{
3235 GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
3236 return val;
3237}
3238
3239
3240/*
3241 * call-seq:
3242 * thr.abort_on_exception -> true or false
3243 *
3244 * Returns the status of the thread-local ``abort on exception'' condition for
3245 * this +thr+.
3246 *
3247 * The default is +false+.
3248 *
3249 * See also #abort_on_exception=.
3250 *
3251 * There is also a class level method to set this for all threads, see
3252 * ::abort_on_exception.
3253 */
3254
3255static VALUE
3256rb_thread_abort_exc(VALUE thread)
3257{
3258 return RBOOL(rb_thread_ptr(thread)->abort_on_exception);
3259}
3260
3261
3262/*
3263 * call-seq:
3264 * thr.abort_on_exception= boolean -> true or false
3265 *
3266 * When set to +true+, if this +thr+ is aborted by an exception, the
3267 * raised exception will be re-raised in the main thread.
3268 *
3269 * See also #abort_on_exception.
3270 *
3271 * There is also a class level method to set this for all threads, see
3272 * ::abort_on_exception=.
3273 */
3274
3275static VALUE
3276rb_thread_abort_exc_set(VALUE thread, VALUE val)
3277{
3278 rb_thread_ptr(thread)->abort_on_exception = RTEST(val);
3279 return val;
3280}
3281
3282
3283/*
3284 * call-seq:
3285 * Thread.report_on_exception -> true or false
3286 *
3287 * Returns the status of the global ``report on exception'' condition.
3288 *
3289 * The default is +true+ since Ruby 2.5.
3290 *
3291 * All threads created when this flag is true will report
3292 * a message on $stderr if an exception kills the thread.
3293 *
3294 * Thread.new { 1.times { raise } }
3295 *
3296 * will produce this output on $stderr:
3297 *
3298 * #<Thread:...> terminated with exception (report_on_exception is true):
3299 * Traceback (most recent call last):
3300 * 2: from -e:1:in `block in <main>'
3301 * 1: from -e:1:in `times'
3302 *
3303 * This is done to catch errors in threads early.
3304 * In some cases, you might not want this output.
3305 * There are multiple ways to avoid the extra output:
3306 *
3307 * * If the exception is not intended, the best is to fix the cause of
3308 * the exception so it does not happen anymore.
3309 * * If the exception is intended, it might be better to rescue it closer to
3310 * where it is raised rather than let it kill the Thread.
3311 * * If it is guaranteed the Thread will be joined with Thread#join or
3312 * Thread#value, then it is safe to disable this report with
3313 * <code>Thread.current.report_on_exception = false</code>
3314 * when starting the Thread.
3315 * However, this might handle the exception much later, or not at all
3316 * if the Thread is never joined due to the parent thread being blocked, etc.
3317 *
3318 * See also ::report_on_exception=.
3319 *
3320 * There is also an instance level method to set this for a specific thread,
3321 * see #report_on_exception=.
3322 *
3323 */
3324
3325static VALUE
3326rb_thread_s_report_exc(VALUE _)
3327{
3328 return RBOOL(GET_THREAD()->vm->thread_report_on_exception);
3329}
3330
3331
3332/*
3333 * call-seq:
3334 * Thread.report_on_exception= boolean -> true or false
3335 *
3336 * Returns the new state.
3337 * When set to +true+, all threads created afterwards will inherit the
3338 * condition and report a message on $stderr if an exception kills a thread:
3339 *
3340 * Thread.report_on_exception = true
3341 * t1 = Thread.new do
3342 * puts "In new thread"
3343 * raise "Exception from thread"
3344 * end
3345 * sleep(1)
3346 * puts "In the main thread"
3347 *
3348 * This will produce:
3349 *
3350 * In new thread
3351 * #<Thread:...prog.rb:2> terminated with exception (report_on_exception is true):
3352 * Traceback (most recent call last):
3353 * prog.rb:4:in `block in <main>': Exception from thread (RuntimeError)
3354 * In the main thread
3355 *
3356 * See also ::report_on_exception.
3357 *
3358 * There is also an instance level method to set this for a specific thread,
3359 * see #report_on_exception=.
3360 */
3361
3362static VALUE
3363rb_thread_s_report_exc_set(VALUE self, VALUE val)
3364{
3365 GET_THREAD()->vm->thread_report_on_exception = RTEST(val);
3366 return val;
3367}
3368
3369
3370/*
3371 * call-seq:
3372 * Thread.ignore_deadlock -> true or false
3373 *
3374 * Returns the status of the global ``ignore deadlock'' condition.
3375 * The default is +false+, so that deadlock conditions are not ignored.
3376 *
3377 * See also ::ignore_deadlock=.
3378 *
3379 */
3380
3381static VALUE
3382rb_thread_s_ignore_deadlock(VALUE _)
3383{
3384 return RBOOL(GET_THREAD()->vm->thread_ignore_deadlock);
3385}
3386
3387
3388/*
3389 * call-seq:
3390 * Thread.ignore_deadlock = boolean -> true or false
3391 *
3392 * Returns the new state.
3393 * When set to +true+, the VM will not check for deadlock conditions.
3394 * It is only useful to set this if your application can break a
3395 * deadlock condition via some other means, such as a signal.
3396 *
3397 * Thread.ignore_deadlock = true
3398 * queue = Thread::Queue.new
3399 *
3400 * trap(:SIGUSR1){queue.push "Received signal"}
3401 *
3402 * # raises fatal error unless ignoring deadlock
3403 * puts queue.pop
3404 *
3405 * See also ::ignore_deadlock.
3406 */
3407
3408static VALUE
3409rb_thread_s_ignore_deadlock_set(VALUE self, VALUE val)
3410{
3411 GET_THREAD()->vm->thread_ignore_deadlock = RTEST(val);
3412 return val;
3413}
3414
3415
3416/*
3417 * call-seq:
3418 * thr.report_on_exception -> true or false
3419 *
3420 * Returns the status of the thread-local ``report on exception'' condition for
3421 * this +thr+.
3422 *
3423 * The default value when creating a Thread is the value of
3424 * the global flag Thread.report_on_exception.
3425 *
3426 * See also #report_on_exception=.
3427 *
3428 * There is also a class level method to set this for all new threads, see
3429 * ::report_on_exception=.
3430 */
3431
3432static VALUE
3433rb_thread_report_exc(VALUE thread)
3434{
3435 return RBOOL(rb_thread_ptr(thread)->report_on_exception);
3436}
3437
3438
3439/*
3440 * call-seq:
3441 * thr.report_on_exception= boolean -> true or false
3442 *
3443 * When set to +true+, a message is printed on $stderr if an exception
3444 * kills this +thr+. See ::report_on_exception for details.
3445 *
3446 * See also #report_on_exception.
3447 *
3448 * There is also a class level method to set this for all new threads, see
3449 * ::report_on_exception=.
3450 */
3451
3452static VALUE
3453rb_thread_report_exc_set(VALUE thread, VALUE val)
3454{
3455 rb_thread_ptr(thread)->report_on_exception = RTEST(val);
3456 return val;
3457}
3458
3459
3460/*
3461 * call-seq:
3462 * thr.group -> thgrp or nil
3463 *
3464 * Returns the ThreadGroup which contains the given thread.
3465 *
3466 * Thread.main.group #=> #<ThreadGroup:0x4029d914>
3467 */
3468
3469VALUE
3470rb_thread_group(VALUE thread)
3471{
3472 return rb_thread_ptr(thread)->thgroup;
3473}
3474
3475static const char *
3476thread_status_name(rb_thread_t *th, int detail)
3477{
3478 switch (th->status) {
3479 case THREAD_RUNNABLE:
3480 return th->to_kill ? "aborting" : "run";
3481 case THREAD_STOPPED_FOREVER:
3482 if (detail) return "sleep_forever";
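 /* fall through */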
3483 case THREAD_STOPPED:
3484 return "sleep";
3485 case THREAD_KILLED:
3486 return "dead";
3487 default:
3488 return "unknown";
3489 }
3490}
3491
3492static int
3493rb_threadptr_dead(rb_thread_t *th)
3494{
3495 return th->status == THREAD_KILLED;
3496}
3497
3498
3499/*
3500 * call-seq:
3501 * thr.status -> string, false or nil
3502 *
3503 * Returns the status of +thr+.
3504 *
3505 * [<tt>"sleep"</tt>]
3506 * Returned if this thread is sleeping or waiting on I/O
3507 * [<tt>"run"</tt>]
3508 * When this thread is executing
3509 * [<tt>"aborting"</tt>]
3510 * If this thread is aborting
3511 * [+false+]
3512 * When this thread is terminated normally
3513 * [+nil+]
3514 * If terminated with an exception.
3515 *
3516 * a = Thread.new { raise("die now") }
3517 * b = Thread.new { Thread.stop }
3518 * c = Thread.new { Thread.exit }
3519 * d = Thread.new { sleep }
3520 * d.kill #=> #<Thread:0x401b3678 aborting>
3521 * a.status #=> nil
3522 * b.status #=> "sleep"
3523 * c.status #=> false
3524 * d.status #=> "aborting"
3525 * Thread.current.status #=> "run"
3526 *
3527 * See also the instance methods #alive? and #stop?
3528 */
3529
3530static VALUE
3531rb_thread_status(VALUE thread)
3532{
3533 rb_thread_t *target_th = rb_thread_ptr(thread);
3534
3535 if (rb_threadptr_dead(target_th)) {
3536 if (!NIL_P(target_th->ec->errinfo) &&
3537 !FIXNUM_P(target_th->ec->errinfo)) {
3538 return Qnil;
3539 }
3540 else {
3541 return Qfalse;
3542 }
3543 }
3544 else {
3545 return rb_str_new2(thread_status_name(target_th, FALSE));
3546 }
3547}
3548
3549
3550/*
3551 * call-seq:
3552 * thr.alive? -> true or false
3553 *
3554 * Returns +true+ if +thr+ is running or sleeping.
3555 *
3556 * thr = Thread.new { }
3557 * thr.join #=> #<Thread:0x401b3fb0 dead>
3558 * Thread.current.alive? #=> true
3559 * thr.alive? #=> false
3560 *
3561 * See also #stop? and #status.
3562 */
3563
3564static VALUE
3565rb_thread_alive_p(VALUE thread)
3566{
3567 return RBOOL(!thread_finished(rb_thread_ptr(thread)));
3568}
3569
3570/*
3571 * call-seq:
3572 * thr.stop? -> true or false
3573 *
3574 * Returns +true+ if +thr+ is dead or sleeping.
3575 *
3576 * a = Thread.new { Thread.stop }
3577 * b = Thread.current
3578 * a.stop? #=> true
3579 * b.stop? #=> false
3580 *
3581 * See also #alive? and #status.
3582 */
3583
3584static VALUE
3585rb_thread_stop_p(VALUE thread)
3586{
3587 rb_thread_t *th = rb_thread_ptr(thread);
3588
3589 if (rb_threadptr_dead(th)) {
3590 return Qtrue;
3591 }
3592 return RBOOL(th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER);
3593}
3594
3595/*
3596 * call-seq:
3597 * thr.name -> string
3598 *
3599 * Returns the name of the thread.
3600 */
3601
3602static VALUE
3603rb_thread_getname(VALUE thread)
3604{
3605 return rb_thread_ptr(thread)->name;
3606}
3607
3608/*
3609 * call-seq:
3610 * thr.name=(name) -> string
3611 *
3612 * Sets the given name on the Ruby thread.
3613 * On some platforms, the name may also be set on the underlying pthread and/or kernel thread.
3614 */
3615
3616static VALUE
3617rb_thread_setname(VALUE thread, VALUE name)
3618{
3619 rb_thread_t *target_th = rb_thread_ptr(thread);
3620
3621 if (!NIL_P(name)) {
3622 rb_encoding *enc;
3623 StringValueCStr(name);
3624 enc = rb_enc_get(name);
3625 if (!rb_enc_asciicompat(enc)) {
3626 rb_raise(rb_eArgError, "ASCII incompatible encoding (%s)",
3627 rb_enc_name(enc));
3628 }
3629 name = rb_str_new_frozen(name);
3630 }
3631 target_th->name = name;
3632 if (threadptr_initialized(target_th) && target_th->has_dedicated_nt) {
3633 native_set_another_thread_name(target_th->nt->thread_id, name);
3634 }
3635 return name;
3636}
3637
3638#if USE_NATIVE_THREAD_NATIVE_THREAD_ID
3639/*
3640 * call-seq:
3641 * thr.native_thread_id -> integer
3642 *
3643 * Returns the native thread ID used by the Ruby thread.
3644 *
3645 * The ID depends on the OS (it is not the POSIX thread ID returned by pthread_self(3)):
3646 * * On Linux it is TID returned by gettid(2).
3647 * * On macOS it is the system-wide unique integral ID of thread returned
3648 * by pthread_threadid_np(3).
3649 * * On FreeBSD it is the unique integral ID of the thread returned by
3650 * pthread_getthreadid_np(3).
3651 * * On Windows it is the thread identifier returned by GetThreadId().
3652 * * On other platforms, it raises NotImplementedError.
3653 *
3654 * NOTE:
3655 * If the thread is not yet associated with, or has already been dissociated
3656 * from, a native thread, it returns _nil_.
3657 * If the Ruby implementation uses an M:N thread model, the ID may change
3658 * depending on the timing.
3659 */
3660
3661static VALUE
3662rb_thread_native_thread_id(VALUE thread)
3663{
3664 rb_thread_t *target_th = rb_thread_ptr(thread);
3665 if (rb_threadptr_dead(target_th)) return Qnil;
3666 return native_thread_native_thread_id(target_th);
3667}
3668#else
3669# define rb_thread_native_thread_id rb_f_notimplement
3670#endif
3671
3672/*
3673 * call-seq:
3674 * thr.to_s -> string
3675 *
3676 * Dump the name, id, and status of _thr_ to a string.
3677 */
3678
3679static VALUE
3680rb_thread_to_s(VALUE thread)
3681{
3682 VALUE cname = rb_class_path(rb_obj_class(thread));
3683 rb_thread_t *target_th = rb_thread_ptr(thread);
3684 const char *status;
3685 VALUE str, loc;
3686
3687 status = thread_status_name(target_th, TRUE);
3688 str = rb_sprintf("#<%"PRIsVALUE":%p", cname, (void *)thread);
3689 if (!NIL_P(target_th->name)) {
3690 rb_str_catf(str, "@%"PRIsVALUE, target_th->name);
3691 }
3692 if ((loc = threadptr_invoke_proc_location(target_th)) != Qnil) {
3693 rb_str_catf(str, " %"PRIsVALUE":%"PRIsVALUE,
3694 RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
3695 }
3696 rb_str_catf(str, " %s>", status);
3697
3698 return str;
3699}
3700
3701/* variables for recursive traversals */
3702#define recursive_key id__recursive_key__
3703
3704static VALUE
3705threadptr_local_aref(rb_thread_t *th, ID id)
3706{
3707 if (id == recursive_key) {
3708 return th->ec->local_storage_recursive_hash;
3709 }
3710 else {
3711 VALUE val;
3712 struct rb_id_table *local_storage = th->ec->local_storage;
3713
3714 if (local_storage != NULL && rb_id_table_lookup(local_storage, id, &val)) {
3715 return val;
3716 }
3717 else {
3718 return Qnil;
3719 }
3720 }
3721}
3722
3723VALUE
3724rb_thread_local_aref(VALUE thread, ID id)
3725{
3726 return threadptr_local_aref(rb_thread_ptr(thread), id);
3727}
3728
3729/*
3730 * call-seq:
3731 * thr[sym] -> obj or nil
3732 *
3733 * Attribute Reference---Returns the value of a fiber-local variable (current thread's root fiber
3734 * if not explicitly inside a Fiber), using either a symbol or a string name.
3735 * If the specified variable does not exist, returns +nil+.
3736 *
3737 * [
3738 * Thread.new { Thread.current["name"] = "A" },
3739 * Thread.new { Thread.current[:name] = "B" },
3740 * Thread.new { Thread.current["name"] = "C" }
3741 * ].each do |th|
3742 * th.join
3743 * puts "#{th.inspect}: #{th[:name]}"
3744 * end
3745 *
3746 * This will produce:
3747 *
3748 * #<Thread:0x00000002a54220 dead>: A
3749 * #<Thread:0x00000002a541a8 dead>: B
3750 * #<Thread:0x00000002a54130 dead>: C
3751 *
3752 * Thread#[] and Thread#[]= are not thread-local but fiber-local.
3753 * This confusion did not exist in Ruby 1.8 because
3754 * fibers are only available since Ruby 1.9.
3755 * Ruby 1.9 chose to make these methods fiber-local in order to preserve
3756 * the following idiom for dynamic scope:
3757 *
3758 * def meth(newvalue)
3759 * begin
3760 * oldvalue = Thread.current[:name]
3761 * Thread.current[:name] = newvalue
3762 * yield
3763 * ensure
3764 * Thread.current[:name] = oldvalue
3765 * end
3766 * end
3767 *
3768 * The idiom would not work as dynamic scope if the methods were thread-local
3769 * and a given block switched fibers.
3770 *
3771 * f = Fiber.new {
3772 * meth(1) {
3773 * Fiber.yield
3774 * }
3775 * }
3776 * meth(2) {
3777 * f.resume
3778 * }
3779 * f.resume
3780 * p Thread.current[:name]
3781 * #=> nil if fiber-local
3782 * #=> 2 if thread-local (The value 2 is leaked to outside of meth method.)
3783 *
3784 * For thread-local variables, please see #thread_variable_get and
3785 * #thread_variable_set.
3786 *
3787 */
3788
3789static VALUE
3790rb_thread_aref(VALUE thread, VALUE key)
3791{
3792 ID id = rb_check_id(&key);
3793 if (!id) return Qnil;
3794 return rb_thread_local_aref(thread, id);
3795}
3796
3797/*
3798 * call-seq:
3799 * thr.fetch(sym) -> obj
3800 * thr.fetch(sym) { } -> obj
3801 * thr.fetch(sym, default) -> obj
3802 *
3803 * Returns the fiber-local value for the given key. If the key can't be
3804 * found, there are several options: With no other arguments, it will
3805 * raise a KeyError exception; if <i>default</i> is given, then that
3806 * will be returned; if the optional code block is specified, then
3807 * that will be run and its result returned. See Thread#[] and
3808 * Hash#fetch.
3809 */
3810static VALUE
3811rb_thread_fetch(int argc, VALUE *argv, VALUE self)
3812{
3813 VALUE key, val;
3814 ID id;
3815 rb_thread_t *target_th = rb_thread_ptr(self);
3816 int block_given;
3817
3818 rb_check_arity(argc, 1, 2);
3819 key = argv[0];
3820
3821 block_given = rb_block_given_p();
3822 if (block_given && argc == 2) {
3823 rb_warn("block supersedes default value argument");
3824 }
3825
3826 id = rb_check_id(&key);
3827
3828 if (id == recursive_key) {
3829 return target_th->ec->local_storage_recursive_hash;
3830 }
3831 else if (id && target_th->ec->local_storage &&
3832 rb_id_table_lookup(target_th->ec->local_storage, id, &val)) {
3833 return val;
3834 }
3835 else if (block_given) {
3836 return rb_yield(key);
3837 }
3838 else if (argc == 1) {
3839 rb_key_err_raise(rb_sprintf("key not found: %+"PRIsVALUE, key), self, key);
3840 }
3841 else {
3842 return argv[1];
3843 }
3844}
3845
3846static VALUE
3847threadptr_local_aset(rb_thread_t *th, ID id, VALUE val)
3848{
3849 if (id == recursive_key) {
3850 th->ec->local_storage_recursive_hash = val;
3851 return val;
3852 }
3853 else {
3854 struct rb_id_table *local_storage = th->ec->local_storage;
3855
3856 if (NIL_P(val)) {
3857 if (!local_storage) return Qnil;
3858 rb_id_table_delete(local_storage, id);
3859 return Qnil;
3860 }
3861 else {
3862 if (local_storage == NULL) {
3863 th->ec->local_storage = local_storage = rb_id_table_create(0);
3864 }
3865 rb_id_table_insert(local_storage, id, val);
3866 return val;
3867 }
3868 }
3869}
3870
3871VALUE
3872rb_thread_local_aset(VALUE thread, ID id, VALUE val)
3873{
3874 if (OBJ_FROZEN(thread)) {
3875 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3876 }
3877
3878 return threadptr_local_aset(rb_thread_ptr(thread), id, val);
3879}
3880
3881/*
3882 * call-seq:
3883 * thr[sym] = obj -> obj
3884 *
3885 * Attribute Assignment---Sets or creates the value of a fiber-local variable,
3886 * using either a symbol or a string.
3887 *
3888 * See also Thread#[].
3889 *
3890 * For thread-local variables, please see #thread_variable_set and
3891 * #thread_variable_get.
3892 */
3893
3894static VALUE
3895rb_thread_aset(VALUE self, VALUE id, VALUE val)
3896{
3897 return rb_thread_local_aset(self, rb_to_id(id), val);
3898}
3899
3900/*
3901 * call-seq:
3902 * thr.thread_variable_get(key) -> obj or nil
3903 *
3904 * Returns the value of a thread local variable that has been set. Note that
3905 * these are different from fiber-local values. For fiber-local values,
3906 * please see Thread#[] and Thread#[]=.
3907 *
3908 * Thread local values are carried along with threads, and do not respect
3909 * fibers. For example:
3910 *
3911 * Thread.new {
3912 * Thread.current.thread_variable_set("foo", "bar") # set a thread local
3913 * Thread.current["foo"] = "bar" # set a fiber local
3914 *
3915 * Fiber.new {
3916 * Fiber.yield [
3917 * Thread.current.thread_variable_get("foo"), # get the thread local
3918 * Thread.current["foo"], # get the fiber local
3919 * ]
3920 * }.resume
3921 * }.join.value # => ['bar', nil]
3922 *
3923 * The value "bar" is returned for the thread local, whereas nil is returned
3924 * for the fiber local. The fiber is executed in the same thread, so the
3925 * thread local values are available.
3926 */
3927
3928static VALUE
3929rb_thread_variable_get(VALUE thread, VALUE key)
3930{
3931 VALUE locals;
3932 VALUE symbol = rb_to_symbol(key);
3933
3934 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3935 return Qnil;
3936 }
3937 locals = rb_thread_local_storage(thread);
3938 return rb_hash_aref(locals, symbol);
3939}
3940
3941/*
3942 * call-seq:
3943 * thr.thread_variable_set(key, value)
3944 *
3945 * Sets a thread local with +key+ to +value+. Note that these are local to
3946 * threads, and not to fibers. Please see Thread#thread_variable_get and
3947 * Thread#[] for more information.
3948 */
3949
3950static VALUE
3951rb_thread_variable_set(VALUE thread, VALUE key, VALUE val)
3952{
3953 VALUE locals;
3954
3955 if (OBJ_FROZEN(thread)) {
3956 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3957 }
3958
3959 locals = rb_thread_local_storage(thread);
3960 return rb_hash_aset(locals, rb_to_symbol(key), val);
3961}
3962
3963/*
3964 * call-seq:
3965 * thr.key?(sym) -> true or false
3966 *
3967 * Returns +true+ if the given string (or symbol) exists as a fiber-local
3968 * variable.
3969 *
3970 * me = Thread.current
3971 * me[:oliver] = "a"
3972 * me.key?(:oliver) #=> true
3973 * me.key?(:stanley) #=> false
3974 */
3975
3976static VALUE
3977rb_thread_key_p(VALUE self, VALUE key)
3978{
3979 VALUE val;
3980 ID id = rb_check_id(&key);
3981 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3982
3983 if (!id || local_storage == NULL) {
3984 return Qfalse;
3985 }
3986 return RBOOL(rb_id_table_lookup(local_storage, id, &val));
3987}
3988
3989static enum rb_id_table_iterator_result
3990thread_keys_i(ID key, VALUE value, void *ary)
3991{
3992 rb_ary_push((VALUE)ary, ID2SYM(key));
3993 return ID_TABLE_CONTINUE;
3994}
3995
3996int
3997rb_thread_alone(void)
3998{
3999 // TODO
4000 return rb_ractor_living_thread_num(GET_RACTOR()) == 1;
4001}
4002
4003/*
4004 * call-seq:
4005 * thr.keys -> array
4006 *
4007 * Returns an array of the names of the fiber-local variables (as Symbols).
4008 *
4009 * thr = Thread.new do
4010 * Thread.current[:cat] = 'meow'
4011 * Thread.current["dog"] = 'woof'
4012 * end
4013 * thr.join #=> #<Thread:0x401b3f10 dead>
4014 * thr.keys #=> [:dog, :cat]
4015 */
4016
4017static VALUE
4018rb_thread_keys(VALUE self)
4019{
4020 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
4021 VALUE ary = rb_ary_new();
4022
4023 if (local_storage) {
4024 rb_id_table_foreach(local_storage, thread_keys_i, (void *)ary);
4025 }
4026 return ary;
4027}
4028
4029static int
4030keys_i(VALUE key, VALUE value, VALUE ary)
4031{
4032 rb_ary_push(ary, key);
4033 return ST_CONTINUE;
4034}
4035
4036/*
4037 * call-seq:
4038 * thr.thread_variables -> array
4039 *
4040 * Returns an array of the names of the thread-local variables (as Symbols).
4041 *
4042 * thr = Thread.new do
4043 * Thread.current.thread_variable_set(:cat, 'meow')
4044 * Thread.current.thread_variable_set("dog", 'woof')
4045 * end
4046 * thr.join #=> #<Thread:0x401b3f10 dead>
4047 * thr.thread_variables #=> [:dog, :cat]
4048 *
4049 * Note that these are not fiber local variables. Please see Thread#[] and
4050 * Thread#thread_variable_get for more details.
4051 */
4052
4053static VALUE
4054rb_thread_variables(VALUE thread)
4055{
4056 VALUE locals;
4057 VALUE ary;
4058
4059 ary = rb_ary_new();
4060 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
4061 return ary;
4062 }
4063 locals = rb_thread_local_storage(thread);
4064 rb_hash_foreach(locals, keys_i, ary);
4065
4066 return ary;
4067}
4068
4069/*
4070 * call-seq:
4071 * thr.thread_variable?(key) -> true or false
4072 *
4073 * Returns +true+ if the given string (or symbol) exists as a thread-local
4074 * variable.
4075 *
4076 * me = Thread.current
4077 * me.thread_variable_set(:oliver, "a")
4078 * me.thread_variable?(:oliver) #=> true
4079 * me.thread_variable?(:stanley) #=> false
4080 *
4081 * Note that these are not fiber local variables. Please see Thread#[] and
4082 * Thread#thread_variable_get for more details.
4083 */
4084
4085static VALUE
4086rb_thread_variable_p(VALUE thread, VALUE key)
4087{
4088 VALUE locals;
4089 VALUE symbol = rb_to_symbol(key);
4090
4091 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
4092 return Qfalse;
4093 }
4094 locals = rb_thread_local_storage(thread);
4095
4096 return RBOOL(rb_hash_lookup(locals, symbol) != Qnil);
4097}
4098
4099/*
4100 * call-seq:
4101 * thr.priority -> integer
4102 *
4103 * Returns the priority of <i>thr</i>. The default is inherited from the
4104 * thread that created it, or zero for the
4105 * initial main thread; higher-priority threads will run more frequently
4106 * than lower-priority threads (but lower-priority threads can also run).
4107 *
4108 * This is just a hint for the Ruby thread scheduler. It may be ignored on
4109 * some platforms.
4110 *
4111 * Thread.current.priority #=> 0
4112 */
4113
4114static VALUE
4115rb_thread_priority(VALUE thread)
4116{
4117 return INT2NUM(rb_thread_ptr(thread)->priority);
4118}
4119
4120
4121/*
4122 * call-seq:
4123 * thr.priority= integer -> thr
4124 *
4125 * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
4126 * will run more frequently than lower-priority threads (but lower-priority
4127 * threads can also run).
4128 *
4129 * This is just a hint for the Ruby thread scheduler. It may be ignored on
4130 * some platforms.
4131 *
4132 * count1 = count2 = 0
4133 * a = Thread.new do
4134 * loop { count1 += 1 }
4135 * end
4136 * a.priority = -1
4137 *
4138 * b = Thread.new do
4139 * loop { count2 += 1 }
4140 * end
4141 * b.priority = -2
4142 * sleep 1 #=> 1
4143 * count1 #=> 622504
4144 * count2 #=> 5832
4145 */
4146
4147static VALUE
4148rb_thread_priority_set(VALUE thread, VALUE prio)
4149{
4150 rb_thread_t *target_th = rb_thread_ptr(thread);
4151 int priority;
4152
4153#if USE_NATIVE_THREAD_PRIORITY
4154 target_th->priority = NUM2INT(prio);
4155    native_thread_apply_priority(target_th);
4156#else
4157 priority = NUM2INT(prio);
4158 if (priority > RUBY_THREAD_PRIORITY_MAX) {
4159 priority = RUBY_THREAD_PRIORITY_MAX;
4160 }
4161 else if (priority < RUBY_THREAD_PRIORITY_MIN) {
4162 priority = RUBY_THREAD_PRIORITY_MIN;
4163 }
4164 target_th->priority = (int8_t)priority;
4165#endif
4166 return INT2NUM(target_th->priority);
4167}
4168
4169/* for IO */
4170
4171#if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
4172
4173/*
4174 * Several Unix platforms support file descriptors bigger than FD_SETSIZE
4175 * in the select(2) system call.
4176 *
4177 * - Linux 2.2.12 (?)
4178 * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
4179 * select(2) documents how to allocate fd_set dynamically.
4180 * http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
4181 * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
4182 * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
4183 * select(2) documents how to allocate fd_set dynamically.
4184 * http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
4185 * - Solaris 8 has select_large_fdset
4186 * - Mac OS X 10.7 (Lion)
4187 *   select(2) returns EINVAL if nfds is greater than FD_SETSIZE and
4188 * _DARWIN_UNLIMITED_SELECT (or _DARWIN_C_SOURCE) isn't defined.
4189 * https://developer.apple.com/library/archive/releasenotes/Darwin/SymbolVariantsRelNotes/index.html
4190 *
4191 * When fd_set is not big enough to hold big file descriptors,
4192 * it should be allocated dynamically.
4193 * Note that this assumes fd_set is structured as a bitmap.
4194 *
4195 * rb_fd_init allocates the memory.
4196 * rb_fd_term frees the memory.
4197 * rb_fd_set may re-allocate the bitmap.
4198 *
4199 * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
4200 */
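/*
 * A sketch of the typical lifecycle of the rb_fdset_t API defined below
 * (`fd` and `tv` are assumed to be a valid descriptor and timeout):
 *
 *   rb_fdset_t fds;
 *   rb_fd_init(&fds);                             // allocate and zero the bitmap
 *   rb_fd_set(fd, &fds);                          // grows the bitmap when needed
 *   rb_fd_select(fd + 1, &fds, NULL, NULL, &tv);  // select(2) on the dynamic set
 *   rb_fd_term(&fds);                             // release the bitmap
 */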
4201
4202void
4203rb_fd_init(rb_fdset_t *fds)
4204{
4205 fds->maxfd = 0;
4206 fds->fdset = ALLOC(fd_set);
4207 FD_ZERO(fds->fdset);
4208}
4209
4210void
4211rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
4212{
4213 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
4214
4215 if (size < sizeof(fd_set))
4216 size = sizeof(fd_set);
4217 dst->maxfd = src->maxfd;
4218 dst->fdset = xmalloc(size);
4219 memcpy(dst->fdset, src->fdset, size);
4220}
4221
4222void
4223rb_fd_term(rb_fdset_t *fds)
4224{
4225 xfree(fds->fdset);
4226 fds->maxfd = 0;
4227 fds->fdset = 0;
4228}
4229
4230void
4231rb_fd_zero(rb_fdset_t *fds)
4232{
4233 if (fds->fdset)
4234 MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
4235}
4236
4237static void
4238rb_fd_resize(int n, rb_fdset_t *fds)
4239{
4240 size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
4241 size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
4242
4243 if (m < sizeof(fd_set)) m = sizeof(fd_set);
4244 if (o < sizeof(fd_set)) o = sizeof(fd_set);
4245
4246 if (m > o) {
4247 fds->fdset = xrealloc(fds->fdset, m);
4248 memset((char *)fds->fdset + o, 0, m - o);
4249 }
4250 if (n >= fds->maxfd) fds->maxfd = n + 1;
4251}
4252
4253void
4254rb_fd_set(int n, rb_fdset_t *fds)
4255{
4256 rb_fd_resize(n, fds);
4257 FD_SET(n, fds->fdset);
4258}
4259
4260void
4261rb_fd_clr(int n, rb_fdset_t *fds)
4262{
4263 if (n >= fds->maxfd) return;
4264 FD_CLR(n, fds->fdset);
4265}
4266
4267int
4268rb_fd_isset(int n, const rb_fdset_t *fds)
4269{
4270 if (n >= fds->maxfd) return 0;
4271 return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
4272}
4273
4274void
4275rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
4276{
4277 size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);
4278
4279 if (size < sizeof(fd_set)) size = sizeof(fd_set);
4280 dst->maxfd = max;
4281 dst->fdset = xrealloc(dst->fdset, size);
4282 memcpy(dst->fdset, src, size);
4283}
4284
4285void
4286rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
4287{
4288 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
4289
4290 if (size < sizeof(fd_set))
4291 size = sizeof(fd_set);
4292 dst->maxfd = src->maxfd;
4293 dst->fdset = xrealloc(dst->fdset, size);
4294 memcpy(dst->fdset, src->fdset, size);
4295}
4296
4297int
4298rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
4299{
4300 fd_set *r = NULL, *w = NULL, *e = NULL;
4301 if (readfds) {
4302 rb_fd_resize(n - 1, readfds);
4303 r = rb_fd_ptr(readfds);
4304 }
4305 if (writefds) {
4306 rb_fd_resize(n - 1, writefds);
4307 w = rb_fd_ptr(writefds);
4308 }
4309 if (exceptfds) {
4310 rb_fd_resize(n - 1, exceptfds);
4311 e = rb_fd_ptr(exceptfds);
4312 }
4313 return select(n, r, w, e, timeout);
4314}
4315
4316#define rb_fd_no_init(fds) ((void)((fds)->fdset = 0), (void)((fds)->maxfd = 0))
4317
4318#undef FD_ZERO
4319#undef FD_SET
4320#undef FD_CLR
4321#undef FD_ISSET
4322
4323#define FD_ZERO(f) rb_fd_zero(f)
4324#define FD_SET(i, f) rb_fd_set((i), (f))
4325#define FD_CLR(i, f) rb_fd_clr((i), (f))
4326#define FD_ISSET(i, f) rb_fd_isset((i), (f))
4327
4328#elif defined(_WIN32)
4329
4330void
4331rb_fd_init(rb_fdset_t *set)
4332{
4333 set->capa = FD_SETSIZE;
4334 set->fdset = ALLOC(fd_set);
4335 FD_ZERO(set->fdset);
4336}
4337
4338void
4339rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
4340{
4341 rb_fd_init(dst);
4342 rb_fd_dup(dst, src);
4343}
4344
4345void
4346rb_fd_term(rb_fdset_t *set)
4347{
4348 xfree(set->fdset);
4349 set->fdset = NULL;
4350 set->capa = 0;
4351}
4352
4353void
4354rb_fd_set(int fd, rb_fdset_t *set)
4355{
4356 unsigned int i;
4357 SOCKET s = rb_w32_get_osfhandle(fd);
4358
4359 for (i = 0; i < set->fdset->fd_count; i++) {
4360 if (set->fdset->fd_array[i] == s) {
4361 return;
4362 }
4363 }
4364 if (set->fdset->fd_count >= (unsigned)set->capa) {
4365 set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
4366 set->fdset =
4367 rb_xrealloc_mul_add(
4368 set->fdset, set->capa, sizeof(SOCKET), sizeof(unsigned int));
4369 }
4370 set->fdset->fd_array[set->fdset->fd_count++] = s;
4371}
4372
4373#undef FD_ZERO
4374#undef FD_SET
4375#undef FD_CLR
4376#undef FD_ISSET
4377
4378#define FD_ZERO(f) rb_fd_zero(f)
4379#define FD_SET(i, f) rb_fd_set((i), (f))
4380#define FD_CLR(i, f) rb_fd_clr((i), (f))
4381#define FD_ISSET(i, f) rb_fd_isset((i), (f))
4382
4383#define rb_fd_no_init(fds) (void)((fds)->fdset = 0)
4384
4385#endif
4386
4387#ifndef rb_fd_no_init
4388#define rb_fd_no_init(fds) (void)(fds)
4389#endif
4390
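/*
 * Decide whether an interrupted or early-returning wait should be retried.
 * Returns TRUE (resetting *result) when the call failed with EINTR or
 * ERESTART, or when it returned 0 before the deadline `end` (a spurious
 * wakeup); *rel is updated to the time remaining until `end`.
 */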
4391static int
4392wait_retryable(volatile int *result, int errnum, rb_hrtime_t *rel, rb_hrtime_t end)
4393{
4394 int r = *result;
4395 if (r < 0) {
4396 switch (errnum) {
4397 case EINTR:
4398#ifdef ERESTART
4399 case ERESTART:
4400#endif
4401 *result = 0;
4402 if (rel && hrtime_update_expire(rel, end)) {
4403 *rel = 0;
4404 }
4405 return TRUE;
4406 }
4407 return FALSE;
4408 }
4409 else if (r == 0) {
4410 /* check for spurious wakeup */
4411 if (rel) {
4412 return !hrtime_update_expire(rel, end);
4413 }
4414 return TRUE;
4415 }
4416 return FALSE;
4417}
4419struct select_set {
4420 int max;
4421 rb_thread_t *th;
4422 rb_fdset_t *rset;
4423 rb_fdset_t *wset;
4424 rb_fdset_t *eset;
4425 rb_fdset_t orig_rset;
4426 rb_fdset_t orig_wset;
4427 rb_fdset_t orig_eset;
4428 struct timeval *timeout;
4429};
4430
4431static VALUE
4432select_set_free(VALUE p)
4433{
4434 struct select_set *set = (struct select_set *)p;
4435
4436 rb_fd_term(&set->orig_rset);
4437 rb_fd_term(&set->orig_wset);
4438 rb_fd_term(&set->orig_eset);
4439
4440 return Qfalse;
4441}
4442
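/*
 * Run select(2) in a blocking region (GVL released, ubf_select as the
 * unblocking function). When the call is interrupted, the caller-visible
 * fd sets are restored from the orig_* copies by do_select_update() and
 * the select is retried with the remaining timeout.
 */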
4443static VALUE
4444do_select(VALUE p)
4445{
4446 struct select_set *set = (struct select_set *)p;
4447 volatile int result = 0;
4448 int lerrno;
4449 rb_hrtime_t *to, rel, end = 0;
4450
4451 timeout_prepare(&to, &rel, &end, set->timeout);
4452 volatile rb_hrtime_t endtime = end;
4453#define restore_fdset(dst, src) \
4454 ((dst) ? rb_fd_dup(dst, src) : (void)0)
4455#define do_select_update() \
4456 (restore_fdset(set->rset, &set->orig_rset), \
4457 restore_fdset(set->wset, &set->orig_wset), \
4458 restore_fdset(set->eset, &set->orig_eset), \
4459 TRUE)
4460
4461 do {
4462 lerrno = 0;
4463
4464 BLOCKING_REGION(set->th, {
4465 struct timeval tv;
4466
4467 if (!RUBY_VM_INTERRUPTED(set->th->ec)) {
4468 result = native_fd_select(set->max,
4469 set->rset, set->wset, set->eset,
4470 rb_hrtime2timeval(&tv, to), set->th);
4471 if (result < 0) lerrno = errno;
4472 }
4473 }, ubf_select, set->th, TRUE);
4474
4475 RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec); /* may raise */
4476 } while (wait_retryable(&result, lerrno, to, endtime) && do_select_update());
4477
4478 RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec);
4479
4480 if (result < 0) {
4481 errno = lerrno;
4482 }
4483
4484 return (VALUE)result;
4485}
4486
4486
4487int
4488rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
4489 struct timeval *timeout)
4490{
4491 struct select_set set;
4492
4493 set.th = GET_THREAD();
4494 RUBY_VM_CHECK_INTS_BLOCKING(set.th->ec);
4495 set.max = max;
4496 set.rset = read;
4497 set.wset = write;
4498 set.eset = except;
4499 set.timeout = timeout;
4500
4501 if (!set.rset && !set.wset && !set.eset) {
4502 if (!timeout) {
4503            rb_thread_sleep_forever();
4504            return 0;
4505 }
4506 rb_thread_wait_for(*timeout);
4507 return 0;
4508 }
4509
4510#define fd_init_copy(f) do { \
4511 if (set.f) { \
4512 rb_fd_resize(set.max - 1, set.f); \
4513 if (&set.orig_##f != set.f) { /* sigwait_fd */ \
4514 rb_fd_init_copy(&set.orig_##f, set.f); \
4515 } \
4516 } \
4517 else { \
4518 rb_fd_no_init(&set.orig_##f); \
4519 } \
4520 } while (0)
4521 fd_init_copy(rset);
4522 fd_init_copy(wset);
4523 fd_init_copy(eset);
4524#undef fd_init_copy
4525
4526 return (int)rb_ensure(do_select, (VALUE)&set, select_set_free, (VALUE)&set);
4527}
4528
4529#ifdef USE_POLL
4530
4531/* The same as the Linux kernel's. TODO: make a platform-independent definition. */
4532#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
4533#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
4534#define POLLEX_SET (POLLPRI)
4535
4536#ifndef POLLERR_SET /* defined for FreeBSD for now */
4537# define POLLERR_SET (0)
4538#endif
4539
4540static int
4541wait_for_single_fd_blocking_region(rb_thread_t *th, struct pollfd *fds, nfds_t nfds,
4542 rb_hrtime_t *const to, volatile int *lerrno)
4543{
4544 struct timespec ts;
4545 volatile int result = 0;
4546
4547 *lerrno = 0;
4548 BLOCKING_REGION(th, {
4549 if (!RUBY_VM_INTERRUPTED(th->ec)) {
4550 result = ppoll(fds, nfds, rb_hrtime2timespec(&ts, to), 0);
4551 if (result < 0) *lerrno = errno;
4552 }
4553 }, ubf_select, th, TRUE);
4554 return result;
4555}
4556
4557/*
4558 * returns a mask of events
4559 */
4560static int
4561thread_io_wait(struct rb_io *io, int fd, int events, struct timeval *timeout)
4562{
4563 struct pollfd fds[1] = {{
4564 .fd = fd,
4565 .events = (short)events,
4566 .revents = 0,
4567 }};
4568 volatile int result = 0;
4569 nfds_t nfds;
4570 struct rb_io_blocking_operation blocking_operation;
4571 enum ruby_tag_type state;
4572 volatile int lerrno;
4573
4574 rb_execution_context_t *ec = GET_EC();
4575 rb_thread_t *th = rb_ec_thread_ptr(ec);
4576
4577 if (io) {
4578 blocking_operation.ec = ec;
4579 rb_io_blocking_operation_enter(io, &blocking_operation);
4580 }
4581
4582 if (timeout == NULL && thread_io_wait_events(th, fd, events, NULL)) {
4583 // fd is readable
4584 state = 0;
4585 fds[0].revents = events;
4586 errno = 0;
4587 }
4588 else {
4589 EC_PUSH_TAG(ec);
4590 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
4591 rb_hrtime_t *to, rel, end = 0;
4592 RUBY_VM_CHECK_INTS_BLOCKING(ec);
4593 timeout_prepare(&to, &rel, &end, timeout);
4594 do {
4595 nfds = numberof(fds);
4596 result = wait_for_single_fd_blocking_region(th, fds, nfds, to, &lerrno);
4597
4598 RUBY_VM_CHECK_INTS_BLOCKING(ec);
4599 } while (wait_retryable(&result, lerrno, to, end));
4600
4601 RUBY_VM_CHECK_INTS_BLOCKING(ec);
4602 }
4603
4604 EC_POP_TAG();
4605 }
4606
4607 if (io) {
4608 rb_io_blocking_operation_exit(io, &blocking_operation);
4609 }
4610
4611 if (state) {
4612 EC_JUMP_TAG(ec, state);
4613 }
4614
4615 if (result < 0) {
4616 errno = lerrno;
4617 return -1;
4618 }
4619
4620 if (fds[0].revents & POLLNVAL) {
4621 errno = EBADF;
4622 return -1;
4623 }
4624
4625 /*
4626     * POLLIN and POLLOUT have different meanings from select(2)'s read/write bits,
4627     * so we need to fix them up.
4628 */
4629 result = 0;
4630 if (fds[0].revents & POLLIN_SET)
4631 result |= RB_WAITFD_IN;
4632 if (fds[0].revents & POLLOUT_SET)
4633 result |= RB_WAITFD_OUT;
4634 if (fds[0].revents & POLLEX_SET)
4635 result |= RB_WAITFD_PRI;
4636
4637 /* all requested events are ready if there is an error */
4638 if (fds[0].revents & POLLERR_SET)
4639 result |= events;
4640
4641 return result;
4642}
4643#else /* ! USE_POLL - implement rb_io_poll_fd() using select() */
4644struct select_args {
4645 struct rb_io *io;
4646 struct rb_io_blocking_operation *blocking_operation;
4647
4648 union {
4649 int fd;
4650 int error;
4651 } as;
4652 rb_fdset_t *read;
4653 rb_fdset_t *write;
4654 rb_fdset_t *except;
4655 struct timeval *tv;
4656};
4657
4658static VALUE
4659select_single(VALUE ptr)
4660{
4661 struct select_args *args = (struct select_args *)ptr;
4662 int r;
4663
4664 r = rb_thread_fd_select(args->as.fd + 1,
4665 args->read, args->write, args->except, args->tv);
4666 if (r == -1)
4667 args->as.error = errno;
4668 if (r > 0) {
4669 r = 0;
4670 if (args->read && rb_fd_isset(args->as.fd, args->read))
4671 r |= RB_WAITFD_IN;
4672 if (args->write && rb_fd_isset(args->as.fd, args->write))
4673 r |= RB_WAITFD_OUT;
4674 if (args->except && rb_fd_isset(args->as.fd, args->except))
4675 r |= RB_WAITFD_PRI;
4676 }
4677 return (VALUE)r;
4678}
4679
4680static VALUE
4681select_single_cleanup(VALUE ptr)
4682{
4683 struct select_args *args = (struct select_args *)ptr;
4684
4685 if (args->blocking_operation) {
4686 rb_io_blocking_operation_exit(args->io, args->blocking_operation);
4687 }
4688
4689 if (args->read) rb_fd_term(args->read);
4690 if (args->write) rb_fd_term(args->write);
4691 if (args->except) rb_fd_term(args->except);
4692
4693 return (VALUE)-1;
4694}
4695
4696static rb_fdset_t *
4697init_set_fd(int fd, rb_fdset_t *fds)
4698{
4699 if (fd < 0) {
4700 return 0;
4701 }
4702 rb_fd_init(fds);
4703 rb_fd_set(fd, fds);
4704
4705 return fds;
4706}
4707
4708static int
4709thread_io_wait(struct rb_io *io, int fd, int events, struct timeval *timeout)
4710{
4711 rb_fdset_t rfds, wfds, efds;
4712 struct select_args args;
4713 VALUE ptr = (VALUE)&args;
4714
4715 struct rb_io_blocking_operation blocking_operation;
4716 if (io) {
4717 args.io = io;
4718 blocking_operation.ec = GET_EC();
4719 rb_io_blocking_operation_enter(io, &blocking_operation);
4720 args.blocking_operation = &blocking_operation;
4721 }
4722 else {
4723 args.io = NULL;
4724 blocking_operation.ec = NULL;
4725 args.blocking_operation = NULL;
4726 }
4727
4728 args.as.fd = fd;
4729 args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
4730 args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
4731 args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
4732 args.tv = timeout;
4733
4734 int result = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
4735 if (result == -1)
4736 errno = args.as.error;
4737
4738 return result;
4739}
4740#endif /* ! USE_POLL */
4741
4742int
4743rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
4744{
4745 return thread_io_wait(NULL, fd, events, timeout);
4746}
4747
4748int
4749rb_thread_io_wait(struct rb_io *io, int events, struct timeval * timeout)
4750{
4751 return thread_io_wait(io, io->fd, events, timeout);
4752}
4753
4754/*
4755 * for GC
4756 */
4757
4758#ifdef USE_CONSERVATIVE_STACK_END
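/*
 * Record the address of a local variable as an approximation of the
 * current machine stack boundary; the conservative GC scans the stack
 * up to this address.
 */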
4759void
4760rb_gc_set_stack_end(VALUE **stack_end_p)
4761{
4762 VALUE stack_end;
4763COMPILER_WARNING_PUSH
4764#ifdef __GNUC__
4765COMPILER_WARNING_IGNORED(-Wdangling-pointer);
4766#endif
4767 *stack_end_p = &stack_end;
4768COMPILER_WARNING_POP
4769}
4770#endif
4771
4772/*
4773 *
4774 */
4775
4776void
4777rb_threadptr_check_signal(rb_thread_t *mth)
4778{
4779 /* mth must be main_thread */
4780 if (rb_signal_buff_size() > 0) {
4781 /* wakeup main thread */
4782 threadptr_trap_interrupt(mth);
4783 }
4784}
4785
4786static void
4787async_bug_fd(const char *mesg, int errno_arg, int fd)
4788{
4789 char buff[64];
4790 size_t n = strlcpy(buff, mesg, sizeof(buff));
4791 if (n < sizeof(buff)-3) {
4792 ruby_snprintf(buff+n, sizeof(buff)-n, "(%d)", fd);
4793 }
4794 rb_async_bug_errno(buff, errno_arg);
4795}
4796
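/*
 * Drain the inter-thread communication pipe (an eventfd when USE_EVENTFD
 * is set) so that subsequent writes can wake us again. Returns TRUE if
 * any data was consumed.
 */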
4797/* VM-dependent API is not available for this function */
4798static int
4799consume_communication_pipe(int fd)
4800{
4801#if USE_EVENTFD
4802 uint64_t buff[1];
4803#else
4804    /* the buffer can be shared because no one else refers to it. */
4805 static char buff[1024];
4806#endif
4807 ssize_t result;
4808 int ret = FALSE; /* for rb_sigwait_sleep */
4809
4810 while (1) {
4811 result = read(fd, buff, sizeof(buff));
4812#if USE_EVENTFD
4813        RUBY_DEBUG_LOG("result:%d buff:%lu", (int)result, (unsigned long)buff[0]);
4814#else
4815 RUBY_DEBUG_LOG("result:%d", (int)result);
4816#endif
4817 if (result > 0) {
4818 ret = TRUE;
4819 if (USE_EVENTFD || result < (ssize_t)sizeof(buff)) {
4820 return ret;
4821 }
4822 }
4823 else if (result == 0) {
4824 return ret;
4825 }
4826 else if (result < 0) {
4827 int e = errno;
4828 switch (e) {
4829 case EINTR:
4830 continue; /* retry */
4831 case EAGAIN:
4832#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
4833 case EWOULDBLOCK:
4834#endif
4835 return ret;
4836 default:
4837 async_bug_fd("consume_communication_pipe: read", e, fd);
4838 }
4839 }
4840 }
4841}
4842
4843void
4844rb_thread_stop_timer_thread(void)
4845{
4846 if (TIMER_THREAD_CREATED_P() && native_stop_timer_thread()) {
4847 native_reset_timer_thread();
4848 }
4849}
4850
4851void
4852rb_thread_reset_timer_thread(void)
4853{
4854 native_reset_timer_thread();
4855}
4856
4857void
4858rb_thread_start_timer_thread(void)
4859{
4860 system_working = 1;
4861 rb_thread_create_timer_thread();
4862}
4863
4864static int
4865clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
4866{
4867 int i;
4868 VALUE coverage = (VALUE)val;
4869 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
4870 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
4871
4872 if (lines) {
4873 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
4874 rb_ary_clear(lines);
4875 }
4876 else {
4877 int i;
4878 for (i = 0; i < RARRAY_LEN(lines); i++) {
4879 if (RARRAY_AREF(lines, i) != Qnil)
4880 RARRAY_ASET(lines, i, INT2FIX(0));
4881 }
4882 }
4883 }
4884 if (branches) {
4885 VALUE counters = RARRAY_AREF(branches, 1);
4886 for (i = 0; i < RARRAY_LEN(counters); i++) {
4887 RARRAY_ASET(counters, i, INT2FIX(0));
4888 }
4889 }
4890
4891 return ST_CONTINUE;
4892}
4893
4894void
4895rb_clear_coverages(void)
4896{
4897 VALUE coverages = rb_get_coverages();
4898 if (RTEST(coverages)) {
4899 rb_hash_foreach(coverages, clear_coverage_i, 0);
4900 }
4901}
4902
4903#if defined(HAVE_WORKING_FORK)
4904
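/*
 * After fork(2) only the forking thread survives in the child: promote it
 * to the main thread of the main ractor, run `atfork` on every other (now
 * defunct) thread, reinitialize locks the parent may have held, and
 * restart the timer thread.
 */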
4905static void
4906rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const rb_thread_t *))
4907{
4908 rb_thread_t *i = 0;
4909 rb_vm_t *vm = th->vm;
4910 rb_ractor_t *r = th->ractor;
4911 vm->ractor.main_ractor = r;
4912 vm->ractor.main_thread = th;
4913 r->threads.main = th;
4914 r->status_ = ractor_created;
4915
4916 thread_sched_atfork(TH_SCHED(th));
4917 ubf_list_atfork();
4918
4919    // OK. Only this thread can access the ractor/thread lists now:
4920 ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
4921 if (r != vm->ractor.main_ractor) {
4922 rb_ractor_terminate_atfork(vm, r);
4923 }
4924 ccan_list_for_each(&r->threads.set, i, lt_node) {
4925 atfork(i, th);
4926 }
4927 }
4928 rb_vm_living_threads_init(vm);
4929
4930 rb_ractor_atfork(vm, th);
4931 rb_vm_postponed_job_atfork();
4932
4933 /* may be held by any thread in parent */
4934 rb_native_mutex_initialize(&th->interrupt_lock);
4935 ccan_list_head_init(&th->interrupt_exec_tasks);
4936
4937 vm->fork_gen++;
4938 rb_ractor_sleeper_threads_clear(th->ractor);
4939 rb_clear_coverages();
4940
4941    // restart the timer thread (timer threads access `vm->waitpid_lock` and so on).
4942 rb_thread_reset_timer_thread();
4943 rb_thread_start_timer_thread();
4944
4945 VM_ASSERT(vm->ractor.blocking_cnt == 0);
4946 VM_ASSERT(vm->ractor.cnt == 1);
4947}
4948
4949static void
4950terminate_atfork_i(rb_thread_t *th, const rb_thread_t *current_th)
4951{
4952 if (th != current_th) {
4953 rb_native_mutex_initialize(&th->interrupt_lock);
4954 rb_mutex_abandon_keeping_mutexes(th);
4955 rb_mutex_abandon_locking_mutex(th);
4956 thread_cleanup_func(th, TRUE);
4957 }
4958}
4959
4960void rb_fiber_atfork(rb_thread_t *);
4961void
4962rb_thread_atfork(void)
4963{
4964 rb_thread_t *th = GET_THREAD();
4965 rb_threadptr_pending_interrupt_clear(th);
4966 rb_thread_atfork_internal(th, terminate_atfork_i);
4967 th->join_list = NULL;
4968 rb_fiber_atfork(th);
4969
4970    /* We don't want to reproduce CVE-2003-0900. */
4971    rb_reset_random_seed();
4972}
4973
4974static void
4975terminate_atfork_before_exec_i(rb_thread_t *th, const rb_thread_t *current_th)
4976{
4977 if (th != current_th) {
4978 thread_cleanup_func_before_exec(th);
4979 }
4980}
4981
4982void
4983rb_thread_atfork_before_exec(void)
4984{
4985 rb_thread_t *th = GET_THREAD();
4986 rb_thread_atfork_internal(th, terminate_atfork_before_exec_i);
4987}
4988#else
4989void
4990rb_thread_atfork(void)
4991{
4992}
4993
4994void
4995rb_thread_atfork_before_exec(void)
4996{
4997}
4998#endif
5000struct thgroup {
5001 int enclosed;
5002};
5003
5004static const rb_data_type_t thgroup_data_type = {
5005 "thgroup",
5006 {
5007 0,
5008        RUBY_TYPED_DEFAULT_FREE,
5009        NULL, // No external memory to report
5010 },
5011 0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
5012};
5013
5014/*
5015 * Document-class: ThreadGroup
5016 *
5017 * ThreadGroup provides a means of keeping track of a number of threads as a
5018 * group.
5019 *
5020 * A given Thread object can only belong to one ThreadGroup at a time; adding
5021 * a thread to a new group will remove it from any previous group.
5022 *
5023 * Newly created threads belong to the same group as the thread from which they
5024 * were created.
5025 */
5026
5027/*
5028 * Document-const: Default
5029 *
5030 * The default ThreadGroup created when Ruby starts; all Threads belong to it
5031 * by default.
5032 */
5033static VALUE
5034thgroup_s_alloc(VALUE klass)
5035{
5036 VALUE group;
5037 struct thgroup *data;
5038
5039 group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
5040 data->enclosed = 0;
5041
5042 return group;
5043}
5044
5045/*
5046 * call-seq:
5047 * thgrp.list -> array
5048 *
5049 * Returns an array of all existing Thread objects that belong to this group.
5050 *
5051 * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
5052 */
5053
5054static VALUE
5055thgroup_list(VALUE group)
5056{
5057 VALUE ary = rb_ary_new();
5058 rb_thread_t *th = 0;
5059 rb_ractor_t *r = GET_RACTOR();
5060
5061 ccan_list_for_each(&r->threads.set, th, lt_node) {
5062 if (th->thgroup == group) {
5063 rb_ary_push(ary, th->self);
5064 }
5065 }
5066 return ary;
5067}
5068
5069
5070/*
5071 * call-seq:
5072 * thgrp.enclose -> thgrp
5073 *
5074 * Prevents threads from being added to or removed from the receiving
5075 * ThreadGroup.
5076 *
5077 * New threads can still be started in an enclosed ThreadGroup.
5078 *
5079 * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
5080 * thr = Thread.new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
5081 * tg = ThreadGroup.new #=> #<ThreadGroup:0x402752d4>
5082 * tg.add thr
5083 * #=> ThreadError: can't move from the enclosed thread group
5084 */
5085
5086static VALUE
5087thgroup_enclose(VALUE group)
5088{
5089 struct thgroup *data;
5090
5091 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
5092 data->enclosed = 1;
5093
5094 return group;
5095}
5096
5097
5098/*
5099 * call-seq:
5100 * thgrp.enclosed? -> true or false
5101 *
5102 * Returns +true+ if the +thgrp+ is enclosed. See also ThreadGroup#enclose.
5103 */
5104
5105static VALUE
5106thgroup_enclosed_p(VALUE group)
5107{
5108 struct thgroup *data;
5109
5110 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
5111 return RBOOL(data->enclosed);
5112}
5113
5114
5115/*
5116 * call-seq:
5117 * thgrp.add(thread) -> thgrp
5118 *
5119 * Adds the given +thread+ to this group, removing it from any other
5120 * group to which it may have previously been a member.
5121 *
5122 * puts "Initial group is #{ThreadGroup::Default.list}"
5123 * tg = ThreadGroup.new
5124 * t1 = Thread.new { sleep }
5125 * t2 = Thread.new { sleep }
5126 * puts "t1 is #{t1}"
5127 * puts "t2 is #{t2}"
5128 * tg.add(t1)
5129 * puts "Initial group now #{ThreadGroup::Default.list}"
5130 * puts "tg group now #{tg.list}"
5131 *
5132 * This will produce:
5133 *
5134 * Initial group is #<Thread:0x401bdf4c>
5135 * t1 is #<Thread:0x401b3c90>
5136 * t2 is #<Thread:0x401b3c18>
5137 * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
5138 * tg group now #<Thread:0x401b3c90>
5139 */
5140
5141static VALUE
5142thgroup_add(VALUE group, VALUE thread)
5143{
5144 rb_thread_t *target_th = rb_thread_ptr(thread);
5145 struct thgroup *data;
5146
5147 if (OBJ_FROZEN(group)) {
5148 rb_raise(rb_eThreadError, "can't move to the frozen thread group");
5149 }
5150 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
5151 if (data->enclosed) {
5152 rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
5153 }
5154
5155 if (OBJ_FROZEN(target_th->thgroup)) {
5156 rb_raise(rb_eThreadError, "can't move from the frozen thread group");
5157 }
5158 TypedData_Get_Struct(target_th->thgroup, struct thgroup, &thgroup_data_type, data);
5159 if (data->enclosed) {
5160 rb_raise(rb_eThreadError,
5161 "can't move from the enclosed thread group");
5162 }
5163
5164 target_th->thgroup = group;
5165 return group;
5166}
5167
5168/*
5169 * Document-class: ThreadShield
5170 */
5171static void
5172thread_shield_mark(void *ptr)
5173{
5174 rb_gc_mark((VALUE)ptr);
5175}
5176
5177static const rb_data_type_t thread_shield_data_type = {
5178 "thread_shield",
5179 {thread_shield_mark, 0, 0,},
5180 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
5181};
5182
5183static VALUE
5184thread_shield_alloc(VALUE klass)
5185{
5186 return TypedData_Wrap_Struct(klass, &thread_shield_data_type, (void *)mutex_alloc(0));
5187}
5188
5189#define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
5190#define THREAD_SHIELD_WAITING_MASK (((FL_USER19-1)&~(FL_USER0-1))|FL_USER19)
5191#define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
5192#define THREAD_SHIELD_WAITING_MAX (THREAD_SHIELD_WAITING_MASK>>THREAD_SHIELD_WAITING_SHIFT)
5193STATIC_ASSERT(THREAD_SHIELD_WAITING_MAX, THREAD_SHIELD_WAITING_MAX <= UINT_MAX);
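/*
 * The number of threads waiting on a shield is packed into the shield
 * object's flag bits (FL_USER0..FL_USER19), so no separate counter
 * allocation is needed; the helpers below extract and update it.
 */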
5194static inline unsigned int
5195rb_thread_shield_waiting(VALUE b)
5196{
5197 return ((RBASIC(b)->flags&THREAD_SHIELD_WAITING_MASK)>>THREAD_SHIELD_WAITING_SHIFT);
5198}
5199
5200static inline void
5201rb_thread_shield_waiting_inc(VALUE b)
5202{
5203 unsigned int w = rb_thread_shield_waiting(b);
5204 w++;
5205 if (w > THREAD_SHIELD_WAITING_MAX)
5206 rb_raise(rb_eRuntimeError, "waiting count overflow");
5207 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
5208 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
5209}
5210
5211static inline void
5212rb_thread_shield_waiting_dec(VALUE b)
5213{
5214 unsigned int w = rb_thread_shield_waiting(b);
5215 if (!w) rb_raise(rb_eRuntimeError, "waiting count underflow");
5216 w--;
5217 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
5218 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
5219}
5220
5221VALUE
5222rb_thread_shield_new(void)
5223{
5224 VALUE thread_shield = thread_shield_alloc(rb_cThreadShield);
5225 rb_mutex_lock((VALUE)DATA_PTR(thread_shield));
5226 return thread_shield;
5227}
5228
5229bool
5230rb_thread_shield_owned(VALUE self)
5231{
5232 VALUE mutex = GetThreadShieldPtr(self);
5233 if (!mutex) return false;
5234
5235 rb_mutex_t *m = mutex_ptr(mutex);
5236
5237 return m->fiber == GET_EC()->fiber_ptr;
5238}
5239
5240/*
5241 * Wait on a thread shield.
5242 *
5243 * Returns
5244 *  true:  acquired the thread shield
5245 *  false: the thread shield was destroyed and no other threads are waiting
5246 * nil: the thread shield was destroyed but still in use
5247 */
5248VALUE
5249rb_thread_shield_wait(VALUE self)
5250{
5251 VALUE mutex = GetThreadShieldPtr(self);
5252 rb_mutex_t *m;
5253
5254 if (!mutex) return Qfalse;
5255 m = mutex_ptr(mutex);
5256 if (m->fiber == GET_EC()->fiber_ptr) return Qnil;
5257 rb_thread_shield_waiting_inc(self);
5258 rb_mutex_lock(mutex);
5259 rb_thread_shield_waiting_dec(self);
5260 if (DATA_PTR(self)) return Qtrue;
5261 rb_mutex_unlock(mutex);
5262 return rb_thread_shield_waiting(self) > 0 ? Qnil : Qfalse;
5263}
5264
5265static VALUE
5266thread_shield_get_mutex(VALUE self)
5267{
5268 VALUE mutex = GetThreadShieldPtr(self);
5269 if (!mutex)
5270 rb_raise(rb_eThreadError, "destroyed thread shield - %p", (void *)self);
5271 return mutex;
5272}
5273
5274/*
5275 * Release a thread shield, and return true if it has waiting threads.
5276 */
5277VALUE
5278rb_thread_shield_release(VALUE self)
5279{
5280 VALUE mutex = thread_shield_get_mutex(self);
5281 rb_mutex_unlock(mutex);
5282 return RBOOL(rb_thread_shield_waiting(self) > 0);
5283}
5284
5285/*
5286 * Release and destroy a thread shield, and return true if it has waiting threads.
5287 */
5288VALUE
5289rb_thread_shield_destroy(VALUE self)
5290{
5291 VALUE mutex = thread_shield_get_mutex(self);
5292 DATA_PTR(self) = 0;
5293 rb_mutex_unlock(mutex);
5294 return RBOOL(rb_thread_shield_waiting(self) > 0);
5295}
5296
5297static VALUE
5298threadptr_recursive_hash(rb_thread_t *th)
5299{
5300 return th->ec->local_storage_recursive_hash;
5301}
5302
5303static void
5304threadptr_recursive_hash_set(rb_thread_t *th, VALUE hash)
5305{
5306 th->ec->local_storage_recursive_hash = hash;
5307}
5308
5309ID rb_frame_last_func(void);
5310
5311/*
5312 * Returns the current "recursive list" used to detect recursion.
5313 * This list is a hash table, unique for the current thread and for
5314 * the current __callee__.
5315 */
5316
5317static VALUE
5318recursive_list_access(VALUE sym)
5319{
5320 rb_thread_t *th = GET_THREAD();
5321 VALUE hash = threadptr_recursive_hash(th);
5322 VALUE list;
5323 if (NIL_P(hash) || !RB_TYPE_P(hash, T_HASH)) {
5324 hash = rb_ident_hash_new();
5325 threadptr_recursive_hash_set(th, hash);
5326 list = Qnil;
5327 }
5328 else {
5329 list = rb_hash_aref(hash, sym);
5330 }
5331 if (NIL_P(list) || !RB_TYPE_P(list, T_HASH)) {
5332 list = rb_ident_hash_new();
5333 rb_hash_aset(hash, sym, list);
5334 }
5335 return list;
5336}
5337
5338/*
5339 * Returns true if and only if obj (or the pair <obj, paired_obj>) is already
5340 * in the recursion list.
5341 * Assumes the recursion list is valid.
5342 */
5343
5344static bool
5345recursive_check(VALUE list, VALUE obj, VALUE paired_obj_id)
5346{
5347#if SIZEOF_LONG == SIZEOF_VOIDP
5348 #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
5349#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
5350 #define OBJ_ID_EQL(obj_id, other) (RB_BIGNUM_TYPE_P((obj_id)) ? \
5351 rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
5352#endif
5353
5354 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5355 if (UNDEF_P(pair_list))
5356 return false;
5357 if (paired_obj_id) {
5358 if (!RB_TYPE_P(pair_list, T_HASH)) {
5359 if (!OBJ_ID_EQL(paired_obj_id, pair_list))
5360 return false;
5361 }
5362 else {
5363 if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
5364 return false;
5365 }
5366 }
5367 return true;
5368}
5369
5370/*
5371 * Pushes obj (or the pair <obj, paired_obj>) onto the recursion list.
5372 * For a single obj, it sets list[obj] to Qtrue.
5373 * For a pair, it sets list[obj] to paired_obj_id if possible,
5374 * otherwise list[obj] becomes a hash like:
5375 * {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
5376 * Assumes the recursion list is valid.
5377 */
5378
5379static void
5380recursive_push(VALUE list, VALUE obj, VALUE paired_obj)
5381{
5382 VALUE pair_list;
5383
5384 if (!paired_obj) {
5385 rb_hash_aset(list, obj, Qtrue);
5386 }
5387 else if (UNDEF_P(pair_list = rb_hash_lookup2(list, obj, Qundef))) {
5388 rb_hash_aset(list, obj, paired_obj);
5389 }
5390 else {
5391 if (!RB_TYPE_P(pair_list, T_HASH)){
5392 VALUE other_paired_obj = pair_list;
5393 pair_list = rb_hash_new();
5394 rb_hash_aset(pair_list, other_paired_obj, Qtrue);
5395 rb_hash_aset(list, obj, pair_list);
5396 }
5397 rb_hash_aset(pair_list, paired_obj, Qtrue);
5398 }
5399}
5400
5401/*
5402 * Pops obj (or the pair <obj, paired_obj>) from the recursion list.
5403 * For a pair, if list[obj] is a hash, then paired_obj_id is
5404 * removed from the hash and no attempt is made to simplify
5405 * list[obj] from {only_one_paired_id => true} to only_one_paired_id.
5406 * Assumes the recursion list is valid.
5407 */
5408
5409static int
5410recursive_pop(VALUE list, VALUE obj, VALUE paired_obj)
5411{
5412 if (paired_obj) {
5413 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5414 if (UNDEF_P(pair_list)) {
5415 return 0;
5416 }
5417 if (RB_TYPE_P(pair_list, T_HASH)) {
5418 rb_hash_delete_entry(pair_list, paired_obj);
5419 if (!RHASH_EMPTY_P(pair_list)) {
5420                return 1; /* keep the hash until it is empty */
5421 }
5422 }
5423 }
5424 rb_hash_delete_entry(list, obj);
5425 return 1;
5426}
5428struct exec_recursive_params {
5429 VALUE (*func) (VALUE, VALUE, int);
5430 VALUE list;
5431 VALUE obj;
5432 VALUE pairid;
5433 VALUE arg;
5434};
5435
5436static VALUE
5437exec_recursive_i(RB_BLOCK_CALL_FUNC_ARGLIST(tag, data))
5438{
5439 struct exec_recursive_params *p = (void *)data;
5440 return (*p->func)(p->obj, p->arg, FALSE);
5441}
5442
5443/*
5444 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5445 * current method is called recursively on obj, or on the pair <obj, pairid>.
5446 * If outer is 0, then the innermost func will be called with recursive set
5447 * to true; otherwise the outermost func will be called. In the latter case,
5448 * all inner calls to func are short-circuited by throw.
5449 * Implementation details: the value thrown is the recursive list which is
5450 * proper to the current method and unlikely to be caught anywhere else.
5451 * list[recursive_key] is used as a flag for the outermost call.
5452 */
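/*
 * As a concrete example, Array#inspect reaches this through
 * rb_exec_recursive(); with a self-referential array the inner call
 * detects the recursion and substitutes "[...]":
 *
 *     a = []
 *     a << a
 *     a.inspect  #=> "[[...]]"
 */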
5453
5454static VALUE
5455exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer, ID mid)
5456{
5457 VALUE result = Qundef;
5458 const VALUE sym = mid ? ID2SYM(mid) : ID2SYM(idNULL);
5459 struct exec_recursive_params p;
5460 int outermost;
5461 p.list = recursive_list_access(sym);
5462 p.obj = obj;
5463 p.pairid = pairid;
5464 p.arg = arg;
5465 outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);
5466
5467 if (recursive_check(p.list, p.obj, pairid)) {
5468 if (outer && !outermost) {
5469 rb_throw_obj(p.list, p.list);
5470 }
5471 return (*func)(obj, arg, TRUE);
5472 }
5473 else {
5474 enum ruby_tag_type state;
5475
5476 p.func = func;
5477
5478 if (outermost) {
5479 recursive_push(p.list, ID2SYM(recursive_key), 0);
5480 recursive_push(p.list, p.obj, p.pairid);
5481 result = rb_catch_protect(p.list, exec_recursive_i, (VALUE)&p, &state);
5482 if (!recursive_pop(p.list, p.obj, p.pairid)) goto invalid;
5483 if (!recursive_pop(p.list, ID2SYM(recursive_key), 0)) goto invalid;
5484 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5485 if (result == p.list) {
5486 result = (*func)(obj, arg, TRUE);
5487 }
5488 }
5489 else {
5490 volatile VALUE ret = Qundef;
5491 recursive_push(p.list, p.obj, p.pairid);
5492 EC_PUSH_TAG(GET_EC());
5493 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
5494 ret = (*func)(obj, arg, FALSE);
5495 }
5496 EC_POP_TAG();
5497 if (!recursive_pop(p.list, p.obj, p.pairid)) {
5498 goto invalid;
5499 }
5500 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5501 result = ret;
5502 }
5503 }
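    /* volatile access keeps `p` (and the stores into it above) alive
     * across the setjmp/longjmp in EC_EXEC_TAG/EC_JUMP_TAG */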
5504 *(volatile struct exec_recursive_params *)&p;
5505 return result;
5506
5507 invalid:
5508 rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list "
5509 "for %+"PRIsVALUE" in %+"PRIsVALUE,
5510 sym, rb_thread_current());
5512}
5513
5514/*
5515 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5516 * current method is called recursively on obj
5517 */
5518
5519VALUE
5520rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5521{
5522 return exec_recursive(func, obj, 0, arg, 0, rb_frame_last_func());
5523}
5524
5525/*
5526 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5527 * current method is called recursively on the ordered pair <obj, paired_obj>
5528 */
5529
5530VALUE
5531rb_exec_recursive_paired(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5532{
5533 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 0, rb_frame_last_func());
5534}
5535
5536/*
5537 * If recursion is detected on the current method and obj, the outermost
5538 * func will be called with (obj, arg, true). All inner func will be
5539 * short-circuited using throw.
5540 */
5541
5542VALUE
5543rb_exec_recursive_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5544{
5545 return exec_recursive(func, obj, 0, arg, 1, rb_frame_last_func());
5546}
5547
5548VALUE
5549rb_exec_recursive_outer_mid(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg, ID mid)
5550{
5551 return exec_recursive(func, obj, 0, arg, 1, mid);
5552}
5553
5554/*
5555 * If recursion is detected on the current method, obj and paired_obj,
5556 * the outermost func will be called with (obj, arg, true). All inner
5557 * func will be short-circuited using throw.
5558 */
5559
5560VALUE
5561rb_exec_recursive_paired_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5562{
5563 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 1, rb_frame_last_func());
5564}
5565
5566/*
5567 * call-seq:
5568 * thread.backtrace -> array or nil
5569 *
5570 * Returns the current backtrace of the target thread.
5571 *
5572 */
5573
5574static VALUE
5575rb_thread_backtrace_m(int argc, VALUE *argv, VALUE thval)
5576{
5577 return rb_vm_thread_backtrace(argc, argv, thval);
5578}
5579
5580/* call-seq:
5581 * thread.backtrace_locations(*args) -> array or nil
5582 *
5583 * Returns the execution stack for the target thread---an array containing
5584 * backtrace location objects.
5585 *
5586 * See Thread::Backtrace::Location for more information.
5587 *
5588 * This method behaves similarly to Kernel#caller_locations except it applies
5589 * to a specific thread.
5590 */
5591static VALUE
5592rb_thread_backtrace_locations_m(int argc, VALUE *argv, VALUE thval)
5593{
5594 return rb_vm_thread_backtrace_locations(argc, argv, thval);
5595}
5596
5597void
5598Init_Thread_Mutex(void)
5599{
5600 rb_thread_t *th = GET_THREAD();
5601
5602 rb_native_mutex_initialize(&th->vm->workqueue_lock);
5603 rb_native_mutex_initialize(&th->interrupt_lock);
5604}
5605
5606/*
5607 * Document-class: ThreadError
5608 *
5609 * Raised when an invalid operation is attempted on a thread.
5610 *
5611 * For example, when no other thread has been started:
5612 *
5613 * Thread.stop
5614 *
5615 * This will raise the following exception:
5616 *
5617 * ThreadError: stopping only thread
5618 * note: use sleep to stop forever
5619 */
5620
5621void
5622Init_Thread(void)
5623{
5624 rb_thread_t *th = GET_THREAD();
5625
5626 sym_never = ID2SYM(rb_intern_const("never"));
5627 sym_immediate = ID2SYM(rb_intern_const("immediate"));
5628 sym_on_blocking = ID2SYM(rb_intern_const("on_blocking"));
5629
5630 rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
5631 rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
5632 rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
5633 rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
5634 rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
5635 rb_define_singleton_method(rb_cThread, "stop", thread_stop, 0);
5636 rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
5637 rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
5638 rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
5639 rb_define_singleton_method(rb_cThread, "list", thread_list, 0);
5640 rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
5641 rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
5642 rb_define_singleton_method(rb_cThread, "report_on_exception", rb_thread_s_report_exc, 0);
5643 rb_define_singleton_method(rb_cThread, "report_on_exception=", rb_thread_s_report_exc_set, 1);
5644 rb_define_singleton_method(rb_cThread, "ignore_deadlock", rb_thread_s_ignore_deadlock, 0);
5645 rb_define_singleton_method(rb_cThread, "ignore_deadlock=", rb_thread_s_ignore_deadlock_set, 1);
5646 rb_define_singleton_method(rb_cThread, "handle_interrupt", rb_thread_s_handle_interrupt, 1);
5647 rb_define_singleton_method(rb_cThread, "pending_interrupt?", rb_thread_s_pending_interrupt_p, -1);
5648 rb_define_method(rb_cThread, "pending_interrupt?", rb_thread_pending_interrupt_p, -1);
5649
5650 rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
5651 rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
5652 rb_define_method(rb_cThread, "join", thread_join_m, -1);
5653 rb_define_method(rb_cThread, "value", thread_value, 0);
5654 rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
5655 rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
5656 rb_define_method(rb_cThread, "exit", rb_thread_kill, 0);
5657 rb_define_method(rb_cThread, "run", rb_thread_run, 0);
5658 rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
5659 rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
5660 rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
5661 rb_define_method(rb_cThread, "fetch", rb_thread_fetch, -1);
5662 rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
5663 rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
5664 rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
5665 rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
5666 rb_define_method(rb_cThread, "status", rb_thread_status, 0);
5667 rb_define_method(rb_cThread, "thread_variable_get", rb_thread_variable_get, 1);
5668 rb_define_method(rb_cThread, "thread_variable_set", rb_thread_variable_set, 2);
5669 rb_define_method(rb_cThread, "thread_variables", rb_thread_variables, 0);
5670 rb_define_method(rb_cThread, "thread_variable?", rb_thread_variable_p, 1);
5671 rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
5672 rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
5673 rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
5674 rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
5675 rb_define_method(rb_cThread, "report_on_exception", rb_thread_report_exc, 0);
5676 rb_define_method(rb_cThread, "report_on_exception=", rb_thread_report_exc_set, 1);
5677 rb_define_method(rb_cThread, "group", rb_thread_group, 0);
5678 rb_define_method(rb_cThread, "backtrace", rb_thread_backtrace_m, -1);
5679 rb_define_method(rb_cThread, "backtrace_locations", rb_thread_backtrace_locations_m, -1);
5680
5681 rb_define_method(rb_cThread, "name", rb_thread_getname, 0);
5682 rb_define_method(rb_cThread, "name=", rb_thread_setname, 1);
5683 rb_define_method(rb_cThread, "native_thread_id", rb_thread_native_thread_id, 0);
5684 rb_define_method(rb_cThread, "to_s", rb_thread_to_s, 0);
5685 rb_define_alias(rb_cThread, "inspect", "to_s");
5686
5687 rb_vm_register_special_exception(ruby_error_stream_closed, rb_eIOError,
5688 "stream closed in another thread");
5689
5690 cThGroup = rb_define_class("ThreadGroup", rb_cObject);
5691 rb_define_alloc_func(cThGroup, thgroup_s_alloc);
5692 rb_define_method(cThGroup, "list", thgroup_list, 0);
5693 rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
5694 rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
5695 rb_define_method(cThGroup, "add", thgroup_add, 1);
5696
5697 const char * ptr = getenv("RUBY_THREAD_TIMESLICE");
5698
5699 if (ptr) {
5700 long quantum = strtol(ptr, NULL, 0);
5701 if (quantum > 0 && !(SIZEOF_LONG > 4 && quantum > UINT32_MAX)) {
5702 thread_default_quantum_ms = (uint32_t)quantum;
5703 }
5704 else if (0) {
5705 fprintf(stderr, "Ignored RUBY_THREAD_TIMESLICE=%s\n", ptr);
5706 }
5707 }
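    /* For example, `RUBY_THREAD_TIMESLICE=50 ruby app.rb` requests a 50ms
     * scheduling quantum; zero, negative, or out-of-range values are
     * ignored and the built-in default is kept. */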
5708
5709 {
5710 th->thgroup = th->ractor->thgroup_default = rb_obj_alloc(cThGroup);
5711 rb_define_const(cThGroup, "Default", th->thgroup);
5712 }
5713
5714    recursive_key = rb_intern_const("__recursive_key__");
5715
5716 /* init thread core */
5717 {
5718 /* main thread setting */
5719 {
5720 /* acquire global vm lock */
5721#ifdef HAVE_PTHREAD_NP_H
5722 VM_ASSERT(TH_SCHED(th)->running == th);
5723#endif
5724 // thread_sched_to_running() should not be called because
5725 // it assumes blocked by thread_sched_to_waiting().
5726 // thread_sched_to_running(sched, th);
5727
5728 th->pending_interrupt_queue = rb_ary_hidden_new(0);
5729 th->pending_interrupt_queue_checked = 0;
5730 th->pending_interrupt_mask_stack = rb_ary_hidden_new(0);
5731 }
5732 }
5733
5734 rb_thread_create_timer_thread();
5735
5736 Init_thread_sync();
5737
5738 // TODO: Suppress unused function warning for now
5739 // if (0) rb_thread_sched_destroy(NULL);
5740}
5741
5742int
5743ruby_native_thread_p(void)
5744{
5745 rb_thread_t *th = ruby_thread_from_native();
5746
5747 return th != 0;
5748}
5749
5750#ifdef NON_SCALAR_THREAD_ID
5751 #define thread_id_str(th) (NULL)
5752#else
5753 #define thread_id_str(th) ((void *)(uintptr_t)(th)->nt->thread_id)
5754#endif
5755
5756static void
5757debug_deadlock_check(rb_ractor_t *r, VALUE msg)
5758{
5759 rb_thread_t *th = 0;
5760 VALUE sep = rb_str_new_cstr("\n ");
5761
5762 rb_str_catf(msg, "\n%d threads, %d sleeps current:%p main thread:%p\n",
5763 rb_ractor_living_thread_num(r), rb_ractor_sleeper_thread_num(r),
5764 (void *)GET_THREAD(), (void *)r->threads.main);
5765
5766 ccan_list_for_each(&r->threads.set, th, lt_node) {
5767 rb_str_catf(msg, "* %+"PRIsVALUE"\n rb_thread_t:%p "
5768 "native:%p int:%u",
5769 th->self, (void *)th, th->nt ? thread_id_str(th) : "N/A", th->ec->interrupt_flag);
5770
5771 if (th->locking_mutex) {
5772 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5773 rb_str_catf(msg, " mutex:%p cond:%"PRIuSIZE,
5774 (void *)mutex->fiber, rb_mutex_num_waiting(mutex));
5775 }
5776
5777 {
5778 struct rb_waiting_list *list = th->join_list;
5779 while (list) {
5780            rb_str_catf(msg, "\n   depended by: rb_thread_id:%p", (void *)list->thread);
5781 list = list->next;
5782 }
5783 }
5784 rb_str_catf(msg, "\n ");
5785 rb_str_concat(msg, rb_ary_join(rb_ec_backtrace_str_ary(th->ec, RUBY_BACKTRACE_START, RUBY_ALL_BACKTRACE_LINES), sep));
5786 rb_str_catf(msg, "\n");
5787 }
5788}
5789
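/*
 * Deadlock detection: if every living thread in the ractor is stopped
 * forever, has no pending interrupt, and cannot be woken through a mutex
 * it is waiting on, raise a fatal error on the main thread with the
 * report built by debug_deadlock_check() above.
 */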
5790static void
5791rb_check_deadlock(rb_ractor_t *r)
5792{
5793 if (GET_THREAD()->vm->thread_ignore_deadlock) return;
5794
5795#ifdef RUBY_THREAD_PTHREAD_H
5796 if (r->threads.sched.readyq_cnt > 0) return;
5797#endif
5798
5799 int sleeper_num = rb_ractor_sleeper_thread_num(r);
5800 int ltnum = rb_ractor_living_thread_num(r);
5801
5802 if (ltnum > sleeper_num) return;
5803 if (ltnum < sleeper_num) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
5804
5805 int found = 0;
5806 rb_thread_t *th = NULL;
5807
5808 ccan_list_for_each(&r->threads.set, th, lt_node) {
5809 if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th->ec)) {
5810 found = 1;
5811 }
5812 else if (th->locking_mutex) {
5813 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5814 if (mutex->fiber == th->ec->fiber_ptr || (!mutex->fiber && !ccan_list_empty(&mutex->waitq))) {
5815 found = 1;
5816 }
5817 }
5818 if (found)
5819 break;
5820 }
5821
5822 if (!found) {
5823 VALUE argv[2];
5824 argv[0] = rb_eFatal;
5825 argv[1] = rb_str_new2("No live threads left. Deadlock?");
5826 debug_deadlock_check(r, argv[1]);
5827 rb_ractor_sleeper_threads_dec(GET_RACTOR());
5828 rb_threadptr_raise(r->threads.main, 2, argv);
5829 }
5830}
5831
5832static void
5833update_line_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5834{
5835 const rb_control_frame_t *cfp = GET_EC()->cfp;
5836 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5837 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5838 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
5839 if (lines) {
5840 long line = rb_sourceline() - 1;
5841 VM_ASSERT(line >= 0);
5842 long count;
5843 VALUE num;
5844 void rb_iseq_clear_event_flags(const rb_iseq_t *iseq, size_t pos, rb_event_flag_t reset);
5845 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
5846 rb_iseq_clear_event_flags(cfp->iseq, cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1, RUBY_EVENT_COVERAGE_LINE);
5847 rb_ary_push(lines, LONG2FIX(line + 1));
5848 return;
5849 }
5850 if (line >= RARRAY_LEN(lines)) { /* no longer tracked */
5851 return;
5852 }
5853 num = RARRAY_AREF(lines, line);
5854 if (!FIXNUM_P(num)) return;
5855 count = FIX2LONG(num) + 1;
5856 if (POSFIXABLE(count)) {
5857 RARRAY_ASET(lines, line, LONG2FIX(count));
5858 }
5859 }
5860 }
5861}
5862
5863static void
5864update_branch_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5865{
5866 const rb_control_frame_t *cfp = GET_EC()->cfp;
5867 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5868 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5869 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
5870 if (branches) {
5871 long pc = cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1;
5872 long idx = FIX2INT(RARRAY_AREF(ISEQ_PC2BRANCHINDEX(cfp->iseq), pc)), count;
5873 VALUE counters = RARRAY_AREF(branches, 1);
5874 VALUE num = RARRAY_AREF(counters, idx);
5875 count = FIX2LONG(num) + 1;
5876 if (POSFIXABLE(count)) {
5877 RARRAY_ASET(counters, idx, LONG2FIX(count));
5878 }
5879 }
5880 }
5881}
5882
5883const rb_method_entry_t *
5884rb_resolve_me_location(const rb_method_entry_t *me, VALUE resolved_location[5])
5885{
5886 VALUE path, beg_pos_lineno, beg_pos_column, end_pos_lineno, end_pos_column;
5887
5888 if (!me->def) return NULL; // negative cme
5889
5890 retry:
5891 switch (me->def->type) {
5892 case VM_METHOD_TYPE_ISEQ: {
5893 const rb_iseq_t *iseq = me->def->body.iseq.iseqptr;
5894 rb_iseq_location_t *loc = &ISEQ_BODY(iseq)->location;
5895 path = rb_iseq_path(iseq);
5896 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5897 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5898 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5899 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5900 break;
5901 }
5902 case VM_METHOD_TYPE_BMETHOD: {
5903 const rb_iseq_t *iseq = rb_proc_get_iseq(me->def->body.bmethod.proc, 0);
5904 if (iseq) {
5905 rb_iseq_location_t *loc;
5906 rb_iseq_check(iseq);
5907 path = rb_iseq_path(iseq);
5908 loc = &ISEQ_BODY(iseq)->location;
5909 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5910 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5911 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5912 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5913 break;
5914 }
5915 return NULL;
5916 }
5917 case VM_METHOD_TYPE_ALIAS:
5918 me = me->def->body.alias.original_me;
5919 goto retry;
5920 case VM_METHOD_TYPE_REFINED:
5921 me = me->def->body.refined.orig_me;
5922 if (!me) return NULL;
5923 goto retry;
5924 default:
5925 return NULL;
5926 }
5927
5928 /* found */
5929 if (RB_TYPE_P(path, T_ARRAY)) {
5930 path = rb_ary_entry(path, 1);
5931 if (!RB_TYPE_P(path, T_STRING)) return NULL; /* just for the case... */
5932 }
5933 if (resolved_location) {
5934 resolved_location[0] = path;
5935 resolved_location[1] = beg_pos_lineno;
5936 resolved_location[2] = beg_pos_column;
5937 resolved_location[3] = end_pos_lineno;
5938 resolved_location[4] = end_pos_column;
5939 }
5940 return me;
5941}
5942
5943static void
5944update_method_coverage(VALUE me2counter, rb_trace_arg_t *trace_arg)
5945{
5946 const rb_control_frame_t *cfp = GET_EC()->cfp;
5947 const rb_callable_method_entry_t *cme = rb_vm_frame_method_entry(cfp);
5948 const rb_method_entry_t *me = (const rb_method_entry_t *)cme;
5949 VALUE rcount;
5950 long count;
5951
5952 me = rb_resolve_me_location(me, 0);
5953 if (!me) return;
5954
5955 rcount = rb_hash_aref(me2counter, (VALUE) me);
5956 count = FIXNUM_P(rcount) ? FIX2LONG(rcount) + 1 : 1;
5957 if (POSFIXABLE(count)) {
5958 rb_hash_aset(me2counter, (VALUE) me, LONG2FIX(count));
5959 }
5960}
5961
5962VALUE
5963rb_get_coverages(void)
5964{
5965 return GET_VM()->coverages;
5966}
5967
5968int
5969rb_get_coverage_mode(void)
5970{
5971 return GET_VM()->coverage_mode;
5972}
5973
5974void
5975rb_set_coverages(VALUE coverages, int mode, VALUE me2counter)
5976{
5977 GET_VM()->coverages = coverages;
5978 GET_VM()->me2counter = me2counter;
5979 GET_VM()->coverage_mode = mode;
5980}
5981
5982void
5983rb_resume_coverages(void)
5984{
5985 int mode = GET_VM()->coverage_mode;
5986 VALUE me2counter = GET_VM()->me2counter;
5987 rb_add_event_hook2((rb_event_hook_func_t) update_line_coverage, RUBY_EVENT_COVERAGE_LINE, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5988 if (mode & COVERAGE_TARGET_BRANCHES) {
5989 rb_add_event_hook2((rb_event_hook_func_t) update_branch_coverage, RUBY_EVENT_COVERAGE_BRANCH, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5990 }
5991 if (mode & COVERAGE_TARGET_METHODS) {
5992 rb_add_event_hook2((rb_event_hook_func_t) update_method_coverage, RUBY_EVENT_CALL, me2counter, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5993 }
5994}
5995
5996void
5997rb_suspend_coverages(void)
5998{
5999 rb_remove_event_hook((rb_event_hook_func_t) update_line_coverage);
6000 if (GET_VM()->coverage_mode & COVERAGE_TARGET_BRANCHES) {
6001 rb_remove_event_hook((rb_event_hook_func_t) update_branch_coverage);
6002 }
6003 if (GET_VM()->coverage_mode & COVERAGE_TARGET_METHODS) {
6004 rb_remove_event_hook((rb_event_hook_func_t) update_method_coverage);
6005 }
6006}
6007
6008/* Make coverage arrays empty so old covered files are no longer tracked. */
6009void
6010rb_reset_coverages(void)
6011{
6012 rb_clear_coverages();
6013 rb_iseq_remove_coverage_all();
6014 GET_VM()->coverages = Qfalse;
6015}
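
/* [Editor's note] A sketch of the lifecycle these entry points implement,
 * roughly what the coverage extension drives (illustrative only; the real
 * caller lives in ext/coverage):
 *
 *   rb_set_coverages(rb_hash_new(), COVERAGE_TARGET_LINES, Qnil); // install
 *   rb_resume_coverages();   // add line (and branch/method) event hooks
 *   // ... run measured code ...
 *   rb_suspend_coverages();  // remove hooks; collected data is kept
 *   rb_reset_coverages();    // drop all data and stop tracking files
 */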
6016
6017VALUE
6018rb_default_coverage(int n)
6019{
6020 VALUE coverage = rb_ary_hidden_new_fill(3);
6021 VALUE lines = Qfalse, branches = Qfalse;
6022 int mode = GET_VM()->coverage_mode;
6023
6024 if (mode & COVERAGE_TARGET_LINES) {
6025 lines = n > 0 ? rb_ary_hidden_new_fill(n) : rb_ary_hidden_new(0);
6026 }
6027 RARRAY_ASET(coverage, COVERAGE_INDEX_LINES, lines);
6028
6029 if (mode & COVERAGE_TARGET_BRANCHES) {
6030 branches = rb_ary_hidden_new_fill(2);
6031 /* internal data structures for branch coverage:
6032 *
6033 * { branch base node =>
6034 * [base_type, base_first_lineno, base_first_column, base_last_lineno, base_last_column, {
6035 * branch target id =>
6036 * [target_type, target_first_lineno, target_first_column, target_last_lineno, target_last_column, target_counter_index],
6037 * ...
6038 * }],
6039 * ...
6040 * }
6041 *
6042 * Example:
6043 * { NODE_CASE =>
6044 * [1, 0, 4, 3, {
6045 * NODE_WHEN => [2, 8, 2, 9, 0],
6046 * NODE_WHEN => [3, 8, 3, 9, 1],
6047 * ...
6048 * }],
6049 * ...
6050 * }
6051 */
6052 VALUE structure = rb_hash_new();
6053 rb_obj_hide(structure);
6054 RARRAY_ASET(branches, 0, structure);
6055 /* branch execution counters */
6056 RARRAY_ASET(branches, 1, rb_ary_hidden_new(0));
6057 }
6058 RARRAY_ASET(coverage, COVERAGE_INDEX_BRANCHES, branches);
6059
6060 return coverage;
6061}
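
/* [Editor's note] Shape of the per-file record built above (illustrative):
 *
 *   VALUE cov      = rb_default_coverage(n_lines);
 *   VALUE lines    = RARRAY_AREF(cov, COVERAGE_INDEX_LINES);
 *                    // Array of per-line counters, or Qfalse when lines are off
 *   VALUE branches = RARRAY_AREF(cov, COVERAGE_INDEX_BRANCHES);
 *                    // [structure hash, counter array], or Qfalse when branches are off
 */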
6062
6063static VALUE
6064uninterruptible_exit(VALUE v)
6065{
6066 rb_thread_t *cur_th = GET_THREAD();
6067 rb_ary_pop(cur_th->pending_interrupt_mask_stack);
6068
6069 cur_th->pending_interrupt_queue_checked = 0;
6070 if (!rb_threadptr_pending_interrupt_empty_p(cur_th)) {
6071 RUBY_VM_SET_INTERRUPT(cur_th->ec);
6072 }
6073 return Qnil;
6074}
6075
6076VALUE
6077rb_uninterruptible(VALUE (*b_proc)(VALUE), VALUE data)
6078{
6079 VALUE interrupt_mask = rb_ident_hash_new();
6080 rb_thread_t *cur_th = GET_THREAD();
6081
6082 rb_hash_aset(interrupt_mask, rb_cObject, sym_never);
6083 OBJ_FREEZE(interrupt_mask);
6084 rb_ary_push(cur_th->pending_interrupt_mask_stack, interrupt_mask);
6085
6086 VALUE ret = rb_ensure(b_proc, data, uninterruptible_exit, Qnil);
6087
6088 RUBY_VM_CHECK_INTS(cur_th->ec);
6089 return ret;
6090}
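
/* [Editor's note] Usage sketch (not part of thread.c); `critical_section`
 * is a hypothetical callback:
 *
 *   static VALUE
 *   critical_section(VALUE arg)
 *   {
 *       // Thread#raise / Thread#kill etc. are deferred while this runs
 *       return Qnil;
 *   }
 *
 *   VALUE ret = rb_uninterruptible(critical_section, Qnil);
 *   // deferred interrupts, if any, are delivered by the CHECK_INTS above
 */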
6091
6092static void
6093thread_specific_storage_alloc(rb_thread_t *th)
6094{
6095 VM_ASSERT(th->specific_storage == NULL);
6096
6097 if (UNLIKELY(specific_key_count > 0)) {
6098 th->specific_storage = ZALLOC_N(void *, RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
6099 }
6100}
6101
6102rb_internal_thread_specific_key_t
6104{
6105 rb_vm_t *vm = GET_VM();
6106
6107 if (specific_key_count == 0 && vm->ractor.cnt > 1) {
6108 rb_raise(rb_eThreadError, "The first rb_internal_thread_specific_key_create() is called with multiple ractors");
6109 }
6110 else if (specific_key_count >= RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX) {
6111 rb_raise(rb_eThreadError, "rb_internal_thread_specific_key_create() is called more than %d times", RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
6112 }
6113 else {
6114 rb_internal_thread_specific_key_t key = specific_key_count++;
6115
6116 if (key == 0) {
6117 // allocate
6118 rb_ractor_t *cr = GET_RACTOR();
6119 rb_thread_t *th;
6120
6121 ccan_list_for_each(&cr->threads.set, th, lt_node) {
6122 thread_specific_storage_alloc(th);
6123 }
6124 }
6125 return key;
6126 }
6127}
6128
6129// async and native thread safe.
6130void *
6131rb_internal_thread_specific_get(VALUE thread_val, rb_internal_thread_specific_key_t key)
6132{
6133 rb_thread_t *th = DATA_PTR(thread_val);
6134
6135 VM_ASSERT(rb_thread_ptr(thread_val) == th);
6136 VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
6137 VM_ASSERT(th->specific_storage);
6138
6139 return th->specific_storage[key];
6140}
6141
6142// async and native thread safe.
6143void
6144rb_internal_thread_specific_set(VALUE thread_val, rb_internal_thread_specific_key_t key, void *data)
6145{
6146 rb_thread_t *th = DATA_PTR(thread_val);
6147
6148 VM_ASSERT(rb_thread_ptr(thread_val) == th);
6149 VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
6150 VM_ASSERT(th->specific_storage);
6151
6152 th->specific_storage[key] = data;
6153}
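
/* [Editor's note] How a tool (e.g. a profiler extension) might use the key
 * API above; sketch only:
 *
 *   static rb_internal_thread_specific_key_t tool_key;
 *
 *   // once, at extension init, before additional ractors exist:
 *   tool_key = rb_internal_thread_specific_key_create();
 *
 *   // later, possibly from a native thread:
 *   rb_internal_thread_specific_set(thread_value, tool_key, some_ptr);
 *   void *p = rb_internal_thread_specific_get(thread_value, tool_key);
 */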
6154
6155// interrupt_exec
6156
6157 struct rb_interrupt_exec_task {
6158 struct ccan_list_node node;
6159
6160 rb_interrupt_exec_func_t *func;
6161 void *data;
6162 enum rb_interrupt_exec_flag flags;
6163};
6164
6165void
6166rb_threadptr_interrupt_exec_task_mark(rb_thread_t *th)
6167{
6168 struct rb_interrupt_exec_task *task;
6169
6170 ccan_list_for_each(&th->interrupt_exec_tasks, task, node) {
6171 if (task->flags & rb_interrupt_exec_flag_value_data) {
6172 rb_gc_mark((VALUE)task->data);
6173 }
6174 }
6175}
6176
6177// native thread safe
6178// th should be available
6179void
6180rb_threadptr_interrupt_exec(rb_thread_t *th, rb_interrupt_exec_func_t *func, void *data, enum rb_interrupt_exec_flag flags)
6181{
6182 // should not use ALLOC
6183 struct rb_interrupt_exec_task *task = ALLOC(struct rb_interrupt_exec_task);
6184 *task = (struct rb_interrupt_exec_task) {
6185 .flags = flags,
6186 .func = func,
6187 .data = data,
6188 };
6189
6190 rb_native_mutex_lock(&th->interrupt_lock);
6191 {
6192 ccan_list_add_tail(&th->interrupt_exec_tasks, &task->node);
6193 threadptr_set_interrupt_locked(th, true);
6194 }
6195 rb_native_mutex_unlock(&th->interrupt_lock);
6196}
6197
6198static void
6199threadptr_interrupt_exec_exec(rb_thread_t *th)
6200{
6201 while (1) {
6202 struct rb_interrupt_exec_task *task;
6203
6204 rb_native_mutex_lock(&th->interrupt_lock);
6205 {
6206 task = ccan_list_pop(&th->interrupt_exec_tasks, struct rb_interrupt_exec_task, node);
6207 }
6208 rb_native_mutex_unlock(&th->interrupt_lock);
6209
6210 RUBY_DEBUG_LOG("task:%p", task);
6211
6212 if (task) {
6213 (*task->func)(task->data);
6214 ruby_xfree(task);
6215 }
6216 else {
6217 break;
6218 }
6219 }
6220}
6221
6222static void
6223threadptr_interrupt_exec_cleanup(rb_thread_t *th)
6224{
6225 rb_native_mutex_lock(&th->interrupt_lock);
6226 {
6227 struct rb_interrupt_exec_task *task;
6228
6229 while ((task = ccan_list_pop(&th->interrupt_exec_tasks, struct rb_interrupt_exec_task, node)) != NULL) {
6230 ruby_xfree(task);
6231 }
6232 }
6233 rb_native_mutex_unlock(&th->interrupt_lock);
6234}
6235
6236 struct interrupt_ractor_new_thread_data {
6237 rb_interrupt_exec_func_t *func;
6238 void *data;
6239};
6240
6241static VALUE
6242interrupt_ractor_new_thread_func(void *data)
6243{
6244 struct interrupt_ractor_new_thread_data d = *(struct interrupt_ractor_new_thread_data *)data;
6245 ruby_xfree(data);
6246
6247 d.func(d.data);
6248 return Qnil;
6249}
6250
6251static VALUE
6252interrupt_ractor_func(void *data)
6253{
6254 rb_thread_create(interrupt_ractor_new_thread_func, data);
6255 return Qnil;
6256}
6257
6258// native thread safe
6259// func/data should be native thread safe
6260void
6261rb_ractor_interrupt_exec(struct rb_ractor_struct *target_r,
6262 rb_interrupt_exec_func_t *func, void *data, enum rb_interrupt_exec_flag flags)
6263{
6264 struct interrupt_ractor_new_thread_data *d = ALLOC(struct interrupt_ractor_new_thread_data);
6265
6266 RUBY_DEBUG_LOG("flags:%d", (int)flags);
6267
6268 d->func = func;
6269 d->data = data;
6270 rb_thread_t *main_th = target_r->threads.main;
6271 rb_threadptr_interrupt_exec(main_th, interrupt_ractor_func, d, flags);
6272
6273 // TODO MEMO: we can create a new thread in a ractor, but it is not yet clear how best to do that.
6274}
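
/* [Editor's note] Usage sketch for the interrupt-exec machinery above;
 * `my_task` is hypothetical, and rb_interrupt_exec_flag_none is assumed to
 * be the zero flag value of enum rb_interrupt_exec_flag:
 *
 *   static void
 *   my_task(void *data)
 *   {
 *       // runs on the target thread the next time it checks interrupts
 *   }
 *
 *   rb_threadptr_interrupt_exec(th, my_task, NULL, rb_interrupt_exec_flag_none);
 *
 *   // or, for another ractor, queued on its main thread and run in a
 *   // freshly created thread there:
 *   rb_ractor_interrupt_exec(target_ractor, my_task, NULL, rb_interrupt_exec_flag_none);
 */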
6275