Ruby 4.0.0dev (2025-11-30 revision a0cd81e005d185be37b3a0323e4ee61b7b4360a6)
thread.c (a0cd81e005d185be37b3a0323e4ee61b7b4360a6)
1/**********************************************************************
2
3 thread.c -
4
5 $Author$
6
7 Copyright (C) 2004-2007 Koichi Sasada
8
9**********************************************************************/
10
11/*
12 YARV Thread Design
13
14 model 1: Userlevel Thread
15 Same as traditional ruby thread.
16
17 model 2: Native Thread with Global VM lock
18 Using pthread (or Windows thread) and Ruby threads run concurrent.
19
20 model 3: Native Thread with fine grain lock
21 Using pthread and Ruby threads run concurrent or parallel.
22
23 model 4: M:N User:Native threads with Global VM lock
24 Combination of model 1 and 2
25
26 model 5: M:N User:Native thread with fine grain lock
27 Combination of model 1 and 3
28
29------------------------------------------------------------------------
30
31 model 2:
  A thread that holds the mutex (GVL: Global VM Lock, or Giant VM Lock)
  can run. When scheduling, the running thread releases the GVL. If the
  running thread attempts a blocking operation, it must release the GVL
  so that another thread can continue. After the blocking operation, the
  thread must check for interrupts (RUBY_VM_CHECK_INTS).
37
38 Every VM can run parallel.
39
40 Ruby threads are scheduled by OS thread scheduler.
41
42------------------------------------------------------------------------
43
44 model 3:
  Every thread runs concurrently or in parallel, so exclusive access
  control is needed for shared objects. For example, to access a String
  or an Array object, a fine-grained lock must be taken every time.
48 */
49
50
51/*
52 * FD_SET, FD_CLR and FD_ISSET have a small sanity check when using glibc
53 * 2.15 or later and set _FORTIFY_SOURCE > 0.
54 * However, the implementation is wrong. Even though Linux's select(2)
55 * supports large fd size (>FD_SETSIZE), it wrongly assumes fd is always
56 * less than FD_SETSIZE (i.e. 1024). And then when enabling HAVE_RB_FD_INIT,
57 * it doesn't work correctly and makes program abort. Therefore we need to
58 * disable FORTIFY_SOURCE until glibc fixes it.
59 */
60#undef _FORTIFY_SOURCE
61#undef __USE_FORTIFY_LEVEL
62#define __USE_FORTIFY_LEVEL 0
63
64/* for model 2 */
65
66#include "ruby/internal/config.h"
67
68#ifdef __linux__
69// Normally, gcc(1) translates calls to alloca() with inlined code. This is not done when either the -ansi, -std=c89, -std=c99, or the -std=c11 option is given and the header <alloca.h> is not included.
70# include <alloca.h>
71#endif
72
73#define TH_SCHED(th) (&(th)->ractor->threads.sched)
74
75#include "eval_intern.h"
76#include "hrtime.h"
77#include "internal.h"
78#include "internal/class.h"
79#include "internal/cont.h"
80#include "internal/error.h"
81#include "internal/eval.h"
82#include "internal/gc.h"
83#include "internal/hash.h"
84#include "internal/io.h"
85#include "internal/object.h"
86#include "internal/proc.h"
88#include "internal/signal.h"
89#include "internal/thread.h"
90#include "internal/time.h"
91#include "internal/warnings.h"
92#include "iseq.h"
93#include "ruby/debug.h"
94#include "ruby/io.h"
95#include "ruby/thread.h"
96#include "ruby/thread_native.h"
97#include "timev.h"
98#include "vm_core.h"
99#include "ractor_core.h"
100#include "vm_debug.h"
101#include "vm_sync.h"
102
103#include "ccan/list/list.h"
104
105#ifndef USE_NATIVE_THREAD_PRIORITY
106#define USE_NATIVE_THREAD_PRIORITY 0
107#define RUBY_THREAD_PRIORITY_MAX 3
108#define RUBY_THREAD_PRIORITY_MIN -3
109#endif
110
111static VALUE rb_cThreadShield;
112static VALUE cThGroup;
113
114static VALUE sym_immediate;
115static VALUE sym_on_blocking;
116static VALUE sym_never;
117
118static uint32_t thread_default_quantum_ms = 100;
119
120#define THREAD_LOCAL_STORAGE_INITIALISED FL_USER13
121#define THREAD_LOCAL_STORAGE_INITIALISED_P(th) RB_FL_TEST_RAW((th), THREAD_LOCAL_STORAGE_INITIALISED)
122
/* Return the thread's fiber-local storage Hash (the `locals` ivar),
 * creating it lazily on first access. The FL_USER flag avoids a Hash
 * allocation check through rb_ivar_get on every call. */
static inline VALUE
rb_thread_local_storage(VALUE thread)
{
    if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
        rb_ivar_set(thread, idLocals, rb_hash_new());
        RB_FL_SET_RAW(thread, THREAD_LOCAL_STORAGE_INITIALISED);
    }
    return rb_ivar_get(thread, idLocals);
}
132
/* Flags controlling how sleep_forever()/sleep_hrtime() behave with
 * respect to deadlock detection, spurious wakeups and interrupt checks.
 * (Exact semantics live in the sleep implementations below.) */
enum SLEEP_FLAGS {
    SLEEP_DEADLOCKABLE = 0x01,   // sleep may participate in deadlock detection
    SLEEP_SPURIOUS_CHECK = 0x02, // presumably: re-sleep on spurious wakeup — confirm in sleep_forever
    SLEEP_ALLOW_SPURIOUS = 0x04, // presumably: return to caller on spurious wakeup — confirm
    SLEEP_NO_CHECKINTS = 0x08,   // skip interrupt checking around the sleep
};
139
140static void sleep_forever(rb_thread_t *th, unsigned int fl);
141static int sleep_hrtime(rb_thread_t *, rb_hrtime_t, unsigned int fl);
142
143static void rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end);
144static int rb_threadptr_dead(rb_thread_t *th);
145static void rb_check_deadlock(rb_ractor_t *r);
146static int rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th);
147static const char *thread_status_name(rb_thread_t *th, int detail);
148static int hrtime_update_expire(rb_hrtime_t *, const rb_hrtime_t);
149NORETURN(static void async_bug_fd(const char *mesg, int errno_arg, int fd));
150MAYBE_UNUSED(static int consume_communication_pipe(int fd));
151
152static rb_atomic_t system_working = 1;
153static rb_internal_thread_specific_key_t specific_key_count;
154
155/********************************************************************************/
156
157#define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
158
160 enum rb_thread_status prev_status;
161};
162
163static int unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted);
164static void unblock_function_clear(rb_thread_t *th);
165
166static inline int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
167 rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted);
168static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);
169
/* THREAD_BLOCKING_BEGIN/END must be used as a matched pair in the same
 * scope: BEGIN opens a do-block, saves the machine context and hands the
 * scheduler slot back; END reacquires it and closes the do-block. */
#define THREAD_BLOCKING_BEGIN(th) do { \
    struct rb_thread_sched * const sched = TH_SCHED(th); \
    RB_VM_SAVE_MACHINE_CONTEXT(th); \
    thread_sched_to_waiting((sched), (th));

#define THREAD_BLOCKING_END(th) \
    thread_sched_to_running((sched), (th)); \
    rb_ractor_thread_switch(th->ractor, th, false); \
} while(0)

/* only_if_constant(expr, notconst): evaluate to expr when the compiler can
 * prove it constant, otherwise to notconst. Used below so that a literal
 * fail_if_interrupted folds the condition away at compile time. */
#ifdef __GNUC__
#ifdef HAVE_BUILTIN___BUILTIN_CHOOSE_EXPR_CONSTANT_P
#define only_if_constant(expr, notconst) __builtin_choose_expr(__builtin_constant_p(expr), (expr), (notconst))
#else
#define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
#endif
#else
#define only_if_constant(expr, notconst) notconst
#endif
/* Run `exec` with the GVL released, registering (ubf, ubfarg) as the
 * unblock function. When fail_if_interrupted is set and an interrupt is
 * already pending, `exec` is skipped entirely. */
#define BLOCKING_REGION(th, exec, ubf, ubfarg, fail_if_interrupted) do { \
    struct rb_blocking_region_buffer __region; \
    if (blocking_region_begin(th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
        /* always return true unless fail_if_interrupted */ \
        !only_if_constant(fail_if_interrupted, TRUE)) { \
        /* Important that this is inlined into the macro, and not part of \
         * blocking_region_begin - see bug #20493 */ \
        RB_VM_SAVE_MACHINE_CONTEXT(th); \
        thread_sched_to_waiting(TH_SCHED(th), th); \
        exec; \
        blocking_region_end(th, &__region); \
    }; \
} while(0)
202
203/*
204 * returns true if this thread was spuriously interrupted, false otherwise
205 * (e.g. hit by Thread#run or ran a Ruby-level Signal.trap handler)
206 */
207#define RUBY_VM_CHECK_INTS_BLOCKING(ec) vm_check_ints_blocking(ec)
208static inline int
209vm_check_ints_blocking(rb_execution_context_t *ec)
210{
211#ifdef RUBY_ASSERT_CRITICAL_SECTION
212 VM_ASSERT(ruby_assert_critical_section_entered == 0);
213#endif
214
215 rb_thread_t *th = rb_ec_thread_ptr(ec);
216
217 if (LIKELY(rb_threadptr_pending_interrupt_empty_p(th))) {
218 if (LIKELY(!RUBY_VM_INTERRUPTED_ANY(ec))) return FALSE;
219 }
220 else {
221 th->pending_interrupt_queue_checked = 0;
222 RUBY_VM_SET_INTERRUPT(ec);
223 }
224 return rb_threadptr_execute_interrupts(th, 1);
225}
226
/* Exported (non-inline) wrapper around vm_check_ints_blocking() for use
 * from other translation units. */
int
rb_vm_check_ints_blocking(rb_execution_context_t *ec)
{
    return vm_check_ints_blocking(ec);
}
232
233/*
234 * poll() is supported by many OSes, but so far Linux is the only
235 * one we know of that supports using poll() in all places select()
236 * would work.
237 */
238#if defined(HAVE_POLL)
239# if defined(__linux__)
240# define USE_POLL
241# endif
242# if defined(__FreeBSD_version) && __FreeBSD_version >= 1100000
243# define USE_POLL
244 /* FreeBSD does not set POLLOUT when POLLHUP happens */
245# define POLLERR_SET (POLLHUP | POLLERR)
246# endif
247#endif
248
249static void
250timeout_prepare(rb_hrtime_t **to, rb_hrtime_t *rel, rb_hrtime_t *end,
251 const struct timeval *timeout)
252{
253 if (timeout) {
254 *rel = rb_timeval2hrtime(timeout);
255 *end = rb_hrtime_add(rb_hrtime_now(), *rel);
256 *to = rel;
257 }
258 else {
259 *to = 0;
260 }
261}
262
263MAYBE_UNUSED(NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start)));
264MAYBE_UNUSED(static bool th_has_dedicated_nt(const rb_thread_t *th));
265MAYBE_UNUSED(static int waitfd_to_waiting_flag(int wfd_event));
266
267#include THREAD_IMPL_SRC
268
269/*
270 * TODO: somebody with win32 knowledge should be able to get rid of
271 * timer-thread by busy-waiting on signals. And it should be possible
272 * to make the GVL in thread_pthread.c be platform-independent.
273 */
274#ifndef BUSY_WAIT_SIGNALS
275# define BUSY_WAIT_SIGNALS (0)
276#endif
277
278#ifndef USE_EVENTFD
279# define USE_EVENTFD (0)
280#endif
281
282#include "thread_sync.c"
283
284void
285rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
286{
288}
289
290void
291rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
292{
294}
295
296void
297rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
298{
300}
301
302void
303rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
304{
306}
307
/* Register (func, arg) as th's unblock function (UBF) — the callback used
 * to knock the thread out of a blocking operation when it is interrupted.
 *
 * Loops until the interrupt lock can be held with no interrupt pending,
 * so a UBF is never installed "under" an already-delivered interrupt.
 * When fail_if_interrupted is set and an interrupt is pending, returns
 * FALSE without registering; otherwise returns TRUE. */
static int
unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted)
{
    do {
        if (fail_if_interrupted) {
            if (RUBY_VM_INTERRUPTED_ANY(th->ec)) {
                return FALSE;
            }
        }
        else {
            RUBY_VM_CHECK_INTS(th->ec);
        }

        rb_native_mutex_lock(&th->interrupt_lock);
    } while (!th->ec->raised_flag && RUBY_VM_INTERRUPTED_ANY(th->ec) &&
             (rb_native_mutex_unlock(&th->interrupt_lock), TRUE));
    /* loop exits still holding interrupt_lock */

    VM_ASSERT(th->unblock.func == NULL);

    th->unblock.func = func;
    th->unblock.arg = arg;
    rb_native_mutex_unlock(&th->interrupt_lock);

    return TRUE;
}
333
/* Deregister th's unblock function, under the interrupt lock so it cannot
 * race with threadptr_set_interrupt_locked() invoking it. */
static void
unblock_function_clear(rb_thread_t *th)
{
    rb_native_mutex_lock(&th->interrupt_lock);
    th->unblock.func = 0;
    rb_native_mutex_unlock(&th->interrupt_lock);
}
341
342static void
343threadptr_set_interrupt_locked(rb_thread_t *th, bool trap)
344{
345 // th->interrupt_lock should be acquired here
346
347 RUBY_DEBUG_LOG("th:%u trap:%d", rb_th_serial(th), trap);
348
349 if (trap) {
350 RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
351 }
352 else {
353 RUBY_VM_SET_INTERRUPT(th->ec);
354 }
355
356 if (th->unblock.func != NULL) {
357 (th->unblock.func)(th->unblock.arg);
358 }
359 else {
360 /* none */
361 }
362}
363
364static void
365threadptr_set_interrupt(rb_thread_t *th, int trap)
366{
367 rb_native_mutex_lock(&th->interrupt_lock);
368 {
369 threadptr_set_interrupt_locked(th, trap);
370 }
371 rb_native_mutex_unlock(&th->interrupt_lock);
372}
373
/* Set interrupt flag on another thread or current thread, and call its UBF if it has one set */
void
rb_threadptr_interrupt(rb_thread_t *th)
{
    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
    // false => ordinary (non-trap) interrupt
    threadptr_set_interrupt(th, false);
}
381
/* Like rb_threadptr_interrupt(), but raises the trap-interrupt flag
 * (used for signal-trap delivery). */
static void
threadptr_trap_interrupt(rb_thread_t *th)
{
    threadptr_set_interrupt(th, true);
}
387
388static void
389terminate_all(rb_ractor_t *r, const rb_thread_t *main_thread)
390{
391 rb_thread_t *th = 0;
392
393 ccan_list_for_each(&r->threads.set, th, lt_node) {
394 if (th != main_thread) {
395 RUBY_DEBUG_LOG("terminate start th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));
396
397 rb_threadptr_pending_interrupt_enque(th, RUBY_FATAL_THREAD_TERMINATED);
398 rb_threadptr_interrupt(th);
399
400 RUBY_DEBUG_LOG("terminate done th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));
401 }
402 else {
403 RUBY_DEBUG_LOG("main thread th:%u", rb_th_serial(th));
404 }
405 }
406}
407
408static void
409rb_threadptr_join_list_wakeup(rb_thread_t *thread)
410{
411 while (thread->join_list) {
412 struct rb_waiting_list *join_list = thread->join_list;
413
414 // Consume the entry from the join list:
415 thread->join_list = join_list->next;
416
417 rb_thread_t *target_thread = join_list->thread;
418
419 if (target_thread->scheduler != Qnil && join_list->fiber) {
420 rb_fiber_scheduler_unblock(target_thread->scheduler, target_thread->self, rb_fiberptr_self(join_list->fiber));
421 }
422 else {
423 rb_threadptr_interrupt(target_thread);
424
425 switch (target_thread->status) {
426 case THREAD_STOPPED:
427 case THREAD_STOPPED_FOREVER:
428 target_thread->status = THREAD_RUNNABLE;
429 break;
430 default:
431 break;
432 }
433 }
434 }
435}
436
437void
438rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
439{
440 while (th->keeping_mutexes) {
441 rb_mutex_t *mutex = th->keeping_mutexes;
442 th->keeping_mutexes = mutex->next_mutex;
443
444 // rb_warn("mutex #<%p> was not unlocked by thread #<%p>", (void *)mutex, (void*)th);
445 VM_ASSERT(mutex->fiber_serial);
446 const char *error_message = rb_mutex_unlock_th(mutex, th, NULL);
447 if (error_message) rb_bug("invalid keeping_mutexes: %s", error_message);
448 }
449}
450
/* Terminate every other thread in th's ractor and wait until this thread
 * is the only one left. Must be called on the ractor's main thread.
 * `sleeping` is volatile because it is read after a longjmp back into
 * the EC_EXEC_TAG branch. */
void
rb_thread_terminate_all(rb_thread_t *th)
{
    rb_ractor_t *cr = th->ractor;
    rb_execution_context_t * volatile ec = th->ec;
    volatile int sleeping = 0;

    if (cr->threads.main != th) {
        rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
               (void *)cr->threads.main, (void *)th);
    }

    /* unlock all locking mutexes */
    rb_threadptr_unlock_all_locking_mutexes(th);

    EC_PUSH_TAG(ec);
    if (EC_EXEC_TAG() == TAG_NONE) {
      retry:
        RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));

        terminate_all(cr, th);

        while (rb_ractor_living_thread_num(cr) > 1) {
            rb_hrtime_t rel = RB_HRTIME_PER_SEC;
            /*
             * Thread exiting routine in thread_start_func_2 notify
             * me when the last sub-thread exit.
             */
            sleeping = 1;
            native_sleep(th, &rel);
            RUBY_VM_CHECK_INTS_BLOCKING(ec);
            sleeping = 0;
        }
    }
    else {
        /*
         * When caught an exception (e.g. Ctrl+C), let's broadcast
         * kill request again to ensure killing all threads even
         * if they are blocked on sleep, mutex, etc.
         */
        if (sleeping) {
            sleeping = 0;
            goto retry;
        }
    }
    EC_POP_TAG();
}
498
499void rb_threadptr_root_fiber_terminate(rb_thread_t *th);
500static void threadptr_interrupt_exec_cleanup(rb_thread_t *th);
501
/* Cleanup that is safe to run right before exec()/in a forked child:
 * mark the thread dead, drop machine-stack pointers, flush pending
 * interrupt-exec tasks and terminate the root fiber. */
static void
thread_cleanup_func_before_exec(void *th_ptr)
{
    rb_thread_t *th = th_ptr;
    th->status = THREAD_KILLED;

    // The thread stack doesn't exist in the forked process:
    th->ec->machine.stack_start = th->ec->machine.stack_end = NULL;

    threadptr_interrupt_exec_cleanup(th);
    rb_threadptr_root_fiber_terminate(th);
}
514
/* Full per-thread teardown. After the exec-safe cleanup, either release
 * the native thread handle (atfork path, where the OS thread is gone and
 * mutex state is untrustworthy) or destroy the interrupt lock. */
static void
thread_cleanup_func(void *th_ptr, int atfork)
{
    rb_thread_t *th = th_ptr;

    th->locking_mutex = Qfalse;
    thread_cleanup_func_before_exec(th_ptr);

    if (atfork) {
        native_thread_destroy_atfork(th->nt);
        th->nt = NULL;
        return;
    }

    rb_native_mutex_destroy(&th->interrupt_lock);
}
531
/* Release a thread's native-thread handle and clear the reference. */
void
rb_thread_free_native_thread(void *th_ptr)
{
    rb_thread_t *th = th_ptr;

    native_thread_destroy_atfork(th->nt);
    th->nt = NULL;
}
540
541static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
542static VALUE rb_thread_to_s(VALUE thread);
543
/* Record machine stack bounds for th, using local_in_parent_frame as a
 * reference address in the creating frame; delegates to the platform
 * implementation (thread_pthread.c / thread_win32.c). */
void
ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame)
{
    native_thread_init_stack(th, local_in_parent_frame);
}
549
550const VALUE *
551rb_vm_proc_local_ep(VALUE proc)
552{
553 const VALUE *ep = vm_proc_ep(proc);
554
555 if (ep) {
556 return rb_vm_ep_local_ep(ep);
557 }
558 else {
559 return NULL;
560 }
561}
562
563// for ractor, defined in vm.c
564VALUE rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
565 int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler);
566
/* Run the proc a thread was created with. For a ractor main thread the
 * arguments are received over the ractor channel (their count was stashed
 * as a Fixnum in invoke_arg.proc.args); otherwise they come from the args
 * Array, copied to the stack when small so the Array can be released. */
static VALUE
thread_do_start_proc(rb_thread_t *th)
{
    VALUE args = th->invoke_arg.proc.args;
    const VALUE *args_ptr;
    int args_len;
    VALUE procval = th->invoke_arg.proc.proc;
    rb_proc_t *proc;
    GetProcPtr(procval, proc);

    th->ec->errinfo = Qnil;
    th->ec->root_lep = rb_vm_proc_local_ep(procval);
    th->ec->root_svar = Qfalse;

    vm_check_ints_blocking(th->ec);

    if (th->invoke_type == thread_invoke_type_ractor_proc) {
        VALUE self = rb_ractor_self(th->ractor);
        // A fresh ractor gets its own default ThreadGroup.
        th->thgroup = th->ractor->thgroup_default = rb_obj_alloc(cThGroup);

        VM_ASSERT(FIXNUM_P(args));
        args_len = FIX2INT(args);
        args_ptr = ALLOCA_N(VALUE, args_len);
        rb_ractor_receive_parameters(th->ec, th->ractor, args_len, (VALUE *)args_ptr);
        vm_check_ints_blocking(th->ec);

        return rb_vm_invoke_proc_with_self(
            th->ec, proc, self,
            args_len, args_ptr,
            th->invoke_arg.proc.kw_splat,
            VM_BLOCK_HANDLER_NONE
        );
    }
    else {
        args_len = RARRAY_LENINT(args);
        if (args_len < 8) {
            /* free proc.args if the length is enough small */
            args_ptr = ALLOCA_N(VALUE, args_len);
            MEMCPY((VALUE *)args_ptr, RARRAY_CONST_PTR(args), VALUE, args_len);
            th->invoke_arg.proc.args = Qnil;
        }
        else {
            args_ptr = RARRAY_CONST_PTR(args);
        }

        vm_check_ints_blocking(th->ec);

        return rb_vm_invoke_proc(
            th->ec, proc,
            args_len, args_ptr,
            th->invoke_arg.proc.kw_splat,
            VM_BLOCK_HANDLER_NONE
        );
    }
}
622
623static VALUE
624thread_do_start(rb_thread_t *th)
625{
626 native_set_thread_name(th);
627 VALUE result = Qundef;
628
629 switch (th->invoke_type) {
630 case thread_invoke_type_proc:
631 result = thread_do_start_proc(th);
632 break;
633
634 case thread_invoke_type_ractor_proc:
635 result = thread_do_start_proc(th);
636 rb_ractor_atexit(th->ec, result);
637 break;
638
639 case thread_invoke_type_func:
640 result = (*th->invoke_arg.func.func)(th->invoke_arg.func.arg);
641 break;
642
643 case thread_invoke_type_none:
644 rb_bug("unreachable");
645 }
646
647 return result;
648}
649
650void rb_ec_clear_current_thread_trace_func(const rb_execution_context_t *ec);
651
/* Main body of every non-main native thread: runs the user-supplied work,
 * records the result (or the escaping exception), wakes joiners, and tears
 * the thread down. Returns 0 (the native thread exit status). */
static int
thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
{
    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
    VM_ASSERT(th != th->vm->ractor.main_thread);

    enum ruby_tag_type state;
    VALUE errinfo = Qnil;
    rb_thread_t *ractor_main_th = th->ractor->threads.main;

    // setup ractor
    if (rb_ractor_status_p(th->ractor, ractor_blocking)) {
        RB_VM_LOCK();
        {
            rb_vm_ractor_blocking_cnt_dec(th->vm, th->ractor, __FILE__, __LINE__);
            rb_ractor_t *r = th->ractor;
            r->r_stdin = rb_io_prep_stdin();
            r->r_stdout = rb_io_prep_stdout();
            r->r_stderr = rb_io_prep_stderr();
        }
        RB_VM_UNLOCK();
    }

    // Ensure that we are not joinable.
    VM_ASSERT(UNDEF_P(th->value));

    int fiber_scheduler_closed = 0, event_thread_end_hooked = 0;
    VALUE result = Qundef;

    EC_PUSH_TAG(th->ec);

    if ((state = EC_EXEC_TAG()) == TAG_NONE) {
        EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_BEGIN, th->self, 0, 0, 0, Qundef);

        result = thread_do_start(th);
    }

    if (!fiber_scheduler_closed) {
        fiber_scheduler_closed = 1;
        /* NOTE(review): the scheduler-close call appears to be missing from
         * this copy of the file — confirm against upstream thread.c. */
    }

    if (!event_thread_end_hooked) {
        event_thread_end_hooked = 1;
        EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_END, th->self, 0, 0, 0, Qundef);
    }

    if (state == TAG_NONE) {
        // This must be set AFTER doing all user-level code. At this point, the thread is effectively finished and calls to `Thread#join` will succeed.
        th->value = result;
    }
    else {
        errinfo = th->ec->errinfo;

        VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef);
        if (!NIL_P(exc)) errinfo = exc;

        if (state == TAG_FATAL) {
            if (th->invoke_type == thread_invoke_type_ractor_proc) {
                rb_ractor_atexit(th->ec, Qnil);
            }
            /* fatal error within this thread, need to stop whole script */
        }
        else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
            if (th->invoke_type == thread_invoke_type_ractor_proc) {
                rb_ractor_atexit_exception(th->ec);
            }

            /* exit on main_thread. */
        }
        else {
            if (th->report_on_exception) {
                VALUE mesg = rb_thread_to_s(th->self);
                rb_str_cat_cstr(mesg, " terminated with exception (report_on_exception is true):\n");
                rb_write_error_str(mesg);
                rb_ec_error_print(th->ec, errinfo);
            }

            if (th->invoke_type == thread_invoke_type_ractor_proc) {
                rb_ractor_atexit_exception(th->ec);
            }

            if (th->vm->thread_abort_on_exception ||
                th->abort_on_exception || RTEST(ruby_debug)) {
                /* exit on main_thread */
            }
            else {
                // Exception is dropped here; joiners will re-raise from errinfo.
                errinfo = Qnil;
            }
        }
        th->value = Qnil;
    }

    // The thread is effectively finished and can be joined.
    VM_ASSERT(!UNDEF_P(th->value));

    rb_threadptr_join_list_wakeup(th);
    rb_threadptr_unlock_all_locking_mutexes(th);

    if (th->invoke_type == thread_invoke_type_ractor_proc) {
        rb_thread_terminate_all(th);
        rb_ractor_teardown(th->ec);
    }

    th->status = THREAD_KILLED;
    RUBY_DEBUG_LOG("killed th:%u", rb_th_serial(th));

    if (th->vm->ractor.main_thread == th) {
        ruby_stop(0);
    }

    if (RB_TYPE_P(errinfo, T_OBJECT)) {
        /* treat with normal error object */
        rb_threadptr_raise(ractor_main_th, 1, &errinfo);
    }

    EC_POP_TAG();

    rb_ec_clear_current_thread_trace_func(th->ec);

    /* locking_mutex must be Qfalse */
    if (th->locking_mutex != Qfalse) {
        rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
               (void *)th, th->locking_mutex);
    }

    if (ractor_main_th->status == THREAD_KILLED &&
        th->ractor->threads.cnt <= 2 /* main thread and this thread */) {
        /* I'm last thread. wake up main thread from rb_thread_terminate_all */
        rb_threadptr_interrupt(ractor_main_th);
    }

    rb_check_deadlock(th->ractor);

    rb_fiber_close(th->ec->fiber_ptr);

    thread_cleanup_func(th, FALSE);
    VM_ASSERT(th->ec->vm_stack == NULL);

    if (th->invoke_type == thread_invoke_type_ractor_proc) {
        // after rb_ractor_living_threads_remove()
        // GC will happen anytime and this ractor can be collected (and destroy GVL).
        // So gvl_release() should be before it.
        thread_sched_to_dead(TH_SCHED(th), th);
        rb_ractor_living_threads_remove(th->ractor, th);
    }
    else {
        rb_ractor_living_threads_remove(th->ractor, th);
        thread_sched_to_dead(TH_SCHED(th), th);
    }

    return 0;
}
807 enum thread_invoke_type type;
808
809 // for normal proc thread
810 VALUE args;
811 VALUE proc;
812
813 // for ractor
814 rb_ractor_t *g;
815
816 // for func
817 VALUE (*fn)(void *);
818};
819
820static void thread_specific_storage_alloc(rb_thread_t *th);
821
/* Common backend for Thread.new / Thread.start / rb_thread_create /
 * ractor creation: fill in th's invocation parameters from `params`,
 * inherit attributes from the creating thread, register the thread with
 * its ractor and kick off the native thread. Returns thval; raises
 * ThreadError when the native thread cannot be created. */
static VALUE
thread_create_core(VALUE thval, struct thread_create_params *params)
{
    rb_execution_context_t *ec = GET_EC();
    rb_thread_t *th = rb_thread_ptr(thval), *current_th = rb_ec_thread_ptr(ec);
    int err;

    thread_specific_storage_alloc(th);

    if (OBJ_FROZEN(current_th->thgroup)) {
        rb_raise(rb_eThreadError,
                 "can't start a new thread (frozen ThreadGroup)");
    }

    rb_fiber_inherit_storage(ec, th->ec->fiber_ptr);

    switch (params->type) {
      case thread_invoke_type_proc:
        th->invoke_type = thread_invoke_type_proc;
        th->invoke_arg.proc.args = params->args;
        th->invoke_arg.proc.proc = params->proc;
        th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
        break;

      case thread_invoke_type_ractor_proc:
#if RACTOR_CHECK_MODE > 0
        rb_ractor_setup_belonging_to(thval, rb_ractor_id(params->g));
#endif
        th->invoke_type = thread_invoke_type_ractor_proc;
        th->ractor = params->g;
        th->ractor->threads.main = th;
        // Proc must be isolated before it may cross into the new ractor.
        th->invoke_arg.proc.proc = rb_proc_isolate_bang(params->proc, Qnil);
        // Only the arg COUNT is stored; values travel over the ractor channel.
        th->invoke_arg.proc.args = INT2FIX(RARRAY_LENINT(params->args));
        th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
        rb_ractor_send_parameters(ec, params->g, params->args);
        break;

      case thread_invoke_type_func:
        th->invoke_type = thread_invoke_type_func;
        th->invoke_arg.func.func = params->fn;
        th->invoke_arg.func.arg = (void *)params->args;
        break;

      default:
        rb_bug("unreachable");
    }

    // Inherit scheduling priority and thread group from the creator.
    th->priority = current_th->priority;
    th->thgroup = current_th->thgroup;

    th->pending_interrupt_queue = rb_ary_hidden_new(0);
    th->pending_interrupt_queue_checked = 0;
    th->pending_interrupt_mask_stack = rb_ary_dup(current_th->pending_interrupt_mask_stack);
    RBASIC_CLEAR_CLASS(th->pending_interrupt_mask_stack);

    rb_native_mutex_initialize(&th->interrupt_lock);

    RUBY_DEBUG_LOG("r:%u th:%u", rb_ractor_id(th->ractor), rb_th_serial(th));

    rb_ractor_living_threads_insert(th->ractor, th);

    /* kick thread */
    err = native_thread_create(th);
    if (err) {
        // Roll back registration so the dead object is not counted as living.
        th->status = THREAD_KILLED;
        rb_ractor_living_threads_remove(th->ractor, th);
        rb_raise(rb_eThreadError, "can't create Thread: %s", strerror(err));
    }
    return thval;
}
892
893#define threadptr_initialized(th) ((th)->invoke_type != thread_invoke_type_none)
894
895/*
896 * call-seq:
897 * Thread.new { ... } -> thread
898 * Thread.new(*args, &proc) -> thread
899 * Thread.new(*args) { |args| ... } -> thread
900 *
901 * Creates a new thread executing the given block.
902 *
903 * Any +args+ given to ::new will be passed to the block:
904 *
905 * arr = []
906 * a, b, c = 1, 2, 3
907 * Thread.new(a,b,c) { |d,e,f| arr << d << e << f }.join
908 * arr #=> [1, 2, 3]
909 *
910 * A ThreadError exception is raised if ::new is called without a block.
911 *
912 * If you're going to subclass Thread, be sure to call super in your
913 * +initialize+ method, otherwise a ThreadError will be raised.
914 */
915static VALUE
916thread_s_new(int argc, VALUE *argv, VALUE klass)
917{
918 rb_thread_t *th;
919 VALUE thread = rb_thread_alloc(klass);
920
921 if (GET_RACTOR()->threads.main->status == THREAD_KILLED) {
922 rb_raise(rb_eThreadError, "can't alloc thread");
923 }
924
925 rb_obj_call_init_kw(thread, argc, argv, RB_PASS_CALLED_KEYWORDS);
926 th = rb_thread_ptr(thread);
927 if (!threadptr_initialized(th)) {
928 rb_raise(rb_eThreadError, "uninitialized thread - check '%"PRIsVALUE"#initialize'",
929 klass);
930 }
931 return thread;
932}
933
934/*
935 * call-seq:
936 * Thread.start([args]*) {|args| block } -> thread
937 * Thread.fork([args]*) {|args| block } -> thread
938 *
939 * Basically the same as ::new. However, if class Thread is subclassed, then
940 * calling +start+ in that subclass will not invoke the subclass's
941 * +initialize+ method.
942 */
943
static VALUE
thread_start(VALUE klass, VALUE args)
{
    /* Unlike Thread.new, this path never invokes the subclass's
     * #initialize — it goes straight to thread_create_core. */
    struct thread_create_params params = {
        .type = thread_invoke_type_proc,
        .args = args,
        .proc = rb_block_proc(),
    };
    return thread_create_core(rb_thread_alloc(klass), &params);
}
954
955static VALUE
956threadptr_invoke_proc_location(rb_thread_t *th)
957{
958 if (th->invoke_type == thread_invoke_type_proc) {
959 return rb_proc_location(th->invoke_arg.proc.proc);
960 }
961 else {
962 return Qnil;
963 }
964}
965
966/* :nodoc: */
967static VALUE
968thread_initialize(VALUE thread, VALUE args)
969{
970 rb_thread_t *th = rb_thread_ptr(thread);
971
972 if (!rb_block_given_p()) {
973 rb_raise(rb_eThreadError, "must be called with a block");
974 }
975 else if (th->invoke_type != thread_invoke_type_none) {
976 VALUE loc = threadptr_invoke_proc_location(th);
977 if (!NIL_P(loc)) {
978 rb_raise(rb_eThreadError,
979 "already initialized thread - %"PRIsVALUE":%"PRIsVALUE,
980 RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
981 }
982 else {
983 rb_raise(rb_eThreadError, "already initialized thread");
984 }
985 }
986 else {
987 struct thread_create_params params = {
988 .type = thread_invoke_type_proc,
989 .args = args,
990 .proc = rb_block_proc(),
991 };
992 return thread_create_core(thread, &params);
993 }
994}
995
/* C API: spawn a Thread that runs fn(arg); the pointer pair is carried
 * through invoke_arg.func. Returns the new Thread object. */
VALUE
rb_thread_create(VALUE (*fn)(void *), void *arg)
{
    struct thread_create_params params = {
        .type = thread_invoke_type_func,
        .fn = fn,
        .args = (VALUE)arg,
    };
    return thread_create_core(rb_thread_alloc(rb_cThread), &params);
}
1006
/* Create the main Thread of a new Ractor r, running `proc` with `args`
 * (see the thread_invoke_type_ractor_proc branch of thread_create_core). */
VALUE
rb_thread_create_ractor(rb_ractor_t *r, VALUE args, VALUE proc)
{
    struct thread_create_params params = {
        .type = thread_invoke_type_ractor_proc,
        .g = r,
        .args = args,
        .proc = proc,
    };
    return thread_create_core(rb_thread_alloc(rb_cThread), &params);
}
1018
/* State shared between thread_join_sleep() and its ensure handler
 * remove_from_join_list(). */
struct join_arg {
    struct rb_waiting_list *waiter; // our entry on target->join_list
    rb_thread_t *target;            // thread being joined
    VALUE timeout;                  // timeout as passed in by the caller (Ruby value)
    rb_hrtime_t *limit;             // remaining wait budget; NULL means wait forever
};
1026
1027static VALUE
1028remove_from_join_list(VALUE arg)
1029{
1030 struct join_arg *p = (struct join_arg *)arg;
1031 rb_thread_t *target_thread = p->target;
1032
1033 if (target_thread->status != THREAD_KILLED) {
1034 struct rb_waiting_list **join_list = &target_thread->join_list;
1035
1036 while (*join_list) {
1037 if (*join_list == p->waiter) {
1038 *join_list = (*join_list)->next;
1039 break;
1040 }
1041
1042 join_list = &(*join_list)->next;
1043 }
1044 }
1045
1046 return Qnil;
1047}
1048
1049static int
1050thread_finished(rb_thread_t *th)
1051{
1052 return th->status == THREAD_KILLED || !UNDEF_P(th->value);
1053}
1054
/* Block the calling thread until target finishes or the limit expires.
 * Returns Qtrue on join, Qfalse on timeout. Uses the fiber scheduler's
 * block hook when one is installed, otherwise native sleeping; interrupts
 * are checked after every wakeup. */
static VALUE
thread_join_sleep(VALUE arg)
{
    struct join_arg *p = (struct join_arg *)arg;
    rb_thread_t *target_th = p->target, *th = p->waiter->thread;
    rb_hrtime_t end = 0, *limit = p->limit;

    if (limit) {
        // Absolute deadline for the whole join.
        end = rb_hrtime_add(*limit, rb_hrtime_now());
    }

    while (!thread_finished(target_th)) {
        VALUE scheduler = rb_fiber_scheduler_current();

        if (!limit) {
            if (scheduler != Qnil) {
                rb_fiber_scheduler_block(scheduler, target_th->self, Qnil);
            }
            else {
                sleep_forever(th, SLEEP_DEADLOCKABLE | SLEEP_ALLOW_SPURIOUS | SLEEP_NO_CHECKINTS);
            }
        }
        else {
            // Shrink *limit toward the deadline; returns true when expired.
            if (hrtime_update_expire(limit, end)) {
                RUBY_DEBUG_LOG("timeout target_th:%u", rb_th_serial(target_th));
                return Qfalse;
            }

            if (scheduler != Qnil) {
                VALUE timeout = rb_float_new(hrtime2double(*limit));
                rb_fiber_scheduler_block(scheduler, target_th->self, timeout);
            }
            else {
                th->status = THREAD_STOPPED;
                native_sleep(th, limit);
            }
        }
        RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
        th->status = THREAD_RUNNABLE;

        RUBY_DEBUG_LOG("interrupted target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
    }

    return Qtrue;
}
1100
/* Implementation of Thread#join: wait (optionally bounded by limit) for
 * target_th to finish. Returns target_th->self on success, Qnil on
 * timeout. Re-raises the target's exception if it terminated with one. */
static VALUE
thread_join(rb_thread_t *target_th, VALUE timeout, rb_hrtime_t *limit)
{
    rb_execution_context_t *ec = GET_EC();
    rb_thread_t *th = ec->thread_ptr;
    rb_fiber_t *fiber = ec->fiber_ptr;

    if (th == target_th) {
        rb_raise(rb_eThreadError, "Target thread must not be current thread");
    }

    if (th->ractor->threads.main == target_th) {
        rb_raise(rb_eThreadError, "Target thread must not be main thread");
    }

    RUBY_DEBUG_LOG("target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));

    if (target_th->status != THREAD_KILLED) {
        // Register ourselves on the target's join list; the ensure handler
        // removes the entry again even if we are interrupted.
        struct rb_waiting_list waiter;
        waiter.next = target_th->join_list;
        waiter.thread = th;
        waiter.fiber = rb_fiberptr_blocking(fiber) ? NULL : fiber;
        target_th->join_list = &waiter;

        struct join_arg arg;
        arg.waiter = &waiter;
        arg.target = target_th;
        arg.timeout = timeout;
        arg.limit = limit;

        if (!rb_ensure(thread_join_sleep, (VALUE)&arg, remove_from_join_list, (VALUE)&arg)) {
            return Qnil;  // timed out
        }
    }

    RUBY_DEBUG_LOG("success target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));

    if (target_th->ec->errinfo != Qnil) {
        VALUE err = target_th->ec->errinfo;

        if (FIXNUM_P(err)) {
            switch (err) {
              case INT2FIX(TAG_FATAL):
                RUBY_DEBUG_LOG("terminated target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));

                /* OK. killed. */
                break;
              default:
                if (err == RUBY_FATAL_FIBER_KILLED) { // not integer constant so can't be a case expression
                    // root fiber killed in non-main thread
                    break;
                }
                rb_bug("thread_join: Fixnum (%d) should not reach here.", FIX2INT(err));
            }
        }
        else if (THROW_DATA_P(target_th->ec->errinfo)) {
            rb_bug("thread_join: THROW_DATA should not reach here.");
        }
        else {
            /* normal exception */
            rb_exc_raise(err);
        }
    }
    return target_th->self;
}
1166
1167/*
1168 * call-seq:
1169 * thr.join -> thr
1170 * thr.join(limit) -> thr
1171 *
1172 * The calling thread will suspend execution and run this +thr+.
1173 *
1174 * Does not return until +thr+ exits or until the given +limit+ seconds have
1175 * passed.
1176 *
1177 * If the time limit expires, +nil+ will be returned, otherwise +thr+ is
1178 * returned.
1179 *
1180 * Any threads not joined will be killed when the main program exits.
1181 *
1182 * If +thr+ had previously raised an exception and the ::abort_on_exception or
1183 * $DEBUG flags are not set, (so the exception has not yet been processed), it
1184 * will be processed at this time.
1185 *
1186 * a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
1187 * x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
1188 * x.join # Let thread x finish, thread a will be killed on exit.
1189 * #=> "axyz"
1190 *
1191 * The following example illustrates the +limit+ parameter.
1192 *
1193 * y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
1194 * puts "Waiting" until y.join(0.15)
1195 *
1196 * This will produce:
1197 *
1198 * tick...
1199 * Waiting
1200 * tick...
1201 * Waiting
1202 * tick...
1203 * tick...
1204 */
1205
/*
 * Thread#join implementation: parse the optional timeout argument and
 * delegate to thread_join().  A nil timeout waits forever; a Fixnum is
 * converted via time_t seconds; anything else (Float, Bignum, ...) goes
 * through rb_num2dbl so INFINITY and negative values are accepted.
 */
static VALUE
thread_join_m(int argc, VALUE *argv, VALUE self)
{
    VALUE timeout = Qnil;
    rb_hrtime_t rel = 0, *limit = 0;

    if (rb_check_arity(argc, 0, 1)) {
        timeout = argv[0];
    }

    // Convert the timeout eagerly, so it's always converted and deterministic
    /*
     * This supports INFINITY and negative values, so we can't use
     * rb_time_interval right now...
     */
    if (NIL_P(timeout)) {
        /* unlimited */
    }
    else if (FIXNUM_P(timeout)) {
        rel = rb_sec2hrtime(NUM2TIMET(timeout));
        limit = &rel;
    }
    else {
        /* double2hrtime returns NULL for INFINITY, i.e. unlimited wait */
        limit = double2hrtime(&rel, rb_num2dbl(timeout));
    }

    return thread_join(rb_thread_ptr(self), timeout, limit);
}
1234
1235/*
1236 * call-seq:
1237 * thr.value -> obj
1238 *
1239 * Waits for +thr+ to complete, using #join, and returns its value or raises
1240 * the exception which terminated the thread.
1241 *
1242 * a = Thread.new { 2 + 2 }
1243 * a.value #=> 4
1244 *
1245 * b = Thread.new { raise 'something went wrong' }
1246 * b.value #=> RuntimeError: something went wrong
1247 */
1248
1249static VALUE
1250thread_value(VALUE self)
1251{
1252 rb_thread_t *th = rb_thread_ptr(self);
1253 thread_join(th, Qnil, 0);
1254 if (UNDEF_P(th->value)) {
1255 // If the thread is dead because we forked th->value is still Qundef.
1256 return Qnil;
1257 }
1258 return th->value;
1259}
1260
1261/*
1262 * Thread Scheduling
1263 */
1264
/*
 * Fill *ts with the current time, preferring the monotonic clock (immune
 * to wall-clock adjustments) and falling back to wall-clock time when
 * CLOCK_MONOTONIC is unavailable or the call fails.
 */
static void
getclockofday(struct timespec *ts)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    if (clock_gettime(CLOCK_MONOTONIC, ts) == 0)
        return;
#endif
    rb_timespec_now(ts);
}
1274
1275/*
1276 * Don't inline this, since library call is already time consuming
1277 * and we don't want "struct timespec" on stack too long for GC
1278 */
/* Current (monotonic when available) time as an rb_hrtime_t. */
NOINLINE(rb_hrtime_t rb_hrtime_now(void));
rb_hrtime_t
rb_hrtime_now(void)
{
    struct timespec ts;

    getclockofday(&ts);
    return rb_timespec2hrtime(&ts);
}
1288
1289/*
1290 * at least gcc 7.2 and 7.3 complains about "rb_hrtime_t end"
1291 * being uninitialized, maybe other versions, too.
1292 */
1293COMPILER_WARNING_PUSH
1294#if defined(__GNUC__) && __GNUC__ == 7 && __GNUC_MINOR__ <= 3
1295COMPILER_WARNING_IGNORED(-Wmaybe-uninitialized)
1296#endif
1297#ifndef PRIu64
1298#define PRIu64 PRI_64_PREFIX "u"
1299#endif
1300/*
1301 * @end is the absolute time when @ts is set to expire
1302 * Returns true if @end has past
1303 * Updates @ts and returns false otherwise
1304 */
static int
hrtime_update_expire(rb_hrtime_t *timeout, const rb_hrtime_t end)
{
    rb_hrtime_t now = rb_hrtime_now();

    /* Deadline already passed: report expiry, leave *timeout untouched. */
    if (now > end) return 1;

    RUBY_DEBUG_LOG("%"PRIu64" > %"PRIu64"", (uint64_t)end, (uint64_t)now);

    /* Otherwise shrink *timeout to the remaining time before `end`. */
    *timeout = end - now;
    return 0;
}
1317COMPILER_WARNING_POP
1318
1319static int
1320sleep_hrtime(rb_thread_t *th, rb_hrtime_t rel, unsigned int fl)
1321{
1322 enum rb_thread_status prev_status = th->status;
1323 int woke;
1324 rb_hrtime_t end = rb_hrtime_add(rb_hrtime_now(), rel);
1325
1326 th->status = THREAD_STOPPED;
1327 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1328 while (th->status == THREAD_STOPPED) {
1329 native_sleep(th, &rel);
1330 woke = vm_check_ints_blocking(th->ec);
1331 if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
1332 break;
1333 if (hrtime_update_expire(&rel, end))
1334 break;
1335 woke = 1;
1336 }
1337 th->status = prev_status;
1338 return woke;
1339}
1340
1341static int
1342sleep_hrtime_until(rb_thread_t *th, rb_hrtime_t end, unsigned int fl)
1343{
1344 enum rb_thread_status prev_status = th->status;
1345 int woke;
1346 rb_hrtime_t rel = rb_hrtime_sub(end, rb_hrtime_now());
1347
1348 th->status = THREAD_STOPPED;
1349 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1350 while (th->status == THREAD_STOPPED) {
1351 native_sleep(th, &rel);
1352 woke = vm_check_ints_blocking(th->ec);
1353 if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
1354 break;
1355 if (hrtime_update_expire(&rel, end))
1356 break;
1357 woke = 1;
1358 }
1359 th->status = prev_status;
1360 return woke;
1361}
1362
/*
 * Sleep thread `th` indefinitely until woken by an interrupt.  Flags:
 * SLEEP_DEADLOCKABLE marks the thread as STOPPED_FOREVER and participates
 * in deadlock detection; SLEEP_NO_CHECKINTS skips the initial interrupt
 * check; SLEEP_ALLOW_SPURIOUS returns after the first wakeup even without
 * an interrupt; SLEEP_SPURIOUS_CHECK keeps sleeping after wakeups that
 * delivered no interrupt.
 */
static void
sleep_forever(rb_thread_t *th, unsigned int fl)
{
    enum rb_thread_status prev_status = th->status;
    enum rb_thread_status status;
    int woke;

    status = fl & SLEEP_DEADLOCKABLE ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
    th->status = status;

    if (!(fl & SLEEP_NO_CHECKINTS)) RUBY_VM_CHECK_INTS_BLOCKING(th->ec);

    while (th->status == status) {
        if (fl & SLEEP_DEADLOCKABLE) {
            /* Count ourselves as a sleeper so an all-asleep ractor is
             * detected as deadlocked before we block. */
            rb_ractor_sleeper_threads_inc(th->ractor);
            rb_check_deadlock(th->ractor);
        }
        {
            native_sleep(th, 0);
        }
        if (fl & SLEEP_DEADLOCKABLE) {
            rb_ractor_sleeper_threads_dec(th->ractor);
        }
        if (fl & SLEEP_ALLOW_SPURIOUS) {
            break;
        }

        woke = vm_check_ints_blocking(th->ec);

        if (woke && !(fl & SLEEP_SPURIOUS_CHECK)) {
            break;
        }
    }
    th->status = prev_status;
}
1398
1399void
1401{
1402 RUBY_DEBUG_LOG("forever");
1403 sleep_forever(GET_THREAD(), SLEEP_SPURIOUS_CHECK);
1404}
1405
1406void
1408{
1409 RUBY_DEBUG_LOG("deadly");
1410 sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE|SLEEP_SPURIOUS_CHECK);
1411}
1412
1413static void
1414rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end)
1415{
1416 VALUE scheduler = rb_fiber_scheduler_current();
1417 if (scheduler != Qnil) {
1418 rb_fiber_scheduler_block(scheduler, blocker, timeout);
1419 }
1420 else {
1421 RUBY_DEBUG_LOG("...");
1422 if (end) {
1423 sleep_hrtime_until(GET_THREAD(), end, SLEEP_SPURIOUS_CHECK);
1424 }
1425 else {
1426 sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE);
1427 }
1428 }
1429}
1430
1431void
1432rb_thread_wait_for(struct timeval time)
1433{
1434 rb_thread_t *th = GET_THREAD();
1435
1436 sleep_hrtime(th, rb_timeval2hrtime(&time), SLEEP_SPURIOUS_CHECK);
1437}
1438
/* Run pending interrupt checks for `ec` (may switch threads or raise). */
void
rb_ec_check_ints(rb_execution_context_t *ec)
{
    RUBY_VM_CHECK_INTS_BLOCKING(ec);
}
1444
1445/*
1446 * CAUTION: This function causes thread switching.
1447 * rb_thread_check_ints() check ruby's interrupts.
1448 * some interrupt needs thread switching/invoke handlers,
1449 * and so on.
1450 */
1451
1452void
1454{
1455 rb_ec_check_ints(GET_EC());
1456}
1457
1458/*
1459 * Hidden API for tcl/tk wrapper.
1460 * There is no guarantee to perpetuate it.
1461 */
int
rb_thread_check_trap_pending(void)
{
    /* Nonzero when buffered, not-yet-delivered signals exist. */
    int buffered_signals = rb_signal_buff_size();

    return buffered_signals != 0;
}
1467
1468/* This function can be called in blocking region. */
1471{
1472 return (int)RUBY_VM_INTERRUPTED(rb_thread_ptr(thval)->ec);
1473}
1474
1475void
1476rb_thread_sleep(int sec)
1477{
1479}
1480
/*
 * Yield the processor to another Ruby thread if this thread has been
 * running for at least `limits_us` microseconds (0 forces a yield).
 * No-op when this thread is the only one alive.
 */
static void
rb_thread_schedule_limits(uint32_t limits_us)
{
    if (!rb_thread_alone()) {
        rb_thread_t *th = GET_THREAD();
        RUBY_DEBUG_LOG("us:%u", (unsigned int)limits_us);

        if (th->running_time_us >= limits_us) {
            RUBY_DEBUG_LOG("switch %s", "start");

            /* Save machine registers/stack bounds for GC before giving up
             * the processor. */
            RB_VM_SAVE_MACHINE_CONTEXT(th);
            thread_sched_yield(TH_SCHED(th), th);
            rb_ractor_thread_switch(th->ractor, th, true);

            RUBY_DEBUG_LOG("switch %s", "done");
        }
    }
}
1499
1500void
1502{
1503 rb_thread_schedule_limits(0);
1504 RUBY_VM_CHECK_INTS(GET_EC());
1505}
1506
1507/* blocking region */
1508
/*
 * Enter a blocking region: install the unblock function `ubf`/`arg`,
 * remember the previous thread status in `region`, and mark the thread
 * STOPPED.  Returns TRUE on success; FALSE when `fail_if_interrupted` is
 * set and an interrupt is already pending (region not entered).
 */
static inline int
blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
                      rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
{
#ifdef RUBY_ASSERT_CRITICAL_SECTION
    VM_ASSERT(ruby_assert_critical_section_entered == 0);
#endif
    VM_ASSERT(th == GET_THREAD());

    region->prev_status = th->status;
    if (unblock_function_set(th, ubf, arg, fail_if_interrupted)) {
        th->blocking_region_buffer = region;
        th->status = THREAD_STOPPED;
        rb_ractor_blocking_threads_inc(th->ractor, __FILE__, __LINE__);

        RUBY_DEBUG_LOG("thread_id:%p", (void *)th->nt->thread_id);
        return TRUE;
    }
    else {
        return FALSE;
    }
}
1531
/*
 * Leave a blocking region entered by blocking_region_begin(): clear the
 * unblock function, re-acquire the scheduler slot, and restore the thread
 * status saved in `region`.
 */
static inline void
blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
{
    /* entry to ubf_list still permitted at this point, make it impossible: */
    unblock_function_clear(th);
    /* entry to ubf_list impossible at this point, so unregister is safe: */
    unregister_ubf_list(th);

    thread_sched_to_running(TH_SCHED(th), th);
    rb_ractor_thread_switch(th->ractor, th, false);

    th->blocking_region_buffer = 0;
    rb_ractor_blocking_threads_dec(th->ractor, __FILE__, __LINE__);
    /* Only restore if nothing else changed our status while blocked. */
    if (th->status == THREAD_STOPPED) {
        th->status = region->prev_status;
    }

    RUBY_DEBUG_LOG("end");

#ifndef _WIN32
    // GET_THREAD() clears WSAGetLastError()
    VM_ASSERT(th == GET_THREAD());
#endif
}
1556
1557/*
1558 * Resolve sentinel unblock function values to their actual function pointers
1559 * and appropriate data2 values. This centralizes the logic for handling
1560 * RUBY_UBF_IO and RUBY_UBF_PROCESS sentinel values.
1561 *
1562 * @param unblock_function Pointer to unblock function pointer (modified in place)
1563 * @param data2 Pointer to data2 pointer (modified in place)
1564 * @param thread Thread context for resolving data2 when needed
1565 * @return true if sentinel values were resolved, false otherwise
1566 */
1567bool
1568rb_thread_resolve_unblock_function(rb_unblock_function_t **unblock_function, void **data2, struct rb_thread_struct *thread)
1569{
1570 rb_unblock_function_t *ubf = *unblock_function;
1571
1572 if ((ubf == RUBY_UBF_IO) || (ubf == RUBY_UBF_PROCESS)) {
1573 *unblock_function = ubf_select;
1574 *data2 = thread;
1575 return true;
1576 }
1577 return false;
1578}
1579
1580void *
1581rb_nogvl(void *(*func)(void *), void *data1,
1582 rb_unblock_function_t *ubf, void *data2,
1583 int flags)
1584{
1585 if (flags & RB_NOGVL_OFFLOAD_SAFE) {
1586 VALUE scheduler = rb_fiber_scheduler_current();
1587 if (scheduler != Qnil) {
1589
1590 VALUE result = rb_fiber_scheduler_blocking_operation_wait(scheduler, func, data1, ubf, data2, flags, &state);
1591
1592 if (!UNDEF_P(result)) {
1593 rb_errno_set(state.saved_errno);
1594 return state.result;
1595 }
1596 }
1597 }
1598
1599 void *val = 0;
1600 rb_execution_context_t *ec = GET_EC();
1601 rb_thread_t *th = rb_ec_thread_ptr(ec);
1602 rb_vm_t *vm = rb_ec_vm_ptr(ec);
1603 bool is_main_thread = vm->ractor.main_thread == th;
1604 int saved_errno = 0;
1605
1606 rb_thread_resolve_unblock_function(&ubf, &data2, th);
1607
1608 if (ubf && rb_ractor_living_thread_num(th->ractor) == 1 && is_main_thread) {
1609 if (flags & RB_NOGVL_UBF_ASYNC_SAFE) {
1610 vm->ubf_async_safe = 1;
1611 }
1612 }
1613
1614 rb_vm_t *volatile saved_vm = vm;
1615 BLOCKING_REGION(th, {
1616 val = func(data1);
1617 saved_errno = rb_errno();
1618 }, ubf, data2, flags & RB_NOGVL_INTR_FAIL);
1619 vm = saved_vm;
1620
1621 if (is_main_thread) vm->ubf_async_safe = 0;
1622
1623 if ((flags & RB_NOGVL_INTR_FAIL) == 0) {
1624 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1625 }
1626
1627 rb_errno_set(saved_errno);
1628
1629 return val;
1630}
1631
1632/*
1633 * rb_thread_call_without_gvl - permit concurrent/parallel execution.
1634 * rb_thread_call_without_gvl2 - permit concurrent/parallel execution
1635 * without interrupt process.
1636 *
1637 * rb_thread_call_without_gvl() does:
1638 * (1) Check interrupts.
1639 * (2) release GVL.
1640 * Other Ruby threads may run in parallel.
1641 * (3) call func with data1
1642 * (4) acquire GVL.
1643 * Other Ruby threads can not run in parallel any more.
1644 * (5) Check interrupts.
1645 *
1646 * rb_thread_call_without_gvl2() does:
1647 * (1) Check interrupt and return if interrupted.
1648 * (2) release GVL.
1649 * (3) call func with data1 and a pointer to the flags.
1650 * (4) acquire GVL.
1651 *
1652 * If another thread interrupts this thread (Thread#kill, signal delivery,
1653 * VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
1654 * "un-blocking function"). `ubf()' should interrupt `func()' execution by
1655 * toggling a cancellation flag, canceling the invocation of a call inside
1656 * `func()' or similar. Note that `ubf()' may not be called with the GVL.
1657 *
1658 * There are built-in ubfs and you can specify these ubfs:
1659 *
1660 * * RUBY_UBF_IO: ubf for IO operation
1661 * * RUBY_UBF_PROCESS: ubf for process operation
1662 *
1663 * However, we can not guarantee our built-in ubfs interrupt your `func()'
1664 * correctly. Be careful to use rb_thread_call_without_gvl(). If you don't
1665 * provide proper ubf(), your program will not stop for Control+C or other
1666 * shutdown events.
1667 *
1668 * "Check interrupts" on above list means checking asynchronous
1669 * interrupt events (such as Thread#kill, signal delivery, VM-shutdown
1670 * request, and so on) and calling corresponding procedures
1671 * (such as `trap' for signals, raise an exception for Thread#raise).
1672 * If `func()' finished and received interrupts, you may skip interrupt
 * checking. For example, assume the following func(), which reads data from a file.
1674 *
1675 * read_func(...) {
1676 * // (a) before read
1677 * read(buffer); // (b) reading
1678 * // (c) after read
1679 * }
1680 *
1681 * If an interrupt occurs at (a) or (b), then `ubf()' cancels this
1682 * `read_func()' and interrupts are checked. However, if an interrupt occurs
1683 * at (c), after *read* operation is completed, checking interrupts is harmful
1684 * because it causes irrevocable side-effect, the read data will vanish. To
1685 * avoid such problem, the `read_func()' should be used with
1686 * `rb_thread_call_without_gvl2()'.
1687 *
1688 * If `rb_thread_call_without_gvl2()' detects interrupt, it returns
1689 * immediately. This function does not show when the execution was interrupted.
1690 * For example, there are 4 possible timing (a), (b), (c) and before calling
1691 * read_func(). You need to record progress of a read_func() and check
 * the progress after `rb_thread_call_without_gvl2()'. You may need to call
 * `rb_thread_check_ints()' at appropriate points, or your program will not be
 * able to process events such as `trap' in a timely manner.
1695 *
1696 * NOTE: You can not execute most of Ruby C API and touch Ruby
1697 * objects in `func()' and `ubf()', including raising an
1698 * exception, because current thread doesn't acquire GVL
1699 * (it causes synchronization problems). If you need to
1700 * call ruby functions either use rb_thread_call_with_gvl()
1701 * or read source code of C APIs and confirm safety by
1702 * yourself.
1703 *
1704 * NOTE: In short, this API is difficult to use safely. I recommend you
1705 * use other ways if you have. We lack experiences to use this API.
1706 * Please report your problem related on it.
1707 *
1708 * NOTE: Releasing GVL and re-acquiring GVL may be expensive operations
1709 * for a short running `func()'. Be sure to benchmark and use this
1710 * mechanism when `func()' consumes enough time.
1711 *
1712 * Safe C API:
1713 * * rb_thread_interrupted() - check interrupt flag
1714 * * ruby_xmalloc(), ruby_xrealloc(), ruby_xfree() -
1715 * they will work without GVL, and may acquire GVL when GC is needed.
1716 */
/* GVL-released call that returns immediately if an interrupt is pending
 * (see the long comment above for the full contract). */
void *
rb_thread_call_without_gvl2(void *(*func)(void *), void *data1,
                            rb_unblock_function_t *ubf, void *data2)
{
    return rb_nogvl(func, data1, ubf, data2, RB_NOGVL_INTR_FAIL);
}
1723
/* GVL-released call with interrupt checks before and after `func`
 * (see the long comment above for the full contract). */
void *
rb_thread_call_without_gvl(void *(*func)(void *data), void *data1,
                           rb_unblock_function_t *ubf, void *data2)
{
    return rb_nogvl(func, data1, ubf, data2, 0);
}
1730
/*
 * Translate RB_WAITFD_* event bits into the scheduler's waiting-flag bits,
 * which live one bit position higher.
 */
static int
waitfd_to_waiting_flag(int wfd_event)
{
    enum { WAITING_FLAG_SHIFT = 1 };

    return wfd_event << WAITING_FLAG_SHIFT;
}
1736
/*
 * Return the IO's list of in-flight blocking operations, lazily resetting
 * it after a fork (the entries are stack-allocated in other processes'
 * threads and would be dangling here).
 */
static struct ccan_list_head *
rb_io_blocking_operations(struct rb_io *io)
{
    rb_serial_t fork_generation = GET_VM()->fork_gen;

    // On fork, all existing entries in this list (which are stack allocated) become invalid.
    // Therefore, we re-initialize the list which clears it.
    if (io->fork_generation != fork_generation) {
        ccan_list_head_init(&io->blocking_operations);
        io->fork_generation = fork_generation;
    }

    return &io->blocking_operations;
}
1751
1752/*
1753 * Registers a blocking operation for an IO object. This is used to track all threads and fibers
1754 * that are currently blocked on this IO for reading, writing or other operations.
1755 *
1756 * When the IO is closed, all blocking operations will be notified via rb_fiber_scheduler_fiber_interrupt
1757 * for fibers with a scheduler, or via rb_threadptr_interrupt for threads without a scheduler.
1758 *
1759 * @parameter io The IO object on which the operation will block
1760 * @parameter blocking_operation The operation details including the execution context that will be blocked
1761 */
static void
rb_io_blocking_operation_enter(struct rb_io *io, struct rb_io_blocking_operation *blocking_operation)
{
    /* blocking_operation is typically stack-allocated; the matching
     * pop/exit must run before the frame unwinds. */
    ccan_list_add(rb_io_blocking_operations(io), &blocking_operation->list);
}
1767
/* Unlink a blocking operation from its IO's list (no wakeups). */
static void
rb_io_blocking_operation_pop(struct rb_io *io, struct rb_io_blocking_operation *blocking_operation)
{
    ccan_list_del(&blocking_operation->list);
}
1775 struct rb_io *io;
1776 struct rb_io_blocking_operation *blocking_operation;
1777};
1778
/*
 * Body run under the IO's wakeup_mutex: unlink the finished blocking
 * operation and wake whichever thread/fiber is waiting in IO#close
 * (recorded in io->closing_ec) so it can re-check the list.
 */
static VALUE
io_blocking_operation_exit(VALUE _arguments)
{
    struct io_blocking_operation_arguments *arguments = (void*)_arguments;
    struct rb_io_blocking_operation *blocking_operation = arguments->blocking_operation;

    rb_io_blocking_operation_pop(arguments->io, blocking_operation);

    rb_io_t *io = arguments->io;
    rb_thread_t *thread = io->closing_ec->thread_ptr;
    rb_fiber_t *fiber = io->closing_ec->fiber_ptr;

    if (thread->scheduler != Qnil) {
        // This can cause spurious wakeups...
        rb_fiber_scheduler_unblock(thread->scheduler, io->self, rb_fiberptr_self(fiber));
    }
    else {
        rb_thread_wakeup(thread->self);
    }

    return Qnil;
}
1801
1802/*
1803 * Called when a blocking operation completes or is interrupted. Removes the operation from
1804 * the IO's blocking_operations list and wakes up any waiting threads/fibers.
1805 *
1806 * If there's a wakeup_mutex (meaning an IO close is in progress), synchronizes the cleanup
1807 * through that mutex to ensure proper coordination with the closing thread.
1808 *
1809 * @parameter io The IO object the operation was performed on
1810 * @parameter blocking_operation The completed operation to clean up
1811 */
static void
rb_io_blocking_operation_exit(struct rb_io *io, struct rb_io_blocking_operation *blocking_operation)
{
    VALUE wakeup_mutex = io->wakeup_mutex;

    // Indicate that the blocking operation is no longer active:
    blocking_operation->ec = NULL;

    /* A set wakeup_mutex means IO#close is in progress; coordinate the
     * unlink and the closer's wakeup under that mutex. */
    if (RB_TEST(wakeup_mutex)) {
        struct io_blocking_operation_arguments arguments = {
            .io = io,
            .blocking_operation = blocking_operation
        };

        rb_mutex_synchronize(wakeup_mutex, io_blocking_operation_exit, (VALUE)&arguments);
    }
    else {
        // If there's no wakeup_mutex, we can safely remove the operation directly:
        rb_io_blocking_operation_pop(io, blocking_operation);
    }
}
1833
/* rb_ensure() trampoline: always clean up the blocking operation, whether
 * the wrapped function returned normally or raised. */
static VALUE
rb_thread_io_blocking_operation_ensure(VALUE _argument)
{
    struct io_blocking_operation_arguments *arguments = (void*)_argument;

    rb_io_blocking_operation_exit(arguments->io, arguments->blocking_operation);

    return Qnil;
}
1843
1844/*
1845 * Executes a function that performs a blocking IO operation, while properly tracking
1846 * the operation in the IO's blocking_operations list. This ensures proper cleanup
1847 * and interruption handling if the IO is closed while blocked.
1848 *
1849 * The operation is automatically removed from the blocking_operations list when the function
1850 * returns, whether normally or due to an exception.
1851 *
1852 * @parameter self The IO object
1853 * @parameter function The function to execute that will perform the blocking operation
1854 * @parameter argument The argument to pass to the function
1855 * @returns The result of the blocking operation function
1856 */
1857VALUE
1858rb_thread_io_blocking_operation(VALUE self, VALUE(*function)(VALUE), VALUE argument)
1859{
1860 struct rb_io *io;
1861 RB_IO_POINTER(self, io);
1862
1863 rb_execution_context_t *ec = GET_EC();
1864 struct rb_io_blocking_operation blocking_operation = {
1865 .ec = ec,
1866 };
1867 rb_io_blocking_operation_enter(io, &blocking_operation);
1868
1870 .io = io,
1871 .blocking_operation = &blocking_operation
1872 };
1873
1874 return rb_ensure(function, argument, rb_thread_io_blocking_operation_ensure, (VALUE)&io_blocking_operation_arguments);
1875}
1876
/*
 * Whether this IO wait can be multiplexed on an M:N scheduler thread:
 * requires MN threads to be compiled in and active (no dedicated native
 * thread), a blocking thread, and either events or a timeout to wait on.
 */
static bool
thread_io_mn_schedulable(rb_thread_t *th, int events, const struct timeval *timeout)
{
#if defined(USE_MN_THREADS) && USE_MN_THREADS
    return !th_has_dedicated_nt(th) && (events || timeout) && th->blocking;
#else
    return false;
#endif
}
1886
// true if need retry
static bool
thread_io_wait_events(rb_thread_t *th, int fd, int events, const struct timeval *timeout)
{
#if defined(USE_MN_THREADS) && USE_MN_THREADS
    if (thread_io_mn_schedulable(th, events, timeout)) {
        rb_hrtime_t rel, *prel;

        if (timeout) {
            rel = rb_timeval2hrtime(timeout);
            prel = &rel;
        }
        else {
            /* NULL means wait indefinitely for readiness. */
            prel = NULL;
        }

        /* An unbounded wait must be waiting for at least one IO event. */
        VM_ASSERT(prel || (events & (RB_WAITFD_IN | RB_WAITFD_OUT)));

        if (thread_sched_wait_events(TH_SCHED(th), th, fd, waitfd_to_waiting_flag(events), prel)) {
            // timeout
            return false;
        }
        else {
            return true;
        }
    }
#endif // defined(USE_MN_THREADS) && USE_MN_THREADS
    return false;
}
1916
1917// assume read/write
/*
 * Whether a failed read/write-style call (result `r`, errno `eno`) should
 * be retried after waiting for fd readiness: only r == -1 with
 * EAGAIN/EWOULDBLOCK qualifies.
 */
static bool
blocking_call_retryable_p(int r, int eno)
{
    if (r != -1) return false;

#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
    return eno == EAGAIN || eno == EWOULDBLOCK;
#else
    return eno == EAGAIN;
#endif
}
1933
/* Public accessor: whether the thread currently allows MN-scheduled IO. */
bool
rb_thread_mn_schedulable(VALUE thval)
{
    rb_thread_t *th = rb_thread_ptr(thval);
    return th->mn_schedulable;
}
1940
/*
 * Run the blocking IO function `func(data1)` on `io` with the GVL released,
 * registering the call so IO#close can interrupt it.  When `events` is
 * nonzero and the call fails with EAGAIN/EWOULDBLOCK, waits for fd
 * readiness (MN scheduler) and retries.  ETIMEDOUT from `func` is turned
 * into IO::TimeoutError; otherwise errno is restored for the caller.
 */
VALUE
rb_thread_io_blocking_call(struct rb_io* io, rb_blocking_function_t *func, void *data1, int events)
{
    rb_execution_context_t * volatile ec = GET_EC();
    rb_thread_t * volatile th = rb_ec_thread_ptr(ec);

    RUBY_DEBUG_LOG("th:%u fd:%d ev:%d", rb_th_serial(th), io->fd, events);

    volatile VALUE val = Qundef; /* shouldn't be used */
    volatile int saved_errno = 0;
    enum ruby_tag_type state;
    volatile bool prev_mn_schedulable = th->mn_schedulable;
    th->mn_schedulable = thread_io_mn_schedulable(th, events, NULL);

    int fd = io->fd;

    // `errno` is only valid when there is an actual error - but we can't
    // extract that from the return value of `func` alone, so we clear any
    // prior `errno` value here so that we can later check if it was set by
    // `func` or not (as opposed to some previously set value).
    errno = 0;

    struct rb_io_blocking_operation blocking_operation = {
        .ec = ec,
    };
    rb_io_blocking_operation_enter(io, &blocking_operation);

    {
        EC_PUSH_TAG(ec);
        if ((state = EC_EXEC_TAG()) == TAG_NONE) {
            volatile enum ruby_tag_type saved_state = state; /* for BLOCKING_REGION */
          retry:
            BLOCKING_REGION(th, {
                val = func(data1);
                saved_errno = errno;
            }, ubf_select, th, FALSE);

            RUBY_ASSERT(th == rb_ec_thread_ptr(ec));
            /* Would-block failure: wait for readiness and try again. */
            if (events &&
                blocking_call_retryable_p((int)val, saved_errno) &&
                thread_io_wait_events(th, fd, events, NULL)) {
                RUBY_VM_CHECK_INTS_BLOCKING(ec);
                goto retry;
            }

            RUBY_VM_CHECK_INTS_BLOCKING(ec);

            state = saved_state;
        }
        EC_POP_TAG();

        /* Re-read th: the check-ints above may have resumed on a
         * different native thread association. */
        th = rb_ec_thread_ptr(ec);
        th->mn_schedulable = prev_mn_schedulable;
    }

    /* Deregister before any non-local exit so no dangling stack entry
     * remains on the IO's list. */
    rb_io_blocking_operation_exit(io, &blocking_operation);

    if (state) {
        EC_JUMP_TAG(ec, state);
    }

    // If the error was a timeout, we raise a specific exception for that:
    if (saved_errno == ETIMEDOUT) {
        rb_raise(rb_eIOTimeoutError, "Blocking operation timed out!");
    }

    errno = saved_errno;

    return val;
}
2011
/* Compatibility wrapper: blocking IO call with no retryable events. */
VALUE
rb_thread_io_blocking_region(struct rb_io *io, rb_blocking_function_t *func, void *data1)
{
    return rb_thread_io_blocking_call(io, func, data1, 0);
}
2017
2018/*
2019 * rb_thread_call_with_gvl - re-enter the Ruby world after GVL release.
2020 *
2021 * After releasing GVL using
2022 * rb_thread_call_without_gvl() you can not access Ruby values or invoke
2023 * methods. If you need to access Ruby you must use this function
2024 * rb_thread_call_with_gvl().
2025 *
2026 * This function rb_thread_call_with_gvl() does:
2027 * (1) acquire GVL.
2028 * (2) call passed function `func'.
2029 * (3) release GVL.
2030 * (4) return a value which is returned at (2).
2031 *
2032 * NOTE: You should not return Ruby object at (2) because such Object
2033 * will not be marked.
2034 *
2035 * NOTE: If an exception is raised in `func', this function DOES NOT
2036 * protect (catch) the exception. If you have any resources
2037 * which should free before throwing exception, you need use
2038 * rb_protect() in `func' and return a value which represents
2039 * exception was raised.
2040 *
2041 * NOTE: This function should not be called by a thread which was not
2042 * created as Ruby thread (created by Thread.new or so). In other
2043 * words, this function *DOES NOT* associate or convert a NON-Ruby
2044 * thread to a Ruby thread.
2045 */
void *
rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
{
    rb_thread_t *th = ruby_thread_from_native();
    struct rb_blocking_region_buffer *brb;
    struct rb_unblock_callback prev_unblock;
    void *r;

    if (th == 0) {
        /* Error has occurred, but we can't use rb_bug()
         * because this thread is not Ruby's thread.
         * What should we do?
         */
        bp();
        fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
        exit(EXIT_FAILURE);
    }

    /* Must currently be inside a blocking region (GVL released); save the
     * region and its unblock callback so we can restore them afterwards. */
    brb = (struct rb_blocking_region_buffer *)th->blocking_region_buffer;
    prev_unblock = th->unblock;

    if (brb == 0) {
        rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
    }

    /* Re-acquire the GVL for the duration of func, then release it again. */
    blocking_region_end(th, brb);
    /* enter to Ruby world: You can access Ruby values, methods and so on. */
    r = (*func)(data1);
    /* leave from Ruby world: You can not access Ruby values, etc. */
    int released = blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg, FALSE);
    RUBY_ASSERT_ALWAYS(released);
    RB_VM_SAVE_MACHINE_CONTEXT(th);
    thread_sched_to_waiting(TH_SCHED(th), th);
    return r;
}
2081
2082/*
2083 * ruby_thread_has_gvl_p - check if current native thread has GVL.
2084 */
2085
2087ruby_thread_has_gvl_p(void)
2088{
2089 rb_thread_t *th = ruby_thread_from_native();
2090
2091 if (th && th->blocking_region_buffer == 0) {
2092 return 1;
2093 }
2094 else {
2095 return 0;
2096 }
2097}
2098
2099/*
2100 * call-seq:
2101 * Thread.pass -> nil
2102 *
2103 * Give the thread scheduler a hint to pass execution to another thread.
2104 * A running thread may or may not switch, it depends on OS and processor.
2105 */
2106
2107static VALUE
2108thread_s_pass(VALUE klass)
2109{
2111 return Qnil;
2112}
2113
2114/*****************************************************/
2115
2116/*
2117 * rb_threadptr_pending_interrupt_* - manage asynchronous error queue
2118 *
2119 * Async events such as an exception thrown by Thread#raise,
2120 * Thread#kill and thread termination (after main thread termination)
2121 * will be queued to th->pending_interrupt_queue.
2122 * - clear: clear the queue.
2123 * - enque: enqueue err object into queue.
2124 * - deque: dequeue err object from queue.
2125 * - active_p: return 1 if the queue should be checked.
2126 *
2127 * All rb_threadptr_pending_interrupt_* functions are called by
2128 * a GVL acquired thread, of course.
2129 * Note that all "rb_" prefix APIs need GVL to call.
2130 */
2131
/* Discard every queued async interrupt for `th`. */
void
rb_threadptr_pending_interrupt_clear(rb_thread_t *th)
{
    rb_ary_clear(th->pending_interrupt_queue);
}
2137
/* Queue async interrupt `v` for `th` and mark the queue as needing a
 * fresh check by the target thread. */
void
rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
{
    rb_ary_push(th->pending_interrupt_queue, v);
    th->pending_interrupt_queue_checked = 0;
}
2144
2145static void
2146threadptr_check_pending_interrupt_queue(rb_thread_t *th)
2147{
2148 if (!th->pending_interrupt_queue) {
2149 rb_raise(rb_eThreadError, "uninitialized thread");
2150 }
2151}
2152
/* When a queued async interrupt may be delivered, as selected by
 * Thread.handle_interrupt masks (:immediate / :on_blocking / :never). */
enum handle_interrupt_timing {
    INTERRUPT_NONE,
    INTERRUPT_IMMEDIATE,
    INTERRUPT_ON_BLOCKING,
    INTERRUPT_NEVER
};
2159
2160static enum handle_interrupt_timing
2161rb_threadptr_pending_interrupt_from_symbol(rb_thread_t *th, VALUE sym)
2162{
2163 if (sym == sym_immediate) {
2164 return INTERRUPT_IMMEDIATE;
2165 }
2166 else if (sym == sym_on_blocking) {
2167 return INTERRUPT_ON_BLOCKING;
2168 }
2169 else if (sym == sym_never) {
2170 return INTERRUPT_NEVER;
2171 }
2172 else {
2173 rb_raise(rb_eThreadError, "unknown mask signature");
2174 }
2175}
2176
/*
 * Decide the delivery timing for an interrupt of class `err` by scanning
 * the thread's handle_interrupt mask stack from the innermost (most
 * recently pushed) mask outwards.  Each mask is either a bare timing
 * symbol or a Hash mapping exception classes to timing symbols; the first
 * match along err's ancestry wins.  Returns INTERRUPT_NONE if no mask
 * matches.
 */
static enum handle_interrupt_timing
rb_threadptr_pending_interrupt_check_mask(rb_thread_t *th, VALUE err)
{
    VALUE mask;
    long mask_stack_len = RARRAY_LEN(th->pending_interrupt_mask_stack);
    const VALUE *mask_stack = RARRAY_CONST_PTR(th->pending_interrupt_mask_stack);
    VALUE mod;
    long i;

    for (i=0; i<mask_stack_len; i++) {
        /* Innermost mask first. */
        mask = mask_stack[mask_stack_len-(i+1)];

        if (SYMBOL_P(mask)) {
            /* do not match RUBY_FATAL_THREAD_KILLED etc */
            if (err != rb_cInteger) {
                return rb_threadptr_pending_interrupt_from_symbol(th, mask);
            }
            else {
                continue;
            }
        }

        /* Hash mask: walk err's superclass chain looking for an entry. */
        for (mod = err; mod; mod = RCLASS_SUPER(mod)) {
            VALUE klass = mod;
            VALUE sym;

            if (BUILTIN_TYPE(mod) == T_ICLASS) {
                /* Included module: compare against the module itself. */
                klass = RBASIC(mod)->klass;
            }
            else if (mod != RCLASS_ORIGIN(mod)) {
                /* Skip prepended-origin placeholder classes. */
                continue;
            }

            if ((sym = rb_hash_aref(mask, klass)) != Qnil) {
                return rb_threadptr_pending_interrupt_from_symbol(th, sym);
            }
        }
        /* try to next mask */
    }
    return INTERRUPT_NONE;
}
2218
/* Return nonzero iff th's pending-interrupt queue holds no entries. */
static int
rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th)
{
    return RARRAY_LEN(th->pending_interrupt_queue) == 0;
}
2224
2225static int
2226rb_threadptr_pending_interrupt_include_p(rb_thread_t *th, VALUE err)
2227{
2228 int i;
2229 for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
2230 VALUE e = RARRAY_AREF(th->pending_interrupt_queue, i);
2231 if (rb_obj_is_kind_of(e, err)) {
2232 return TRUE;
2233 }
2234 }
2235 return FALSE;
2236}
2237
/* Pop the first queued interrupt whose mask permits delivery at +timing+,
 * removing it from the queue. Returns Qundef when nothing is deliverable
 * now (entries masked :never, or :on_blocking outside a blocking point);
 * in that case the queue is marked checked to skip redundant rescans. */
static VALUE
rb_threadptr_pending_interrupt_deque(rb_thread_t *th, enum handle_interrupt_timing timing)
{
#if 1 /* 1 to enable Thread#handle_interrupt, 0 to ignore it */
    int i;

    for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
        VALUE err = RARRAY_AREF(th->pending_interrupt_queue, i);

        enum handle_interrupt_timing mask_timing = rb_threadptr_pending_interrupt_check_mask(th, CLASS_OF(err));

        switch (mask_timing) {
          case INTERRUPT_ON_BLOCKING:
            /* deliver only when the caller is at a blocking point */
            if (timing != INTERRUPT_ON_BLOCKING) {
                break;
            }
            /* fall through */
          case INTERRUPT_NONE: /* default: IMMEDIATE */
          case INTERRUPT_IMMEDIATE:
            rb_ary_delete_at(th->pending_interrupt_queue, i);
            return err;
          case INTERRUPT_NEVER:
            break;
        }
    }

    th->pending_interrupt_queue_checked = 1;
    return Qundef;
#else
    /* mask-free variant: always deliver FIFO */
    VALUE err = rb_ary_shift(th->pending_interrupt_queue);
    if (rb_threadptr_pending_interrupt_empty_p(th)) {
        th->pending_interrupt_queue_checked = 1;
    }
    return err;
#endif
}
2274
2275static int
2276threadptr_pending_interrupt_active_p(rb_thread_t *th)
2277{
2278 /*
2279 * For optimization, we don't check async errinfo queue
2280 * if the queue and the thread interrupt mask were not changed
2281 * since last check.
2282 */
2283 if (th->pending_interrupt_queue_checked) {
2284 return 0;
2285 }
2286
2287 if (rb_threadptr_pending_interrupt_empty_p(th)) {
2288 return 0;
2289 }
2290
2291 return 1;
2292}
2293
/* rb_hash_foreach callback validating Thread.handle_interrupt's hash and
 * building the mask in *args (a VALUE*):
 *   - Qundef/Qnil + {Exception => sym}  -> collapse to the bare symbol
 *   - otherwise accumulate into an identity hash of {class => sym}
 * Raises ArgError on a timing symbol other than
 * :immediate/:on_blocking/:never. Always returns ST_CONTINUE. */
static int
handle_interrupt_arg_check_i(VALUE key, VALUE val, VALUE args)
{
    VALUE *maskp = (VALUE *)args;

    if (val != sym_immediate && val != sym_on_blocking && val != sym_never) {
        rb_raise(rb_eArgError, "unknown mask signature");
    }

    /* first Exception=>sym pair can be represented by the symbol alone */
    if (key == rb_eException && (UNDEF_P(*maskp) || NIL_P(*maskp))) {
        *maskp = val;
        return ST_CONTINUE;
    }

    if (RTEST(*maskp)) {
        if (!RB_TYPE_P(*maskp, T_HASH)) {
            /* upgrade the compact symbol form to a full identity hash */
            VALUE prev = *maskp;
            *maskp = rb_ident_hash_new();
            if (SYMBOL_P(prev)) {
                rb_hash_aset(*maskp, rb_eException, prev);
            }
        }
        rb_hash_aset(*maskp, key, val);
    }
    else {
        *maskp = Qfalse;
    }

    return ST_CONTINUE;
}
2324
2325/*
2326 * call-seq:
2327 * Thread.handle_interrupt(hash) { ... } -> result of the block
2328 *
2329 * Changes asynchronous interrupt timing.
2330 *
2331 * _interrupt_ means asynchronous event and corresponding procedure
2332 * by Thread#raise, Thread#kill, signal trap (not supported yet)
2333 * and main thread termination (if main thread terminates, then all
2334 * other thread will be killed).
2335 *
2336 * The given +hash+ has pairs like <code>ExceptionClass =>
2337 * :TimingSymbol</code>. Where the ExceptionClass is the interrupt handled by
2338 * the given block. The TimingSymbol can be one of the following symbols:
2339 *
2340 * [+:immediate+] Invoke interrupts immediately.
2341 * [+:on_blocking+] Invoke interrupts while _BlockingOperation_.
2342 * [+:never+] Never invoke all interrupts.
2343 *
2344 * _BlockingOperation_ means that the operation will block the calling thread,
2345 * such as read and write. On CRuby implementation, _BlockingOperation_ is any
2346 * operation executed without GVL.
2347 *
2348 * Masked asynchronous interrupts are delayed until they are enabled.
2349 * This method is similar to sigprocmask(3).
2350 *
2351 * === NOTE
2352 *
2353 * Asynchronous interrupts are difficult to use.
2354 *
2355 * If you need to communicate between threads, please consider to use another way such as Queue.
2356 *
2357 * Or use them with deep understanding about this method.
2358 *
2359 * === Usage
2360 *
2361 * In this example, we can guard from Thread#raise exceptions.
2362 *
2363 * Using the +:never+ TimingSymbol the RuntimeError exception will always be
2364 * ignored in the first block of the main thread. In the second
2365 * ::handle_interrupt block we can purposefully handle RuntimeError exceptions.
2366 *
2367 * th = Thread.new do
2368 * Thread.handle_interrupt(RuntimeError => :never) {
2369 * begin
2370 * # You can write resource allocation code safely.
2371 * Thread.handle_interrupt(RuntimeError => :immediate) {
2372 * # ...
2373 * }
2374 * ensure
2375 * # You can write resource deallocation code safely.
2376 * end
2377 * }
2378 * end
2379 * Thread.pass
2380 * # ...
2381 * th.raise "stop"
2382 *
2383 * While we are ignoring the RuntimeError exception, it's safe to write our
2384 * resource allocation code. Then, the ensure block is where we can safely
2385 * deallocate your resources.
2386 *
2387 * ==== Stack control settings
2388 *
2389 * It's possible to stack multiple levels of ::handle_interrupt blocks in order
2390 * to control more than one ExceptionClass and TimingSymbol at a time.
2391 *
2392 * Thread.handle_interrupt(FooError => :never) {
2393 * Thread.handle_interrupt(BarError => :never) {
2394 * # FooError and BarError are prohibited.
2395 * }
2396 * }
2397 *
2398 * ==== Inheritance with ExceptionClass
2399 *
2400 * All exceptions inherited from the ExceptionClass parameter will be considered.
2401 *
2402 * Thread.handle_interrupt(Exception => :never) {
2403 * # all exceptions inherited from Exception are prohibited.
2404 * }
2405 *
2406 * For handling all interrupts, use +Object+ and not +Exception+
2407 * as the ExceptionClass, as kill/terminate interrupts are not handled by +Exception+.
2408 */
static VALUE
rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg)
{
    VALUE mask = Qundef;
    rb_execution_context_t * volatile ec = GET_EC();
    rb_thread_t * volatile th = rb_ec_thread_ptr(ec);
    volatile VALUE r = Qnil;
    enum ruby_tag_type state;

    if (!rb_block_given_p()) {
        rb_raise(rb_eArgError, "block is needed.");
    }

    mask_arg = rb_to_hash_type(mask_arg);

    /* a frozen identity hash can be pushed as-is without copying */
    if (OBJ_FROZEN(mask_arg) && rb_hash_compare_by_id_p(mask_arg)) {
        mask = Qnil;
    }

    /* validate entries and build the normalized mask into `mask` */
    rb_hash_foreach(mask_arg, handle_interrupt_arg_check_i, (VALUE)&mask);

    if (UNDEF_P(mask)) {
        /* empty hash: nothing to mask, just run the block */
        return rb_yield(Qnil);
    }

    if (!RTEST(mask)) {
        mask = mask_arg;
    }
    else if (RB_TYPE_P(mask, T_HASH)) {
        OBJ_FREEZE(mask);
    }

    rb_ary_push(th->pending_interrupt_mask_stack, mask);
    if (!rb_threadptr_pending_interrupt_empty_p(th)) {
        /* mask changed: force a re-check of the queued interrupts */
        th->pending_interrupt_queue_checked = 0;
        RUBY_VM_SET_INTERRUPT(th->ec);
    }

    /* run the block under the new mask, capturing any non-local exit */
    EC_PUSH_TAG(th->ec);
    if ((state = EC_EXEC_TAG()) == TAG_NONE) {
        r = rb_yield(Qnil);
    }
    EC_POP_TAG();

    rb_ary_pop(th->pending_interrupt_mask_stack);
    if (!rb_threadptr_pending_interrupt_empty_p(th)) {
        /* mask restored: deferred interrupts may now be deliverable */
        th->pending_interrupt_queue_checked = 0;
        RUBY_VM_SET_INTERRUPT(th->ec);
    }

    RUBY_VM_CHECK_INTS(th->ec);

    if (state) {
        /* re-raise the non-local exit captured from the block */
        EC_JUMP_TAG(th->ec, state);
    }

    return r;
}
2467
2468/*
2469 * call-seq:
2470 * target_thread.pending_interrupt?(error = nil) -> true/false
2471 *
2472 * Returns whether or not the asynchronous queue is empty for the target thread.
2473 *
2474 * If +error+ is given, then check only for +error+ type deferred events.
2475 *
2476 * See ::pending_interrupt? for more information.
2477 */
2478static VALUE
2479rb_thread_pending_interrupt_p(int argc, VALUE *argv, VALUE target_thread)
2480{
2481 rb_thread_t *target_th = rb_thread_ptr(target_thread);
2482
2483 if (!target_th->pending_interrupt_queue) {
2484 return Qfalse;
2485 }
2486 if (rb_threadptr_pending_interrupt_empty_p(target_th)) {
2487 return Qfalse;
2488 }
2489 if (rb_check_arity(argc, 0, 1)) {
2490 VALUE err = argv[0];
2491 if (!rb_obj_is_kind_of(err, rb_cModule)) {
2492 rb_raise(rb_eTypeError, "class or module required for rescue clause");
2493 }
2494 return RBOOL(rb_threadptr_pending_interrupt_include_p(target_th, err));
2495 }
2496 else {
2497 return Qtrue;
2498 }
2499}
2500
2501/*
2502 * call-seq:
2503 * Thread.pending_interrupt?(error = nil) -> true/false
2504 *
2505 * Returns whether or not the asynchronous queue is empty.
2506 *
2507 * Since Thread::handle_interrupt can be used to defer asynchronous events,
2508 * this method can be used to determine if there are any deferred events.
2509 *
2510 * If you find this method returns true, then you may finish +:never+ blocks.
2511 *
2512 * For example, the following method processes deferred asynchronous events
2513 * immediately.
2514 *
2515 * def Thread.kick_interrupt_immediately
2516 * Thread.handle_interrupt(Object => :immediate) {
2517 * Thread.pass
2518 * }
2519 * end
2520 *
2521 * If +error+ is given, then check only for +error+ type deferred events.
2522 *
2523 * === Usage
2524 *
2525 * th = Thread.new{
2526 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2527 * while true
2528 * ...
2529 * # reach safe point to invoke interrupt
2530 * if Thread.pending_interrupt?
2531 * Thread.handle_interrupt(Object => :immediate){}
2532 * end
2533 * ...
2534 * end
2535 * }
2536 * }
2537 * ...
2538 * th.raise # stop thread
2539 *
2540 * This example can also be written as the following, which you should use to
2541 * avoid asynchronous interrupts.
2542 *
2543 * flag = true
2544 * th = Thread.new{
2545 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2546 * while true
2547 * ...
2548 * # reach safe point to invoke interrupt
2549 * break if flag == false
2550 * ...
2551 * end
2552 * }
2553 * }
2554 * ...
2555 * flag = false # stop thread
2556 */
2557
/* Thread.pending_interrupt? -- delegate to the instance method on the
 * current thread. */
static VALUE
rb_thread_s_pending_interrupt_p(int argc, VALUE *argv, VALUE self)
{
    return rb_thread_pending_interrupt_p(argc, argv, GET_THREAD()->self);
}
2563
NORETURN(static void rb_threadptr_to_kill(rb_thread_t *th));

/* Terminate the calling thread: drop its pending interrupts, flag it for
 * death, and longjmp out with TAG_FATAL. Never returns; must be called on
 * the thread being killed (asserted). */
static void
rb_threadptr_to_kill(rb_thread_t *th)
{
    VM_ASSERT(GET_THREAD() == th);
    rb_threadptr_pending_interrupt_clear(th);
    th->status = THREAD_RUNNABLE;
    th->to_kill = 1;
    th->ec->errinfo = INT2FIX(TAG_FATAL);
    EC_JUMP_TAG(th->ec, TAG_FATAL);
}
2576
/* Atomically consume and return the interrupt bits of th->ec that are NOT
 * covered by ec->interrupt_mask. The CAS loop leaves the masked bits set
 * in ec->interrupt_flag so they can be taken once unmasked. */
static inline rb_atomic_t
threadptr_get_interrupts(rb_thread_t *th)
{
    rb_execution_context_t *ec = th->ec;
    rb_atomic_t interrupt;
    rb_atomic_t old;

    old = ATOMIC_LOAD_RELAXED(ec->interrupt_flag);
    do {
        interrupt = old;
        /* keep only the masked bits in the flag word */
        old = ATOMIC_CAS(ec->interrupt_flag, interrupt, interrupt & ec->interrupt_mask);
    } while (old != interrupt);
    /* report the bits we just cleared (the unmasked ones) */
    return interrupt & (rb_atomic_t)~ec->interrupt_mask;
}
2591
2592static void threadptr_interrupt_exec_exec(rb_thread_t *th);
2593
2594// Execute interrupts on currently running thread
2595// In certain situations, calling this function will raise an exception. Some examples are:
2596// * during VM shutdown (`rb_ractor_terminate_all`)
2597// * Call to Thread#exit for current thread (`rb_thread_kill`)
2598// * Call to Thread#raise for current thread
/* Drain and handle all unmasked interrupt bits for +th+ (must be the
 * current thread): VM barrier, postponed jobs, trap handlers, pending
 * async exceptions, termination requests and timer-driven rescheduling.
 * +blocking_timing+ selects whether :on_blocking-masked interrupts may be
 * delivered. Returns TRUE if any signal or pending interrupt was handled. */
int
rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
{
    rb_atomic_t interrupt;
    int postponed_job_interrupt = 0;
    int ret = FALSE;

    VM_ASSERT(GET_THREAD() == th);

    /* already propagating an exception; do not stack another */
    if (th->ec->raised_flag) return ret;

    while ((interrupt = threadptr_get_interrupts(th)) != 0) {
        int sig;
        int timer_interrupt;
        int pending_interrupt;
        int trap_interrupt;
        int terminate_interrupt;

        timer_interrupt = interrupt & TIMER_INTERRUPT_MASK;
        pending_interrupt = interrupt & PENDING_INTERRUPT_MASK;
        postponed_job_interrupt = interrupt & POSTPONED_JOB_INTERRUPT_MASK;
        trap_interrupt = interrupt & TRAP_INTERRUPT_MASK;
        terminate_interrupt = interrupt & TERMINATE_INTERRUPT_MASK; // request from other ractors

        if (interrupt & VM_BARRIER_INTERRUPT_MASK) {
            /* park at the VM barrier until it is released */
            RB_VM_LOCKING();
        }

        if (postponed_job_interrupt) {
            rb_postponed_job_flush(th->vm);
        }

        if (trap_interrupt) {
            /* signal handling */
            if (th == th->vm->ractor.main_thread) {
                enum rb_thread_status prev_status = th->status;

                th->status = THREAD_RUNNABLE;
                {
                    while ((sig = rb_get_next_signal()) != 0) {
                        ret |= rb_signal_exec(th, sig);
                    }
                }
                th->status = prev_status;
            }

            if (!ccan_list_empty(&th->interrupt_exec_tasks)) {
                enum rb_thread_status prev_status = th->status;

                th->status = THREAD_RUNNABLE;
                {
                    threadptr_interrupt_exec_exec(th);
                }
                th->status = prev_status;
            }
        }

        /* exception from another thread */
        if (pending_interrupt && threadptr_pending_interrupt_active_p(th)) {
            VALUE err = rb_threadptr_pending_interrupt_deque(th, blocking_timing ? INTERRUPT_ON_BLOCKING : INTERRUPT_NONE);
            RUBY_DEBUG_LOG("err:%"PRIdVALUE, err);
            ret = TRUE;

            if (UNDEF_P(err)) {
                /* no error */
            }
            else if (err == RUBY_FATAL_THREAD_KILLED /* Thread#kill received */ ||
                     err == RUBY_FATAL_THREAD_TERMINATED /* Terminate thread */ ||
                     err == INT2FIX(TAG_FATAL) /* Thread.exit etc. */ ) {
                terminate_interrupt = 1;
            }
            else {
                if (err == th->vm->special_exceptions[ruby_error_stream_closed]) {
                    /* the only special exception to be queued across thread */
                    err = ruby_vm_special_exception_copy(err);
                }
                /* set runnable if th was slept. */
                if (th->status == THREAD_STOPPED ||
                    th->status == THREAD_STOPPED_FOREVER)
                    th->status = THREAD_RUNNABLE;
                rb_exc_raise(err);
            }
        }

        if (terminate_interrupt) {
            rb_threadptr_to_kill(th);
        }

        if (timer_interrupt) {
            uint32_t limits_us = thread_default_quantum_ms * 1000;

            /* higher priority gets a longer quantum, lower a shorter one */
            if (th->priority > 0)
                limits_us <<= th->priority;
            else
                limits_us >>= -th->priority;

            if (th->status == THREAD_RUNNABLE)
                th->running_time_us += 10 * 1000; // 10ms = 10_000us // TODO: use macro

            VM_ASSERT(th->ec->cfp);
            EXEC_EVENT_HOOK(th->ec, RUBY_INTERNAL_EVENT_SWITCH, th->ec->cfp->self,
                            0, 0, 0, Qundef);

            rb_thread_schedule_limits(limits_us);
        }
    }
    return ret;
}
2707
/* Public wrapper: handle pending interrupts for +thval+, treating the call
 * site as a blocking point (blocking_timing = 1). */
void
rb_thread_execute_interrupts(VALUE thval)
{
    rb_threadptr_execute_interrupts(rb_thread_ptr(thval), 1);
}
2713
/* Nudge +th+ so it notices new work; currently just posts an interrupt. */
static void
rb_threadptr_ready(rb_thread_t *th)
{
    rb_threadptr_interrupt(th);
}
2719
/* Build an exception from (argc, argv) as Kernel#raise would and queue it
 * on target_th as a pending interrupt, then wake the thread. Silently
 * returns Qnil when the target is (or becomes) dead. */
static VALUE
rb_threadptr_raise(rb_thread_t *target_th, int argc, VALUE *argv)
{
    if (rb_threadptr_dead(target_th)) {
        return Qnil;
    }

    VALUE exception = rb_exception_setup(argc, argv);

    /* making an exception object can switch thread,
       so we need to check thread deadness again */
    if (rb_threadptr_dead(target_th)) {
        return Qnil;
    }

    rb_threadptr_pending_interrupt_enque(target_th, exception);
    rb_threadptr_interrupt(target_th);

    return Qnil;
}
2740
2741void
2742rb_threadptr_signal_raise(rb_thread_t *th, int sig)
2743{
2744 VALUE argv[2];
2745
2746 argv[0] = rb_eSignal;
2747 argv[1] = INT2FIX(sig);
2748 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2749}
2750
2751void
2752rb_threadptr_signal_exit(rb_thread_t *th)
2753{
2754 VALUE argv[2];
2755
2756 argv[0] = rb_eSystemExit;
2757 argv[1] = rb_str_new2("exit");
2758
2759 // TODO: check signal raise deliverly
2760 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2761}
2762
2763int
2764rb_ec_set_raised(rb_execution_context_t *ec)
2765{
2766 if (ec->raised_flag & RAISED_EXCEPTION) {
2767 return 1;
2768 }
2769 ec->raised_flag |= RAISED_EXCEPTION;
2770 return 0;
2771}
2772
2773int
2774rb_ec_reset_raised(rb_execution_context_t *ec)
2775{
2776 if (!(ec->raised_flag & RAISED_EXCEPTION)) {
2777 return 0;
2778 }
2779 ec->raised_flag &= ~RAISED_EXCEPTION;
2780 return 1;
2781}
2782
2783/*
2784 * Thread-safe IO closing mechanism.
2785 *
2786 * When an IO is closed while other threads or fibers are blocked on it, we need to:
2787 * 1. Track and notify all blocking operations through io->blocking_operations
2788 * 2. Ensure only one thread can close at a time using io->closing_ec
2789 * 3. Synchronize cleanup using wakeup_mutex
2790 *
2791 * The close process works as follows:
2792 * - First check if any thread is already closing (io->closing_ec)
2793 * - Set up wakeup_mutex for synchronization
2794 * - Iterate through all blocking operations in io->blocking_operations
2795 * - For each blocked fiber with a scheduler:
2796 * - Notify via rb_fiber_scheduler_fiber_interrupt
2797 * - For each blocked thread without a scheduler:
2798 * - Enqueue IOError via rb_threadptr_pending_interrupt_enque
2799 * - Wake via rb_threadptr_interrupt
2800 * - Wait on wakeup_mutex until all operations are cleaned up
2801 * - Only then clear closing state and allow actual close to proceed
2802 */
/* Called under io->wakeup_mutex: interrupt every in-progress blocking
 * operation on +io+ (fiber-scheduler fibers via fiber_interrupt, plain
 * threads via a queued stream-closed error + wakeup). Returns the number
 * of blocking operations visited, cast to VALUE for rb_mutex_synchronize. */
static VALUE
thread_io_close_notify_all(VALUE _io)
{
    struct rb_io *io = (struct rb_io *)_io;

    size_t count = 0;
    rb_vm_t *vm = io->closing_ec->thread_ptr->vm;
    VALUE error = vm->special_exceptions[ruby_error_stream_closed];

    struct rb_io_blocking_operation *blocking_operation;
    ccan_list_for_each(rb_io_blocking_operations(io), blocking_operation, list) {
        rb_execution_context_t *ec = blocking_operation->ec;

        // If the operation is in progress, we need to interrupt it:
        if (ec) {
            rb_thread_t *thread = ec->thread_ptr;

            VALUE result = RUBY_Qundef;
            if (thread->scheduler != Qnil) {
                /* fiber-scheduler path: let the scheduler interrupt its fiber */
                result = rb_fiber_scheduler_fiber_interrupt(thread->scheduler, rb_fiberptr_self(ec->fiber_ptr), error);
            }

            if (result == RUBY_Qundef) {
                // If the thread is not the current thread, we need to enqueue an error:
                rb_threadptr_pending_interrupt_enque(thread, error);
                rb_threadptr_interrupt(thread);
            }
        }

        count += 1;
    }

    return (VALUE)count;
}
2837
/* Begin closing +io+: claim io->closing_ec for the current EC, set up the
 * wakeup mutex, and notify all blocked operations (see the block comment
 * above). Returns the number of operations interrupted, or 0 when someone
 * else is already closing or nothing is blocked. */
size_t
rb_thread_io_close_interrupt(struct rb_io *io)
{
    // We guard this operation based on `io->closing_ec` -> only one thread will ever enter this function.
    if (io->closing_ec) {
        return 0;
    }

    // If there are no blocking operations, we are done:
    if (ccan_list_empty(rb_io_blocking_operations(io))) {
        return 0;
    }

    // Otherwise, we are now closing the IO:
    rb_execution_context_t *ec = GET_EC();
    io->closing_ec = ec;

    // This is used to ensure the correct execution context is woken up after the blocking operation is interrupted:
    io->wakeup_mutex = rb_mutex_new();
    rb_mutex_allow_trap(io->wakeup_mutex, 1);

    // We need to use a mutex here as entering the fiber scheduler may cause a context switch:
    VALUE result = rb_mutex_synchronize(io->wakeup_mutex, thread_io_close_notify_all, (VALUE)io);

    return (size_t)result;
}
2864
/* Block until every interrupted blocking operation on +io+ has unwound,
 * then clear the closing state. No-op when no wakeup mutex was allocated
 * (nobody was blocked on the IO when close started). */
void
rb_thread_io_close_wait(struct rb_io* io)
{
    VALUE wakeup_mutex = io->wakeup_mutex;

    if (!RB_TEST(wakeup_mutex)) {
        // There was nobody else using this file when we closed it, so we never bothered to allocate a mutex:
        return;
    }

    rb_mutex_lock(wakeup_mutex);
    while (!ccan_list_empty(rb_io_blocking_operations(io))) {
        /* woken by each finishing blocking operation; re-check the list */
        rb_mutex_sleep(wakeup_mutex, Qnil);
    }
    rb_mutex_unlock(wakeup_mutex);

    // We are done closing:
    io->wakeup_mutex = Qnil;
    io->closing_ec = NULL;
}
2885
/* Deprecated public API kept for compatibility; only emits a warning and
 * does nothing with +fd+. */
void
rb_thread_fd_close(int fd)
{
    rb_warn("rb_thread_fd_close is deprecated (and is now a no-op).");
}
2891
2892/*
2893 * call-seq:
2894 * thr.raise
2895 * thr.raise(string)
2896 * thr.raise(exception [, string [, array]])
2897 *
2898 * Raises an exception from the given thread. The caller does not have to be
2899 * +thr+. See Kernel#raise for more information.
2900 *
2901 * Thread.abort_on_exception = true
2902 * a = Thread.new { sleep(200) }
2903 * a.raise("Gotcha")
2904 *
2905 * This will produce:
2906 *
2907 * prog.rb:3: Gotcha (RuntimeError)
2908 * from prog.rb:2:in `initialize'
2909 * from prog.rb:2:in `new'
2910 * from prog.rb:2
2911 */
2912
static VALUE
thread_raise_m(int argc, VALUE *argv, VALUE self)
{
    rb_thread_t *target_th = rb_thread_ptr(self);
    const rb_thread_t *current_th = GET_THREAD();

    /* reject raising on an uninitialized thread before building the error */
    threadptr_check_pending_interrupt_queue(target_th);
    rb_threadptr_raise(target_th, argc, argv);

    /* To perform Thread.current.raise as Kernel.raise */
    if (current_th == target_th) {
        RUBY_VM_CHECK_INTS(target_th->ec);
    }
    return Qnil;
}
2928
2929
2930/*
2931 * call-seq:
2932 * thr.exit -> thr
2933 * thr.kill -> thr
2934 * thr.terminate -> thr
2935 *
2936 * Terminates +thr+ and schedules another thread to be run, returning
2937 * the terminated Thread. If this is the main thread, or the last
2938 * thread, exits the process. Note that the caller does not wait for
2939 * the thread to terminate if the receiver is different from the currently
2940 * running thread. The termination is asynchronous, and the thread can still
2941 * run a small amount of ruby code before exiting.
2942 */
2943
2945rb_thread_kill(VALUE thread)
2946{
2947 rb_thread_t *target_th = rb_thread_ptr(thread);
2948
2949 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2950 return thread;
2951 }
2952 if (target_th == target_th->vm->ractor.main_thread) {
2953 rb_exit(EXIT_SUCCESS);
2954 }
2955
2956 RUBY_DEBUG_LOG("target_th:%u", rb_th_serial(target_th));
2957
2958 if (target_th == GET_THREAD()) {
2959 /* kill myself immediately */
2960 rb_threadptr_to_kill(target_th);
2961 }
2962 else {
2963 threadptr_check_pending_interrupt_queue(target_th);
2964 rb_threadptr_pending_interrupt_enque(target_th, RUBY_FATAL_THREAD_KILLED);
2965 rb_threadptr_interrupt(target_th);
2966 }
2967
2968 return thread;
2969}
2970
2971int
2972rb_thread_to_be_killed(VALUE thread)
2973{
2974 rb_thread_t *target_th = rb_thread_ptr(thread);
2975
2976 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2977 return TRUE;
2978 }
2979 return FALSE;
2980}
2981
2982/*
2983 * call-seq:
2984 * Thread.kill(thread) -> thread
2985 *
2986 * Causes the given +thread+ to exit, see also Thread::exit.
2987 *
2988 * count = 0
2989 * a = Thread.new { loop { count += 1 } }
2990 * sleep(0.1) #=> 0
2991 * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
2992 * count #=> 93947
2993 * a.alive? #=> false
2994 */
2995
/* Thread.kill(thread) -- class-method wrapper around rb_thread_kill. */
static VALUE
rb_thread_s_kill(VALUE obj, VALUE th)
{
    return rb_thread_kill(th);
}
3001
3002
3003/*
3004 * call-seq:
3005 * Thread.exit -> thread
3006 *
3007 * Terminates the currently running thread and schedules another thread to be
3008 * run.
3009 *
3010 * If this thread is already marked to be killed, ::exit returns the Thread.
3011 *
3012 * If this is the main thread, or the last thread, exit the process.
3013 */
3014
3015static VALUE
3016rb_thread_exit(VALUE _)
3017{
3018 rb_thread_t *th = GET_THREAD();
3019 return rb_thread_kill(th->self);
3020}
3021
3022
3023/*
3024 * call-seq:
3025 * thr.wakeup -> thr
3026 *
3027 * Marks a given thread as eligible for scheduling, however it may still
3028 * remain blocked on I/O.
3029 *
3030 * *Note:* This does not invoke the scheduler, see #run for more information.
3031 *
3032 * c = Thread.new { Thread.stop; puts "hey!" }
3033 * sleep 0.1 while c.status!='sleep'
3034 * c.wakeup
3035 * c.join
3036 * #=> "hey!"
3037 */
3038
3040rb_thread_wakeup(VALUE thread)
3041{
3042 if (!RTEST(rb_thread_wakeup_alive(thread))) {
3043 rb_raise(rb_eThreadError, "killed thread");
3044 }
3045 return thread;
3046}
3047
3050{
3051 rb_thread_t *target_th = rb_thread_ptr(thread);
3052 if (target_th->status == THREAD_KILLED) return Qnil;
3053
3054 rb_threadptr_ready(target_th);
3055
3056 if (target_th->status == THREAD_STOPPED ||
3057 target_th->status == THREAD_STOPPED_FOREVER) {
3058 target_th->status = THREAD_RUNNABLE;
3059 }
3060
3061 return thread;
3062}
3063
3064
3065/*
3066 * call-seq:
3067 * thr.run -> thr
3068 *
3069 * Wakes up +thr+, making it eligible for scheduling.
3070 *
3071 * a = Thread.new { puts "a"; Thread.stop; puts "c" }
3072 * sleep 0.1 while a.status!='sleep'
3073 * puts "Got here"
3074 * a.run
3075 * a.join
3076 *
3077 * This will produce:
3078 *
3079 * a
3080 * Got here
3081 * c
3082 *
3083 * See also the instance method #wakeup.
3084 */
3085
3087rb_thread_run(VALUE thread)
3088{
3089 rb_thread_wakeup(thread);
3091 return thread;
3092}
3093
3094
3096rb_thread_stop(void)
3097{
3098 if (rb_thread_alone()) {
3099 rb_raise(rb_eThreadError,
3100 "stopping only thread\n\tnote: use sleep to stop forever");
3101 }
3103 return Qnil;
3104}
3105
3106/*
3107 * call-seq:
3108 * Thread.stop -> nil
3109 *
3110 * Stops execution of the current thread, putting it into a ``sleep'' state,
3111 * and schedules execution of another thread.
3112 *
3113 * a = Thread.new { print "a"; Thread.stop; print "c" }
3114 * sleep 0.1 while a.status!='sleep'
3115 * print "b"
3116 * a.run
3117 * a.join
3118 * #=> "abc"
3119 */
3120
/* Thread.stop -- ruby-level wrapper for rb_thread_stop. */
static VALUE
thread_stop(VALUE _)
{
    return rb_thread_stop();
}
3126
3127/********************************************************************/
3128
/* Return an Array of the current ractor's threads (see Thread.list). */
VALUE
rb_thread_list(void)
{
    // TODO
    return rb_ractor_thread_list();
}
3135
3136/*
3137 * call-seq:
3138 * Thread.list -> array
3139 *
3140 * Returns an array of Thread objects for all threads that are either runnable
3141 * or stopped.
3142 *
3143 * Thread.new { sleep(200) }
3144 * Thread.new { 1000000.times {|i| i*i } }
3145 * Thread.new { Thread.stop }
3146 * Thread.list.each {|t| p t}
3147 *
3148 * This will produce:
3149 *
3150 * #<Thread:0x401b3e84 sleep>
3151 * #<Thread:0x401b3f38 run>
3152 * #<Thread:0x401b3fb0 sleep>
3153 * #<Thread:0x401bdf4c run>
3154 */
3155
/* Thread.list -- ruby-level wrapper for rb_thread_list. */
static VALUE
thread_list(VALUE _)
{
    return rb_thread_list();
}
3161
3164{
3165 return GET_THREAD()->self;
3166}
3167
3168/*
3169 * call-seq:
3170 * Thread.current -> thread
3171 *
3172 * Returns the currently executing thread.
3173 *
3174 * Thread.current #=> #<Thread:0x401bdf4c run>
3175 */
3176
/* Thread.current -- ruby-level wrapper for rb_thread_current. */
static VALUE
thread_s_current(VALUE klass)
{
    return rb_thread_current();
}
3182
3184rb_thread_main(void)
3185{
3186 return GET_RACTOR()->threads.main->self;
3187}
3188
3189/*
3190 * call-seq:
3191 * Thread.main -> thread
3192 *
3193 * Returns the main thread.
3194 */
3195
/* Thread.main -- ruby-level wrapper for rb_thread_main. */
static VALUE
rb_thread_s_main(VALUE klass)
{
    return rb_thread_main();
}
3201
3202
3203/*
3204 * call-seq:
3205 * Thread.abort_on_exception -> true or false
3206 *
3207 * Returns the status of the global ``abort on exception'' condition.
3208 *
3209 * The default is +false+.
3210 *
3211 * When set to +true+, if any thread is aborted by an exception, the
3212 * raised exception will be re-raised in the main thread.
3213 *
3214 * Can also be specified by the global $DEBUG flag or command line option
3215 * +-d+.
3216 *
3217 * See also ::abort_on_exception=.
3218 *
3219 * There is also an instance level method to set this for a specific thread,
3220 * see #abort_on_exception.
3221 */
3222
/* Thread.abort_on_exception -- reader for the VM-global flag. */
static VALUE
rb_thread_s_abort_exc(VALUE _)
{
    return RBOOL(GET_THREAD()->vm->thread_abort_on_exception);
}
3228
3229
3230/*
3231 * call-seq:
3232 * Thread.abort_on_exception= boolean -> true or false
3233 *
3234 * When set to +true+, if any thread is aborted by an exception, the
3235 * raised exception will be re-raised in the main thread.
3236 * Returns the new state.
3237 *
3238 * Thread.abort_on_exception = true
3239 * t1 = Thread.new do
3240 * puts "In new thread"
3241 * raise "Exception from thread"
3242 * end
3243 * sleep(1)
3244 * puts "not reached"
3245 *
3246 * This will produce:
3247 *
3248 * In new thread
3249 * prog.rb:4: Exception from thread (RuntimeError)
3250 * from prog.rb:2:in `initialize'
3251 * from prog.rb:2:in `new'
3252 * from prog.rb:2
3253 *
3254 * See also ::abort_on_exception.
3255 *
3256 * There is also an instance level method to set this for a specific thread,
3257 * see #abort_on_exception=.
3258 */
3259
/* Thread.abort_on_exception= -- writer for the VM-global flag.
 * Returns +val+ unchanged (writer convention). */
static VALUE
rb_thread_s_abort_exc_set(VALUE self, VALUE val)
{
    GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
    return val;
}
3266
3267
3268/*
3269 * call-seq:
3270 * thr.abort_on_exception -> true or false
3271 *
3272 * Returns the status of the thread-local ``abort on exception'' condition for
3273 * this +thr+.
3274 *
3275 * The default is +false+.
3276 *
3277 * See also #abort_on_exception=.
3278 *
3279 * There is also a class level method to set this for all threads, see
3280 * ::abort_on_exception.
3281 */
3282
/* thr.abort_on_exception -- reader for the per-thread flag. */
static VALUE
rb_thread_abort_exc(VALUE thread)
{
    return RBOOL(rb_thread_ptr(thread)->abort_on_exception);
}
3288
3289
3290/*
3291 * call-seq:
3292 * thr.abort_on_exception= boolean -> true or false
3293 *
3294 * When set to +true+, if this +thr+ is aborted by an exception, the
3295 * raised exception will be re-raised in the main thread.
3296 *
3297 * See also #abort_on_exception.
3298 *
3299 * There is also a class level method to set this for all threads, see
3300 * ::abort_on_exception=.
3301 */
3302
/* thr.abort_on_exception= -- writer for the per-thread flag.
 * Returns +val+ unchanged (writer convention). */
static VALUE
rb_thread_abort_exc_set(VALUE thread, VALUE val)
{
    rb_thread_ptr(thread)->abort_on_exception = RTEST(val);
    return val;
}
3309
3310
3311/*
3312 * call-seq:
3313 * Thread.report_on_exception -> true or false
3314 *
3315 * Returns the status of the global ``report on exception'' condition.
3316 *
3317 * The default is +true+ since Ruby 2.5.
3318 *
3319 * All threads created when this flag is true will report
3320 * a message on $stderr if an exception kills the thread.
3321 *
3322 * Thread.new { 1.times { raise } }
3323 *
3324 * will produce this output on $stderr:
3325 *
3326 * #<Thread:...> terminated with exception (report_on_exception is true):
3327 * Traceback (most recent call last):
3328 * 2: from -e:1:in `block in <main>'
3329 * 1: from -e:1:in `times'
3330 *
3331 * This is done to catch errors in threads early.
3332 * In some cases, you might not want this output.
3333 * There are multiple ways to avoid the extra output:
3334 *
3335 * * If the exception is not intended, the best is to fix the cause of
3336 * the exception so it does not happen anymore.
3337 * * If the exception is intended, it might be better to rescue it closer to
 * where it is raised rather than let it kill the Thread.
3339 * * If it is guaranteed the Thread will be joined with Thread#join or
3340 * Thread#value, then it is safe to disable this report with
3341 * <code>Thread.current.report_on_exception = false</code>
3342 * when starting the Thread.
3343 * However, this might handle the exception much later, or not at all
3344 * if the Thread is never joined due to the parent thread being blocked, etc.
3345 *
3346 * See also ::report_on_exception=.
3347 *
3348 * There is also an instance level method to set this for a specific thread,
3349 * see #report_on_exception=.
3350 *
3351 */
3352
3353static VALUE
3354rb_thread_s_report_exc(VALUE _)
3355{
3356 return RBOOL(GET_THREAD()->vm->thread_report_on_exception);
3357}
3358
3359
3360/*
3361 * call-seq:
3362 * Thread.report_on_exception= boolean -> true or false
3363 *
3364 * Returns the new state.
3365 * When set to +true+, all threads created afterwards will inherit the
3366 * condition and report a message on $stderr if an exception kills a thread:
3367 *
3368 * Thread.report_on_exception = true
3369 * t1 = Thread.new do
3370 * puts "In new thread"
3371 * raise "Exception from thread"
3372 * end
3373 * sleep(1)
3374 * puts "In the main thread"
3375 *
3376 * This will produce:
3377 *
3378 * In new thread
3379 * #<Thread:...prog.rb:2> terminated with exception (report_on_exception is true):
3380 * Traceback (most recent call last):
3381 * prog.rb:4:in `block in <main>': Exception from thread (RuntimeError)
3382 * In the main thread
3383 *
3384 * See also ::report_on_exception.
3385 *
3386 * There is also an instance level method to set this for a specific thread,
3387 * see #report_on_exception=.
3388 */
3389
3390static VALUE
3391rb_thread_s_report_exc_set(VALUE self, VALUE val)
3392{
3393 GET_THREAD()->vm->thread_report_on_exception = RTEST(val);
3394 return val;
3395}
3396
3397
3398/*
3399 * call-seq:
3400 * Thread.ignore_deadlock -> true or false
3401 *
3402 * Returns the status of the global ``ignore deadlock'' condition.
3403 * The default is +false+, so that deadlock conditions are not ignored.
3404 *
3405 * See also ::ignore_deadlock=.
3406 *
3407 */
3408
3409static VALUE
3410rb_thread_s_ignore_deadlock(VALUE _)
3411{
3412 return RBOOL(GET_THREAD()->vm->thread_ignore_deadlock);
3413}
3414
3415
3416/*
3417 * call-seq:
3418 * Thread.ignore_deadlock = boolean -> true or false
3419 *
3420 * Returns the new state.
3421 * When set to +true+, the VM will not check for deadlock conditions.
3422 * It is only useful to set this if your application can break a
3423 * deadlock condition via some other means, such as a signal.
3424 *
3425 * Thread.ignore_deadlock = true
3426 * queue = Thread::Queue.new
3427 *
3428 * trap(:SIGUSR1){queue.push "Received signal"}
3429 *
3430 * # raises fatal error unless ignoring deadlock
3431 * puts queue.pop
3432 *
3433 * See also ::ignore_deadlock.
3434 */
3435
3436static VALUE
3437rb_thread_s_ignore_deadlock_set(VALUE self, VALUE val)
3438{
3439 GET_THREAD()->vm->thread_ignore_deadlock = RTEST(val);
3440 return val;
3441}
3442
3443
3444/*
3445 * call-seq:
3446 * thr.report_on_exception -> true or false
3447 *
3448 * Returns the status of the thread-local ``report on exception'' condition for
3449 * this +thr+.
3450 *
3451 * The default value when creating a Thread is the value of
3452 * the global flag Thread.report_on_exception.
3453 *
3454 * See also #report_on_exception=.
3455 *
3456 * There is also a class level method to set this for all new threads, see
3457 * ::report_on_exception=.
3458 */
3459
3460static VALUE
3461rb_thread_report_exc(VALUE thread)
3462{
3463 return RBOOL(rb_thread_ptr(thread)->report_on_exception);
3464}
3465
3466
3467/*
3468 * call-seq:
3469 * thr.report_on_exception= boolean -> true or false
3470 *
3471 * When set to +true+, a message is printed on $stderr if an exception
3472 * kills this +thr+. See ::report_on_exception for details.
3473 *
3474 * See also #report_on_exception.
3475 *
3476 * There is also a class level method to set this for all new threads, see
3477 * ::report_on_exception=.
3478 */
3479
3480static VALUE
3481rb_thread_report_exc_set(VALUE thread, VALUE val)
3482{
3483 rb_thread_ptr(thread)->report_on_exception = RTEST(val);
3484 return val;
3485}
3486
3487
3488/*
3489 * call-seq:
3490 * thr.group -> thgrp or nil
3491 *
3492 * Returns the ThreadGroup which contains the given thread.
3493 *
3494 * Thread.main.group #=> #<ThreadGroup:0x4029d914>
3495 */
3496
3497VALUE
3498rb_thread_group(VALUE thread)
3499{
3500 return rb_thread_ptr(thread)->thgroup;
3501}
3502
/* Map th->status to a human-readable name: "run"/"aborting", "sleep"
 * (or the finer "sleep_forever" when detail is nonzero), "dead", or
 * "unknown" for unexpected values. */
static const char *
thread_status_name(rb_thread_t *th, int detail)
{
    switch (th->status) {
      case THREAD_RUNNABLE:
        return th->to_kill ? "aborting" : "run";
      case THREAD_STOPPED_FOREVER:
        if (detail) return "sleep_forever";
        /* fall through: without detail, both stopped states read "sleep" */
      case THREAD_STOPPED:
        return "sleep";
      case THREAD_KILLED:
        return "dead";
      default:
        return "unknown";
    }
}
3519
/* Nonzero once the thread has fully terminated (status THREAD_KILLED). */
static int
rb_threadptr_dead(rb_thread_t *th)
{
    return th->status == THREAD_KILLED;
}
3525
3526
3527/*
3528 * call-seq:
3529 * thr.status -> string, false or nil
3530 *
3531 * Returns the status of +thr+.
3532 *
3533 * [<tt>"sleep"</tt>]
3534 * Returned if this thread is sleeping or waiting on I/O
3535 * [<tt>"run"</tt>]
3536 * When this thread is executing
3537 * [<tt>"aborting"</tt>]
3538 * If this thread is aborting
3539 * [+false+]
3540 * When this thread is terminated normally
3541 * [+nil+]
3542 * If terminated with an exception.
3543 *
3544 * a = Thread.new { raise("die now") }
3545 * b = Thread.new { Thread.stop }
3546 * c = Thread.new { Thread.exit }
3547 * d = Thread.new { sleep }
3548 * d.kill #=> #<Thread:0x401b3678 aborting>
3549 * a.status #=> nil
3550 * b.status #=> "sleep"
3551 * c.status #=> false
3552 * d.status #=> "aborting"
3553 * Thread.current.status #=> "run"
3554 *
3555 * See also the instance methods #alive? and #stop?
3556 */
3557
3558static VALUE
3559rb_thread_status(VALUE thread)
3560{
3561 rb_thread_t *target_th = rb_thread_ptr(thread);
3562
3563 if (rb_threadptr_dead(target_th)) {
3564 if (!NIL_P(target_th->ec->errinfo) &&
3565 !FIXNUM_P(target_th->ec->errinfo)) {
3566 return Qnil;
3567 }
3568 else {
3569 return Qfalse;
3570 }
3571 }
3572 else {
3573 return rb_str_new2(thread_status_name(target_th, FALSE));
3574 }
3575}
3576
3577
3578/*
3579 * call-seq:
3580 * thr.alive? -> true or false
3581 *
3582 * Returns +true+ if +thr+ is running or sleeping.
3583 *
3584 * thr = Thread.new { }
3585 * thr.join #=> #<Thread:0x401b3fb0 dead>
3586 * Thread.current.alive? #=> true
3587 * thr.alive? #=> false
3588 *
3589 * See also #stop? and #status.
3590 */
3591
3592static VALUE
3593rb_thread_alive_p(VALUE thread)
3594{
3595 return RBOOL(!thread_finished(rb_thread_ptr(thread)));
3596}
3597
3598/*
3599 * call-seq:
3600 * thr.stop? -> true or false
3601 *
3602 * Returns +true+ if +thr+ is dead or sleeping.
3603 *
3604 * a = Thread.new { Thread.stop }
3605 * b = Thread.current
3606 * a.stop? #=> true
3607 * b.stop? #=> false
3608 *
3609 * See also #alive? and #status.
3610 */
3611
3612static VALUE
3613rb_thread_stop_p(VALUE thread)
3614{
3615 rb_thread_t *th = rb_thread_ptr(thread);
3616
3617 if (rb_threadptr_dead(th)) {
3618 return Qtrue;
3619 }
3620 return RBOOL(th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER);
3621}
3622
3623/*
3624 * call-seq:
3625 * thr.name -> string
3626 *
3627 * show the name of the thread.
3628 */
3629
3630static VALUE
3631rb_thread_getname(VALUE thread)
3632{
3633 return rb_thread_ptr(thread)->name;
3634}
3635
3636/*
3637 * call-seq:
3638 * thr.name=(name) -> string
3639 *
3640 * set given name to the ruby thread.
3641 * On some platform, it may set the name to pthread and/or kernel.
3642 */
3643
/* Thread#name=: validate, freeze and store the name; when the thread is
 * already running on a dedicated native thread, also propagate the name
 * to the OS-level thread. nil clears the name. */
static VALUE
rb_thread_setname(VALUE thread, VALUE name)
{
    rb_thread_t *target_th = rb_thread_ptr(thread);

    if (!NIL_P(name)) {
        rb_encoding *enc;
        /* Requires a NUL-free C string; raises on embedded NUL. */
        StringValueCStr(name);
        enc = rb_enc_get(name);
        if (!rb_enc_asciicompat(enc)) {
            rb_raise(rb_eArgError, "ASCII incompatible encoding (%s)",
                     rb_enc_name(enc));
        }
        /* Keep a frozen private copy so later mutation can't change it. */
        name = rb_str_new_frozen(name);
    }
    target_th->name = name;
    if (threadptr_initialized(target_th) && target_th->has_dedicated_nt) {
        native_set_another_thread_name(target_th->nt->thread_id, name);
    }
    return name;
}
3665
3666#if USE_NATIVE_THREAD_NATIVE_THREAD_ID
3667/*
3668 * call-seq:
3669 * thr.native_thread_id -> integer
3670 *
3671 * Return the native thread ID which is used by the Ruby thread.
3672 *
3673 * The ID depends on the OS. (not POSIX thread ID returned by pthread_self(3))
3674 * * On Linux it is TID returned by gettid(2).
3675 * * On macOS it is the system-wide unique integral ID of thread returned
3676 * by pthread_threadid_np(3).
3677 * * On FreeBSD it is the unique integral ID of the thread returned by
3678 * pthread_getthreadid_np(3).
3679 * * On Windows it is the thread identifier returned by GetThreadId().
3680 * * On other platforms, it raises NotImplementedError.
3681 *
3682 * NOTE:
3683 * If the thread is not associated yet or already deassociated with a native
3684 * thread, it returns _nil_.
3685 * If the Ruby implementation uses M:N thread model, the ID may change
3686 * depending on the timing.
3687 */
3688
3689static VALUE
3690rb_thread_native_thread_id(VALUE thread)
3691{
3692 rb_thread_t *target_th = rb_thread_ptr(thread);
3693 if (rb_threadptr_dead(target_th)) return Qnil;
3694 return native_thread_native_thread_id(target_th);
3695}
3696#else
3697# define rb_thread_native_thread_id rb_f_notimplement
3698#endif
3699
3700/*
3701 * call-seq:
3702 * thr.to_s -> string
3703 *
3704 * Dump the name, id, and status of _thr_ to a string.
3705 */
3706
/* Thread#to_s: "#<ClassName:0xADDR[@name][ file:line] status>".
 * The @name part appears only for named threads; file:line is the
 * invocation location of the thread's block when available. */
static VALUE
rb_thread_to_s(VALUE thread)
{
    VALUE cname = rb_class_path(rb_obj_class(thread));
    rb_thread_t *target_th = rb_thread_ptr(thread);
    const char *status;
    VALUE str, loc;

    /* TRUE: use the detailed status name ("sleep_forever" vs "sleep"). */
    status = thread_status_name(target_th, TRUE);
    str = rb_sprintf("#<%"PRIsVALUE":%p", cname, (void *)thread);
    if (!NIL_P(target_th->name)) {
        rb_str_catf(str, "@%"PRIsVALUE, target_th->name);
    }
    if ((loc = threadptr_invoke_proc_location(target_th)) != Qnil) {
        rb_str_catf(str, " %"PRIsVALUE":%"PRIsVALUE,
                    RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
    }
    rb_str_catf(str, " %s>", status);

    return str;
}
3728
3729/* variables for recursive traversals */
3730#define recursive_key id__recursive_key__
3731
3732static VALUE
3733threadptr_local_aref(rb_thread_t *th, ID id)
3734{
3735 if (id == recursive_key) {
3736 return th->ec->local_storage_recursive_hash;
3737 }
3738 else {
3739 VALUE val;
3740 struct rb_id_table *local_storage = th->ec->local_storage;
3741
3742 if (local_storage != NULL && rb_id_table_lookup(local_storage, id, &val)) {
3743 return val;
3744 }
3745 else {
3746 return Qnil;
3747 }
3748 }
3749}
3750
/* Public C API: fiber-local lookup on +thread+ by interned +id+;
 * returns Qnil when the variable is not set. */
rb_thread_local_aref(VALUE thread, ID id)
{
    return threadptr_local_aref(rb_thread_ptr(thread), id);
}
3756
3757/*
3758 * call-seq:
3759 * thr[sym] -> obj or nil
3760 *
3761 * Attribute Reference---Returns the value of a fiber-local variable (current thread's root fiber
3762 * if not explicitly inside a Fiber), using either a symbol or a string name.
3763 * If the specified variable does not exist, returns +nil+.
3764 *
3765 * [
3766 * Thread.new { Thread.current["name"] = "A" },
3767 * Thread.new { Thread.current[:name] = "B" },
3768 * Thread.new { Thread.current["name"] = "C" }
3769 * ].each do |th|
3770 * th.join
3771 * puts "#{th.inspect}: #{th[:name]}"
3772 * end
3773 *
3774 * This will produce:
3775 *
3776 * #<Thread:0x00000002a54220 dead>: A
3777 * #<Thread:0x00000002a541a8 dead>: B
3778 * #<Thread:0x00000002a54130 dead>: C
3779 *
3780 * Thread#[] and Thread#[]= are not thread-local but fiber-local.
3781 * This confusion did not exist in Ruby 1.8 because
3782 * fibers are only available since Ruby 1.9.
3783 * Ruby 1.9 chooses that the methods behaves fiber-local to save
3784 * following idiom for dynamic scope.
3785 *
3786 * def meth(newvalue)
3787 * begin
3788 * oldvalue = Thread.current[:name]
3789 * Thread.current[:name] = newvalue
3790 * yield
3791 * ensure
3792 * Thread.current[:name] = oldvalue
3793 * end
3794 * end
3795 *
3796 * The idiom may not work as dynamic scope if the methods are thread-local
3797 * and a given block switches fiber.
3798 *
3799 * f = Fiber.new {
3800 * meth(1) {
3801 * Fiber.yield
3802 * }
3803 * }
3804 * meth(2) {
3805 * f.resume
3806 * }
3807 * f.resume
3808 * p Thread.current[:name]
3809 * #=> nil if fiber-local
3810 * #=> 2 if thread-local (The value 2 is leaked to outside of meth method.)
3811 *
3812 * For thread-local variables, please see #thread_variable_get and
3813 * #thread_variable_set.
3814 *
3815 */
3816
3817static VALUE
3818rb_thread_aref(VALUE thread, VALUE key)
3819{
3820 ID id = rb_check_id(&key);
3821 if (!id) return Qnil;
3822 return rb_thread_local_aref(thread, id);
3823}
3824
3825/*
3826 * call-seq:
3827 * thr.fetch(sym) -> obj
3828 * thr.fetch(sym) { } -> obj
3829 * thr.fetch(sym, default) -> obj
3830 *
3831 * Returns a fiber-local for the given key. If the key can't be
3832 * found, there are several options: With no other arguments, it will
3833 * raise a KeyError exception; if <i>default</i> is given, then that
3834 * will be returned; if the optional code block is specified, then
3835 * that will be run and its result returned. See Thread#[] and
3836 * Hash#fetch.
3837 */
/* Thread#fetch(key[, default]) { |key| ... }: fiber-local lookup with
 * Hash#fetch-style fallback. Missing key: yield the block if given,
 * else return the default, else raise KeyError. */
static VALUE
rb_thread_fetch(int argc, VALUE *argv, VALUE self)
{
    VALUE key, val;
    ID id;
    rb_thread_t *target_th = rb_thread_ptr(self);
    int block_given;

    /* Accept (key) or (key, default). */
    rb_check_arity(argc, 1, 2);
    key = argv[0];

    block_given = rb_block_given_p();
    if (block_given && argc == 2) {
        rb_warn("block supersedes default value argument");
    }

    /* id is 0 when key was never interned, i.e. the local cannot exist. */
    id = rb_check_id(&key);

    if (id == recursive_key) {
        /* The recursion-guard hash is stored on the EC, not in the table. */
        return target_th->ec->local_storage_recursive_hash;
    }
    else if (id && target_th->ec->local_storage &&
             rb_id_table_lookup(target_th->ec->local_storage, id, &val)) {
        return val;
    }
    else if (block_given) {
        return rb_yield(key);
    }
    else if (argc == 1) {
        /* NOTE(review): this branch relies on rb_key_err_raise not
         * returning — confirm its noreturn declaration. */
        rb_key_err_raise(rb_sprintf("key not found: %+"PRIsVALUE, key), self, key);
    }
    else {
        return argv[1];
    }
}
3873
3874static VALUE
3875threadptr_local_aset(rb_thread_t *th, ID id, VALUE val)
3876{
3877 if (id == recursive_key) {
3878 th->ec->local_storage_recursive_hash = val;
3879 return val;
3880 }
3881 else {
3882 struct rb_id_table *local_storage = th->ec->local_storage;
3883
3884 if (NIL_P(val)) {
3885 if (!local_storage) return Qnil;
3886 rb_id_table_delete(local_storage, id);
3887 return Qnil;
3888 }
3889 else {
3890 if (local_storage == NULL) {
3891 th->ec->local_storage = local_storage = rb_id_table_create(0);
3892 }
3893 rb_id_table_insert(local_storage, id, val);
3894 return val;
3895 }
3896 }
3897}
3898
/* Public C API: set fiber-local +id+ on +thread+; raises FrozenError
 * for frozen threads. nil deletes the entry (see threadptr_local_aset). */
rb_thread_local_aset(VALUE thread, ID id, VALUE val)
{
    if (OBJ_FROZEN(thread)) {
        rb_frozen_error_raise(thread, "can't modify frozen thread locals");
    }

    return threadptr_local_aset(rb_thread_ptr(thread), id, val);
}
3908
3909/*
3910 * call-seq:
3911 * thr[sym] = obj -> obj
3912 *
3913 * Attribute Assignment---Sets or creates the value of a fiber-local variable,
3914 * using either a symbol or a string.
3915 *
3916 * See also Thread#[].
3917 *
3918 * For thread-local variables, please see #thread_variable_set and
3919 * #thread_variable_get.
3920 */
3921
3922static VALUE
3923rb_thread_aset(VALUE self, VALUE id, VALUE val)
3924{
3925 return rb_thread_local_aset(self, rb_to_id(id), val);
3926}
3927
3928/*
3929 * call-seq:
3930 * thr.thread_variable_get(key) -> obj or nil
3931 *
3932 * Returns the value of a thread local variable that has been set. Note that
3933 * these are different than fiber local values. For fiber local values,
3934 * please see Thread#[] and Thread#[]=.
3935 *
3936 * Thread local values are carried along with threads, and do not respect
3937 * fibers. For example:
3938 *
3939 * Thread.new {
3940 * Thread.current.thread_variable_set("foo", "bar") # set a thread local
3941 * Thread.current["foo"] = "bar" # set a fiber local
3942 *
3943 * Fiber.new {
3944 * Fiber.yield [
3945 * Thread.current.thread_variable_get("foo"), # get the thread local
3946 * Thread.current["foo"], # get the fiber local
3947 * ]
3948 * }.resume
3949 * }.join.value # => ['bar', nil]
3950 *
3951 * The value "bar" is returned for the thread local, where nil is returned
3952 * for the fiber local. The fiber is executed in the same thread, so the
3953 * thread local values are available.
3954 */
3955
3956static VALUE
3957rb_thread_variable_get(VALUE thread, VALUE key)
3958{
3959 VALUE locals;
3960 VALUE symbol = rb_to_symbol(key);
3961
3962 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3963 return Qnil;
3964 }
3965 locals = rb_thread_local_storage(thread);
3966 return rb_hash_aref(locals, symbol);
3967}
3968
3969/*
3970 * call-seq:
3971 * thr.thread_variable_set(key, value)
3972 *
3973 * Sets a thread local with +key+ to +value+. Note that these are local to
3974 * threads, and not to fibers. Please see Thread#thread_variable_get and
3975 * Thread#[] for more information.
3976 */
3977
3978static VALUE
3979rb_thread_variable_set(VALUE thread, VALUE key, VALUE val)
3980{
3981 VALUE locals;
3982
3983 if (OBJ_FROZEN(thread)) {
3984 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3985 }
3986
3987 locals = rb_thread_local_storage(thread);
3988 return rb_hash_aset(locals, rb_to_symbol(key), val);
3989}
3990
3991/*
3992 * call-seq:
3993 * thr.key?(sym) -> true or false
3994 *
3995 * Returns +true+ if the given string (or symbol) exists as a fiber-local
3996 * variable.
3997 *
3998 * me = Thread.current
3999 * me[:oliver] = "a"
4000 * me.key?(:oliver) #=> true
4001 * me.key?(:stanley) #=> false
4002 */
4003
4004static VALUE
4005rb_thread_key_p(VALUE self, VALUE key)
4006{
4007 VALUE val;
4008 ID id = rb_check_id(&key);
4009 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
4010
4011 if (!id || local_storage == NULL) {
4012 return Qfalse;
4013 }
4014 return RBOOL(rb_id_table_lookup(local_storage, id, &val));
4015}
4016
4017static enum rb_id_table_iterator_result
4018thread_keys_i(ID key, VALUE value, void *ary)
4019{
4020 rb_ary_push((VALUE)ary, ID2SYM(key));
4021 return ID_TABLE_CONTINUE;
4022}
4023
/* Nonzero when the current ractor has exactly one living thread. */
rb_thread_alone(void)
{
    // TODO: counts only the current ractor's threads, not the whole VM.
    return rb_ractor_living_thread_num(GET_RACTOR()) == 1;
}
4030
4031/*
4032 * call-seq:
4033 * thr.keys -> array
4034 *
4035 * Returns an array of the names of the fiber-local variables (as Symbols).
4036 *
4037 * thr = Thread.new do
4038 * Thread.current[:cat] = 'meow'
4039 * Thread.current["dog"] = 'woof'
4040 * end
4041 * thr.join #=> #<Thread:0x401b3f10 dead>
4042 * thr.keys #=> [:dog, :cat]
4043 */
4044
4045static VALUE
4046rb_thread_keys(VALUE self)
4047{
4048 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
4049 VALUE ary = rb_ary_new();
4050
4051 if (local_storage) {
4052 rb_id_table_foreach(local_storage, thread_keys_i, (void *)ary);
4053 }
4054 return ary;
4055}
4056
/* rb_hash_foreach callback: accumulate each thread-variable name
 * (already a Symbol key) into the result array. */
static int
keys_i(VALUE key, VALUE value, VALUE ary)
{
    rb_ary_push(ary, key);
    return ST_CONTINUE;
}
4063
4064/*
4065 * call-seq:
4066 * thr.thread_variables -> array
4067 *
4068 * Returns an array of the names of the thread-local variables (as Symbols).
4069 *
4070 * thr = Thread.new do
4071 * Thread.current.thread_variable_set(:cat, 'meow')
4072 * Thread.current.thread_variable_set("dog", 'woof')
4073 * end
4074 * thr.join #=> #<Thread:0x401b3f10 dead>
4075 * thr.thread_variables #=> [:dog, :cat]
4076 *
4077 * Note that these are not fiber local variables. Please see Thread#[] and
4078 * Thread#thread_variable_get for more details.
4079 */
4080
4081static VALUE
4082rb_thread_variables(VALUE thread)
4083{
4084 VALUE locals;
4085 VALUE ary;
4086
4087 ary = rb_ary_new();
4088 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
4089 return ary;
4090 }
4091 locals = rb_thread_local_storage(thread);
4092 rb_hash_foreach(locals, keys_i, ary);
4093
4094 return ary;
4095}
4096
4097/*
4098 * call-seq:
4099 * thr.thread_variable?(key) -> true or false
4100 *
4101 * Returns +true+ if the given string (or symbol) exists as a thread-local
4102 * variable.
4103 *
4104 * me = Thread.current
4105 * me.thread_variable_set(:oliver, "a")
4106 * me.thread_variable?(:oliver) #=> true
4107 * me.thread_variable?(:stanley) #=> false
4108 *
4109 * Note that these are not fiber local variables. Please see Thread#[] and
4110 * Thread#thread_variable_get for more details.
4111 */
4112
4113static VALUE
4114rb_thread_variable_p(VALUE thread, VALUE key)
4115{
4116 VALUE locals;
4117 VALUE symbol = rb_to_symbol(key);
4118
4119 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
4120 return Qfalse;
4121 }
4122 locals = rb_thread_local_storage(thread);
4123
4124 return RBOOL(rb_hash_lookup(locals, symbol) != Qnil);
4125}
4126
4127/*
4128 * call-seq:
4129 * thr.priority -> integer
4130 *
4131 * Returns the priority of <i>thr</i>. Default is inherited from the
4132 * current thread which creating the new thread, or zero for the
4133 * initial main thread; higher-priority thread will run more frequently
4134 * than lower-priority threads (but lower-priority threads can also run).
4135 *
4136 * This is just hint for Ruby thread scheduler. It may be ignored on some
4137 * platform.
4138 *
4139 * Thread.current.priority #=> 0
4140 */
4141
4142static VALUE
4143rb_thread_priority(VALUE thread)
4144{
4145 return INT2NUM(rb_thread_ptr(thread)->priority);
4146}
4147
4148
4149/*
4150 * call-seq:
4151 * thr.priority= integer -> thr
4152 *
4153 * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
4154 * will run more frequently than lower-priority threads (but lower-priority
4155 * threads can also run).
4156 *
4157 * This is just hint for Ruby thread scheduler. It may be ignored on some
4158 * platform.
4159 *
4160 * count1 = count2 = 0
4161 * a = Thread.new do
4162 * loop { count1 += 1 }
4163 * end
4164 * a.priority = -1
4165 *
4166 * b = Thread.new do
4167 * loop { count2 += 1 }
4168 * end
4169 * b.priority = -2
4170 * sleep 1 #=> 1
4171 * count1 #=> 622504
4172 * count2 #=> 5832
4173 */
4174
4175static VALUE
4176rb_thread_priority_set(VALUE thread, VALUE prio)
4177{
4178 rb_thread_t *target_th = rb_thread_ptr(thread);
4179 int priority;
4180
4181#if USE_NATIVE_THREAD_PRIORITY
4182 target_th->priority = NUM2INT(prio);
4183 native_thread_apply_priority(th);
4184#else
4185 priority = NUM2INT(prio);
4186 if (priority > RUBY_THREAD_PRIORITY_MAX) {
4187 priority = RUBY_THREAD_PRIORITY_MAX;
4188 }
4189 else if (priority < RUBY_THREAD_PRIORITY_MIN) {
4190 priority = RUBY_THREAD_PRIORITY_MIN;
4191 }
4192 target_th->priority = (int8_t)priority;
4193#endif
4194 return INT2NUM(target_th->priority);
4195}
4196
4197/* for IO */
4198
4199#if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
4200
4201/*
4202 * several Unix platforms support file descriptors bigger than FD_SETSIZE
4203 * in select(2) system call.
4204 *
4205 * - Linux 2.2.12 (?)
4206 * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
4207 * select(2) documents how to allocate fd_set dynamically.
4208 * http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
4209 * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
4210 * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
4211 * select(2) documents how to allocate fd_set dynamically.
4212 * http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
4213 * - Solaris 8 has select_large_fdset
4214 * - Mac OS X 10.7 (Lion)
4215 * select(2) returns EINVAL if nfds is greater than FD_SET_SIZE and
4216 * _DARWIN_UNLIMITED_SELECT (or _DARWIN_C_SOURCE) isn't defined.
4217 * https://developer.apple.com/library/archive/releasenotes/Darwin/SymbolVariantsRelNotes/index.html
4218 *
4219 * When fd_set is not big enough to hold big file descriptors,
4220 * it should be allocated dynamically.
4221 * Note that this assumes fd_set is structured as bitmap.
4222 *
4223 * rb_fd_init allocates the memory.
4224 * rb_fd_term free the memory.
4225 * rb_fd_set may re-allocates bitmap.
4226 *
4227 * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
4228 */
4229
4230void
{
    /* Start empty, backed by one fd_set's worth of bits; rb_fd_resize()
     * grows the bitmap on demand for fds >= FD_SETSIZE. */
    fds->maxfd = 0;
    fds->fdset = ALLOC(fd_set);
    FD_ZERO(fds->fdset);
}
4237
4238void
4239rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
4240{
4241 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
4242
4243 if (size < sizeof(fd_set))
4244 size = sizeof(fd_set);
4245 dst->maxfd = src->maxfd;
4246 dst->fdset = xmalloc(size);
4247 memcpy(dst->fdset, src->fdset, size);
4248}
4249
4250void
{
    /* Release the heap-allocated bitmap and return to the empty state. */
    xfree(fds->fdset);
    fds->maxfd = 0;
    fds->fdset = 0;
}
4257
4258void
{
    /* Clear only the words actually allocated for [0, maxfd) bits; a set
     * that was never initialized (fdset == NULL) is already empty. */
    if (fds->fdset)
        MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
}
4264
/* Grow fds so that fd +n+ fits in the bitmap; newly added bytes are
 * zeroed. Never shrinks, and always keeps at least sizeof(fd_set)
 * allocated so the raw FD_* macros stay in bounds. */
static void
rb_fd_resize(int n, rb_fdset_t *fds)
{
    /* m: bytes needed for fd n; o: bytes currently in use. */
    size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
    size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);

    if (m < sizeof(fd_set)) m = sizeof(fd_set);
    if (o < sizeof(fd_set)) o = sizeof(fd_set);

    if (m > o) {
        fds->fdset = xrealloc(fds->fdset, m);
        memset((char *)fds->fdset + o, 0, m - o);
    }
    if (n >= fds->maxfd) fds->maxfd = n + 1;
}
4280
/* Add fd +n+ to the set, growing the bitmap first so that descriptors
 * beyond FD_SETSIZE can be recorded. */
void
rb_fd_set(int n, rb_fdset_t *fds)
{
    rb_fd_resize(n, fds);
    FD_SET(n, fds->fdset);
}
4287
/* Remove fd +n+ from the set; an fd beyond the tracked range cannot be
 * in the set, so nothing needs to happen. */
void
rb_fd_clr(int n, rb_fdset_t *fds)
{
    if (n >= fds->maxfd) return;
    FD_CLR(n, fds->fdset);
}
4294
/* Nonzero when fd +n+ is in the set; fds beyond the tracked range are
 * never set. */
int
rb_fd_isset(int n, const rb_fdset_t *fds)
{
    if (n >= fds->maxfd) return 0;
    return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
}
4301
4302void
4303rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
4304{
4305 size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);
4306
4307 if (size < sizeof(fd_set)) size = sizeof(fd_set);
4308 dst->maxfd = max;
4309 dst->fdset = xrealloc(dst->fdset, size);
4310 memcpy(dst->fdset, src, size);
4311}
4312
4313void
4314rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
4315{
4316 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
4317
4318 if (size < sizeof(fd_set))
4319 size = sizeof(fd_set);
4320 dst->maxfd = src->maxfd;
4321 dst->fdset = xrealloc(dst->fdset, size);
4322 memcpy(dst->fdset, src->fdset, size);
4323}
4324
/* select(2) over rb_fdset_t sets: ensure each supplied set can hold fds
 * up to n-1, then pass the raw fd_set pointers through. NULL sets stay
 * NULL, matching select()'s contract. */
int
rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
{
    fd_set *r = NULL, *w = NULL, *e = NULL;
    if (readfds) {
        rb_fd_resize(n - 1, readfds);
        r = rb_fd_ptr(readfds);
    }
    if (writefds) {
        rb_fd_resize(n - 1, writefds);
        w = rb_fd_ptr(writefds);
    }
    if (exceptfds) {
        rb_fd_resize(n - 1, exceptfds);
        e = rb_fd_ptr(exceptfds);
    }
    return select(n, r, w, e, timeout);
}
4343
4344#define rb_fd_no_init(fds) ((void)((fds)->fdset = 0), (void)((fds)->maxfd = 0))
4345
4346#undef FD_ZERO
4347#undef FD_SET
4348#undef FD_CLR
4349#undef FD_ISSET
4350
4351#define FD_ZERO(f) rb_fd_zero(f)
4352#define FD_SET(i, f) rb_fd_set((i), (f))
4353#define FD_CLR(i, f) rb_fd_clr((i), (f))
4354#define FD_ISSET(i, f) rb_fd_isset((i), (f))
4355
4356#elif defined(_WIN32)
4357
4358void
{
    /* Windows variant: start with room for FD_SETSIZE sockets;
     * rb_fd_set() grows capa on demand. */
    set->capa = FD_SETSIZE;
    set->fdset = ALLOC(fd_set);
    FD_ZERO(set->fdset);
}
4365
/* Windows variant: initialize dst fresh, then copy src's descriptors. */
void
rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
{
    rb_fd_init(dst);
    rb_fd_dup(dst, src);
}
4372
4373void
{
    /* Release the socket array and reset to the uninitialized state. */
    xfree(set->fdset);
    set->fdset = NULL;
    set->capa = 0;
}
4380
/* Windows variant of rb_fd_set: fd_set here is a counted array of
 * SOCKETs, so membership means a linear scan, and growth happens in
 * FD_SETSIZE-sized steps rather than by bitmap reallocation. */
void
rb_fd_set(int fd, rb_fdset_t *set)
{
    unsigned int i;
    SOCKET s = rb_w32_get_osfhandle(fd);

    /* Adding an already-present socket is a no-op. */
    for (i = 0; i < set->fdset->fd_count; i++) {
        if (set->fdset->fd_array[i] == s) {
            return;
        }
    }
    /* Full: round capacity up to the next FD_SETSIZE multiple. The extra
     * sizeof(unsigned int) accounts for the fd_count header field. */
    if (set->fdset->fd_count >= (unsigned)set->capa) {
        set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
        set->fdset =
            rb_xrealloc_mul_add(
                set->fdset, set->capa, sizeof(SOCKET), sizeof(unsigned int));
    }
    set->fdset->fd_array[set->fdset->fd_count++] = s;
}
4400
4401#undef FD_ZERO
4402#undef FD_SET
4403#undef FD_CLR
4404#undef FD_ISSET
4405
4406#define FD_ZERO(f) rb_fd_zero(f)
4407#define FD_SET(i, f) rb_fd_set((i), (f))
4408#define FD_CLR(i, f) rb_fd_clr((i), (f))
4409#define FD_ISSET(i, f) rb_fd_isset((i), (f))
4410
4411#define rb_fd_no_init(fds) (void)((fds)->fdset = 0)
4412
4413#endif
4414
4415#ifndef rb_fd_no_init
4416#define rb_fd_no_init(fds) (void)(fds)
4417#endif
4418
/* Decide whether an interrupted or timed-out wait should be retried.
 * *result < 0: retry only for EINTR/ERESTART, resetting *result and,
 * when a relative timeout is tracked, shrinking it toward the deadline
 * (forcing 0 once expired so the retry returns immediately).
 * *result == 0 (timeout/spurious wakeup): retry unless the deadline has
 * truly passed. *result > 0: success, no retry. */
static int
wait_retryable(volatile int *result, int errnum, rb_hrtime_t *rel, rb_hrtime_t end)
{
    int r = *result;
    if (r < 0) {
        switch (errnum) {
          case EINTR:
#ifdef ERESTART
          case ERESTART:
#endif
            *result = 0;
            if (rel && hrtime_update_expire(rel, end)) {
                *rel = 0;
            }
            return TRUE;
        }
        return FALSE;
    }
    else if (r == 0) {
        /* check for spurious wakeup */
        if (rel) {
            return !hrtime_update_expire(rel, end);
        }
        return TRUE;
    }
    return FALSE;
}
/* Bundled arguments for do_select(), threaded through rb_ensure() as a
 * single VALUE-sized pointer. */
struct select_set {
    int max;                  /* highest fd + 1, as for select(2) */
    rb_thread_t *th;          /* calling thread */
    rb_fdset_t *rset;         /* caller's sets; each may be NULL */
    rb_fdset_t *wset;
    rb_fdset_t *eset;
    rb_fdset_t orig_rset;     /* pristine copies, used to restore the
                               * caller's sets when retrying after EINTR */
    rb_fdset_t orig_wset;
    rb_fdset_t orig_eset;
    struct timeval *timeout;  /* may be NULL */
};
4458
/* rb_ensure() cleanup: free the backup copies of the caller's fd sets
 * made for retrying select after interruption. */
static VALUE
select_set_free(VALUE p)
{
    struct select_set *set = (struct select_set *)p;

    rb_fd_term(&set->orig_rset);
    rb_fd_term(&set->orig_wset);
    rb_fd_term(&set->orig_eset);

    return Qfalse;
}
4470
/* Core of rb_thread_fd_select: run select(2) inside a GVL-released
 * blocking region, honoring Ruby interrupts and retrying after
 * EINTR/spurious wakeups with the fd sets restored from the backup
 * copies in *set. Returns select's result cast to VALUE; on failure
 * errno is restored from the error captured inside the region. */
static VALUE
do_select(VALUE p)
{
    struct select_set *set = (struct select_set *)p;
    volatile int result = 0;
    int lerrno;
    rb_hrtime_t *to, rel, end = 0;

    timeout_prepare(&to, &rel, &end, set->timeout);
    volatile rb_hrtime_t endtime = end;
#define restore_fdset(dst, src) \
    ((dst) ? rb_fd_dup(dst, src) : (void)0)
#define do_select_update() \
    (restore_fdset(set->rset, &set->orig_rset), \
     restore_fdset(set->wset, &set->orig_wset), \
     restore_fdset(set->eset, &set->orig_eset), \
     TRUE)

    do {
        lerrno = 0;

        BLOCKING_REGION(set->th, {
            struct timeval tv;

            /* Skip the syscall entirely if an interrupt is pending. */
            if (!RUBY_VM_INTERRUPTED(set->th->ec)) {
                result = native_fd_select(set->max,
                                          set->rset, set->wset, set->eset,
                                          rb_hrtime2timeval(&tv, to), set->th);
                if (result < 0) lerrno = errno;
            }
        }, ubf_select, set->th, TRUE);

        RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec); /* may raise */
    } while (wait_retryable(&result, lerrno, to, endtime) && do_select_update());

    RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec);

    if (result < 0) {
        /* Re-expose the error from inside the blocking region; errno may
         * have been clobbered since. */
        errno = lerrno;
    }

    return (VALUE)result;
}
4514
4516rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
4517 struct timeval *timeout)
4518{
4519 struct select_set set;
4520
4521 set.th = GET_THREAD();
4522 RUBY_VM_CHECK_INTS_BLOCKING(set.th->ec);
4523 set.max = max;
4524 set.rset = read;
4525 set.wset = write;
4526 set.eset = except;
4527 set.timeout = timeout;
4528
4529 if (!set.rset && !set.wset && !set.eset) {
4530 if (!timeout) {
4532 return 0;
4533 }
4534 rb_thread_wait_for(*timeout);
4535 return 0;
4536 }
4537
4538#define fd_init_copy(f) do { \
4539 if (set.f) { \
4540 rb_fd_resize(set.max - 1, set.f); \
4541 if (&set.orig_##f != set.f) { /* sigwait_fd */ \
4542 rb_fd_init_copy(&set.orig_##f, set.f); \
4543 } \
4544 } \
4545 else { \
4546 rb_fd_no_init(&set.orig_##f); \
4547 } \
4548 } while (0)
4549 fd_init_copy(rset);
4550 fd_init_copy(wset);
4551 fd_init_copy(eset);
4552#undef fd_init_copy
4553
4554 return (int)rb_ensure(do_select, (VALUE)&set, select_set_free, (VALUE)&set);
4555}
4556
4557#ifdef USE_POLL
4558
4559/* The same with linux kernel. TODO: make platform independent definition. */
4560#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
4561#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
4562#define POLLEX_SET (POLLPRI)
4563
4564#ifndef POLLERR_SET /* defined for FreeBSD for now */
4565# define POLLERR_SET (0)
4566#endif
4567
/*
 * Run ppoll(2) inside a GVL-released blocking region.
 * *lerrno receives errno when ppoll fails; the poll is skipped entirely when
 * an interrupt is already pending on +th+ so the interrupt is serviced first.
 */
static int
wait_for_single_fd_blocking_region(rb_thread_t *th, struct pollfd *fds, nfds_t nfds,
                                   rb_hrtime_t *const to, volatile int *lerrno)
{
    struct timespec ts;
    volatile int result = 0;

    *lerrno = 0;
    BLOCKING_REGION(th, {
        if (!RUBY_VM_INTERRUPTED(th->ec)) {
            result = ppoll(fds, nfds, rb_hrtime2timespec(&ts, to), 0);
            if (result < 0) *lerrno = errno;
        }
    }, ubf_select, th, TRUE);
    return result;
}
4584
4585/*
4586 * returns a mask of events
4587 */
static int
thread_io_wait(struct rb_io *io, int fd, int events, struct timeval *timeout)
{
    /* ppoll(2)-based waiter: one pollfd entry for +fd+ with the requested
     * RB_WAITFD_* events; returns the ready-event mask or -1 with errno set. */
    struct pollfd fds[1] = {{
        .fd = fd,
        .events = (short)events,
        .revents = 0,
    }};
    volatile int result = 0;
    nfds_t nfds;
    struct rb_io_blocking_operation blocking_operation;
    enum ruby_tag_type state;
    volatile int lerrno;

    rb_execution_context_t *ec = GET_EC();
    rb_thread_t *th = rb_ec_thread_ptr(ec);

    if (io) {
        /* register this wait on the io's blocking-operation list */
        blocking_operation.ec = ec;
        rb_io_blocking_operation_enter(io, &blocking_operation);
    }

    if (timeout == NULL && thread_io_wait_events(th, fd, events, NULL)) {
        // fd is readable
        state = 0;
        fds[0].revents = events;
        errno = 0;
    }
    else {
        /* catch non-local exits so blocking_operation_exit below still runs */
        EC_PUSH_TAG(ec);
        if ((state = EC_EXEC_TAG()) == TAG_NONE) {
            rb_hrtime_t *to, rel, end = 0;
            RUBY_VM_CHECK_INTS_BLOCKING(ec);
            timeout_prepare(&to, &rel, &end, timeout);
            do {
                nfds = numberof(fds);
                result = wait_for_single_fd_blocking_region(th, fds, nfds, to, &lerrno);

                RUBY_VM_CHECK_INTS_BLOCKING(ec);
            } while (wait_retryable(&result, lerrno, to, end));

            RUBY_VM_CHECK_INTS_BLOCKING(ec);
        }

        EC_POP_TAG();
    }

    if (io) {
        rb_io_blocking_operation_exit(io, &blocking_operation);
    }

    if (state) {
        /* re-raise whatever was caught above, now that cleanup ran */
        EC_JUMP_TAG(ec, state);
    }

    if (result < 0) {
        errno = lerrno;
        return -1;
    }

    if (fds[0].revents & POLLNVAL) {
        errno = EBADF;
        return -1;
    }

    /*
     * POLLIN, POLLOUT have a different meanings from select(2)'s read/write bit.
     * Therefore we need to fix it up.
     */
    result = 0;
    if (fds[0].revents & POLLIN_SET)
        result |= RB_WAITFD_IN;
    if (fds[0].revents & POLLOUT_SET)
        result |= RB_WAITFD_OUT;
    if (fds[0].revents & POLLEX_SET)
        result |= RB_WAITFD_PRI;

    /* all requested events are ready if there is an error */
    if (fds[0].revents & POLLERR_SET)
        result |= events;

    return result;
}
4671#else /* ! USE_POLL - implement rb_io_poll_fd() using select() */
/* State for the select()-based thread_io_wait(), bundled so it can pass
 * through rb_ensure() as a single VALUE. */
struct select_args {
    struct rb_io *io;  /* io being waited on; may be NULL for a bare fd */
    struct rb_io_blocking_operation *blocking_operation; /* NULL when io is NULL */

    union {
        int fd;     /* input: fd to wait on */
        int error;  /* output: saved errno when the select failed */
    } as;
    rb_fdset_t *read;    /* non-NULL iff RB_WAITFD_IN was requested */
    rb_fdset_t *write;   /* non-NULL iff RB_WAITFD_OUT was requested */
    rb_fdset_t *except;  /* non-NULL iff RB_WAITFD_PRI was requested */
    struct timeval *tv;  /* timeout, forwarded to rb_thread_fd_select() */
};
4685
4686static VALUE
4687select_single(VALUE ptr)
4688{
4689 struct select_args *args = (struct select_args *)ptr;
4690 int r;
4691
4692 r = rb_thread_fd_select(args->as.fd + 1,
4693 args->read, args->write, args->except, args->tv);
4694 if (r == -1)
4695 args->as.error = errno;
4696 if (r > 0) {
4697 r = 0;
4698 if (args->read && rb_fd_isset(args->as.fd, args->read))
4699 r |= RB_WAITFD_IN;
4700 if (args->write && rb_fd_isset(args->as.fd, args->write))
4701 r |= RB_WAITFD_OUT;
4702 if (args->except && rb_fd_isset(args->as.fd, args->except))
4703 r |= RB_WAITFD_PRI;
4704 }
4705 return (VALUE)r;
4706}
4707
4708static VALUE
4709select_single_cleanup(VALUE ptr)
4710{
4711 struct select_args *args = (struct select_args *)ptr;
4712
4713 if (args->blocking_operation) {
4714 rb_io_blocking_operation_exit(args->io, args->blocking_operation);
4715 }
4716
4717 if (args->read) rb_fd_term(args->read);
4718 if (args->write) rb_fd_term(args->write);
4719 if (args->except) rb_fd_term(args->except);
4720
4721 return (VALUE)-1;
4722}
4723
4724static rb_fdset_t *
4725init_set_fd(int fd, rb_fdset_t *fds)
4726{
4727 if (fd < 0) {
4728 return 0;
4729 }
4730 rb_fd_init(fds);
4731 rb_fd_set(fd, fds);
4732
4733 return fds;
4734}
4735
4736static int
4737thread_io_wait(struct rb_io *io, int fd, int events, struct timeval *timeout)
4738{
4739 rb_fdset_t rfds, wfds, efds;
4740 struct select_args args;
4741 VALUE ptr = (VALUE)&args;
4742
4743 struct rb_io_blocking_operation blocking_operation;
4744 if (io) {
4745 args.io = io;
4746 blocking_operation.ec = GET_EC();
4747 rb_io_blocking_operation_enter(io, &blocking_operation);
4748 args.blocking_operation = &blocking_operation;
4749 }
4750 else {
4751 args.io = NULL;
4752 blocking_operation.ec = NULL;
4753 args.blocking_operation = NULL;
4754 }
4755
4756 args.as.fd = fd;
4757 args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
4758 args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
4759 args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
4760 args.tv = timeout;
4761
4762 int result = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
4763 if (result == -1)
4764 errno = args.as.error;
4765
4766 return result;
4767}
4768#endif /* ! USE_POLL */
4769
int
rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
{
    /* Public API: wait on a bare fd; no rb_io object to register. */
    return thread_io_wait(NULL, fd, events, timeout);
}
4775
int
rb_thread_io_wait(struct rb_io *io, int events, struct timeval * timeout)
{
    /* Public API: wait on io->fd; thread_io_wait registers the wait via
     * rb_io_blocking_operation_enter() for the given io. */
    return thread_io_wait(io, io->fd, events, timeout);
}
4781
4782/*
4783 * for GC
4784 */
4785
4786#ifdef USE_CONSERVATIVE_STACK_END
void
rb_gc_set_stack_end(VALUE **stack_end_p)
{
    /* Publish the address of this local as an approximation of the current
     * native stack end, for conservative GC stack scanning. */
    VALUE stack_end;
COMPILER_WARNING_PUSH
#if RBIMPL_COMPILER_IS(GCC)
/* escaping a local's address is intentional here */
COMPILER_WARNING_IGNORED(-Wdangling-pointer);
#endif
    *stack_end_p = &stack_end;
COMPILER_WARNING_POP
}
4798#endif
4799
4800/*
4801 *
4802 */
4803
4804void
4805rb_threadptr_check_signal(rb_thread_t *mth)
4806{
4807 /* mth must be main_thread */
4808 if (rb_signal_buff_size() > 0) {
4809 /* wakeup main thread */
4810 threadptr_trap_interrupt(mth);
4811 }
4812}
4813
4814static void
4815async_bug_fd(const char *mesg, int errno_arg, int fd)
4816{
4817 char buff[64];
4818 size_t n = strlcpy(buff, mesg, sizeof(buff));
4819 if (n < sizeof(buff)-3) {
4820 ruby_snprintf(buff+n, sizeof(buff)-n, "(%d)", fd);
4821 }
4822 rb_async_bug_errno(buff, errno_arg);
4823}
4824
/* VM-dependent API is not available for this function */
/*
 * Drain the timer-thread/signal communication pipe (or eventfd) at +fd+.
 * Returns TRUE if any data was consumed, FALSE otherwise (for
 * rb_sigwait_sleep). Retries on EINTR; EAGAIN/EWOULDBLOCK mean "drained".
 */
static int
consume_communication_pipe(int fd)
{
#if USE_EVENTFD
    uint64_t buff[1];
#else
    /* buffer can be shared because no one refers to them. */
    static char buff[1024];
#endif
    ssize_t result;
    int ret = FALSE; /* for rb_sigwait_sleep */

    while (1) {
        result = read(fd, buff, sizeof(buff));
#if USE_EVENTFD
        RUBY_DEBUG_LOG("resultf:%d buff:%lu", (int)result, (unsigned long)buff[0]);
#else
        RUBY_DEBUG_LOG("result:%d", (int)result);
#endif
        if (result > 0) {
            ret = TRUE;
            /* an eventfd read, or a short pipe read, means we are done */
            if (USE_EVENTFD || result < (ssize_t)sizeof(buff)) {
                return ret;
            }
        }
        else if (result == 0) {
            return ret;
        }
        else if (result < 0) {
            int e = errno;
            switch (e) {
              case EINTR:
                continue; /* retry */
              case EAGAIN:
#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
              case EWOULDBLOCK:
#endif
                return ret;
              default:
                async_bug_fd("consume_communication_pipe: read", e, fd);
            }
        }
    }
}
4870
/*
 * Stop the timer thread if one was created; when the native stop succeeds,
 * reset its bookkeeping so it may be started again later.
 */
void
rb_thread_stop_timer_thread(void)
{
    if (!TIMER_THREAD_CREATED_P()) return;

    if (native_stop_timer_thread()) {
        native_reset_timer_thread();
    }
}
4878
void
rb_thread_reset_timer_thread(void)
{
    /* Reset timer-thread bookkeeping (e.g. after fork). */
    native_reset_timer_thread();
}
4884
void
rb_thread_start_timer_thread(void)
{
    /* Mark the system live and (re)create the timer thread. */
    system_working = 1;
    rb_thread_create_timer_thread();
}
4891
4892static int
4893clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
4894{
4895 int i;
4896 VALUE coverage = (VALUE)val;
4897 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
4898 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
4899
4900 if (lines) {
4901 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
4902 rb_ary_clear(lines);
4903 }
4904 else {
4905 int i;
4906 for (i = 0; i < RARRAY_LEN(lines); i++) {
4907 if (RARRAY_AREF(lines, i) != Qnil)
4908 RARRAY_ASET(lines, i, INT2FIX(0));
4909 }
4910 }
4911 }
4912 if (branches) {
4913 VALUE counters = RARRAY_AREF(branches, 1);
4914 for (i = 0; i < RARRAY_LEN(counters); i++) {
4915 RARRAY_ASET(counters, i, INT2FIX(0));
4916 }
4917 }
4918
4919 return ST_CONTINUE;
4920}
4921
4922void
4923rb_clear_coverages(void)
4924{
4925 VALUE coverages = rb_get_coverages();
4926 if (RTEST(coverages)) {
4927 rb_hash_foreach(coverages, clear_coverage_i, 0);
4928 }
4929}
4930
4931#if defined(HAVE_WORKING_FORK)
4932
/*
 * Shared child-side fork cleanup: make +th+ (and its ractor) the only live
 * entity in the child VM, re-initialize locks that may have been held in the
 * parent, run +atfork+ on every other known thread, and restart the timer
 * thread. The statement order here is load-bearing.
 */
static void
rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const rb_thread_t *))
{
    rb_thread_t *i = 0;
    rb_vm_t *vm = th->vm;
    rb_ractor_t *r = th->ractor;
    /* the forking thread's ractor becomes the main ractor/thread */
    vm->ractor.main_ractor = r;
    vm->ractor.main_thread = th;
    r->threads.main = th;
    r->status_ = ractor_created;

    thread_sched_atfork(TH_SCHED(th));
    ubf_list_atfork();
    rb_signal_atfork();

    // OK. Only this thread accesses:
    ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
        if (r != vm->ractor.main_ractor) {
            rb_ractor_terminate_atfork(vm, r);
        }
        ccan_list_for_each(&r->threads.set, i, lt_node) {
            atfork(i, th);
        }
    }
    rb_vm_living_threads_init(vm);

    rb_ractor_atfork(vm, th);
    rb_vm_postponed_job_atfork();

    /* may be held by any thread in parent */
    rb_native_mutex_initialize(&th->interrupt_lock);
    ccan_list_head_init(&th->interrupt_exec_tasks);

    vm->fork_gen++;
    rb_ractor_sleeper_threads_clear(th->ractor);
    rb_clear_coverages();

    // restart timer thread (timer threads access to `vm->waitpid_lock` and so on.
    rb_thread_reset_timer_thread();
    rb_thread_start_timer_thread();

    VM_ASSERT(vm->ractor.blocking_cnt == 0);
    VM_ASSERT(vm->ractor.cnt == 1);
}
4977
4978static void
4979terminate_atfork_i(rb_thread_t *th, const rb_thread_t *current_th)
4980{
4981 if (th != current_th) {
4982 rb_native_mutex_initialize(&th->interrupt_lock);
4983 rb_mutex_abandon_keeping_mutexes(th);
4984 rb_mutex_abandon_locking_mutex(th);
4985 thread_cleanup_func(th, TRUE);
4986 }
4987}
4988
4989void rb_fiber_atfork(rb_thread_t *);
/*
 * Child-side fork handler: clear pending interrupts for the surviving
 * thread, terminate all other threads' state (terminate_atfork_i), and
 * reset fiber bookkeeping.
 */
void
rb_thread_atfork(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_threadptr_pending_interrupt_clear(th);
    rb_thread_atfork_internal(th, terminate_atfork_i);
    th->join_list = NULL;
    rb_fiber_atfork(th);

    /* We don't want reproduce CVE-2003-0900. */
}
5002
5003static void
5004terminate_atfork_before_exec_i(rb_thread_t *th, const rb_thread_t *current_th)
5005{
5006 if (th != current_th) {
5007 thread_cleanup_func_before_exec(th);
5008 }
5009}
5010
/* Fork handler run in the child just before exec(2); delegates to the
 * lighter terminate_atfork_before_exec_i cleanup.
 * NOTE(review): the declarator line is missing here — confirm against
 * upstream (expected: rb_thread_atfork_before_exec). */
void
{
    rb_thread_t *th = GET_THREAD();
    rb_thread_atfork_internal(th, terminate_atfork_before_exec_i);
}
5017#else
void
rb_thread_atfork(void)
{
    /* no-op: this build has no working fork(2) (see HAVE_WORKING_FORK) */
}
5022
5023void
5025{
5026}
5027#endif
/* Instance data for ThreadGroup: only records whether the group is enclosed
 * (see ThreadGroup#enclose); membership itself lives in each thread's
 * th->thgroup field. */
struct thgroup {
    int enclosed;
};
5032
/* TypedData bindings for ThreadGroup (embeddable, WB-protected). */
static const rb_data_type_t thgroup_data_type = {
    "thgroup",
    {
        0,
        NULL, // No external memory to report
    },
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
};
5042
5043/*
5044 * Document-class: ThreadGroup
5045 *
5046 * ThreadGroup provides a means of keeping track of a number of threads as a
5047 * group.
5048 *
5049 * A given Thread object can only belong to one ThreadGroup at a time; adding
5050 * a thread to a new group will remove it from any previous group.
5051 *
5052 * Newly created threads belong to the same group as the thread from which they
5053 * were created.
5054 */
5055
5056/*
5057 * Document-const: Default
5058 *
5059 * The default ThreadGroup created when Ruby starts; all Threads belong to it
5060 * by default.
5061 */
5062static VALUE
5063thgroup_s_alloc(VALUE klass)
5064{
5065 VALUE group;
5066 struct thgroup *data;
5067
5068 group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
5069 data->enclosed = 0;
5070
5071 return group;
5072}
5073
5074/*
5075 * call-seq:
5076 * thgrp.list -> array
5077 *
5078 * Returns an array of all existing Thread objects that belong to this group.
5079 *
5080 * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
5081 */
5082
5083static VALUE
5084thgroup_list(VALUE group)
5085{
5086 VALUE ary = rb_ary_new();
5087 rb_thread_t *th = 0;
5088 rb_ractor_t *r = GET_RACTOR();
5089
5090 ccan_list_for_each(&r->threads.set, th, lt_node) {
5091 if (th->thgroup == group) {
5092 rb_ary_push(ary, th->self);
5093 }
5094 }
5095 return ary;
5096}
5097
5098
5099/*
5100 * call-seq:
5101 * thgrp.enclose -> thgrp
5102 *
5103 * Prevents threads from being added to or removed from the receiving
5104 * ThreadGroup.
5105 *
5106 * New threads can still be started in an enclosed ThreadGroup.
5107 *
5108 * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
5109 * thr = Thread.new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
5110 * tg = ThreadGroup.new #=> #<ThreadGroup:0x402752d4>
5111 * tg.add thr
5112 * #=> ThreadError: can't move from the enclosed thread group
5113 */
5114
5115static VALUE
5116thgroup_enclose(VALUE group)
5117{
5118 struct thgroup *data;
5119
5120 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
5121 data->enclosed = 1;
5122
5123 return group;
5124}
5125
5126
5127/*
5128 * call-seq:
5129 * thgrp.enclosed? -> true or false
5130 *
5131 * Returns +true+ if the +thgrp+ is enclosed. See also ThreadGroup#enclose.
5132 */
5133
5134static VALUE
5135thgroup_enclosed_p(VALUE group)
5136{
5137 struct thgroup *data;
5138
5139 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
5140 return RBOOL(data->enclosed);
5141}
5142
5143
5144/*
5145 * call-seq:
5146 * thgrp.add(thread) -> thgrp
5147 *
5148 * Adds the given +thread+ to this group, removing it from any other
5149 * group to which it may have previously been a member.
5150 *
5151 * puts "Initial group is #{ThreadGroup::Default.list}"
5152 * tg = ThreadGroup.new
5153 * t1 = Thread.new { sleep }
5154 * t2 = Thread.new { sleep }
5155 * puts "t1 is #{t1}"
5156 * puts "t2 is #{t2}"
5157 * tg.add(t1)
5158 * puts "Initial group now #{ThreadGroup::Default.list}"
5159 * puts "tg group now #{tg.list}"
5160 *
5161 * This will produce:
5162 *
5163 * Initial group is #<Thread:0x401bdf4c>
5164 * t1 is #<Thread:0x401b3c90>
5165 * t2 is #<Thread:0x401b3c18>
5166 * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
5167 * tg group now #<Thread:0x401b3c90>
5168 */
5169
5170static VALUE
5171thgroup_add(VALUE group, VALUE thread)
5172{
5173 rb_thread_t *target_th = rb_thread_ptr(thread);
5174 struct thgroup *data;
5175
5176 if (OBJ_FROZEN(group)) {
5177 rb_raise(rb_eThreadError, "can't move to the frozen thread group");
5178 }
5179 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
5180 if (data->enclosed) {
5181 rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
5182 }
5183
5184 if (OBJ_FROZEN(target_th->thgroup)) {
5185 rb_raise(rb_eThreadError, "can't move from the frozen thread group");
5186 }
5187 TypedData_Get_Struct(target_th->thgroup, struct thgroup, &thgroup_data_type, data);
5188 if (data->enclosed) {
5189 rb_raise(rb_eThreadError,
5190 "can't move from the enclosed thread group");
5191 }
5192
5193 target_th->thgroup = group;
5194 return group;
5195}
5196
5197/*
5198 * Document-class: ThreadShield
5199 */
/* GC mark function: the wrapped DATA pointer is itself a Ruby VALUE
 * (the shield's mutex, see thread_shield_alloc). */
static void
thread_shield_mark(void *ptr)
{
    rb_gc_mark((VALUE)ptr);
}
5205
/* TypedData bindings for ThreadShield; only marking is needed since the
 * wrapped pointer is a mutex VALUE, not malloc'd memory. */
static const rb_data_type_t thread_shield_data_type = {
    "thread_shield",
    {thread_shield_mark, 0, 0,},
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY
};
5211
static VALUE
thread_shield_alloc(VALUE klass)
{
    /* wrap a freshly allocated Mutex object as the shield's payload */
    return TypedData_Wrap_Struct(klass, &thread_shield_data_type, (void *)mutex_alloc(0));
}
5217
5218#define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
5219#define THREAD_SHIELD_WAITING_MASK (((FL_USER19-1)&~(FL_USER0-1))|FL_USER19)
5220#define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
5221#define THREAD_SHIELD_WAITING_MAX (THREAD_SHIELD_WAITING_MASK>>THREAD_SHIELD_WAITING_SHIFT)
5222STATIC_ASSERT(THREAD_SHIELD_WAITING_MAX, THREAD_SHIELD_WAITING_MAX <= UINT_MAX);
/* Read the shield's waiter count, packed into its FL_USER flag bits
 * (see THREAD_SHIELD_WAITING_MASK/SHIFT above). */
static inline unsigned int
rb_thread_shield_waiting(VALUE b)
{
    return ((RBASIC(b)->flags&THREAD_SHIELD_WAITING_MASK)>>THREAD_SHIELD_WAITING_SHIFT);
}
5228
5229static inline void
5230rb_thread_shield_waiting_inc(VALUE b)
5231{
5232 unsigned int w = rb_thread_shield_waiting(b);
5233 w++;
5234 if (w > THREAD_SHIELD_WAITING_MAX)
5235 rb_raise(rb_eRuntimeError, "waiting count overflow");
5236 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
5237 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
5238}
5239
5240static inline void
5241rb_thread_shield_waiting_dec(VALUE b)
5242{
5243 unsigned int w = rb_thread_shield_waiting(b);
5244 if (!w) rb_raise(rb_eRuntimeError, "waiting count underflow");
5245 w--;
5246 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
5247 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
5248}
5249
VALUE
rb_thread_shield_new(void)
{
    VALUE thread_shield = thread_shield_alloc(rb_cThreadShield);
    /* take the mutex immediately so other fibers block in
     * rb_thread_shield_wait() until release/destroy */
    rb_mutex_lock((VALUE)DATA_PTR(thread_shield));
    return thread_shield;
}
5257
5258bool
5259rb_thread_shield_owned(VALUE self)
5260{
5261 VALUE mutex = GetThreadShieldPtr(self);
5262 if (!mutex) return false;
5263
5264 rb_mutex_t *m = mutex_ptr(mutex);
5265
5266 return m->fiber_serial == rb_fiber_serial(GET_EC()->fiber_ptr);
5267}
5268
5269/*
5270 * Wait a thread shield.
5271 *
5272 * Returns
5273 * true: acquired the thread shield
5274 * false: the thread shield was destroyed and no other threads waiting
5275 * nil: the thread shield was destroyed but still in use
5276 */
VALUE
rb_thread_shield_wait(VALUE self)
{
    VALUE mutex = GetThreadShieldPtr(self);
    rb_mutex_t *m;

    if (!mutex) return Qfalse; /* already destroyed */
    m = mutex_ptr(mutex);
    /* the owning fiber must not wait on its own shield */
    if (m->fiber_serial == rb_fiber_serial(GET_EC()->fiber_ptr)) return Qnil;
    rb_thread_shield_waiting_inc(self);
    rb_mutex_lock(mutex);
    rb_thread_shield_waiting_dec(self);
    if (DATA_PTR(self)) return Qtrue; /* shield still alive: acquired */
    /* destroyed while we waited: release and report remaining waiters */
    rb_mutex_unlock(mutex);
    return rb_thread_shield_waiting(self) > 0 ? Qnil : Qfalse;
}
5293
5294static VALUE
5295thread_shield_get_mutex(VALUE self)
5296{
5297 VALUE mutex = GetThreadShieldPtr(self);
5298 if (!mutex)
5299 rb_raise(rb_eThreadError, "destroyed thread shield - %p", (void *)self);
5300 return mutex;
5301}
5302
5303/*
5304 * Release a thread shield, and return true if it has waiting threads.
5305 */
VALUE
rb_thread_shield_release(VALUE self)
{
    VALUE mutex = thread_shield_get_mutex(self);
    rb_mutex_unlock(mutex);
    /* report whether anyone is still blocked on the shield */
    return RBOOL(rb_thread_shield_waiting(self) > 0);
}
5313
5314/*
5315 * Release and destroy a thread shield, and return true if it has waiting threads.
5316 */
VALUE
rb_thread_shield_destroy(VALUE self)
{
    VALUE mutex = thread_shield_get_mutex(self);
    /* clearing DATA_PTR marks the shield destroyed for waiters in
     * rb_thread_shield_wait() */
    DATA_PTR(self) = 0;
    rb_mutex_unlock(mutex);
    return RBOOL(rb_thread_shield_waiting(self) > 0);
}
5325
/* Per-execution-context hash used by the recursion-detection machinery. */
static VALUE
threadptr_recursive_hash(rb_thread_t *th)
{
    return th->ec->local_storage_recursive_hash;
}
5331
/* Install a (possibly fresh) recursion-detection hash on the context. */
static void
threadptr_recursive_hash_set(rb_thread_t *th, VALUE hash)
{
    th->ec->local_storage_recursive_hash = hash;
}
5337
5339
5340/*
5341 * Returns the current "recursive list" used to detect recursion.
5342 * This list is a hash table, unique for the current thread and for
5343 * the current __callee__.
5344 */
5345
5346static VALUE
5347recursive_list_access(VALUE sym)
5348{
5349 rb_thread_t *th = GET_THREAD();
5350 VALUE hash = threadptr_recursive_hash(th);
5351 VALUE list;
5352 if (NIL_P(hash) || !RB_TYPE_P(hash, T_HASH)) {
5353 hash = rb_ident_hash_new();
5354 threadptr_recursive_hash_set(th, hash);
5355 list = Qnil;
5356 }
5357 else {
5358 list = rb_hash_aref(hash, sym);
5359 }
5360 if (NIL_P(list) || !RB_TYPE_P(list, T_HASH)) {
5361 list = rb_ident_hash_new();
5362 rb_hash_aset(hash, sym, list);
5363 }
5364 return list;
5365}
5366
5367/*
5368 * Returns true if and only if obj (or the pair <obj, paired_obj>) is already
5369 * in the recursion list.
5370 * Assumes the recursion list is valid.
5371 */
5372
static bool
recursive_check(VALUE list, VALUE obj, VALUE paired_obj_id)
{
#if SIZEOF_LONG == SIZEOF_VOIDP
  #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
  #define OBJ_ID_EQL(obj_id, other) (RB_BIGNUM_TYPE_P((obj_id)) ? \
    rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
#endif

    VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
    if (UNDEF_P(pair_list))
        return false;
    if (paired_obj_id) {
        if (!RB_TYPE_P(pair_list, T_HASH)) {
            /* a single stored pair id: compare directly (may be a Bignum) */
            if (!OBJ_ID_EQL(paired_obj_id, pair_list))
                return false;
        }
        else {
            /* multiple pair ids stored as a hash keyed by id */
            if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
                return false;
        }
    }
    return true;
}
5398
5399/*
5400 * Pushes obj (or the pair <obj, paired_obj>) in the recursion list.
5401 * For a single obj, it sets list[obj] to Qtrue.
5402 * For a pair, it sets list[obj] to paired_obj_id if possible,
5403 * otherwise list[obj] becomes a hash like:
5404 * {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
5405 * Assumes the recursion list is valid.
5406 */
5407
5408static void
5409recursive_push(VALUE list, VALUE obj, VALUE paired_obj)
5410{
5411 VALUE pair_list;
5412
5413 if (!paired_obj) {
5414 rb_hash_aset(list, obj, Qtrue);
5415 }
5416 else if (UNDEF_P(pair_list = rb_hash_lookup2(list, obj, Qundef))) {
5417 rb_hash_aset(list, obj, paired_obj);
5418 }
5419 else {
5420 if (!RB_TYPE_P(pair_list, T_HASH)){
5421 VALUE other_paired_obj = pair_list;
5422 pair_list = rb_hash_new();
5423 rb_hash_aset(pair_list, other_paired_obj, Qtrue);
5424 rb_hash_aset(list, obj, pair_list);
5425 }
5426 rb_hash_aset(pair_list, paired_obj, Qtrue);
5427 }
5428}
5429
5430/*
5431 * Pops obj (or the pair <obj, paired_obj>) from the recursion list.
5432 * For a pair, if list[obj] is a hash, then paired_obj_id is
5433 * removed from the hash and no attempt is made to simplify
5434 * list[obj] from {only_one_paired_id => true} to only_one_paired_id
5435 * Assumes the recursion list is valid.
5436 */
5437
5438static int
5439recursive_pop(VALUE list, VALUE obj, VALUE paired_obj)
5440{
5441 if (paired_obj) {
5442 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5443 if (UNDEF_P(pair_list)) {
5444 return 0;
5445 }
5446 if (RB_TYPE_P(pair_list, T_HASH)) {
5447 rb_hash_delete_entry(pair_list, paired_obj);
5448 if (!RHASH_EMPTY_P(pair_list)) {
5449 return 1; /* keep hash until is empty */
5450 }
5451 }
5452 }
5453 rb_hash_delete_entry(list, obj);
5454 return 1;
5455}
/* Arguments threaded through exec_recursive_i() via rb_catch_protect(). */
struct exec_recursive_params {
    VALUE (*func) (VALUE, VALUE, int);
    VALUE list;    /* recursion list for the current method */
    VALUE obj;
    VALUE pairid;  /* 0, or object id of the paired object */
    VALUE arg;
};
5464
/* rb_catch_protect() body for the outermost exec_recursive() call. */
static VALUE
exec_recursive_i(RB_BLOCK_CALL_FUNC_ARGLIST(tag, data))
{
    struct exec_recursive_params *p = (void *)data;
    return (*p->func)(p->obj, p->arg, FALSE);
}
5471
5472/*
5473 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5474 * current method is called recursively on obj, or on the pair <obj, pairid>
5475 * If outer is 0, then the innermost func will be called with recursive set
5476 * to true, otherwise the outermost func will be called. In the latter case,
5477 * all inner func are short-circuited by throw.
5478 * Implementation details: the value thrown is the recursive list which is
5479 * proper to the current method and unlikely to be caught anywhere else.
5480 * list[recursive_key] is used as a flag for the outermost call.
5481 */
5482
static VALUE
exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer, ID mid)
{
    VALUE result = Qundef;
    const VALUE sym = mid ? ID2SYM(mid) : ID2SYM(idNULL);
    struct exec_recursive_params p;
    int outermost;
    p.list = recursive_list_access(sym);
    p.obj = obj;
    p.pairid = pairid;
    p.arg = arg;
    /* list[recursive_key] flags an already-active outermost call */
    outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);

    if (recursive_check(p.list, p.obj, pairid)) {
        /* recursion detected: short-circuit to the outermost call by throw */
        if (outer && !outermost) {
            rb_throw_obj(p.list, p.list);
        }
        return (*func)(obj, arg, TRUE);
    }
    else {
        enum ruby_tag_type state;

        p.func = func;

        if (outermost) {
            recursive_push(p.list, ID2SYM(recursive_key), 0);
            recursive_push(p.list, p.obj, p.pairid);
            result = rb_catch_protect(p.list, exec_recursive_i, (VALUE)&p, &state);
            if (!recursive_pop(p.list, p.obj, p.pairid)) goto invalid;
            if (!recursive_pop(p.list, ID2SYM(recursive_key), 0)) goto invalid;
            if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
            /* an inner frame threw the list back to us: recursion happened */
            if (result == p.list) {
                result = (*func)(obj, arg, TRUE);
            }
        }
        else {
            volatile VALUE ret = Qundef;
            recursive_push(p.list, p.obj, p.pairid);
            EC_PUSH_TAG(GET_EC());
            if ((state = EC_EXEC_TAG()) == TAG_NONE) {
                ret = (*func)(obj, arg, FALSE);
            }
            EC_POP_TAG();
            /* always pop our entry, even when func raised */
            if (!recursive_pop(p.list, p.obj, p.pairid)) {
                goto invalid;
            }
            if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
            result = ret;
        }
    }
    /* volatile read keeps p from being clobbered across EC_EXEC_TAG (setjmp) */
    *(volatile struct exec_recursive_params *)&p;
    return result;

  invalid:
    rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list "
             "for %+"PRIsVALUE" in %+"PRIsVALUE,
             sym, rb_thread_current());
}
5542
5543/*
5544 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5545 * current method is called recursively on obj
5546 */
5547
VALUE
rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
{
    /* innermost-wins, keyed on obj alone and the calling method */
    return exec_recursive(func, obj, 0, arg, 0, rb_frame_last_func());
}
5553
5554/*
5555 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5556 * current method is called recursively on the ordered pair <obj, paired_obj>
5557 */
5558
VALUE
rb_exec_recursive_paired(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
{
    /* innermost-wins, keyed on the ordered pair <obj, paired_obj> */
    return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 0, rb_frame_last_func());
}
5564
5565/*
5566 * If recursion is detected on the current method and obj, the outermost
5567 * func will be called with (obj, arg, true). All inner func will be
5568 * short-circuited using throw.
5569 */
5570
VALUE
rb_exec_recursive_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
{
    /* outermost-wins variant: inner frames are short-circuited via throw */
    return exec_recursive(func, obj, 0, arg, 1, rb_frame_last_func());
}
5576
VALUE
rb_exec_recursive_outer_mid(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg, ID mid)
{
    /* like rb_exec_recursive_outer, but with an explicit method key */
    return exec_recursive(func, obj, 0, arg, 1, mid);
}
5582
5583/*
5584 * If recursion is detected on the current method, obj and paired_obj,
5585 * the outermost func will be called with (obj, arg, true). All inner
5586 * func will be short-circuited using throw.
5587 */
5588
VALUE
rb_exec_recursive_paired_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
{
    /* outermost-wins, keyed on the ordered pair <obj, paired_obj> */
    return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 1, rb_frame_last_func());
}
5594
5595/*
5596 * call-seq:
5597 * thread.backtrace -> array or nil
5598 *
5599 * Returns the current backtrace of the target thread.
5600 *
5601 */
5602
static VALUE
rb_thread_backtrace_m(int argc, VALUE *argv, VALUE thval)
{
    /* Thread#backtrace: delegate to the VM backtrace builder */
    return rb_vm_thread_backtrace(argc, argv, thval);
}
5608
5609/* call-seq:
5610 * thread.backtrace_locations(*args) -> array or nil
5611 *
5612 * Returns the execution stack for the target thread---an array containing
5613 * backtrace location objects.
5614 *
5615 * See Thread::Backtrace::Location for more information.
5616 *
5617 * This method behaves similarly to Kernel#caller_locations except it applies
5618 * to a specific thread.
5619 */
static VALUE
rb_thread_backtrace_locations_m(int argc, VALUE *argv, VALUE thval)
{
    /* Thread#backtrace_locations: delegate to the VM backtrace builder */
    return rb_vm_thread_backtrace_locations(argc, argv, thval);
}
5625
/* Boot-time setup: initialize the VM workqueue lock and the running
 * thread's interrupt lock before they can be contended. */
void
Init_Thread_Mutex(void)
{
    rb_thread_t *th = GET_THREAD();

    rb_native_mutex_initialize(&th->vm->workqueue_lock);
    rb_native_mutex_initialize(&th->interrupt_lock);
}
5634
5635/*
5636 * Document-class: ThreadError
5637 *
5638 * Raised when an invalid operation is attempted on a thread.
5639 *
5640 * For example, when no other thread has been started:
5641 *
5642 * Thread.stop
5643 *
5644 * This will raises the following exception:
5645 *
5646 * ThreadError: stopping only thread
5647 * note: use sleep to stop forever
5648 */
5649
5650void
5651Init_Thread(void)
5652{
5653 rb_thread_t *th = GET_THREAD();
5654
5655 sym_never = ID2SYM(rb_intern_const("never"));
5656 sym_immediate = ID2SYM(rb_intern_const("immediate"));
5657 sym_on_blocking = ID2SYM(rb_intern_const("on_blocking"));
5658
5659 rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
5660 rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
5661 rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
5662 rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
5663 rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
5664 rb_define_singleton_method(rb_cThread, "stop", thread_stop, 0);
5665 rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
5666 rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
5667 rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
5668 rb_define_singleton_method(rb_cThread, "list", thread_list, 0);
5669 rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
5670 rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
5671 rb_define_singleton_method(rb_cThread, "report_on_exception", rb_thread_s_report_exc, 0);
5672 rb_define_singleton_method(rb_cThread, "report_on_exception=", rb_thread_s_report_exc_set, 1);
5673 rb_define_singleton_method(rb_cThread, "ignore_deadlock", rb_thread_s_ignore_deadlock, 0);
5674 rb_define_singleton_method(rb_cThread, "ignore_deadlock=", rb_thread_s_ignore_deadlock_set, 1);
5675 rb_define_singleton_method(rb_cThread, "handle_interrupt", rb_thread_s_handle_interrupt, 1);
5676 rb_define_singleton_method(rb_cThread, "pending_interrupt?", rb_thread_s_pending_interrupt_p, -1);
5677 rb_define_method(rb_cThread, "pending_interrupt?", rb_thread_pending_interrupt_p, -1);
5678
5679 rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
5680 rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
5681 rb_define_method(rb_cThread, "join", thread_join_m, -1);
5682 rb_define_method(rb_cThread, "value", thread_value, 0);
5683 rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
5684 rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
5685 rb_define_method(rb_cThread, "exit", rb_thread_kill, 0);
5686 rb_define_method(rb_cThread, "run", rb_thread_run, 0);
5687 rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
5688 rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
5689 rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
5690 rb_define_method(rb_cThread, "fetch", rb_thread_fetch, -1);
5691 rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
5692 rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
5693 rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
5694 rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
5695 rb_define_method(rb_cThread, "status", rb_thread_status, 0);
5696 rb_define_method(rb_cThread, "thread_variable_get", rb_thread_variable_get, 1);
5697 rb_define_method(rb_cThread, "thread_variable_set", rb_thread_variable_set, 2);
5698 rb_define_method(rb_cThread, "thread_variables", rb_thread_variables, 0);
5699 rb_define_method(rb_cThread, "thread_variable?", rb_thread_variable_p, 1);
5700 rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
5701 rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
5702 rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
5703 rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
5704 rb_define_method(rb_cThread, "report_on_exception", rb_thread_report_exc, 0);
5705 rb_define_method(rb_cThread, "report_on_exception=", rb_thread_report_exc_set, 1);
5706 rb_define_method(rb_cThread, "group", rb_thread_group, 0);
5707 rb_define_method(rb_cThread, "backtrace", rb_thread_backtrace_m, -1);
5708 rb_define_method(rb_cThread, "backtrace_locations", rb_thread_backtrace_locations_m, -1);
5709
5710 rb_define_method(rb_cThread, "name", rb_thread_getname, 0);
5711 rb_define_method(rb_cThread, "name=", rb_thread_setname, 1);
5712 rb_define_method(rb_cThread, "native_thread_id", rb_thread_native_thread_id, 0);
5713 rb_define_method(rb_cThread, "to_s", rb_thread_to_s, 0);
5714 rb_define_alias(rb_cThread, "inspect", "to_s");
5715
5716 rb_vm_register_special_exception(ruby_error_stream_closed, rb_eIOError,
5717 "stream closed in another thread");
5718
5719 cThGroup = rb_define_class("ThreadGroup", rb_cObject);
5720 rb_define_alloc_func(cThGroup, thgroup_s_alloc);
5721 rb_define_method(cThGroup, "list", thgroup_list, 0);
5722 rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
5723 rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
5724 rb_define_method(cThGroup, "add", thgroup_add, 1);
5725
5726 const char * ptr = getenv("RUBY_THREAD_TIMESLICE");
5727
5728 if (ptr) {
5729 long quantum = strtol(ptr, NULL, 0);
5730 if (quantum > 0 && !(SIZEOF_LONG > 4 && quantum > UINT32_MAX)) {
5731 thread_default_quantum_ms = (uint32_t)quantum;
5732 }
5733 else if (0) {
5734 fprintf(stderr, "Ignored RUBY_THREAD_TIMESLICE=%s\n", ptr);
5735 }
5736 }
5737
5738 {
5739 th->thgroup = th->ractor->thgroup_default = rb_obj_alloc(cThGroup);
5740 rb_define_const(cThGroup, "Default", th->thgroup);
5741 }
5742
5744
5745 /* init thread core */
5746 {
5747 /* main thread setting */
5748 {
5749 /* acquire global vm lock */
5750#ifdef HAVE_PTHREAD_NP_H
5751 VM_ASSERT(TH_SCHED(th)->running == th);
5752#endif
5753 // thread_sched_to_running() should not be called because
5754 // it assumes blocked by thread_sched_to_waiting().
5755 // thread_sched_to_running(sched, th);
5756
5757 th->pending_interrupt_queue = rb_ary_hidden_new(0);
5758 th->pending_interrupt_queue_checked = 0;
5759 th->pending_interrupt_mask_stack = rb_ary_hidden_new(0);
5760 }
5761 }
5762
5763 rb_thread_create_timer_thread();
5764
5765 Init_thread_sync();
5766
5767 // TODO: Suppress unused function warning for now
5768 // if (0) rb_thread_sched_destroy(NULL);
5769}
5770
5773{
5774 rb_thread_t *th = ruby_thread_from_native();
5775
5776 return th != 0;
5777}
5778
/* Stringifies a thread's native thread id for debug output. When the
 * native id is a non-scalar type (NON_SCALAR_THREAD_ID) it cannot be
 * shown as a pointer, so NULL is printed instead. */
#ifdef NON_SCALAR_THREAD_ID
  #define thread_id_str(th) (NULL)
#else
  #define thread_id_str(th) ((void *)(uintptr_t)(th)->nt->thread_id)
#endif
5784
/* Appends a human-readable dump of every thread in ractor r to msg:
 * thread/sleeper counts, each thread's pointers and interrupt flag, the
 * mutex it is blocked on (if any), the threads joining it, and its Ruby
 * backtrace. Used to augment the fatal deadlock message built by
 * rb_check_deadlock(). */
static void
debug_deadlock_check(rb_ractor_t *r, VALUE msg)
{
    rb_thread_t *th = 0;
    VALUE sep = rb_str_new_cstr("\n ");

    rb_str_catf(msg, "\n%d threads, %d sleeps current:%p main thread:%p\n",
                rb_ractor_living_thread_num(r), rb_ractor_sleeper_thread_num(r),
                (void *)GET_THREAD(), (void *)r->threads.main);

    ccan_list_for_each(&r->threads.set, th, lt_node) {
        /* th->nt may be NULL (no native thread attached), hence "N/A" */
        rb_str_catf(msg, "* %+"PRIsVALUE"\n rb_thread_t:%p "
                    "native:%p int:%u",
                    th->self, (void *)th, th->nt ? thread_id_str(th) : "N/A", th->ec->interrupt_flag);

        if (th->locking_mutex) {
            rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
            rb_str_catf(msg, " mutex:%llu cond:%"PRIuSIZE,
                        (unsigned long long)mutex->fiber_serial, rb_mutex_num_waiting(mutex));
        }

        {
            /* list every thread currently join()ing this one */
            struct rb_waiting_list *list = th->join_list;
            while (list) {
                rb_str_catf(msg, "\n depended by: tb_thread_id:%p", (void *)list->thread);
                list = list->next;
            }
        }
        rb_str_catf(msg, "\n ");
        rb_str_concat(msg, rb_ary_join(rb_ec_backtrace_str_ary(th->ec, RUBY_BACKTRACE_START, RUBY_ALL_BACKTRACE_LINES), sep));
        rb_str_catf(msg, "\n");
    }
}
5818
/* Raises a fatal "deadlock" error on the ractor's main thread when every
 * living thread in r is sleeping forever with no way to be woken up.
 * No-op when deadlock detection is disabled or any thread is runnable. */
static void
rb_check_deadlock(rb_ractor_t *r)
{
    if (GET_THREAD()->vm->thread_ignore_deadlock) return;

#ifdef RUBY_THREAD_PTHREAD_H
    /* threads queued in the scheduler are runnable: not a deadlock */
    if (r->threads.sched.readyq_cnt > 0) return;
#endif

    int sleeper_num = rb_ractor_sleeper_thread_num(r);
    int ltnum = rb_ractor_living_thread_num(r);

    if (ltnum > sleeper_num) return;  /* somebody can still run */
    if (ltnum < sleeper_num) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");

    int found = 0;
    rb_thread_t *th = NULL;

    /* look for at least one thread that could still make progress */
    ccan_list_for_each(&r->threads.set, th, lt_node) {
        if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th->ec)) {
            found = 1;  /* not sleeping forever, or has a pending interrupt */
        }
        else if (th->locking_mutex) {
            rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
            /* NOTE(review): treats the thread as potentially runnable when it
             * already owns the mutex it waits on, or when the mutex is
             * unowned but still has waiters. */
            if (mutex->fiber_serial == rb_fiber_serial(th->ec->fiber_ptr) || (!mutex->fiber_serial && !ccan_list_empty(&mutex->waitq))) {
                found = 1;
            }
        }
        if (found)
            break;
    }

    if (!found) {
        VALUE argv[2];
        argv[0] = rb_eFatal;
        argv[1] = rb_str_new2("No live threads left. Deadlock?");
        debug_deadlock_check(r, argv[1]);
        /* undo our own sleeper count before raising on the main thread */
        rb_ractor_sleeper_threads_dec(GET_RACTOR());
        rb_threadptr_raise(r->threads.main, 2, argv);
    }
}
5860
/* RUBY_EVENT_COVERAGE_LINE hook: bumps the per-line execution counter of
 * the currently executing iseq's coverage record. In oneshot mode the
 * line's event flag is cleared so the event fires only once, and the
 * 1-based line number is appended instead of keeping a counter. */
static void
update_line_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
{
    const rb_control_frame_t *cfp = GET_EC()->cfp;
    VALUE coverage = rb_iseq_coverage(cfp->iseq);
    /* a hidden (classless) T_ARRAY marks a live coverage record */
    if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
        VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
        if (lines) {
            long line = rb_sourceline() - 1;  /* 0-based index into `lines` */
            VM_ASSERT(line >= 0);
            long count;
            VALUE num;
            void rb_iseq_clear_event_flags(const rb_iseq_t *iseq, size_t pos, rb_event_flag_t reset);
            if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
                /* disarm this instruction's line event, then record the line once */
                rb_iseq_clear_event_flags(cfp->iseq, cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1, RUBY_EVENT_COVERAGE_LINE);
                rb_ary_push(lines, LONG2FIX(line + 1));
                return;
            }
            if (line >= RARRAY_LEN(lines)) { /* no longer tracked */
                return;
            }
            num = RARRAY_AREF(lines, line);
            if (!FIXNUM_P(num)) return;  /* non-Fixnum slot: line not counted */
            count = FIX2LONG(num) + 1;
            if (POSFIXABLE(count)) {  /* saturate instead of overflowing Fixnum */
                RARRAY_ASET(lines, line, LONG2FIX(count));
            }
        }
    }
}
5891
/* RUBY_EVENT_COVERAGE_BRANCH hook: increments the branch counter whose
 * index is looked up from the current pc via the iseq's
 * pc-to-branch-index table. The layout of `branches` is documented in
 * rb_default_coverage(). */
static void
update_branch_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
{
    const rb_control_frame_t *cfp = GET_EC()->cfp;
    VALUE coverage = rb_iseq_coverage(cfp->iseq);
    /* a hidden (classless) T_ARRAY marks a live coverage record */
    if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
        VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
        if (branches) {
            long pc = cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1;
            long idx = FIX2INT(RARRAY_AREF(ISEQ_PC2BRANCHINDEX(cfp->iseq), pc)), count;
            VALUE counters = RARRAY_AREF(branches, 1);  /* branches = [structure, counters] */
            VALUE num = RARRAY_AREF(counters, idx);
            count = FIX2LONG(num) + 1;
            if (POSFIXABLE(count)) {  /* saturate instead of overflowing Fixnum */
                RARRAY_ASET(counters, idx, LONG2FIX(count));
            }
        }
    }
}
5911
/* Resolves a method entry to the entry it was originally defined as,
 * following alias/refinement indirections, and (optionally) fills
 * resolved_location with [path, beg_lineno, beg_column, end_lineno,
 * end_column] of its definition. Returns the resolved entry, or NULL when
 * the method has no iseq-backed source location. */
const rb_method_entry_t *
rb_resolve_me_location(const rb_method_entry_t *me, VALUE resolved_location[5])
{
    VALUE path, beg_pos_lineno, beg_pos_column, end_pos_lineno, end_pos_column;

    if (!me->def) return NULL; // negative cme

  retry:
    switch (me->def->type) {
      case VM_METHOD_TYPE_ISEQ: {
        const rb_iseq_t *iseq = me->def->body.iseq.iseqptr;
        rb_iseq_location_t *loc = &ISEQ_BODY(iseq)->location;
        path = rb_iseq_path(iseq);
        beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
        beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
        end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
        end_pos_column = INT2FIX(loc->code_location.end_pos.column);
        break;
      }
      case VM_METHOD_TYPE_BMETHOD: {
        /* a bmethod may wrap an iseq-less proc; bail out if so */
        const rb_iseq_t *iseq = rb_proc_get_iseq(me->def->body.bmethod.proc, 0);
        if (iseq) {
            rb_iseq_location_t *loc;
            rb_iseq_check(iseq);
            path = rb_iseq_path(iseq);
            loc = &ISEQ_BODY(iseq)->location;
            beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
            beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
            end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
            end_pos_column = INT2FIX(loc->code_location.end_pos.column);
            break;
        }
        return NULL;
      }
      case VM_METHOD_TYPE_ALIAS:
        /* follow the alias to the method it renames */
        me = me->def->body.alias.original_me;
        goto retry;
      case VM_METHOD_TYPE_REFINED:
        me = me->def->body.refined.orig_me;
        if (!me) return NULL;
        goto retry;
      default:
        return NULL;
    }

    /* found */
    if (RB_TYPE_P(path, T_ARRAY)) {
        /* pathobj Array: element 1 (presumably realpath) is used */
        path = rb_ary_entry(path, 1);
        if (!RB_TYPE_P(path, T_STRING)) return NULL; /* just for the case... */
    }
    if (resolved_location) {
        resolved_location[0] = path;
        resolved_location[1] = beg_pos_lineno;
        resolved_location[2] = beg_pos_column;
        resolved_location[3] = end_pos_lineno;
        resolved_location[4] = end_pos_column;
    }
    return me;
}
5971
5972static void
5973update_method_coverage(VALUE me2counter, rb_trace_arg_t *trace_arg)
5974{
5975 const rb_control_frame_t *cfp = GET_EC()->cfp;
5976 const rb_callable_method_entry_t *cme = rb_vm_frame_method_entry(cfp);
5977 const rb_method_entry_t *me = (const rb_method_entry_t *)cme;
5978 VALUE rcount;
5979 long count;
5980
5981 me = rb_resolve_me_location(me, 0);
5982 if (!me) return;
5983
5984 rcount = rb_hash_aref(me2counter, (VALUE) me);
5985 count = FIXNUM_P(rcount) ? FIX2LONG(rcount) + 1 : 1;
5986 if (POSFIXABLE(count)) {
5987 rb_hash_aset(me2counter, (VALUE) me, LONG2FIX(count));
5988 }
5989}
5990
5991VALUE
5992rb_get_coverages(void)
5993{
5994 return GET_VM()->coverages;
5995}
5996
5997int
5998rb_get_coverage_mode(void)
5999{
6000 return GET_VM()->coverage_mode;
6001}
6002
6003void
6004rb_set_coverages(VALUE coverages, int mode, VALUE me2counter)
6005{
6006 GET_VM()->coverages = coverages;
6007 GET_VM()->me2counter = me2counter;
6008 GET_VM()->coverage_mode = mode;
6009}
6010
6011void
6012rb_resume_coverages(void)
6013{
6014 int mode = GET_VM()->coverage_mode;
6015 VALUE me2counter = GET_VM()->me2counter;
6016 rb_add_event_hook2((rb_event_hook_func_t) update_line_coverage, RUBY_EVENT_COVERAGE_LINE, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
6017 if (mode & COVERAGE_TARGET_BRANCHES) {
6018 rb_add_event_hook2((rb_event_hook_func_t) update_branch_coverage, RUBY_EVENT_COVERAGE_BRANCH, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
6019 }
6020 if (mode & COVERAGE_TARGET_METHODS) {
6021 rb_add_event_hook2((rb_event_hook_func_t) update_method_coverage, RUBY_EVENT_CALL, me2counter, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
6022 }
6023}
6024
6025void
6026rb_suspend_coverages(void)
6027{
6028 rb_remove_event_hook((rb_event_hook_func_t) update_line_coverage);
6029 if (GET_VM()->coverage_mode & COVERAGE_TARGET_BRANCHES) {
6030 rb_remove_event_hook((rb_event_hook_func_t) update_branch_coverage);
6031 }
6032 if (GET_VM()->coverage_mode & COVERAGE_TARGET_METHODS) {
6033 rb_remove_event_hook((rb_event_hook_func_t) update_method_coverage);
6034 }
6035}
6036
6037/* Make coverage arrays empty so old covered files are no longer tracked. */
6038void
6039rb_reset_coverages(void)
6040{
6041 rb_clear_coverages();
6042 rb_iseq_remove_coverage_all();
6043 GET_VM()->coverages = Qfalse;
6044}
6045
/* Builds the per-file coverage record: a hidden 3-slot Array holding line
 * counters (COVERAGE_INDEX_LINES) and branch data
 * (COVERAGE_INDEX_BRANCHES), each enabled according to the VM coverage
 * mode; disabled targets get Qfalse. n sizes the line counter array. */
VALUE
rb_default_coverage(int n)
{
    VALUE coverage = rb_ary_hidden_new_fill(3);
    VALUE lines = Qfalse, branches = Qfalse;
    int mode = GET_VM()->coverage_mode;

    if (mode & COVERAGE_TARGET_LINES) {
        /* n > 0: one counter slot per line; otherwise start empty */
        lines = n > 0 ? rb_ary_hidden_new_fill(n) : rb_ary_hidden_new(0);
    }
    RARRAY_ASET(coverage, COVERAGE_INDEX_LINES, lines);

    if (mode & COVERAGE_TARGET_BRANCHES) {
        branches = rb_ary_hidden_new_fill(2);
        /* internal data structures for branch coverage:
         *
         * { branch base node =>
         *   [base_type, base_first_lineno, base_first_column, base_last_lineno, base_last_column, {
         *     branch target id =>
         *       [target_type, target_first_lineno, target_first_column, target_last_lineno, target_last_column, target_counter_index],
         *     ...
         *   }],
         *   ...
         * }
         *
         * Example:
         * { NODE_CASE =>
         *   [1, 0, 4, 3, {
         *     NODE_WHEN => [2, 8, 2, 9, 0],
         *     NODE_WHEN => [3, 8, 3, 9, 1],
         *     ...
         *   }],
         *   ...
         * }
         */
        VALUE structure = rb_hash_new();
        rb_obj_hide(structure);  /* hide from ObjectSpace like the arrays */
        RARRAY_ASET(branches, 0, structure);
        /* branch execution counters */
        RARRAY_ASET(branches, 1, rb_ary_hidden_new(0));
    }
    RARRAY_ASET(coverage, COVERAGE_INDEX_BRANCHES, branches);

    return coverage;
}
6091
6092static VALUE
6093uninterruptible_exit(VALUE v)
6094{
6095 rb_thread_t *cur_th = GET_THREAD();
6096 rb_ary_pop(cur_th->pending_interrupt_mask_stack);
6097
6098 cur_th->pending_interrupt_queue_checked = 0;
6099 if (!rb_threadptr_pending_interrupt_empty_p(cur_th)) {
6100 RUBY_VM_SET_INTERRUPT(cur_th->ec);
6101 }
6102 return Qnil;
6103}
6104
6105VALUE
6106rb_uninterruptible(VALUE (*b_proc)(VALUE), VALUE data)
6107{
6108 VALUE interrupt_mask = rb_ident_hash_new();
6109 rb_thread_t *cur_th = GET_THREAD();
6110
6111 rb_hash_aset(interrupt_mask, rb_cObject, sym_never);
6112 OBJ_FREEZE(interrupt_mask);
6113 rb_ary_push(cur_th->pending_interrupt_mask_stack, interrupt_mask);
6114
6115 VALUE ret = rb_ensure(b_proc, data, uninterruptible_exit, Qnil);
6116
6117 RUBY_VM_CHECK_INTS(cur_th->ec);
6118 return ret;
6119}
6120
/* Allocates th->specific_storage (one slot per possible key); a no-op
 * while no internal-thread-specific key has been created yet. */
static void
thread_specific_storage_alloc(rb_thread_t *th)
{
    VM_ASSERT(th->specific_storage == NULL);

    if (UNLIKELY(specific_key_count > 0)) {
        th->specific_storage = ZALLOC_N(void *, RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
    }
}
6130
6131rb_internal_thread_specific_key_t
6133{
6134 rb_vm_t *vm = GET_VM();
6135
6136 if (specific_key_count == 0 && vm->ractor.cnt > 1) {
6137 rb_raise(rb_eThreadError, "The first rb_internal_thread_specific_key_create() is called with multiple ractors");
6138 }
6139 else if (specific_key_count > RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX) {
6140 rb_raise(rb_eThreadError, "rb_internal_thread_specific_key_create() is called more than %d times", RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
6141 }
6142 else {
6143 rb_internal_thread_specific_key_t key = specific_key_count++;
6144
6145 if (key == 0) {
6146 // allocate
6147 rb_ractor_t *cr = GET_RACTOR();
6148 rb_thread_t *th;
6149
6150 ccan_list_for_each(&cr->threads.set, th, lt_node) {
6151 thread_specific_storage_alloc(th);
6152 }
6153 }
6154 return key;
6155 }
6156}
6157
6158// async and native thread safe.
6159void *
6160rb_internal_thread_specific_get(VALUE thread_val, rb_internal_thread_specific_key_t key)
6161{
6162 rb_thread_t *th = DATA_PTR(thread_val);
6163
6164 VM_ASSERT(rb_thread_ptr(thread_val) == th);
6165 VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
6166 VM_ASSERT(th->specific_storage);
6167
6168 return th->specific_storage[key];
6169}
6170
6171// async and native thread safe.
6172void
6173rb_internal_thread_specific_set(VALUE thread_val, rb_internal_thread_specific_key_t key, void *data)
6174{
6175 rb_thread_t *th = DATA_PTR(thread_val);
6176
6177 VM_ASSERT(rb_thread_ptr(thread_val) == th);
6178 VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
6179 VM_ASSERT(th->specific_storage);
6180
6181 th->specific_storage[key] = data;
6182}
6183
6184// interrupt_exec
6187 struct ccan_list_node node;
6188
6189 rb_interrupt_exec_func_t *func;
6190 void *data;
6191 enum rb_interrupt_exec_flag flags;
6192};
6193
/* GC-marks the data of queued interrupt_exec tasks whose payload is a
 * Ruby VALUE (rb_interrupt_exec_flag_value_data), so queued tasks keep
 * their payloads alive. */
void
rb_threadptr_interrupt_exec_task_mark(rb_thread_t *th)
{
    struct rb_interrupt_exec_task *task;

    ccan_list_for_each(&th->interrupt_exec_tasks, task, node) {
        if (task->flags & rb_interrupt_exec_flag_value_data) {
            rb_gc_mark((VALUE)task->data);
        }
    }
}
6205
6206// native thread safe
6207// th should be available
6208void
6209rb_threadptr_interrupt_exec(rb_thread_t *th, rb_interrupt_exec_func_t *func, void *data, enum rb_interrupt_exec_flag flags)
6210{
6211 // should not use ALLOC
6213 *task = (struct rb_interrupt_exec_task) {
6214 .flags = flags,
6215 .func = func,
6216 .data = data,
6217 };
6218
6219 rb_native_mutex_lock(&th->interrupt_lock);
6220 {
6221 ccan_list_add_tail(&th->interrupt_exec_tasks, &task->node);
6222 threadptr_set_interrupt_locked(th, true);
6223 }
6224 rb_native_mutex_unlock(&th->interrupt_lock);
6225}
6226
6227static void
6228threadptr_interrupt_exec_exec(rb_thread_t *th)
6229{
6230 while (1) {
6231 struct rb_interrupt_exec_task *task;
6232
6233 rb_native_mutex_lock(&th->interrupt_lock);
6234 {
6235 task = ccan_list_pop(&th->interrupt_exec_tasks, struct rb_interrupt_exec_task, node);
6236 }
6237 rb_native_mutex_unlock(&th->interrupt_lock);
6238
6239 RUBY_DEBUG_LOG("task:%p", task);
6240
6241 if (task) {
6242 if (task->flags & rb_interrupt_exec_flag_new_thread) {
6243 rb_thread_create(task->func, task->data);
6244 }
6245 else {
6246 (*task->func)(task->data);
6247 }
6248 ruby_xfree(task);
6249 }
6250 else {
6251 break;
6252 }
6253 }
6254}
6255
6256static void
6257threadptr_interrupt_exec_cleanup(rb_thread_t *th)
6258{
6259 rb_native_mutex_lock(&th->interrupt_lock);
6260 {
6261 struct rb_interrupt_exec_task *task;
6262
6263 while ((task = ccan_list_pop(&th->interrupt_exec_tasks, struct rb_interrupt_exec_task, node)) != NULL) {
6264 ruby_xfree(task);
6265 }
6266 }
6267 rb_native_mutex_unlock(&th->interrupt_lock);
6268}
6269
6270// native thread safe
6271// func/data should be native thread safe
6272void
6273rb_ractor_interrupt_exec(struct rb_ractor_struct *target_r,
6274 rb_interrupt_exec_func_t *func, void *data, enum rb_interrupt_exec_flag flags)
6275{
6276 RUBY_DEBUG_LOG("flags:%d", (int)flags);
6277
6278 rb_thread_t *main_th = target_r->threads.main;
6279 rb_threadptr_interrupt_exec(main_th, func, data, flags | rb_interrupt_exec_flag_new_thread);
6280}
6281
#define RUBY_ASSERT_ALWAYS(expr,...)
A variant of RUBY_ASSERT that does not interface with RUBY_DEBUG.
Definition assert.h:199
#define RUBY_ASSERT(...)
Asserts that the given expression is truthy if and only if RUBY_DEBUG is truthy.
Definition assert.h:219
std::atomic< unsigned > rb_atomic_t
Type that is eligible for atomic operations.
Definition atomic.h:69
#define rb_define_method(klass, mid, func, arity)
Defines klass#mid.
#define rb_define_singleton_method(klass, mid, func, arity)
Defines klass.mid.
#define RUBY_INTERNAL_EVENT_SWITCH
Thread switched.
Definition event.h:90
int rb_remove_event_hook(rb_event_hook_func_t func)
Removes the passed function from the list of event hooks.
Definition vm_trace.c:319
#define RUBY_EVENT_THREAD_BEGIN
Encountered a new thread.
Definition event.h:57
void(* rb_event_hook_func_t)(rb_event_flag_t evflag, VALUE data, VALUE self, ID mid, VALUE klass)
Type of event hooks.
Definition event.h:120
uint32_t rb_event_flag_t
Represents event(s).
Definition event.h:108
#define RUBY_EVENT_CALL
A method, written in Ruby, is called.
Definition event.h:41
#define RUBY_EVENT_THREAD_END
Encountered an end of a thread.
Definition event.h:58
static void RB_FL_SET_RAW(VALUE obj, VALUE flags)
This is an implementation detail of RB_FL_SET().
Definition fl_type.h:600
VALUE rb_define_class(const char *name, VALUE super)
Defines a top-level class.
Definition class.c:1572
void rb_define_alias(VALUE klass, const char *name1, const char *name2)
Defines an alias of a method.
Definition class.c:2931
ID rb_frame_last_func(void)
Returns the ID of the last method in the call stack.
Definition eval.c:1260
int rb_keyword_given_p(void)
Determines if the current method is given a keyword argument.
Definition eval.c:1050
int rb_block_given_p(void)
Determines if the current method is given a block.
Definition eval.c:1037
#define rb_str_new2
Old name of rb_str_new_cstr.
Definition string.h:1674
#define ALLOC
Old name of RB_ALLOC.
Definition memory.h:400
#define T_STRING
Old name of RUBY_T_STRING.
Definition value_type.h:78
#define xfree
Old name of ruby_xfree.
Definition xmalloc.h:58
#define Qundef
Old name of RUBY_Qundef.
#define INT2FIX
Old name of RB_INT2FIX.
Definition long.h:48
#define OBJ_FROZEN
Old name of RB_OBJ_FROZEN.
Definition fl_type.h:136
#define xrealloc
Old name of ruby_xrealloc.
Definition xmalloc.h:56
#define ID2SYM
Old name of RB_ID2SYM.
Definition symbol.h:44
#define OBJ_FREEZE
Old name of RB_OBJ_FREEZE.
Definition fl_type.h:134
#define UNREACHABLE_RETURN
Old name of RBIMPL_UNREACHABLE_RETURN.
Definition assume.h:29
#define CLASS_OF
Old name of rb_class_of.
Definition globals.h:205
#define xmalloc
Old name of ruby_xmalloc.
Definition xmalloc.h:53
#define LONG2FIX
Old name of RB_INT2FIX.
Definition long.h:49
#define FIX2INT
Old name of RB_FIX2INT.
Definition int.h:41
#define ZALLOC_N
Old name of RB_ZALLOC_N.
Definition memory.h:401
#define T_ICLASS
Old name of RUBY_T_ICLASS.
Definition value_type.h:66
#define T_HASH
Old name of RUBY_T_HASH.
Definition value_type.h:65
#define Qtrue
Old name of RUBY_Qtrue.
#define NUM2INT
Old name of RB_NUM2INT.
Definition int.h:44
#define INT2NUM
Old name of RB_INT2NUM.
Definition int.h:43
#define Qnil
Old name of RUBY_Qnil.
#define Qfalse
Old name of RUBY_Qfalse.
#define FIX2LONG
Old name of RB_FIX2LONG.
Definition long.h:46
#define T_ARRAY
Old name of RUBY_T_ARRAY.
Definition value_type.h:56
#define T_OBJECT
Old name of RUBY_T_OBJECT.
Definition value_type.h:75
#define NIL_P
Old name of RB_NIL_P.
#define POSFIXABLE
Old name of RB_POSFIXABLE.
Definition fixnum.h:29
#define BUILTIN_TYPE
Old name of RB_BUILTIN_TYPE.
Definition value_type.h:85
#define FIXNUM_P
Old name of RB_FIXNUM_P.
#define SYMBOL_P
Old name of RB_SYMBOL_P.
Definition value_type.h:88
void ruby_stop(int ex)
Calls ruby_cleanup() and exits the process.
Definition eval.c:290
#define ruby_debug
This variable controls whether the interpreter is in debug mode.
Definition error.h:486
void rb_exc_raise(VALUE mesg)
Raises an exception in the current thread.
Definition eval.c:683
VALUE rb_eSystemExit
SystemExit exception.
Definition error.c:1424
VALUE rb_eIOError
IOError exception.
Definition io.c:189
VALUE rb_eStandardError
StandardError exception.
Definition error.c:1428
VALUE rb_eTypeError
TypeError exception.
Definition error.c:1431
void rb_frozen_error_raise(VALUE frozen_obj, const char *fmt,...)
Raises an instance of rb_eFrozenError.
Definition error.c:4121
VALUE rb_eFatal
fatal exception.
Definition error.c:1427
VALUE rb_eRuntimeError
RuntimeError exception.
Definition error.c:1429
void rb_warn(const char *fmt,...)
Identical to rb_warning(), except it reports unless $VERBOSE is nil.
Definition error.c:466
VALUE rb_eException
Mother of all exceptions.
Definition error.c:1423
VALUE rb_eThreadError
ThreadError exception.
Definition eval.c:1055
void rb_exit(int status)
Terminates the current execution context.
Definition process.c:4363
VALUE rb_eSignal
SignalException exception.
Definition error.c:1426
VALUE rb_obj_alloc(VALUE klass)
Allocates an instance of the given class.
Definition object.c:2190
VALUE rb_cInteger
Module class.
Definition numeric.c:198
VALUE rb_obj_hide(VALUE obj)
Make the object invisible from Ruby code.
Definition object.c:100
VALUE rb_obj_class(VALUE obj)
Queries the class of an object.
Definition object.c:265
VALUE rb_cThread
Thread class.
Definition vm.c:671
VALUE rb_cModule
Module class.
Definition object.c:62
double rb_num2dbl(VALUE num)
Converts an instance of rb_cNumeric into C's double.
Definition object.c:3811
VALUE rb_obj_is_kind_of(VALUE obj, VALUE klass)
Queries if the given object is an instance (of possibly descendants) of the given class.
Definition object.c:924
VALUE rb_ary_shift(VALUE ary)
Destructively deletes an element from the beginning of the passed array and returns what was deleted.
VALUE rb_ary_dup(VALUE ary)
Duplicates an array.
VALUE rb_ary_delete_at(VALUE ary, long pos)
Destructively removes an element which resides at the specific index of the passed array.
VALUE rb_ary_new(void)
Allocates a new, empty array.
VALUE rb_ary_pop(VALUE ary)
Destructively deletes an element from the end of the passed array and returns what was deleted.
VALUE rb_ary_hidden_new(long capa)
Allocates a hidden (no class) empty array.
VALUE rb_ary_clear(VALUE ary)
Destructively removes everything form an array.
VALUE rb_ary_push(VALUE ary, VALUE elem)
Special case of rb_ary_cat() that it adds only one element.
VALUE rb_ary_entry(VALUE ary, long off)
Queries an element of an array.
VALUE rb_ary_join(VALUE ary, VALUE sep)
Recursively stringises the elements of the passed array, flattens that result, then joins the sequenc...
static int rb_check_arity(int argc, int min, int max)
Ensures that the passed integer is in the passed range.
Definition error.h:284
VALUE rb_block_proc(void)
Constructs a Proc object from implicitly passed components.
Definition proc.c:847
void rb_reset_random_seed(void)
Resets the RNG behind rb_genrand_int32()/rb_genrand_real().
Definition random.c:1817
VALUE rb_str_new_frozen(VALUE str)
Creates a frozen copy of the string, if necessary.
Definition string.c:1516
VALUE rb_str_concat(VALUE dst, VALUE src)
Identical to rb_str_append(), except it also accepts an integer as a codepoint.
Definition string.c:4032
#define rb_str_cat_cstr(buf, str)
Identical to rb_str_cat(), except it assumes the passed pointer is a pointer to a C string.
Definition string.h:1655
#define rb_str_new_cstr(str)
Identical to rb_str_new, except it assumes the passed pointer is a pointer to a C string.
Definition string.h:1513
int rb_thread_interrupted(VALUE thval)
Checks if the thread's execution was recently interrupted.
Definition thread.c:1469
VALUE rb_thread_local_aref(VALUE thread, ID key)
This badly named function reads from a Fiber local storage.
Definition thread.c:3751
VALUE rb_mutex_new(void)
Creates a mutex.
VALUE rb_thread_kill(VALUE thread)
Terminates the given thread.
Definition thread.c:2944
#define RUBY_UBF_IO
A special UBF for blocking IO operations.
Definition thread.h:382
VALUE rb_thread_main(void)
Obtains the "main" thread.
Definition thread.c:3183
VALUE rb_exec_recursive(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE h)
"Recursion" API entry point.
void rb_thread_sleep_forever(void)
Blocks indefinitely.
Definition thread.c:1399
void rb_thread_fd_close(int fd)
This funciton is now a no-op.
Definition thread.c:2886
void rb_thread_wait_for(struct timeval time)
Identical to rb_thread_sleep(), except it takes struct timeval instead.
Definition thread.c:1431
VALUE rb_mutex_synchronize(VALUE mutex, VALUE(*func)(VALUE arg), VALUE arg)
Obtains the lock, runs the passed function, and releases the lock when it completes.
VALUE rb_thread_stop(void)
Stops the current thread.
Definition thread.c:3095
VALUE rb_mutex_sleep(VALUE self, VALUE timeout)
Releases the lock held in the mutex and waits for the period of time; reacquires the lock on wakeup.
VALUE rb_exec_recursive_paired(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE p, VALUE h)
Identical to rb_exec_recursive(), except it checks for the recursion on the ordered pair of { g,...
void rb_unblock_function_t(void *)
This is the type of UBFs.
Definition thread.h:336
void rb_thread_atfork_before_exec(void)
:FIXME: situation of this function is unclear.
Definition thread.c:5023
void rb_thread_check_ints(void)
Checks for interrupts.
Definition thread.c:1452
VALUE rb_thread_run(VALUE thread)
This is a rb_thread_wakeup() + rb_thread_schedule() combo.
Definition thread.c:3086
VALUE rb_thread_wakeup(VALUE thread)
Marks a given thread as eligible for scheduling.
Definition thread.c:3039
VALUE rb_mutex_unlock(VALUE mutex)
Releases the mutex.
VALUE rb_exec_recursive_paired_outer(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE p, VALUE h)
Identical to rb_exec_recursive_outer(), except it checks for the recursion on the ordered pair of { g...
void rb_thread_sleep_deadly(void)
Identical to rb_thread_sleep_forever(), except the thread calling this function is considered "dead" ...
Definition thread.c:1406
void rb_thread_atfork(void)
A pthread_atfork(3posix)-like API.
Definition thread.c:5018
VALUE rb_thread_current(void)
Obtains the "current" thread.
Definition thread.c:3162
int rb_thread_alone(void)
Checks if the thread this function is running is the only thread that is currently alive.
Definition thread.c:4024
VALUE rb_thread_local_aset(VALUE thread, ID key, VALUE val)
This badly named function writes to a Fiber local storage.
Definition thread.c:3899
void rb_thread_schedule(void)
Tries to switch to another thread.
Definition thread.c:1500
#define RUBY_UBF_PROCESS
A special UBF for blocking process operations.
Definition thread.h:389
VALUE rb_exec_recursive_outer(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE h)
Identical to rb_exec_recursive(), except it calls f for outermost recursion only.
VALUE rb_thread_wakeup_alive(VALUE thread)
Identical to rb_thread_wakeup(), except it doesn't raise on an already killed thread.
Definition thread.c:3048
VALUE rb_mutex_lock(VALUE mutex)
Attempts to lock the mutex.
void rb_thread_sleep(int sec)
Blocks for the given period of time.
Definition thread.c:1475
void rb_timespec_now(struct timespec *ts)
Fills the current time into the given struct.
Definition time.c:2001
struct timeval rb_time_timeval(VALUE time)
Converts an instance of rb_cTime to a struct timeval that represents the identical point of time.
Definition time.c:2953
VALUE rb_ivar_set(VALUE obj, ID name, VALUE val)
Identical to rb_iv_set(), except it accepts the name as an ID instead of a C string.
Definition variable.c:2013
VALUE rb_ivar_get(VALUE obj, ID name)
Identical to rb_iv_get(), except it accepts the name as an ID instead of a C string.
Definition variable.c:1488
VALUE rb_class_path(VALUE mod)
Identical to rb_mod_name(), except it returns #<Class: ...> style inspection for anonymous modules.
Definition variable.c:380
void rb_define_alloc_func(VALUE klass, rb_alloc_func_t func)
Sets the allocator function of a class.
int rb_sourceline(void)
Resembles __LINE__.
Definition vm.c:2075
static ID rb_intern_const(const char *str)
This is a "tiny optimisation" over rb_intern().
Definition symbol.h:284
ID rb_check_id(volatile VALUE *namep)
Detects if the given name is already interned or not.
Definition symbol.c:1133
VALUE rb_to_symbol(VALUE name)
Identical to rb_intern_str(), except it generates a dynamic symbol if necessary.
Definition string.c:12508
ID rb_to_id(VALUE str)
Definition string.c:12498
#define RB_IO_POINTER(obj, fp)
Queries the underlying IO pointer.
Definition io.h:436
VALUE rb_eIOTimeoutError
Indicates that a timeout has occurred while performing an IO operation.
Definition io.c:190
#define RB_NOGVL_UBF_ASYNC_SAFE
Passing this flag to rb_nogvl() indicates that the passed UBF is async-signal-safe.
Definition thread.h:60
void * rb_internal_thread_specific_get(VALUE thread_val, rb_internal_thread_specific_key_t key)
Get thread and tool specific data.
Definition thread.c:6159
#define RB_NOGVL_INTR_FAIL
Passing this flag to rb_nogvl() prevents it from checking interrupts.
Definition thread.h:48
void rb_internal_thread_specific_set(VALUE thread_val, rb_internal_thread_specific_key_t key, void *data)
Set thread and tool specific data.
Definition thread.c:6172
rb_internal_thread_specific_key_t rb_internal_thread_specific_key_create(void)
Create a key to store thread specific data.
Definition thread.c:6131
void * rb_nogvl(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2, int flags)
Identical to rb_thread_call_without_gvl(), except it additionally takes "flags" that change the behav...
Definition thread.c:1580
void * rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
(Re-)acquires the GVL.
Definition thread.c:2046
#define RB_NOGVL_OFFLOAD_SAFE
Passing this flag to rb_nogvl() indicates that the passed function is safe to offload to a background...
Definition thread.h:73
void * rb_thread_call_without_gvl2(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2)
Identical to rb_thread_call_without_gvl(), except it does not interface with signals etc.
Definition thread.c:1717
void * rb_thread_call_without_gvl(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2)
Allows the passed function to run in parallel with other Ruby threads.
#define RB_BLOCK_CALL_FUNC_ARGLIST(yielded_arg, callback_arg)
Shim for block function parameters.
Definition iterator.h:58
VALUE rb_yield(VALUE val)
Yields the block.
Definition vm_eval.c:1372
void rb_throw_obj(VALUE tag, VALUE val)
Identical to rb_throw(), except it allows arbitrary Ruby object to become a tag.
Definition vm_eval.c:2519
static int rb_fd_max(const rb_fdset_t *f)
It seems this function has no use.
Definition largesize.h:209
void rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
Destructively overwrites an fdset with another.
void rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
Identical to rb_fd_copy(), except it copies unlimited number of file descriptors.
void rb_fd_term(rb_fdset_t *f)
Destroys the rb_fdset_t, releasing any memory and resources it used.
static fd_set * rb_fd_ptr(const rb_fdset_t *f)
Raw pointer to fd_set.
Definition largesize.h:195
#define MEMCPY(p1, p2, type, n)
Handy macro to call memcpy.
Definition memory.h:372
#define ALLOCA_N(type, n)
Definition memory.h:292
#define MEMZERO(p, type, n)
Handy macro to erase a region of memory.
Definition memory.h:360
VALUE rb_thread_create(type *q, void *w)
Creates a rb_cThread instance.
void rb_hash_foreach(VALUE q, int_type *w, VALUE e)
Iteration over the given hash.
VALUE rb_ensure(type *q, VALUE w, type *e, VALUE r)
An equivalent of ensure clause.
#define rb_fd_isset
Queries if the given fd is in the rb_fdset_t.
Definition posix.h:60
#define rb_fd_select
Waits for multiple file descriptors at once.
Definition posix.h:66
#define rb_fd_init
Initialises the given rb_fdset_t.
Definition posix.h:63
#define rb_fd_set
Sets the given fd to the rb_fdset_t.
Definition posix.h:54
#define rb_fd_zero
Clears the given rb_fdset_t.
Definition posix.h:51
#define rb_fd_clr
Unsets the given fd from the rb_fdset_t.
Definition posix.h:57
#define RARRAY_LEN
Just another name of rb_array_len.
Definition rarray.h:51
static int RARRAY_LENINT(VALUE ary)
Identical to rb_array_len(), except it differs for the return type.
Definition rarray.h:281
static void RARRAY_ASET(VALUE ary, long i, VALUE v)
Assigns an object in an array.
Definition rarray.h:386
#define RARRAY_AREF(a, i)
Definition rarray.h:403
#define RARRAY_CONST_PTR
Just another name of rb_array_const_ptr.
Definition rarray.h:52
static VALUE RBASIC_CLASS(VALUE obj)
Queries the class of an object.
Definition rbasic.h:166
#define RBASIC(obj)
Convenient casting macro.
Definition rbasic.h:40
#define RCLASS_SUPER
Just another name of rb_class_get_superclass.
Definition rclass.h:44
#define DATA_PTR(obj)
Convenient getter macro.
Definition rdata.h:67
#define RHASH_EMPTY_P(h)
Checks if the hash is empty.
Definition rhash.h:79
#define StringValueCStr(v)
Identical to StringValuePtr, except it additionally checks for the contents for viability as a C stri...
Definition rstring.h:89
#define RUBY_TYPED_DEFAULT_FREE
This is a value you can set to rb_data_type_struct::dfree.
Definition rtypeddata.h:80
#define TypedData_Get_Struct(obj, type, data_type, sval)
Obtains a C struct from inside of a wrapper Ruby object.
Definition rtypeddata.h:520
#define TypedData_Wrap_Struct(klass, data_type, sval)
Converts sval, a pointer to your struct, into a Ruby object.
Definition rtypeddata.h:455
#define TypedData_Make_Struct(klass, type, data_type, sval)
Identical to TypedData_Wrap_Struct, except it allocates a new data region internally instead of takin...
Definition rtypeddata.h:502
#define errno
Ractor-aware version of errno.
Definition ruby.h:388
int ruby_native_thread_p(void)
Queries if the thread which calls this function is a ruby's thread.
Definition thread.c:5771
int ruby_snprintf(char *str, size_t n, char const *fmt,...)
Our own locale-insensitive version of snprintf(3).
Definition sprintf.c:1041
#define RB_PASS_CALLED_KEYWORDS
Pass keywords if current method is called with keywords, useful for argument delegation.
Definition scan_args.h:78
Scheduler APIs.
VALUE rb_fiber_scheduler_blocking_operation_wait(VALUE scheduler, void *(*function)(void *), void *data, rb_unblock_function_t *unblock_function, void *data2, int flags, struct rb_fiber_scheduler_blocking_operation_state *state)
Defer the execution of the passed function to the scheduler.
Definition scheduler.c:1061
VALUE rb_fiber_scheduler_current(void)
Identical to rb_fiber_scheduler_get(), except it also returns RUBY_Qnil in case of a blocking fiber.
Definition scheduler.c:464
VALUE rb_fiber_scheduler_fiber_interrupt(VALUE scheduler, VALUE fiber, VALUE exception)
Interrupt a fiber by raising an exception.
Definition scheduler.c:1092
VALUE rb_fiber_scheduler_block(VALUE scheduler, VALUE blocker, VALUE timeout)
Non-blocking wait for the passed "blocker", which is for instance Thread.join or Mutex....
Definition scheduler.c:628
VALUE rb_fiber_scheduler_set(VALUE scheduler)
Destructively assigns the passed scheduler to that of the current thread that is calling this functio...
Definition scheduler.c:425
VALUE rb_fiber_scheduler_unblock(VALUE scheduler, VALUE blocker, VALUE fiber)
Wakes up a fiber previously blocked using rb_fiber_scheduler_block().
Definition scheduler.c:647
int rb_thread_fd_select(int nfds, rb_fdset_t *rfds, rb_fdset_t *wfds, rb_fdset_t *efds, struct timeval *timeout)
Waits for multiple file descriptors at once.
Definition thread.c:4515
#define rb_fd_resize(n, f)
Does nothing (defined for compatibility).
Definition select.h:43
static bool RB_TEST(VALUE obj)
Emulates Ruby's "if" statement.
@ RUBY_Qundef
Represents so-called undef.
#define RTEST
This is an old name of RB_TEST.
#define _(args)
This was a transition path from K&R to ANSI.
Definition stdarg.h:35
Definition method.h:63
This is the struct that holds the necessary type info for a TypedData object.
Definition rtypeddata.h:202
The data structure which wraps the fd_set bitmap used by select(2).
Definition largesize.h:71
int maxfd
Maximum allowed number of FDs.
Definition largesize.h:72
fd_set * fdset
File descriptors buffer.
Definition largesize.h:73
int capa
Maximum allowed number of FDs.
Definition win32.h:50
Ruby's IO, metadata and buffers.
Definition io.h:295
VALUE self
The IO's Ruby level counterpart.
Definition io.h:298
int fd
file descriptor.
Definition io.h:306
struct ccan_list_head blocking_operations
Threads that are performing a blocking operation without the GVL using this IO.
Definition io.h:131
Definition method.h:55
const rb_iseq_t * iseqptr
iseq pointer, should be separated from iseqval
Definition method.h:143
void rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
Blocks until the current thread obtains a lock.
Definition thread.c:296
void rb_native_mutex_lock(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_lock.
void rb_native_mutex_initialize(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_initialize.
void rb_native_mutex_unlock(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_unlock.
void rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
Releases a lock.
Definition thread.c:302
void rb_native_mutex_destroy(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_destroy.
void rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
Fills the passed lock with an initial value.
Definition thread.c:284
void rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
Destroys the passed mutex.
Definition thread.c:290
uintptr_t ID
Type that represents a Ruby identifier such as a variable name.
Definition value.h:52
uintptr_t VALUE
Type that represents a Ruby object.
Definition value.h:40
static bool RB_TYPE_P(VALUE obj, enum ruby_value_type t)
Queries if the given object is of given type.
Definition value_type.h:376