Ruby 3.5.0dev (2025-02-20 revision 34098b669c0cbc024cd08e686891f1dfe0a10aaf)
thread.c (34098b669c0cbc024cd08e686891f1dfe0a10aaf)
1/**********************************************************************
2
3 thread.c -
4
5 $Author$
6
7 Copyright (C) 2004-2007 Koichi Sasada
8
9**********************************************************************/
10
11/*
12 YARV Thread Design
13
14 model 1: Userlevel Thread
15 Same as the traditional ruby thread.
16
17 model 2: Native Thread with Global VM lock
18 Using pthreads (or Windows threads); Ruby threads run concurrently.
19
20 model 3: Native Thread with fine grain lock
21 Using pthreads; Ruby threads run concurrently or in parallel.
22
23 model 4: M:N User:Native threads with Global VM lock
24 Combination of models 1 and 2.
25
26 model 5: M:N User:Native threads with fine grain lock
27 Combination of models 1 and 3.
28
29------------------------------------------------------------------------
30
31 model 2:
32 Only a thread holding the mutex (GVL: Global VM Lock, or Giant VM Lock) can run.
33 On thread scheduling, the running thread releases the GVL. If the running
34 thread attempts a blocking operation, it must release the GVL so that
35 another thread can continue. After the blocking operation, the thread
36 must check for interrupts (RUBY_VM_CHECK_INTS).
37
38 Every VM can run in parallel.
39
40 Ruby threads are scheduled by the OS thread scheduler.
41
42------------------------------------------------------------------------
43
44 model 3:
45 All threads run concurrently or in parallel, and exclusive access
46 control is needed to access shared objects. For example, to access a
47 String or Array object, a fine-grained lock must be taken every time.
48 */
49
50
51/*
52 * FD_SET, FD_CLR and FD_ISSET have a small sanity check when glibc 2.15
53 * or later is used and _FORTIFY_SOURCE > 0 is set.
54 * However, the check is wrong. Even though Linux's select(2) supports
55 * large fd sizes (> FD_SETSIZE), it wrongly assumes fd is always less
56 * than FD_SETSIZE (i.e. 1024). Then, when HAVE_RB_FD_INIT is enabled,
57 * it doesn't work correctly and makes the program abort. Therefore we
58 * need to disable _FORTIFY_SOURCE until glibc fixes it.
59 */
60#undef _FORTIFY_SOURCE
61#undef __USE_FORTIFY_LEVEL
62#define __USE_FORTIFY_LEVEL 0
63
64/* for model 2 */
65
66#include "ruby/internal/config.h"
67
68#ifdef __linux__
69// Normally, gcc(1) translates calls to alloca() with inlined code. This is not done when either the -ansi, -std=c89, -std=c99, or the -std=c11 option is given and the header <alloca.h> is not included.
70# include <alloca.h>
71#endif
72
73#define TH_SCHED(th) (&(th)->ractor->threads.sched)
74
75#include "eval_intern.h"
76#include "hrtime.h"
77#include "internal.h"
78#include "internal/class.h"
79#include "internal/cont.h"
80#include "internal/error.h"
81#include "internal/gc.h"
82#include "internal/hash.h"
83#include "internal/io.h"
84#include "internal/object.h"
85#include "internal/proc.h"
86#include "ruby/fiber/scheduler.h"
87#include "internal/signal.h"
88#include "internal/thread.h"
89#include "internal/time.h"
90#include "internal/warnings.h"
91#include "iseq.h"
92#include "ruby/debug.h"
93#include "ruby/io.h"
94#include "ruby/thread.h"
95#include "ruby/thread_native.h"
96#include "timev.h"
97#include "vm_core.h"
98#include "ractor_core.h"
99#include "vm_debug.h"
100#include "vm_sync.h"
101
102#ifndef USE_NATIVE_THREAD_PRIORITY
103#define USE_NATIVE_THREAD_PRIORITY 0
104#define RUBY_THREAD_PRIORITY_MAX 3
105#define RUBY_THREAD_PRIORITY_MIN -3
106#endif
107
108static VALUE rb_cThreadShield;
109
110static VALUE sym_immediate;
111static VALUE sym_on_blocking;
112static VALUE sym_never;
113
114static uint32_t thread_default_quantum_ms = 100;
115
116#define THREAD_LOCAL_STORAGE_INITIALISED FL_USER13
117#define THREAD_LOCAL_STORAGE_INITIALISED_P(th) RB_FL_TEST_RAW((th), THREAD_LOCAL_STORAGE_INITIALISED)
118
119static inline VALUE
120rb_thread_local_storage(VALUE thread)
121{
122 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
123 rb_ivar_set(thread, idLocals, rb_hash_new());
124 RB_FL_SET_RAW(thread, THREAD_LOCAL_STORAGE_INITIALISED);
125 }
126 return rb_ivar_get(thread, idLocals);
127}
128
129enum SLEEP_FLAGS {
130 SLEEP_DEADLOCKABLE = 0x01,
131 SLEEP_SPURIOUS_CHECK = 0x02,
132 SLEEP_ALLOW_SPURIOUS = 0x04,
133 SLEEP_NO_CHECKINTS = 0x08,
134};
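/*
 * These flags are OR-ed together; e.g. rb_thread_sleep_deadly() below does:
 *
 *   sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE | SLEEP_SPURIOUS_CHECK);
 */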
135
136static void sleep_forever(rb_thread_t *th, unsigned int fl);
137static int sleep_hrtime(rb_thread_t *, rb_hrtime_t, unsigned int fl);
138
139static void rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end);
140static int rb_threadptr_dead(rb_thread_t *th);
141static void rb_check_deadlock(rb_ractor_t *r);
142static int rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th);
143static const char *thread_status_name(rb_thread_t *th, int detail);
144static int hrtime_update_expire(rb_hrtime_t *, const rb_hrtime_t);
145NORETURN(static void async_bug_fd(const char *mesg, int errno_arg, int fd));
146MAYBE_UNUSED(static int consume_communication_pipe(int fd));
147
148static volatile int system_working = 1;
149static rb_internal_thread_specific_key_t specific_key_count;
150
151struct waiting_fd {
152 struct ccan_list_node wfd_node; /* <=> vm.waiting_fds */
153 rb_thread_t *th;
154 int fd;
155 struct rb_io_close_wait_list *busy;
156};
157
158/********************************************************************************/
159
160#define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
161
162struct rb_blocking_region_buffer {
163 enum rb_thread_status prev_status;
164};
165
166static int unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted);
167static void unblock_function_clear(rb_thread_t *th);
168
169static inline int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
170 rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted);
171static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);
172
173#define THREAD_BLOCKING_BEGIN(th) do { \
174 struct rb_thread_sched * const sched = TH_SCHED(th); \
175 RB_VM_SAVE_MACHINE_CONTEXT(th); \
176 thread_sched_to_waiting((sched), (th));
177
178#define THREAD_BLOCKING_END(th) \
179 thread_sched_to_running((sched), (th)); \
180 rb_ractor_thread_switch(th->ractor, th); \
181} while(0)
182
183#ifdef __GNUC__
184#ifdef HAVE_BUILTIN___BUILTIN_CHOOSE_EXPR_CONSTANT_P
185#define only_if_constant(expr, notconst) __builtin_choose_expr(__builtin_constant_p(expr), (expr), (notconst))
186#else
187#define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
188#endif
189#else
190#define only_if_constant(expr, notconst) notconst
191#endif
192#define BLOCKING_REGION(th, exec, ubf, ubfarg, fail_if_interrupted) do { \
193 struct rb_blocking_region_buffer __region; \
194 if (blocking_region_begin(th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
195 /* always return true unless fail_if_interrupted */ \
196 !only_if_constant(fail_if_interrupted, TRUE)) { \
197 /* Important that this is inlined into the macro, and not part of \
198 * blocking_region_begin - see bug #20493 */ \
199 RB_VM_SAVE_MACHINE_CONTEXT(th); \
200 thread_sched_to_waiting(TH_SCHED(th), th); \
201 exec; \
202 blocking_region_end(th, &__region); \
203 }; \
204} while(0)
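/*
 * Usage sketch for BLOCKING_REGION, mirroring its canonical caller
 * rb_nogvl() later in this file (illustrative only):
 *
 *   BLOCKING_REGION(th, {
 *       val = func(data1);            // executed without the GVL
 *       saved_errno = rb_errno();
 *   }, ubf, data2, flags & RB_NOGVL_INTR_FAIL);
 */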
205
206/*
207 * returns true if this thread was spuriously interrupted, false otherwise
208 * (e.g. hit by Thread#run or ran a Ruby-level Signal.trap handler)
209 */
210#define RUBY_VM_CHECK_INTS_BLOCKING(ec) vm_check_ints_blocking(ec)
211static inline int
212vm_check_ints_blocking(rb_execution_context_t *ec)
213{
214 rb_thread_t *th = rb_ec_thread_ptr(ec);
215
216 if (LIKELY(rb_threadptr_pending_interrupt_empty_p(th))) {
217 if (LIKELY(!RUBY_VM_INTERRUPTED_ANY(ec))) return FALSE;
218 }
219 else {
220 th->pending_interrupt_queue_checked = 0;
221 RUBY_VM_SET_INTERRUPT(ec);
222 }
223 return rb_threadptr_execute_interrupts(th, 1);
224}
225
226int
227rb_vm_check_ints_blocking(rb_execution_context_t *ec)
228{
229 return vm_check_ints_blocking(ec);
230}
231
232/*
233 * poll() is supported by many OSes, but so far Linux is the only
234 * one we know of that supports using poll() in all places select()
235 * would work.
236 */
237#if defined(HAVE_POLL)
238# if defined(__linux__)
239# define USE_POLL
240# endif
241# if defined(__FreeBSD_version) && __FreeBSD_version >= 1100000
242# define USE_POLL
243 /* FreeBSD does not set POLLOUT when POLLHUP happens */
244# define POLLERR_SET (POLLHUP | POLLERR)
245# endif
246#endif
247
248static void
249timeout_prepare(rb_hrtime_t **to, rb_hrtime_t *rel, rb_hrtime_t *end,
250 const struct timeval *timeout)
251{
252 if (timeout) {
253 *rel = rb_timeval2hrtime(timeout);
254 *end = rb_hrtime_add(rb_hrtime_now(), *rel);
255 *to = rel;
256 }
257 else {
258 *to = 0;
259 }
260}
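/*
 * Caller-side sketch (hypothetical caller): *to ends up NULL for
 * "wait forever", or pointing at the relative timeout otherwise.
 *
 *   rb_hrtime_t rel, end, *to;
 *   timeout_prepare(&to, &rel, &end, timeout);
 */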
261
262MAYBE_UNUSED(NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start)));
263MAYBE_UNUSED(static bool th_has_dedicated_nt(const rb_thread_t *th));
264MAYBE_UNUSED(static int waitfd_to_waiting_flag(int wfd_event));
265
266#include THREAD_IMPL_SRC
267
268/*
269 * TODO: somebody with win32 knowledge should be able to get rid of
270 * timer-thread by busy-waiting on signals. And it should be possible
271 * to make the GVL in thread_pthread.c platform-independent.
272 */
273#ifndef BUSY_WAIT_SIGNALS
274# define BUSY_WAIT_SIGNALS (0)
275#endif
276
277#ifndef USE_EVENTFD
278# define USE_EVENTFD (0)
279#endif
280
281#include "thread_sync.c"
282
283void
284rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
285{
286 rb_native_mutex_initialize(lock);
287}
288
289void
290rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
291{
292 rb_native_mutex_destroy(lock);
293}
294
295void
296rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
297{
298 rb_native_mutex_lock(lock);
299}
300
301void
302rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
303{
304 rb_native_mutex_unlock(lock);
305}
306
307static int
308unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted)
309{
310 do {
311 if (fail_if_interrupted) {
312 if (RUBY_VM_INTERRUPTED_ANY(th->ec)) {
313 return FALSE;
314 }
315 }
316 else {
317 RUBY_VM_CHECK_INTS(th->ec);
318 }
319
320 rb_native_mutex_lock(&th->interrupt_lock);
321 } while (!th->ec->raised_flag && RUBY_VM_INTERRUPTED_ANY(th->ec) &&
322 (rb_native_mutex_unlock(&th->interrupt_lock), TRUE));
323
324 VM_ASSERT(th->unblock.func == NULL);
325
326 th->unblock.func = func;
327 th->unblock.arg = arg;
328 rb_native_mutex_unlock(&th->interrupt_lock);
329
330 return TRUE;
331}
332
333static void
334unblock_function_clear(rb_thread_t *th)
335{
336 rb_native_mutex_lock(&th->interrupt_lock);
337 th->unblock.func = 0;
338 rb_native_mutex_unlock(&th->interrupt_lock);
339}
340
341static void
342threadptr_interrupt_locked(rb_thread_t *th, bool trap)
343{
344 // th->interrupt_lock should be acquired here
345
346 RUBY_DEBUG_LOG("th:%u trap:%d", rb_th_serial(th), trap);
347
348 if (trap) {
349 RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
350 }
351 else {
352 RUBY_VM_SET_INTERRUPT(th->ec);
353 }
354
355 if (th->unblock.func != NULL) {
356 (th->unblock.func)(th->unblock.arg);
357 }
358 else {
359 /* none */
360 }
361}
362
363static void
364threadptr_interrupt(rb_thread_t *th, int trap)
365{
366 rb_native_mutex_lock(&th->interrupt_lock);
367 {
368 threadptr_interrupt_locked(th, trap);
369 }
370 rb_native_mutex_unlock(&th->interrupt_lock);
371}
372
373void
374rb_threadptr_interrupt(rb_thread_t *th)
375{
376 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
377 threadptr_interrupt(th, false);
378}
379
380static void
381threadptr_trap_interrupt(rb_thread_t *th)
382{
383 threadptr_interrupt(th, true);
384}
385
386static void
387terminate_all(rb_ractor_t *r, const rb_thread_t *main_thread)
388{
389 rb_thread_t *th = 0;
390
391 ccan_list_for_each(&r->threads.set, th, lt_node) {
392 if (th != main_thread) {
393 RUBY_DEBUG_LOG("terminate start th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));
394
395 rb_threadptr_pending_interrupt_enque(th, RUBY_FATAL_THREAD_TERMINATED);
396 rb_threadptr_interrupt(th);
397
398 RUBY_DEBUG_LOG("terminate done th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));
399 }
400 else {
401 RUBY_DEBUG_LOG("main thread th:%u", rb_th_serial(th));
402 }
403 }
404}
405
406static void
407rb_threadptr_join_list_wakeup(rb_thread_t *thread)
408{
409 while (thread->join_list) {
410 struct rb_waiting_list *join_list = thread->join_list;
411
412 // Consume the entry from the join list:
413 thread->join_list = join_list->next;
414
415 rb_thread_t *target_thread = join_list->thread;
416
417 if (target_thread->scheduler != Qnil && join_list->fiber) {
418 rb_fiber_scheduler_unblock(target_thread->scheduler, target_thread->self, rb_fiberptr_self(join_list->fiber));
419 }
420 else {
421 rb_threadptr_interrupt(target_thread);
422
423 switch (target_thread->status) {
424 case THREAD_STOPPED:
425 case THREAD_STOPPED_FOREVER:
426 target_thread->status = THREAD_RUNNABLE;
427 break;
428 default:
429 break;
430 }
431 }
432 }
433}
434
435void
436rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
437{
438 while (th->keeping_mutexes) {
439 rb_mutex_t *mutex = th->keeping_mutexes;
440 th->keeping_mutexes = mutex->next_mutex;
441
442 // rb_warn("mutex #<%p> was not unlocked by thread #<%p>", (void *)mutex, (void*)th);
443
444 const char *error_message = rb_mutex_unlock_th(mutex, th, mutex->fiber);
445 if (error_message) rb_bug("invalid keeping_mutexes: %s", error_message);
446 }
447}
448
449void
450rb_thread_terminate_all(rb_thread_t *th)
451{
452 rb_ractor_t *cr = th->ractor;
453 rb_execution_context_t * volatile ec = th->ec;
454 volatile int sleeping = 0;
455
456 if (cr->threads.main != th) {
457 rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
458 (void *)cr->threads.main, (void *)th);
459 }
460
461 /* unlock all locking mutexes */
462 rb_threadptr_unlock_all_locking_mutexes(th);
463
464 EC_PUSH_TAG(ec);
465 if (EC_EXEC_TAG() == TAG_NONE) {
466 retry:
467 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
468
469 terminate_all(cr, th);
470
471 while (rb_ractor_living_thread_num(cr) > 1) {
472 rb_hrtime_t rel = RB_HRTIME_PER_SEC;
473 /*
474 * The thread-exit routine in thread_start_func_2 notifies
475 * us when the last sub-thread exits.
476 */
477 sleeping = 1;
478 native_sleep(th, &rel);
479 RUBY_VM_CHECK_INTS_BLOCKING(ec);
480 sleeping = 0;
481 }
482 }
483 else {
484 /*
485 * When an exception is caught (e.g. Ctrl+C), broadcast the
486 * kill request again to ensure all threads are killed, even
487 * if they are blocked on sleep, mutex, etc.
488 */
489 if (sleeping) {
490 sleeping = 0;
491 goto retry;
492 }
493 }
494 EC_POP_TAG();
495}
496
497void rb_threadptr_root_fiber_terminate(rb_thread_t *th);
498static void threadptr_interrupt_exec_cleanup(rb_thread_t *th);
499
500static void
501thread_cleanup_func_before_exec(void *th_ptr)
502{
503 rb_thread_t *th = th_ptr;
504 th->status = THREAD_KILLED;
505
506 // The thread stack doesn't exist in the forked process:
507 th->ec->machine.stack_start = th->ec->machine.stack_end = NULL;
508
509 threadptr_interrupt_exec_cleanup(th);
510 rb_threadptr_root_fiber_terminate(th);
511}
512
513static void
514thread_cleanup_func(void *th_ptr, int atfork)
515{
516 rb_thread_t *th = th_ptr;
517
518 th->locking_mutex = Qfalse;
519 thread_cleanup_func_before_exec(th_ptr);
520
521 /*
522 * Unfortunately, we can't release native threading resources at fork,
523 * because libc may be in an unstable locking state; touching a
524 * threading resource may therefore cause a deadlock.
525 */
526 if (atfork) {
527 th->nt = NULL;
528 return;
529 }
530
531 rb_native_mutex_destroy(&th->interrupt_lock);
532}
533
534static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
535static VALUE rb_thread_to_s(VALUE thread);
536
537void
538ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame)
539{
540 native_thread_init_stack(th, local_in_parent_frame);
541}
542
543const VALUE *
544rb_vm_proc_local_ep(VALUE proc)
545{
546 const VALUE *ep = vm_proc_ep(proc);
547
548 if (ep) {
549 return rb_vm_ep_local_ep(ep);
550 }
551 else {
552 return NULL;
553 }
554}
555
556// for ractor, defined in vm.c
557VALUE rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
558 int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler);
559
560static VALUE
561thread_do_start_proc(rb_thread_t *th)
562{
563 VALUE args = th->invoke_arg.proc.args;
564 const VALUE *args_ptr;
565 int args_len;
566 VALUE procval = th->invoke_arg.proc.proc;
567 rb_proc_t *proc;
568 GetProcPtr(procval, proc);
569
570 th->ec->errinfo = Qnil;
571 th->ec->root_lep = rb_vm_proc_local_ep(procval);
572 th->ec->root_svar = Qfalse;
573
574 vm_check_ints_blocking(th->ec);
575
576 if (th->invoke_type == thread_invoke_type_ractor_proc) {
577 VALUE self = rb_ractor_self(th->ractor);
578 VM_ASSERT(FIXNUM_P(args));
579 args_len = FIX2INT(args);
580 args_ptr = ALLOCA_N(VALUE, args_len);
581 rb_ractor_receive_parameters(th->ec, th->ractor, args_len, (VALUE *)args_ptr);
582 vm_check_ints_blocking(th->ec);
583
584 return rb_vm_invoke_proc_with_self(
585 th->ec, proc, self,
586 args_len, args_ptr,
587 th->invoke_arg.proc.kw_splat,
588 VM_BLOCK_HANDLER_NONE
589 );
590 }
591 else {
592 args_len = RARRAY_LENINT(args);
593 if (args_len < 8) {
594 /* free proc.args if the length is small enough */
595 args_ptr = ALLOCA_N(VALUE, args_len);
596 MEMCPY((VALUE *)args_ptr, RARRAY_CONST_PTR(args), VALUE, args_len);
597 th->invoke_arg.proc.args = Qnil;
598 }
599 else {
600 args_ptr = RARRAY_CONST_PTR(args);
601 }
602
603 vm_check_ints_blocking(th->ec);
604
605 return rb_vm_invoke_proc(
606 th->ec, proc,
607 args_len, args_ptr,
608 th->invoke_arg.proc.kw_splat,
609 VM_BLOCK_HANDLER_NONE
610 );
611 }
612}
613
614static VALUE
615thread_do_start(rb_thread_t *th)
616{
617 native_set_thread_name(th);
618 VALUE result = Qundef;
619
620 switch (th->invoke_type) {
621 case thread_invoke_type_proc:
622 result = thread_do_start_proc(th);
623 break;
624
625 case thread_invoke_type_ractor_proc:
626 result = thread_do_start_proc(th);
627 rb_ractor_atexit(th->ec, result);
628 break;
629
630 case thread_invoke_type_func:
631 result = (*th->invoke_arg.func.func)(th->invoke_arg.func.arg);
632 break;
633
634 case thread_invoke_type_none:
635 rb_bug("unreachable");
636 }
637
638 return result;
639}
640
641void rb_ec_clear_current_thread_trace_func(const rb_execution_context_t *ec);
642
643static int
644thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
645{
646 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
647 VM_ASSERT(th != th->vm->ractor.main_thread);
648
649 enum ruby_tag_type state;
650 VALUE errinfo = Qnil;
651 rb_thread_t *ractor_main_th = th->ractor->threads.main;
652
653 // setup ractor
654 if (rb_ractor_status_p(th->ractor, ractor_blocking)) {
655 RB_VM_LOCK();
656 {
657 rb_vm_ractor_blocking_cnt_dec(th->vm, th->ractor, __FILE__, __LINE__);
658 rb_ractor_t *r = th->ractor;
659 r->r_stdin = rb_io_prep_stdin();
660 r->r_stdout = rb_io_prep_stdout();
661 r->r_stderr = rb_io_prep_stderr();
662 }
663 RB_VM_UNLOCK();
664 }
665
666 // Ensure that we are not joinable.
667 VM_ASSERT(UNDEF_P(th->value));
668
669 int fiber_scheduler_closed = 0, event_thread_end_hooked = 0;
670 VALUE result = Qundef;
671
672 EC_PUSH_TAG(th->ec);
673
674 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
675 EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_BEGIN, th->self, 0, 0, 0, Qundef);
676
677 result = thread_do_start(th);
678 }
679
680 if (!fiber_scheduler_closed) {
681 fiber_scheduler_closed = 1;
682 rb_fiber_scheduler_set(Qnil);
683 }
684
685 if (!event_thread_end_hooked) {
686 event_thread_end_hooked = 1;
687 EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_END, th->self, 0, 0, 0, Qundef);
688 }
689
690 if (state == TAG_NONE) {
691 // This must be set AFTER doing all user-level code. At this point, the thread is effectively finished and calls to `Thread#join` will succeed.
692 th->value = result;
693 } else {
694 errinfo = th->ec->errinfo;
695
696 VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef);
697 if (!NIL_P(exc)) errinfo = exc;
698
699 if (state == TAG_FATAL) {
700 if (th->invoke_type == thread_invoke_type_ractor_proc) {
701 rb_ractor_atexit(th->ec, Qnil);
702 }
703 /* fatal error within this thread, need to stop whole script */
704 }
705 else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
706 /* exit on main_thread. */
707 }
708 else {
709 if (th->report_on_exception) {
710 VALUE mesg = rb_thread_to_s(th->self);
711 rb_str_cat_cstr(mesg, " terminated with exception (report_on_exception is true):\n");
712 rb_write_error_str(mesg);
713 rb_ec_error_print(th->ec, errinfo);
714 }
715
716 if (th->invoke_type == thread_invoke_type_ractor_proc) {
717 rb_ractor_atexit_exception(th->ec);
718 }
719
720 if (th->vm->thread_abort_on_exception ||
721 th->abort_on_exception || RTEST(ruby_debug)) {
722 /* exit on main_thread */
723 }
724 else {
725 errinfo = Qnil;
726 }
727 }
728 th->value = Qnil;
729 }
730
731 // The thread is effectively finished and can be joined.
732 VM_ASSERT(!UNDEF_P(th->value));
733
734 rb_threadptr_join_list_wakeup(th);
735 rb_threadptr_unlock_all_locking_mutexes(th);
736
737 if (th->invoke_type == thread_invoke_type_ractor_proc) {
738 rb_thread_terminate_all(th);
739 rb_ractor_teardown(th->ec);
740 }
741
742 th->status = THREAD_KILLED;
743 RUBY_DEBUG_LOG("killed th:%u", rb_th_serial(th));
744
745 if (th->vm->ractor.main_thread == th) {
746 ruby_stop(0);
747 }
748
749 if (RB_TYPE_P(errinfo, T_OBJECT)) {
750 /* treat as a normal error object */
751 rb_threadptr_raise(ractor_main_th, 1, &errinfo);
752 }
753
754 EC_POP_TAG();
755
756 rb_ec_clear_current_thread_trace_func(th->ec);
757
758 /* locking_mutex must be Qfalse */
759 if (th->locking_mutex != Qfalse) {
760 rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
761 (void *)th, th->locking_mutex);
762 }
763
764 if (ractor_main_th->status == THREAD_KILLED &&
765 th->ractor->threads.cnt <= 2 /* main thread and this thread */) {
766 /* I'm the last thread; wake up the main thread from rb_thread_terminate_all */
767 rb_threadptr_interrupt(ractor_main_th);
768 }
769
770 rb_check_deadlock(th->ractor);
771
772 rb_fiber_close(th->ec->fiber_ptr);
773
774 thread_cleanup_func(th, FALSE);
775 VM_ASSERT(th->ec->vm_stack == NULL);
776
777 if (th->invoke_type == thread_invoke_type_ractor_proc) {
778 // After rb_ractor_living_threads_remove(), GC can happen at any time
779 // and this ractor can be collected (destroying the GVL).
780 // So gvl_release() must come before it.
781 thread_sched_to_dead(TH_SCHED(th), th);
782 rb_ractor_living_threads_remove(th->ractor, th);
783 }
784 else {
785 rb_ractor_living_threads_remove(th->ractor, th);
786 thread_sched_to_dead(TH_SCHED(th), th);
787 }
788
789 return 0;
790}
791
792struct thread_create_params {
793 enum thread_invoke_type type;
794
795 // for normal proc thread
796 VALUE args;
797 VALUE proc;
798
799 // for ractor
800 rb_ractor_t *g;
801
802 // for func
803 VALUE (*fn)(void *);
804};
805
806static void thread_specific_storage_alloc(rb_thread_t *th);
807
808static VALUE
809thread_create_core(VALUE thval, struct thread_create_params *params)
810{
811 rb_execution_context_t *ec = GET_EC();
812 rb_thread_t *th = rb_thread_ptr(thval), *current_th = rb_ec_thread_ptr(ec);
813 int err;
814
815 thread_specific_storage_alloc(th);
816
817 if (OBJ_FROZEN(current_th->thgroup)) {
818 rb_raise(rb_eThreadError,
819 "can't start a new thread (frozen ThreadGroup)");
820 }
821
822 rb_fiber_inherit_storage(ec, th->ec->fiber_ptr);
823
824 switch (params->type) {
825 case thread_invoke_type_proc:
826 th->invoke_type = thread_invoke_type_proc;
827 th->invoke_arg.proc.args = params->args;
828 th->invoke_arg.proc.proc = params->proc;
829 th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
830 break;
831
832 case thread_invoke_type_ractor_proc:
833#if RACTOR_CHECK_MODE > 0
834 rb_ractor_setup_belonging_to(thval, rb_ractor_id(params->g));
835#endif
836 th->invoke_type = thread_invoke_type_ractor_proc;
837 th->ractor = params->g;
838 th->ractor->threads.main = th;
839 th->invoke_arg.proc.proc = rb_proc_isolate_bang(params->proc);
840 th->invoke_arg.proc.args = INT2FIX(RARRAY_LENINT(params->args));
841 th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
842 rb_ractor_send_parameters(ec, params->g, params->args);
843 break;
844
845 case thread_invoke_type_func:
846 th->invoke_type = thread_invoke_type_func;
847 th->invoke_arg.func.func = params->fn;
848 th->invoke_arg.func.arg = (void *)params->args;
849 break;
850
851 default:
852 rb_bug("unreachable");
853 }
854
855 th->priority = current_th->priority;
856 th->thgroup = current_th->thgroup;
857
858 th->pending_interrupt_queue = rb_ary_hidden_new(0);
859 th->pending_interrupt_queue_checked = 0;
860 th->pending_interrupt_mask_stack = rb_ary_dup(current_th->pending_interrupt_mask_stack);
861 RBASIC_CLEAR_CLASS(th->pending_interrupt_mask_stack);
862
863 rb_native_mutex_initialize(&th->interrupt_lock);
864
865 RUBY_DEBUG_LOG("r:%u th:%u", rb_ractor_id(th->ractor), rb_th_serial(th));
866
867 rb_ractor_living_threads_insert(th->ractor, th);
868
869 /* kick thread */
870 err = native_thread_create(th);
871 if (err) {
872 th->status = THREAD_KILLED;
873 rb_ractor_living_threads_remove(th->ractor, th);
874 rb_raise(rb_eThreadError, "can't create Thread: %s", strerror(err));
875 }
876 return thval;
877}
878
879#define threadptr_initialized(th) ((th)->invoke_type != thread_invoke_type_none)
880
881/*
882 * call-seq:
883 * Thread.new { ... } -> thread
884 * Thread.new(*args, &proc) -> thread
885 * Thread.new(*args) { |args| ... } -> thread
886 *
887 * Creates a new thread executing the given block.
888 *
889 * Any +args+ given to ::new will be passed to the block:
890 *
891 * arr = []
892 * a, b, c = 1, 2, 3
893 * Thread.new(a,b,c) { |d,e,f| arr << d << e << f }.join
894 * arr #=> [1, 2, 3]
895 *
896 * A ThreadError exception is raised if ::new is called without a block.
897 *
898 * If you're going to subclass Thread, be sure to call super in your
899 * +initialize+ method, otherwise a ThreadError will be raised.
900 */
901static VALUE
902thread_s_new(int argc, VALUE *argv, VALUE klass)
903{
904 rb_thread_t *th;
905 VALUE thread = rb_thread_alloc(klass);
906
907 if (GET_RACTOR()->threads.main->status == THREAD_KILLED) {
908 rb_raise(rb_eThreadError, "can't alloc thread");
909 }
910
911 rb_obj_call_init_kw(thread, argc, argv, RB_PASS_CALLED_KEYWORDS);
912 th = rb_thread_ptr(thread);
913 if (!threadptr_initialized(th)) {
914 rb_raise(rb_eThreadError, "uninitialized thread - check '%"PRIsVALUE"#initialize'",
915 klass);
916 }
917 return thread;
918}
919
920/*
921 * call-seq:
922 * Thread.start([args]*) {|args| block } -> thread
923 * Thread.fork([args]*) {|args| block } -> thread
924 *
925 * Basically the same as ::new. However, if class Thread is subclassed, then
926 * calling +start+ in that subclass will not invoke the subclass's
927 * +initialize+ method.
928 */
929
930static VALUE
931thread_start(VALUE klass, VALUE args)
932{
933 struct thread_create_params params = {
934 .type = thread_invoke_type_proc,
935 .args = args,
936 .proc = rb_block_proc(),
937 };
938 return thread_create_core(rb_thread_alloc(klass), &params);
939}
940
941static VALUE
942threadptr_invoke_proc_location(rb_thread_t *th)
943{
944 if (th->invoke_type == thread_invoke_type_proc) {
945 return rb_proc_location(th->invoke_arg.proc.proc);
946 }
947 else {
948 return Qnil;
949 }
950}
951
952/* :nodoc: */
953static VALUE
954thread_initialize(VALUE thread, VALUE args)
955{
956 rb_thread_t *th = rb_thread_ptr(thread);
957
958 if (!rb_block_given_p()) {
959 rb_raise(rb_eThreadError, "must be called with a block");
960 }
961 else if (th->invoke_type != thread_invoke_type_none) {
962 VALUE loc = threadptr_invoke_proc_location(th);
963 if (!NIL_P(loc)) {
964 rb_raise(rb_eThreadError,
965 "already initialized thread - %"PRIsVALUE":%"PRIsVALUE,
966 RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
967 }
968 else {
969 rb_raise(rb_eThreadError, "already initialized thread");
970 }
971 }
972 else {
973 struct thread_create_params params = {
974 .type = thread_invoke_type_proc,
975 .args = args,
976 .proc = rb_block_proc(),
977 };
978 return thread_create_core(thread, &params);
979 }
980}
981
982VALUE
983rb_thread_create(VALUE (*fn)(void *), void *arg)
984{
985 struct thread_create_params params = {
986 .type = thread_invoke_type_func,
987 .fn = fn,
988 .args = (VALUE)arg,
989 };
990 return thread_create_core(rb_thread_alloc(rb_cThread), &params);
991}
992
993VALUE
994rb_thread_create_ractor(rb_ractor_t *r, VALUE args, VALUE proc)
995{
996 struct thread_create_params params = {
997 .type = thread_invoke_type_ractor_proc,
998 .g = r,
999 .args = args,
1000 .proc = proc,
1001 };
1002 return thread_create_core(rb_thread_alloc(rb_cThread), &params);
1003}
1004
1006struct join_arg {
1007 struct rb_waiting_list *waiter;
1008 rb_thread_t *target;
1009 VALUE timeout;
1010 rb_hrtime_t *limit;
1011};
1012
1013static VALUE
1014remove_from_join_list(VALUE arg)
1015{
1016 struct join_arg *p = (struct join_arg *)arg;
1017 rb_thread_t *target_thread = p->target;
1018
1019 if (target_thread->status != THREAD_KILLED) {
1020 struct rb_waiting_list **join_list = &target_thread->join_list;
1021
1022 while (*join_list) {
1023 if (*join_list == p->waiter) {
1024 *join_list = (*join_list)->next;
1025 break;
1026 }
1027
1028 join_list = &(*join_list)->next;
1029 }
1030 }
1031
1032 return Qnil;
1033}
1034
1035static int
1036thread_finished(rb_thread_t *th)
1037{
1038 return th->status == THREAD_KILLED || !UNDEF_P(th->value);
1039}
1040
1041static VALUE
1042thread_join_sleep(VALUE arg)
1043{
1044 struct join_arg *p = (struct join_arg *)arg;
1045 rb_thread_t *target_th = p->target, *th = p->waiter->thread;
1046 rb_hrtime_t end = 0, *limit = p->limit;
1047
1048 if (limit) {
1049 end = rb_hrtime_add(*limit, rb_hrtime_now());
1050 }
1051
1052 while (!thread_finished(target_th)) {
1053 VALUE scheduler = rb_fiber_scheduler_current();
1054
1055 if (scheduler != Qnil) {
1056 rb_fiber_scheduler_block(scheduler, target_th->self, p->timeout);
1057 // Check if the target thread is finished after blocking:
1058 if (thread_finished(target_th)) break;
1059 // Otherwise, a timeout occurred:
1060 else return Qfalse;
1061 }
1062 else if (!limit) {
1063 sleep_forever(th, SLEEP_DEADLOCKABLE | SLEEP_ALLOW_SPURIOUS | SLEEP_NO_CHECKINTS);
1064 }
1065 else {
1066 if (hrtime_update_expire(limit, end)) {
1067 RUBY_DEBUG_LOG("timeout target_th:%u", rb_th_serial(target_th));
1068 return Qfalse;
1069 }
1070 th->status = THREAD_STOPPED;
1071 native_sleep(th, limit);
1072 }
1073 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1074 th->status = THREAD_RUNNABLE;
1075
1076 RUBY_DEBUG_LOG("interrupted target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1077 }
1078
1079 return Qtrue;
1080}
1081
1082static VALUE
1083thread_join(rb_thread_t *target_th, VALUE timeout, rb_hrtime_t *limit)
1084{
1085 rb_execution_context_t *ec = GET_EC();
1086 rb_thread_t *th = ec->thread_ptr;
1087 rb_fiber_t *fiber = ec->fiber_ptr;
1088
1089 if (th == target_th) {
1090 rb_raise(rb_eThreadError, "Target thread must not be current thread");
1091 }
1092
1093 if (th->ractor->threads.main == target_th) {
1094 rb_raise(rb_eThreadError, "Target thread must not be main thread");
1095 }
1096
1097 RUBY_DEBUG_LOG("target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1098
1099 if (target_th->status != THREAD_KILLED) {
1100 struct rb_waiting_list waiter;
1101 waiter.next = target_th->join_list;
1102 waiter.thread = th;
1103 waiter.fiber = rb_fiberptr_blocking(fiber) ? NULL : fiber;
1104 target_th->join_list = &waiter;
1105
1106 struct join_arg arg;
1107 arg.waiter = &waiter;
1108 arg.target = target_th;
1109 arg.timeout = timeout;
1110 arg.limit = limit;
1111
1112 if (!rb_ensure(thread_join_sleep, (VALUE)&arg, remove_from_join_list, (VALUE)&arg)) {
1113 return Qnil;
1114 }
1115 }
1116
1117 RUBY_DEBUG_LOG("success target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1118
1119 if (target_th->ec->errinfo != Qnil) {
1120 VALUE err = target_th->ec->errinfo;
1121
1122 if (FIXNUM_P(err)) {
1123 switch (err) {
1124 case INT2FIX(TAG_FATAL):
1125 RUBY_DEBUG_LOG("terminated target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1126
1127 /* OK. killed. */
1128 break;
1129 default:
1130 rb_bug("thread_join: Fixnum (%d) should not reach here.", FIX2INT(err));
1131 }
1132 }
1133 else if (THROW_DATA_P(target_th->ec->errinfo)) {
1134 rb_bug("thread_join: THROW_DATA should not reach here.");
1135 }
1136 else {
1137 /* normal exception */
1138 rb_exc_raise(err);
1139 }
1140 }
1141 return target_th->self;
1142}
1143
1144/*
1145 * call-seq:
1146 * thr.join -> thr
1147 * thr.join(limit) -> thr
1148 *
1149 * The calling thread will suspend execution and run this +thr+.
1150 *
1151 * Does not return until +thr+ exits or until the given +limit+ seconds have
1152 * passed.
1153 *
1154 * If the time limit expires, +nil+ will be returned, otherwise +thr+ is
1155 * returned.
1156 *
1157 * Any threads not joined will be killed when the main program exits.
1158 *
1159 * If +thr+ had previously raised an exception and the ::abort_on_exception or
1160 * $DEBUG flags are not set (so the exception has not yet been processed), it
1161 * will be processed at this time.
1162 *
1163 * a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
1164 * x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
1165 * x.join # Let thread x finish, thread a will be killed on exit.
1166 * #=> "axyz"
1167 *
1168 * The following example illustrates the +limit+ parameter.
1169 *
1170 * y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
1171 * puts "Waiting" until y.join(0.15)
1172 *
1173 * This will produce:
1174 *
1175 * tick...
1176 * Waiting
1177 * tick...
1178 * Waiting
1179 * tick...
1180 * tick...
1181 */
1182
1183static VALUE
1184thread_join_m(int argc, VALUE *argv, VALUE self)
1185{
1186 VALUE timeout = Qnil;
1187 rb_hrtime_t rel = 0, *limit = 0;
1188
1189 if (rb_check_arity(argc, 0, 1)) {
1190 timeout = argv[0];
1191 }
1192
1193 // Convert the timeout eagerly, so it's always converted and deterministic
1194 /*
1195 * This supports INFINITY and negative values, so we can't use
1196 * rb_time_interval right now...
1197 */
1198 if (NIL_P(timeout)) {
1199 /* unlimited */
1200 }
1201 else if (FIXNUM_P(timeout)) {
1202 rel = rb_sec2hrtime(NUM2TIMET(timeout));
1203 limit = &rel;
1204 }
1205 else {
1206 limit = double2hrtime(&rel, rb_num2dbl(timeout));
1207 }
1208
1209 return thread_join(rb_thread_ptr(self), timeout, limit);
1210}
1211
1212/*
1213 * call-seq:
1214 * thr.value -> obj
1215 *
1216 * Waits for +thr+ to complete, using #join, and returns its value or raises
1217 * the exception which terminated the thread.
1218 *
1219 * a = Thread.new { 2 + 2 }
1220 * a.value #=> 4
1221 *
1222 * b = Thread.new { raise 'something went wrong' }
1223 * b.value #=> RuntimeError: something went wrong
1224 */
1225
1226static VALUE
1227thread_value(VALUE self)
1228{
1229 rb_thread_t *th = rb_thread_ptr(self);
1230 thread_join(th, Qnil, 0);
1231 if (UNDEF_P(th->value)) {
1232 // If the thread is dead because we forked, th->value is still Qundef.
1233 return Qnil;
1234 }
1235 return th->value;
1236}
1237
1238/*
1239 * Thread Scheduling
1240 */
1241
1242static void
1243getclockofday(struct timespec *ts)
1244{
1245#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
1246 if (clock_gettime(CLOCK_MONOTONIC, ts) == 0)
1247 return;
1248#endif
1249 rb_timespec_now(ts);
1250}
1251
1252/*
1253 * Don't inline this, since library call is already time consuming
1254 * and we don't want "struct timespec" on stack too long for GC
1255 */
1256NOINLINE(rb_hrtime_t rb_hrtime_now(void));
1257rb_hrtime_t
1258rb_hrtime_now(void)
1259{
1260 struct timespec ts;
1261
1262 getclockofday(&ts);
1263 return rb_timespec2hrtime(&ts);
1264}
1265
1266/*
1267 * at least gcc 7.2 and 7.3 complains about "rb_hrtime_t end"
1268 * being uninitialized, maybe other versions, too.
1269 */
1270COMPILER_WARNING_PUSH
1271#if defined(__GNUC__) && __GNUC__ == 7 && __GNUC_MINOR__ <= 3
1272COMPILER_WARNING_IGNORED(-Wmaybe-uninitialized)
1273#endif
1274#ifndef PRIu64
1275#define PRIu64 PRI_64_PREFIX "u"
1276#endif
1277/*
1278 * @end is the absolute time when @timeout is set to expire.
1279 * Returns true if @end has passed;
1280 * updates @timeout and returns false otherwise.
1281 */
1282static int
1283hrtime_update_expire(rb_hrtime_t *timeout, const rb_hrtime_t end)
1284{
1285 rb_hrtime_t now = rb_hrtime_now();
1286
1287 if (now > end) return 1;
1288
1289 RUBY_DEBUG_LOG("%"PRIu64" > %"PRIu64"", (uint64_t)end, (uint64_t)now);
1290
1291 *timeout = end - now;
1292 return 0;
1293}
1294COMPILER_WARNING_POP
1295
1296static int
1297sleep_hrtime(rb_thread_t *th, rb_hrtime_t rel, unsigned int fl)
1298{
1299 enum rb_thread_status prev_status = th->status;
1300 int woke;
1301 rb_hrtime_t end = rb_hrtime_add(rb_hrtime_now(), rel);
1302
1303 th->status = THREAD_STOPPED;
1304 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1305 while (th->status == THREAD_STOPPED) {
1306 native_sleep(th, &rel);
1307 woke = vm_check_ints_blocking(th->ec);
1308 if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
1309 break;
1310 if (hrtime_update_expire(&rel, end))
1311 break;
1312 woke = 1;
1313 }
1314 th->status = prev_status;
1315 return woke;
1316}
1317
1318static int
1319sleep_hrtime_until(rb_thread_t *th, rb_hrtime_t end, unsigned int fl)
1320{
1321 enum rb_thread_status prev_status = th->status;
1322 int woke;
1323 rb_hrtime_t rel = rb_hrtime_sub(end, rb_hrtime_now());
1324
1325 th->status = THREAD_STOPPED;
1326 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1327 while (th->status == THREAD_STOPPED) {
1328 native_sleep(th, &rel);
1329 woke = vm_check_ints_blocking(th->ec);
1330 if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
1331 break;
1332 if (hrtime_update_expire(&rel, end))
1333 break;
1334 woke = 1;
1335 }
1336 th->status = prev_status;
1337 return woke;
1338}
1339
1340static void
1341sleep_forever(rb_thread_t *th, unsigned int fl)
1342{
1343 enum rb_thread_status prev_status = th->status;
1344 enum rb_thread_status status;
1345 int woke;
1346
1347 status = fl & SLEEP_DEADLOCKABLE ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
1348 th->status = status;
1349
1350 if (!(fl & SLEEP_NO_CHECKINTS)) RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1351
1352 while (th->status == status) {
1353 if (fl & SLEEP_DEADLOCKABLE) {
1354 rb_ractor_sleeper_threads_inc(th->ractor);
1355 rb_check_deadlock(th->ractor);
1356 }
1357 {
1358 native_sleep(th, 0);
1359 }
1360 if (fl & SLEEP_DEADLOCKABLE) {
1361 rb_ractor_sleeper_threads_dec(th->ractor);
1362 }
1363 if (fl & SLEEP_ALLOW_SPURIOUS) {
1364 break;
1365 }
1366
1367 woke = vm_check_ints_blocking(th->ec);
1368
1369 if (woke && !(fl & SLEEP_SPURIOUS_CHECK)) {
1370 break;
1371 }
1372 }
1373 th->status = prev_status;
1374}
1375
1376void
1377rb_thread_sleep_forever(void)
1378{
1379 RUBY_DEBUG_LOG("forever");
1380 sleep_forever(GET_THREAD(), SLEEP_SPURIOUS_CHECK);
1381}
1382
1383void
1384rb_thread_sleep_deadly(void)
1385{
1386 RUBY_DEBUG_LOG("deadly");
1387 sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE|SLEEP_SPURIOUS_CHECK);
1388}
1389
1390static void
1391rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end)
1392{
1393 VALUE scheduler = rb_fiber_scheduler_current();
1394 if (scheduler != Qnil) {
1395 rb_fiber_scheduler_block(scheduler, blocker, timeout);
1396 }
1397 else {
1398 RUBY_DEBUG_LOG("...");
1399 if (end) {
1400 sleep_hrtime_until(GET_THREAD(), end, SLEEP_SPURIOUS_CHECK);
1401 }
1402 else {
1403 sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE);
1404 }
1405 }
1406}
1407
1408void
1409rb_thread_wait_for(struct timeval time)
1410{
1411 rb_thread_t *th = GET_THREAD();
1412
1413 sleep_hrtime(th, rb_timeval2hrtime(&time), SLEEP_SPURIOUS_CHECK);
1414}
1415
1416void
1417rb_ec_check_ints(rb_execution_context_t *ec)
1418{
1419 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1420}
1421
1422/*
1423 * CAUTION: This function causes thread switching.
1424 * rb_thread_check_ints() checks Ruby's interrupts;
1425 * some interrupts need thread switching, invoking handlers,
1426 * and so on.
1427 */
1428
1429void
1430rb_thread_check_ints(void)
1431{
1432 rb_ec_check_ints(GET_EC());
1433}
1434
1435/*
1436 * Hidden API for tcl/tk wrapper.
1437 * There is no guarantee that it will be maintained.
1438 */
1439int
1440rb_thread_check_trap_pending(void)
1441{
1442 return rb_signal_buff_size() != 0;
1443}
1444
1445/* This function can be called in blocking region. */
1446int
1447rb_thread_interrupted(VALUE thval)
1448{
1449 return (int)RUBY_VM_INTERRUPTED(rb_thread_ptr(thval)->ec);
1450}
1451
1452void
1453rb_thread_sleep(int sec)
1454{
1455 rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
1456}
1457
1458static void
1459rb_thread_schedule_limits(uint32_t limits_us)
1460{
1461 if (!rb_thread_alone()) {
1462 rb_thread_t *th = GET_THREAD();
1463 RUBY_DEBUG_LOG("us:%u", (unsigned int)limits_us);
1464
1465 if (th->running_time_us >= limits_us) {
1466 RUBY_DEBUG_LOG("switch %s", "start");
1467
1468 RB_VM_SAVE_MACHINE_CONTEXT(th);
1469 thread_sched_yield(TH_SCHED(th), th);
1470 rb_ractor_thread_switch(th->ractor, th);
1471
1472 RUBY_DEBUG_LOG("switch %s", "done");
1473 }
1474 }
1475}
1476
1477void
1478rb_thread_schedule(void)
1479{
1480 rb_thread_schedule_limits(0);
1481 RUBY_VM_CHECK_INTS(GET_EC());
1482}
1483
1484/* blocking region */
1485
1486static inline int
1487blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
1488 rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
1489{
1490#ifdef RUBY_ASSERT_CRITICAL_SECTION
1491 VM_ASSERT(ruby_assert_critical_section_entered == 0);
1492#endif
1493 VM_ASSERT(th == GET_THREAD());
1494
1495 region->prev_status = th->status;
1496 if (unblock_function_set(th, ubf, arg, fail_if_interrupted)) {
1497 th->blocking_region_buffer = region;
1498 th->status = THREAD_STOPPED;
1499 rb_ractor_blocking_threads_inc(th->ractor, __FILE__, __LINE__);
1500
1501 RUBY_DEBUG_LOG("thread_id:%p", (void *)th->nt->thread_id);
1502 return TRUE;
1503 }
1504 else {
1505 return FALSE;
1506 }
1507}
1508
1509static inline void
1510blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
1511{
1512 /* entry to ubf_list still permitted at this point, make it impossible: */
1513 unblock_function_clear(th);
1514 /* entry to ubf_list impossible at this point, so unregister is safe: */
1515 unregister_ubf_list(th);
1516
1517 thread_sched_to_running(TH_SCHED(th), th);
1518 rb_ractor_thread_switch(th->ractor, th);
1519
1520 th->blocking_region_buffer = 0;
1521 rb_ractor_blocking_threads_dec(th->ractor, __FILE__, __LINE__);
1522 if (th->status == THREAD_STOPPED) {
1523 th->status = region->prev_status;
1524 }
1525
1526 RUBY_DEBUG_LOG("end");
1527
1528#ifndef _WIN32
1529 // GET_THREAD() clears WSAGetLastError()
1530 VM_ASSERT(th == GET_THREAD());
1531#endif
1532}
1533
1534void *
1535rb_nogvl(void *(*func)(void *), void *data1,
1536 rb_unblock_function_t *ubf, void *data2,
1537 int flags)
1538{
1539 if (flags & RB_NOGVL_OFFLOAD_SAFE) {
1540 VALUE scheduler = rb_fiber_scheduler_current();
1541 if (scheduler != Qnil) {
1542 struct rb_fiber_scheduler_blocking_operation_state state;
1543
1544 VALUE result = rb_fiber_scheduler_blocking_operation_wait(scheduler, func, data1, ubf, data2, flags, &state);
1545
1546 if (!UNDEF_P(result)) {
1547 rb_errno_set(state.saved_errno);
1548 return state.result;
1549 }
1550 }
1551 }
1552
1553 void *val = 0;
1554 rb_execution_context_t *ec = GET_EC();
1555 rb_thread_t *th = rb_ec_thread_ptr(ec);
1556 rb_vm_t *vm = rb_ec_vm_ptr(ec);
1557 bool is_main_thread = vm->ractor.main_thread == th;
1558 int saved_errno = 0;
1559
1560 if ((ubf == RUBY_UBF_IO) || (ubf == RUBY_UBF_PROCESS)) {
1561 ubf = ubf_select;
1562 data2 = th;
1563 }
1564 else if (ubf && rb_ractor_living_thread_num(th->ractor) == 1 && is_main_thread) {
1565 if (flags & RB_NOGVL_UBF_ASYNC_SAFE) {
1566 vm->ubf_async_safe = 1;
1567 }
1568 }
1569
1570 rb_vm_t *volatile saved_vm = vm;
1571 BLOCKING_REGION(th, {
1572 val = func(data1);
1573 saved_errno = rb_errno();
1574 }, ubf, data2, flags & RB_NOGVL_INTR_FAIL);
1575 vm = saved_vm;
1576
1577 if (is_main_thread) vm->ubf_async_safe = 0;
1578
1579 if ((flags & RB_NOGVL_INTR_FAIL) == 0) {
1580 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1581 }
1582
1583 rb_errno_set(saved_errno);
1584
1585 return val;
1586}
1587
1588/*
1589 * rb_thread_call_without_gvl - permit concurrent/parallel execution.
1590 * rb_thread_call_without_gvl2 - permit concurrent/parallel execution
1591 * without interrupt processing.
1592 *
1593 * rb_thread_call_without_gvl() does:
1594 * (1) Check interrupts.
1595 * (2) release GVL.
1596 * Other Ruby threads may run in parallel.
1597 * (3) call func with data1
1598 * (4) acquire GVL.
1599 * Other Ruby threads can not run in parallel any more.
1600 * (5) Check interrupts.
1601 *
1602 * rb_thread_call_without_gvl2() does:
1603 * (1) Check interrupt and return if interrupted.
1604 * (2) release GVL.
1605 * (3) call func with data1 and a pointer to the flags.
1606 * (4) acquire GVL.
1607 *
1608 * If another thread interrupts this thread (Thread#kill, signal delivery,
1609 * VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
1610 * "un-blocking function"). `ubf()' should interrupt `func()' execution by
1611 * toggling a cancellation flag, canceling the invocation of a call inside
1612 * `func()' or similar. Note that `ubf()' may not be called with the GVL.
1613 *
1614 * There are built-in ubfs and you can specify these ubfs:
1615 *
1616 * * RUBY_UBF_IO: ubf for IO operation
1617 * * RUBY_UBF_PROCESS: ubf for process operation
1618 *
1619 * However, we cannot guarantee that our built-in ubfs interrupt your `func()'
1620 * correctly. Be careful when using rb_thread_call_without_gvl(). If you don't
1621 * provide proper ubf(), your program will not stop for Control+C or other
1622 * shutdown events.
1623 *
1624 * "Check interrupts" on above list means checking asynchronous
1625 * interrupt events (such as Thread#kill, signal delivery, VM-shutdown
1626 * request, and so on) and calling corresponding procedures
1627 * (such as `trap' for signals, raise an exception for Thread#raise).
1628 * If `func()' finished and received interrupts, you may skip interrupt
1629 * checking. For example, assume the following func(), which reads data from a file.
1630 *
1631 * read_func(...) {
1632 * // (a) before read
1633 * read(buffer); // (b) reading
1634 * // (c) after read
1635 * }
1636 *
1637 * If an interrupt occurs at (a) or (b), then `ubf()' cancels this
1638 * `read_func()' and interrupts are checked. However, if an interrupt occurs
1639 * at (c), after the *read* operation has completed, checking interrupts is
1640 * harmful because it causes an irreversible side effect: the read data will
1641 * vanish. To avoid such a problem, `read_func()' should be used with
1642 * `rb_thread_call_without_gvl2()'.
1643 *
1644 * If `rb_thread_call_without_gvl2()' detects interrupt, it returns
1645 * immediately. This function does not show when the execution was interrupted.
1646 * For example, there are 4 possible timings: (a), (b), (c), and before calling
1647 * read_func(). You need to record the progress of read_func() and check
1648 * it after `rb_thread_call_without_gvl2()'. You may also need to call
1649 * `rb_thread_check_ints()' correctly, or your program will not be able to
1650 * process events such as `trap' handlers.
1651 *
1652 * NOTE: You can not execute most of the Ruby C API or touch Ruby
1653 * objects in `func()' and `ubf()', including raising an
1654 * exception, because the current thread doesn't hold the GVL
1655 * (it causes synchronization problems). If you need to
1656 * call Ruby functions, either use rb_thread_call_with_gvl()
1657 * or read the source code of the C APIs and confirm safety
1658 * yourself.
1659 *
1660 * NOTE: In short, this API is difficult to use safely. I recommend you
1661 * use other approaches if you have them. We lack experience with this API.
1662 * Please report any problems related to it.
1663 *
1664 * NOTE: Releasing GVL and re-acquiring GVL may be expensive operations
1665 * for a short-running `func()'. Be sure to benchmark and use this
1666 * mechanism when `func()' consumes enough time.
1667 *
1668 * Safe C API:
1669 * * rb_thread_interrupted() - check interrupt flag
1670 * * ruby_xmalloc(), ruby_xrealloc(), ruby_xfree() -
1671 * they will work without GVL, and may acquire GVL when GC is needed.
1672 */
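/*
 * Minimal usage sketch. `my_read', `my_cancel' and `struct my_io' are
 * hypothetical names, not part of this file:
 *
 *   static void *
 *   my_read(void *ptr)
 *   {
 *       struct my_io *io = ptr;
 *       io->ret = read(io->fd, io->buf, io->len); // blocking call, GVL released
 *       return NULL;
 *   }
 *
 *   static void
 *   my_cancel(void *ptr) // ubf: force my_read() to return
 *   {
 *       struct my_io *io = ptr;
 *       close(io->fd);
 *   }
 *
 *   // called with the GVL held:
 *   rb_thread_call_without_gvl(my_read, &io, my_cancel, &io);
 */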
1673void *
1674rb_thread_call_without_gvl2(void *(*func)(void *), void *data1,
1675 rb_unblock_function_t *ubf, void *data2)
1676{
1677 return rb_nogvl(func, data1, ubf, data2, RB_NOGVL_INTR_FAIL);
1678}
1679
1680void *
1681rb_thread_call_without_gvl(void *(*func)(void *data), void *data1,
1682 rb_unblock_function_t *ubf, void *data2)
1683{
1684 return rb_nogvl(func, data1, ubf, data2, 0);
1685}
1686
1687static int
1688waitfd_to_waiting_flag(int wfd_event)
1689{
1690 return wfd_event << 1;
1691}
1692
1693static void
1694thread_io_setup_wfd(rb_thread_t *th, int fd, struct waiting_fd *wfd)
1695{
1696 wfd->fd = fd;
1697 wfd->th = th;
1698 wfd->busy = NULL;
1699
1700 RB_VM_LOCK_ENTER();
1701 {
1702 ccan_list_add(&th->vm->waiting_fds, &wfd->wfd_node);
1703 }
1704 RB_VM_LOCK_LEAVE();
1705}
1706
1707static void
1708thread_io_wake_pending_closer(struct waiting_fd *wfd)
1709{
1710 bool has_waiter = wfd->busy && RB_TEST(wfd->busy->wakeup_mutex);
1711 if (has_waiter) {
1712 rb_mutex_lock(wfd->busy->wakeup_mutex);
1713 }
1714
1715 /* Needs to be protected with RB_VM_LOCK because we don't know if
1716 wfd is on the global list of pending FD ops or if it's on a
1717 struct rb_io_close_wait_list close-waiter. */
1718 RB_VM_LOCK_ENTER();
1719 ccan_list_del(&wfd->wfd_node);
1720 RB_VM_LOCK_LEAVE();
1721
1722 if (has_waiter) {
1723 rb_thread_t *th = rb_thread_ptr(wfd->busy->closing_thread);
1724 if (th->scheduler != Qnil) {
1725 rb_fiber_scheduler_unblock(th->scheduler, wfd->busy->closing_thread, wfd->busy->closing_fiber);
1726 } else {
1727 rb_thread_wakeup(wfd->busy->closing_thread);
1728 }
1729 rb_mutex_unlock(wfd->busy->wakeup_mutex);
1730 }
1731}
1732
1733static bool
1734thread_io_mn_schedulable(rb_thread_t *th, int events, const struct timeval *timeout)
1735{
1736#if defined(USE_MN_THREADS) && USE_MN_THREADS
1737 return !th_has_dedicated_nt(th) && (events || timeout) && th->blocking;
1738#else
1739 return false;
1740#endif
1741}
1742
1743// true if a retry is needed
1744static bool
1745thread_io_wait_events(rb_thread_t *th, int fd, int events, const struct timeval *timeout)
1746{
1747#if defined(USE_MN_THREADS) && USE_MN_THREADS
1748 if (thread_io_mn_schedulable(th, events, timeout)) {
1749 rb_hrtime_t rel, *prel;
1750
1751 if (timeout) {
1752 rel = rb_timeval2hrtime(timeout);
1753 prel = &rel;
1754 }
1755 else {
1756 prel = NULL;
1757 }
1758
1759 VM_ASSERT(prel || (events & (RB_WAITFD_IN | RB_WAITFD_OUT)));
1760
1761 if (thread_sched_wait_events(TH_SCHED(th), th, fd, waitfd_to_waiting_flag(events), prel)) {
1762 // timeout
1763 return false;
1764 }
1765 else {
1766 return true;
1767 }
1768 }
1769#endif // defined(USE_MN_THREADS) && USE_MN_THREADS
1770 return false;
1771}
1772
1773// assume read/write
1774static bool
1775blocking_call_retryable_p(int r, int eno)
1776{
1777 if (r != -1) return false;
1778
1779 switch (eno) {
1780 case EAGAIN:
1781#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
1782 case EWOULDBLOCK:
1783#endif
1784 return true;
1785 default:
1786 return false;
1787 }
1788}
1789
1790bool
1791rb_thread_mn_schedulable(VALUE thval)
1792{
1793 rb_thread_t *th = rb_thread_ptr(thval);
1794 return th->mn_schedulable;
1795}
1796
1797VALUE
1798rb_thread_io_blocking_call(rb_blocking_function_t *func, void *data1, int fd, int events)
1799{
1800 rb_execution_context_t *volatile ec = GET_EC();
1801 rb_thread_t *volatile th = rb_ec_thread_ptr(ec);
1802
1803 RUBY_DEBUG_LOG("th:%u fd:%d ev:%d", rb_th_serial(th), fd, events);
1804
1805 struct waiting_fd waiting_fd;
1806 volatile VALUE val = Qundef; /* shouldn't be used */
1807 volatile int saved_errno = 0;
1808 enum ruby_tag_type state;
1809 volatile bool prev_mn_schedulable = th->mn_schedulable;
1810 th->mn_schedulable = thread_io_mn_schedulable(th, events, NULL);
1811
1812 // `errno` is only valid when there is an actual error - but we can't
1813 // extract that from the return value of `func` alone, so we clear any
1814 // prior `errno` value here so that we can later check if it was set by
1815 // `func` or not (as opposed to some previously set value).
1816 errno = 0;
1817
1818 thread_io_setup_wfd(th, fd, &waiting_fd);
1819 {
1820 EC_PUSH_TAG(ec);
1821 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
1822 volatile enum ruby_tag_type saved_state = state; /* for BLOCKING_REGION */
1823 retry:
1824 BLOCKING_REGION(waiting_fd.th, {
1825 val = func(data1);
1826 saved_errno = errno;
1827 }, ubf_select, waiting_fd.th, FALSE);
1828
1829 th = rb_ec_thread_ptr(ec);
1830 if (events &&
1831 blocking_call_retryable_p((int)val, saved_errno) &&
1832 thread_io_wait_events(th, fd, events, NULL)) {
1833 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1834 goto retry;
1835 }
1836 state = saved_state;
1837 }
1838 EC_POP_TAG();
1839
1840 th = rb_ec_thread_ptr(ec);
1841 th->mn_schedulable = prev_mn_schedulable;
1842 }
1843 /*
1844 * must be deleted before jump
1845 * this will delete either from waiting_fds or on-stack struct rb_io_close_wait_list
1846 */
1847 thread_io_wake_pending_closer(&waiting_fd);
1848
1849 if (state) {
1850 EC_JUMP_TAG(ec, state);
1851 }
1852 /* TODO: check func() */
1853 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1854
1855 // If the error was a timeout, we raise a specific exception for that:
1856 if (saved_errno == ETIMEDOUT) {
1857 rb_raise(rb_eIOTimeoutError, "Blocking operation timed out!");
1858 }
1859
1860 errno = saved_errno;
1861
1862 return val;
1863}
1864
1865VALUE
1866rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
1867{
1868 return rb_thread_io_blocking_call(func, data1, fd, 0);
1869}
1870
1871/*
1872 * rb_thread_call_with_gvl - re-enter the Ruby world after GVL release.
1873 *
1874 * After releasing GVL using
1875 * rb_thread_call_without_gvl() you can not access Ruby values or invoke
1876 * methods. If you need to access Ruby you must use this function
1877 * rb_thread_call_with_gvl().
1878 *
1879 * This function rb_thread_call_with_gvl() does:
1880 * (1) acquire GVL.
1881 * (2) call passed function `func'.
1882 * (3) release GVL.
1883 * (4) return a value which is returned at (2).
1884 *
1885 * NOTE: You should not return a Ruby object at (2) because such an object
1886 * will not be marked.
1887 *
1888 * NOTE: If an exception is raised in `func', this function DOES NOT
1889 * protect (catch) the exception. If you have any resources
1890 * which should be freed before throwing the exception, you need to use
1891 * rb_protect() in `func' and return a value indicating that an
1892 * exception was raised.
1893 *
1894 * NOTE: This function should not be called by a thread which was not
1895 * created as Ruby thread (created by Thread.new or so). In other
1896 * words, this function *DOES NOT* associate or convert a NON-Ruby
1897 * thread to a Ruby thread.
1898 */
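/*
 * Sketch: calling back into Ruby from inside a GVL-released `func()'.
 * `ruby_log' is a hypothetical helper:
 *
 *   static void *
 *   ruby_log(void *msg)
 *   {
 *       rb_funcall(rb_stderr, rb_intern("puts"), 1, rb_str_new_cstr(msg));
 *       return NULL; // do not return a Ruby object here (see NOTE above)
 *   }
 *
 *   // inside func() passed to rb_thread_call_without_gvl():
 *   rb_thread_call_with_gvl(ruby_log, "checkpoint");
 */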
1899void *
1900rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
1901{
1902 rb_thread_t *th = ruby_thread_from_native();
1903 struct rb_blocking_region_buffer *brb;
1904 struct rb_unblock_callback prev_unblock;
1905 void *r;
1906
1907 if (th == 0) {
1908 /* Error has occurred, but we can't use rb_bug()
1909 * because this thread is not Ruby's thread.
1910 * What should we do?
1911 */
1912 bp();
1913 fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
1914 exit(EXIT_FAILURE);
1915 }
1916
1917 brb = (struct rb_blocking_region_buffer *)th->blocking_region_buffer;
1918 prev_unblock = th->unblock;
1919
1920 if (brb == 0) {
1921 rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
1922 }
1923
1924 blocking_region_end(th, brb);
1925 /* enter to Ruby world: You can access Ruby values, methods and so on. */
1926 r = (*func)(data1);
1927 /* leave from Ruby world: You can not access Ruby values, etc. */
1928 int released = blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg, FALSE);
1929 RUBY_ASSERT_ALWAYS(released);
1930 RB_VM_SAVE_MACHINE_CONTEXT(th);
1931 thread_sched_to_waiting(TH_SCHED(th), th);
1932 return r;
1933}
1934
1935/*
1936 * ruby_thread_has_gvl_p - check if current native thread has GVL.
1937 *
1938 ***
1939 *** This API is EXPERIMENTAL!
1940 *** We do not guarantee that this API remains in ruby 1.9.2 or later.
1941 ***
1942 */
1943
1944int
1945ruby_thread_has_gvl_p(void)
1946{
1947 rb_thread_t *th = ruby_thread_from_native();
1948
1949 if (th && th->blocking_region_buffer == 0) {
1950 return 1;
1951 }
1952 else {
1953 return 0;
1954 }
1955}
1956
1957/*
1958 * call-seq:
1959 * Thread.pass -> nil
1960 *
1961 * Give the thread scheduler a hint to pass execution to another thread.
1962 * A running thread may or may not switch; it depends on the OS and processor.
1963 */
1964
1965static VALUE
1966thread_s_pass(VALUE klass)
1967{
1968 rb_thread_schedule();
1969 return Qnil;
1970}
1971
1972/*****************************************************/
1973
1974/*
1975 * rb_threadptr_pending_interrupt_* - manage asynchronous error queue
1976 *
1977 * Async events such as an exception thrown by Thread#raise,
1978 * Thread#kill and thread termination (after main thread termination)
1979 * will be queued to th->pending_interrupt_queue.
1980 * - clear: clear the queue.
1981 * - enque: enqueue err object into queue.
1982 * - deque: dequeue err object from queue.
1983 * - active_p: return 1 if the queue should be checked.
1984 *
1985 * All rb_threadptr_pending_interrupt_* functions are, of course, called
1986 * by a thread that has acquired the GVL.
1987 * Note that all "rb_"-prefixed APIs need the GVL to be called.
1988 */
1989
1990void
1991rb_threadptr_pending_interrupt_clear(rb_thread_t *th)
1992{
1993 rb_ary_clear(th->pending_interrupt_queue);
1994}
1995
1996void
1997rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
1998{
1999 rb_ary_push(th->pending_interrupt_queue, v);
2000 th->pending_interrupt_queue_checked = 0;
2001}
2002
2003static void
2004threadptr_check_pending_interrupt_queue(rb_thread_t *th)
2005{
2006 if (!th->pending_interrupt_queue) {
2007 rb_raise(rb_eThreadError, "uninitialized thread");
2008 }
2009}
2010
2011enum handle_interrupt_timing {
2012 INTERRUPT_NONE,
2013 INTERRUPT_IMMEDIATE,
2014 INTERRUPT_ON_BLOCKING,
2015 INTERRUPT_NEVER
2016};
2017
2018static enum handle_interrupt_timing
2019rb_threadptr_pending_interrupt_from_symbol(rb_thread_t *th, VALUE sym)
2020{
2021 if (sym == sym_immediate) {
2022 return INTERRUPT_IMMEDIATE;
2023 }
2024 else if (sym == sym_on_blocking) {
2025 return INTERRUPT_ON_BLOCKING;
2026 }
2027 else if (sym == sym_never) {
2028 return INTERRUPT_NEVER;
2029 }
2030 else {
2031 rb_raise(rb_eThreadError, "unknown mask signature");
2032 }
2033}
2034
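/*
 * Walk the Thread.handle_interrupt mask stack from the innermost mask
 * outwards, looking for an entry that covers the class of `err'. A Symbol
 * mask applies to every exception class (but not to the Integer markers
 * used for kill/terminate); a Hash mask maps exception classes, including
 * ancestors, to timing symbols. Returns INTERRUPT_NONE if no mask matches.
 */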
2035static enum handle_interrupt_timing
2036rb_threadptr_pending_interrupt_check_mask(rb_thread_t *th, VALUE err)
2037{
2038 VALUE mask;
2039 long mask_stack_len = RARRAY_LEN(th->pending_interrupt_mask_stack);
2040 const VALUE *mask_stack = RARRAY_CONST_PTR(th->pending_interrupt_mask_stack);
2041 VALUE mod;
2042 long i;
2043
2044 for (i=0; i<mask_stack_len; i++) {
2045 mask = mask_stack[mask_stack_len-(i+1)];
2046
2047 if (SYMBOL_P(mask)) {
2048 /* do not match RUBY_FATAL_THREAD_KILLED etc */
2049 if (err != rb_cInteger) {
2050 return rb_threadptr_pending_interrupt_from_symbol(th, mask);
2051 }
2052 else {
2053 continue;
2054 }
2055 }
2056
2057 for (mod = err; mod; mod = RCLASS_SUPER(mod)) {
2058 VALUE klass = mod;
2059 VALUE sym;
2060
2061 if (BUILTIN_TYPE(mod) == T_ICLASS) {
2062 klass = RBASIC(mod)->klass;
2063 }
2064 else if (mod != RCLASS_ORIGIN(mod)) {
2065 continue;
2066 }
2067
2068 if ((sym = rb_hash_aref(mask, klass)) != Qnil) {
2069 return rb_threadptr_pending_interrupt_from_symbol(th, sym);
2070 }
2071 }
2072 /* try the next mask */
2073 }
2074 return INTERRUPT_NONE;
2075}
2076
2077static int
2078rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th)
2079{
2080 return RARRAY_LEN(th->pending_interrupt_queue) == 0;
2081}
2082
2083static int
2084rb_threadptr_pending_interrupt_include_p(rb_thread_t *th, VALUE err)
2085{
2086 int i;
2087 for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
2088 VALUE e = RARRAY_AREF(th->pending_interrupt_queue, i);
2089 if (rb_obj_is_kind_of(e, err)) {
2090 return TRUE;
2091 }
2092 }
2093 return FALSE;
2094}
2095
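/*
 * Dequeue the first pending interrupt whose mask timing permits delivery
 * now: unmasked and :immediate errors are always deliverable, :on_blocking
 * errors only when called with INTERRUPT_ON_BLOCKING timing, and :never
 * errors stay queued. Returns Qundef when nothing can be delivered.
 */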
2096static VALUE
2097rb_threadptr_pending_interrupt_deque(rb_thread_t *th, enum handle_interrupt_timing timing)
2098{
2099#if 1 /* 1 to enable Thread#handle_interrupt, 0 to ignore it */
2100 int i;
2101
2102 for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
2103 VALUE err = RARRAY_AREF(th->pending_interrupt_queue, i);
2104
2105 enum handle_interrupt_timing mask_timing = rb_threadptr_pending_interrupt_check_mask(th, CLASS_OF(err));
2106
2107 switch (mask_timing) {
2108 case INTERRUPT_ON_BLOCKING:
2109 if (timing != INTERRUPT_ON_BLOCKING) {
2110 break;
2111 }
2112 /* fall through */
2113 case INTERRUPT_NONE: /* default: IMMEDIATE */
2114 case INTERRUPT_IMMEDIATE:
2115 rb_ary_delete_at(th->pending_interrupt_queue, i);
2116 return err;
2117 case INTERRUPT_NEVER:
2118 break;
2119 }
2120 }
2121
2122 th->pending_interrupt_queue_checked = 1;
2123 return Qundef;
2124#else
2125 VALUE err = rb_ary_shift(th->pending_interrupt_queue);
2126 if (rb_threadptr_pending_interrupt_empty_p(th)) {
2127 th->pending_interrupt_queue_checked = 1;
2128 }
2129 return err;
2130#endif
2131}
2132
2133static int
2134threadptr_pending_interrupt_active_p(rb_thread_t *th)
2135{
2136 /*
2137 * As an optimization, we don't check the async errinfo queue
2138 * if neither the queue nor the thread interrupt mask has changed
2139 * since the last check.
2140 */
2141 if (th->pending_interrupt_queue_checked) {
2142 return 0;
2143 }
2144
2145 if (rb_threadptr_pending_interrupt_empty_p(th)) {
2146 return 0;
2147 }
2148
2149 return 1;
2150}
2151
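/*
 * rb_hash_foreach() callback for Thread.handle_interrupt: validates one
 * ExceptionClass => TimingSymbol pair and accumulates a normalized mask in
 * *maskp (a bare timing Symbol for the common Exception-only case, or an
 * identity hash mapping classes to timing symbols otherwise).
 */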
2152static int
2153handle_interrupt_arg_check_i(VALUE key, VALUE val, VALUE args)
2154{
2155 VALUE *maskp = (VALUE *)args;
2156
2157 if (val != sym_immediate && val != sym_on_blocking && val != sym_never) {
2158 rb_raise(rb_eArgError, "unknown mask signature");
2159 }
2160
2161 if (key == rb_eException && (UNDEF_P(*maskp) || NIL_P(*maskp))) {
2162 *maskp = val;
2163 return ST_CONTINUE;
2164 }
2165
2166 if (RTEST(*maskp)) {
2167 if (!RB_TYPE_P(*maskp, T_HASH)) {
2168 VALUE prev = *maskp;
2169 *maskp = rb_ident_hash_new();
2170 if (SYMBOL_P(prev)) {
2171 rb_hash_aset(*maskp, rb_eException, prev);
2172 }
2173 }
2174 rb_hash_aset(*maskp, key, val);
2175 }
2176 else {
2177 *maskp = Qfalse;
2178 }
2179
2180 return ST_CONTINUE;
2181}
2182
2183/*
2184 * call-seq:
2185 * Thread.handle_interrupt(hash) { ... } -> result of the block
2186 *
2187 * Changes asynchronous interrupt timing.
2188 *
2189 * _interrupt_ means an asynchronous event and its corresponding procedure,
2190 * triggered by Thread#raise, Thread#kill, signal trap (not supported yet),
2191 * or main thread termination (if the main thread terminates, then all
2192 * other threads will be killed).
2193 *
2194 * The given +hash+ has pairs like <code>ExceptionClass =>
2195 * :TimingSymbol</code>, where ExceptionClass is the interrupt handled by
2196 * the given block, and TimingSymbol is one of the following symbols:
2197 *
2198 * [+:immediate+] Invoke interrupts immediately.
2199 * [+:on_blocking+] Invoke interrupts while in a _BlockingOperation_.
2200 * [+:never+] Never invoke interrupts.
2201 *
2202 * _BlockingOperation_ means an operation that will block the calling thread,
2203 * such as read and write. In the CRuby implementation, a _BlockingOperation_ is
2204 * any operation executed without the GVL.
2205 *
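 * For example, a minimal sketch (+io+ and +prepare_request+ are
 * placeholder names for your own objects):
 *
 *    Thread.handle_interrupt(RuntimeError => :on_blocking) {
 *      req = prepare_request  # interrupts stay deferred in pure-Ruby code
 *      io.read                # ...but may be delivered during this blocking read
 *    }
 *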
2206 * Masked asynchronous interrupts are delayed until they are enabled.
2207 * This method is similar to sigprocmask(3).
2208 *
2209 * === NOTE
2210 *
2211 * Asynchronous interrupts are difficult to use.
2212 *
2213 * If you need to communicate between threads, please consider another
2214 * mechanism, such as Queue. Otherwise, use asynchronous interrupts only
2215 * with a deep understanding of this method.
2216 *
2217 * === Usage
2218 *
2219 * In this example, we can guard against Thread#raise exceptions.
2220 *
2221 * Using the +:never+ TimingSymbol, the RuntimeError exception will always be
2222 * ignored in the first block of the main thread. In the second
2223 * ::handle_interrupt block we can purposefully handle RuntimeError exceptions.
2224 *
2225 * th = Thread.new do
2226 * Thread.handle_interrupt(RuntimeError => :never) {
2227 * begin
2228 * # You can write resource allocation code safely.
2229 * Thread.handle_interrupt(RuntimeError => :immediate) {
2230 * # ...
2231 * }
2232 * ensure
2233 * # You can write resource deallocation code safely.
2234 * end
2235 * }
2236 * end
2237 * Thread.pass
2238 * # ...
2239 * th.raise "stop"
2240 *
2241 * While we are ignoring the RuntimeError exception, it's safe to write our
2242 * resource allocation code. Then, the ensure block is where we can safely
2243 * deallocate those resources.
2244 *
2245 * ==== Stack control settings
2246 *
2247 * It's possible to stack multiple levels of ::handle_interrupt blocks in order
2248 * to control more than one ExceptionClass and TimingSymbol at a time.
2249 *
2250 * Thread.handle_interrupt(FooError => :never) {
2251 * Thread.handle_interrupt(BarError => :never) {
2252 * # FooError and BarError are prohibited.
2253 * }
2254 * }
2255 *
2256 * ==== Inheritance with ExceptionClass
2257 *
2258 * All exceptions inherited from the ExceptionClass parameter will be considered.
2259 *
2260 * Thread.handle_interrupt(Exception => :never) {
2261 * # all exceptions inherited from Exception are prohibited.
2262 * }
2263 *
2264 * For handling all interrupts, use +Object+ and not +Exception+
2265 * as the ExceptionClass, as kill/terminate interrupts are not handled by +Exception+.
2266 */
2267static VALUE
2268rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg)
2269{
2270 VALUE mask = Qundef;
2271 rb_execution_context_t * volatile ec = GET_EC();
2272 rb_thread_t * volatile th = rb_ec_thread_ptr(ec);
2273 volatile VALUE r = Qnil;
2274 enum ruby_tag_type state;
2275
2276 if (!rb_block_given_p()) {
2277 rb_raise(rb_eArgError, "block is needed.");
2278 }
2279
2280 mask_arg = rb_to_hash_type(mask_arg);
2281
2282 if (OBJ_FROZEN(mask_arg) && rb_hash_compare_by_id_p(mask_arg)) {
2283 mask = Qnil;
2284 }
2285
2286 rb_hash_foreach(mask_arg, handle_interrupt_arg_check_i, (VALUE)&mask);
2287
2288 if (UNDEF_P(mask)) {
2289 return rb_yield(Qnil);
2290 }
2291
2292 if (!RTEST(mask)) {
2293 mask = mask_arg;
2294 }
2295 else if (RB_TYPE_P(mask, T_HASH)) {
2296 OBJ_FREEZE(mask);
2297 }
2298
2299 rb_ary_push(th->pending_interrupt_mask_stack, mask);
2300 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2301 th->pending_interrupt_queue_checked = 0;
2302 RUBY_VM_SET_INTERRUPT(th->ec);
2303 }
2304
2305 EC_PUSH_TAG(th->ec);
2306 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
2307 r = rb_yield(Qnil);
2308 }
2309 EC_POP_TAG();
2310
2311 rb_ary_pop(th->pending_interrupt_mask_stack);
2312 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2313 th->pending_interrupt_queue_checked = 0;
2314 RUBY_VM_SET_INTERRUPT(th->ec);
2315 }
2316
2317 RUBY_VM_CHECK_INTS(th->ec);
2318
2319 if (state) {
2320 EC_JUMP_TAG(th->ec, state);
2321 }
2322
2323 return r;
2324}
2325
2326/*
2327 * call-seq:
2328 * target_thread.pending_interrupt?(error = nil) -> true/false
2329 *
2330 * Returns whether or not the asynchronous queue is empty for the target thread.
2331 *
2332 * If +error+ is given, then check only for +error+ type deferred events.
2333 *
2334 * See ::pending_interrupt? for more information.
2335 */
2336static VALUE
2337rb_thread_pending_interrupt_p(int argc, VALUE *argv, VALUE target_thread)
2338{
2339 rb_thread_t *target_th = rb_thread_ptr(target_thread);
2340
2341 if (!target_th->pending_interrupt_queue) {
2342 return Qfalse;
2343 }
2344 if (rb_threadptr_pending_interrupt_empty_p(target_th)) {
2345 return Qfalse;
2346 }
2347 if (rb_check_arity(argc, 0, 1)) {
2348 VALUE err = argv[0];
2349 if (!rb_obj_is_kind_of(err, rb_cModule)) {
2350 rb_raise(rb_eTypeError, "class or module required for rescue clause");
2351 }
2352 return RBOOL(rb_threadptr_pending_interrupt_include_p(target_th, err));
2353 }
2354 else {
2355 return Qtrue;
2356 }
2357}
2358
2359/*
2360 * call-seq:
2361 * Thread.pending_interrupt?(error = nil) -> true/false
2362 *
2363 * Returns whether or not the asynchronous queue is empty.
2364 *
2365 * Since Thread::handle_interrupt can be used to defer asynchronous events,
2366 * this method can be used to determine if there are any deferred events.
2367 *
2368 * If this method returns true, then you may finish the +:never+ block.
2369 *
2370 * For example, the following method processes deferred asynchronous events
2371 * immediately.
2372 *
2373 * def Thread.kick_interrupt_immediately
2374 * Thread.handle_interrupt(Object => :immediate) {
2375 * Thread.pass
2376 * }
2377 * end
2378 *
2379 * If +error+ is given, then check only for +error+ type deferred events.
2380 *
2381 * === Usage
2382 *
2383 * th = Thread.new{
2384 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2385 * while true
2386 * ...
2387 * # reach safe point to invoke interrupt
2388 * if Thread.pending_interrupt?
2389 * Thread.handle_interrupt(Object => :immediate){}
2390 * end
2391 * ...
2392 * end
2393 * }
2394 * }
2395 * ...
2396 * th.raise # stop thread
2397 *
2398 * This example can also be written as follows, which is the style you
2399 * should prefer in order to avoid asynchronous interrupts:
2400 *
2401 * flag = true
2402 * th = Thread.new{
2403 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2404 * while true
2405 * ...
2406 * # reach safe point to invoke interrupt
2407 * break if flag == false
2408 * ...
2409 * end
2410 * }
2411 * }
2412 * ...
2413 * flag = false # stop thread
2414 */
2415
2416static VALUE
2417rb_thread_s_pending_interrupt_p(int argc, VALUE *argv, VALUE self)
2418{
2419 return rb_thread_pending_interrupt_p(argc, argv, GET_THREAD()->self);
2420}
2421
2422NORETURN(static void rb_threadptr_to_kill(rb_thread_t *th));
2423
2424static void
2425rb_threadptr_to_kill(rb_thread_t *th)
2426{
2427 rb_threadptr_pending_interrupt_clear(th);
2428 th->status = THREAD_RUNNABLE;
2429 th->to_kill = 1;
2430 th->ec->errinfo = INT2FIX(TAG_FATAL);
2431 EC_JUMP_TAG(th->ec, TAG_FATAL);
2432}
2433
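/*
 * Atomically fetch and clear the deliverable interrupt bits: the CAS loop
 * swaps in interrupt_flag & interrupt_mask (so masked bits stay pending)
 * and returns the bits that were set and not masked.
 */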
2434static inline rb_atomic_t
2435threadptr_get_interrupts(rb_thread_t *th)
2436{
2437 rb_execution_context_t *ec = th->ec;
2438 rb_atomic_t interrupt;
2439 rb_atomic_t old;
2440
2441 do {
2442 interrupt = ec->interrupt_flag;
2443 old = ATOMIC_CAS(ec->interrupt_flag, interrupt, interrupt & ec->interrupt_mask);
2444 } while (old != interrupt);
2445 return interrupt & (rb_atomic_t)~ec->interrupt_mask;
2446}
2447
2448static void threadptr_interrupt_exec_exec(rb_thread_t *th);
2449
2450int
2451rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
2452{
2453 rb_atomic_t interrupt;
2454 int postponed_job_interrupt = 0;
2455 int ret = FALSE;
2456
2457 if (th->ec->raised_flag) return ret;
2458
2459 while ((interrupt = threadptr_get_interrupts(th)) != 0) {
2460 int sig;
2461 int timer_interrupt;
2462 int pending_interrupt;
2463 int trap_interrupt;
2464 int terminate_interrupt;
2465
2466 timer_interrupt = interrupt & TIMER_INTERRUPT_MASK;
2467 pending_interrupt = interrupt & PENDING_INTERRUPT_MASK;
2468 postponed_job_interrupt = interrupt & POSTPONED_JOB_INTERRUPT_MASK;
2469 trap_interrupt = interrupt & TRAP_INTERRUPT_MASK;
2470 terminate_interrupt = interrupt & TERMINATE_INTERRUPT_MASK; // request from other ractors
2471
2472 if (interrupt & VM_BARRIER_INTERRUPT_MASK) {
2473 RB_VM_LOCK_ENTER();
2474 RB_VM_LOCK_LEAVE();
2475 }
2476
2477 if (postponed_job_interrupt) {
2478 rb_postponed_job_flush(th->vm);
2479 }
2480
2481 if (trap_interrupt) {
2482 /* signal handling */
2483 if (th == th->vm->ractor.main_thread) {
2484 enum rb_thread_status prev_status = th->status;
2485
2486 th->status = THREAD_RUNNABLE;
2487 {
2488 while ((sig = rb_get_next_signal()) != 0) {
2489 ret |= rb_signal_exec(th, sig);
2490 }
2491 }
2492 th->status = prev_status;
2493 }
2494
2495 if (!ccan_list_empty(&th->interrupt_exec_tasks)) {
2496 enum rb_thread_status prev_status = th->status;
2497
2498 th->status = THREAD_RUNNABLE;
2499 {
2500 threadptr_interrupt_exec_exec(th);
2501 }
2502 th->status = prev_status;
2503 }
2504 }
2505
2506 /* exception from another thread */
2507 if (pending_interrupt && threadptr_pending_interrupt_active_p(th)) {
2508 VALUE err = rb_threadptr_pending_interrupt_deque(th, blocking_timing ? INTERRUPT_ON_BLOCKING : INTERRUPT_NONE);
2509 RUBY_DEBUG_LOG("err:%"PRIdVALUE, err);
2510 ret = TRUE;
2511
2512 if (UNDEF_P(err)) {
2513 /* no error */
2514 }
2515 else if (err == RUBY_FATAL_THREAD_KILLED /* Thread#kill received */ ||
2516 err == RUBY_FATAL_THREAD_TERMINATED /* Terminate thread */ ||
2517 err == INT2FIX(TAG_FATAL) /* Thread.exit etc. */ ) {
2518 terminate_interrupt = 1;
2519 }
2520 else {
2521 if (err == th->vm->special_exceptions[ruby_error_stream_closed]) {
2522 /* the only special exception to be queued across threads */
2523 err = ruby_vm_special_exception_copy(err);
2524 }
2525 /* set runnable if th was sleeping. */
2526 if (th->status == THREAD_STOPPED ||
2527 th->status == THREAD_STOPPED_FOREVER)
2528 th->status = THREAD_RUNNABLE;
2529 rb_exc_raise(err);
2530 }
2531 }
2532
2533 if (terminate_interrupt) {
2534 rb_threadptr_to_kill(th);
2535 }
2536
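        /* Timer interrupt: account the consumed time slice and, once the
         * quantum is exceeded, yield the CPU via rb_thread_schedule_limits().
         * The quantum is scaled by priority: each positive level doubles it,
         * each negative level halves it. */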
2537 if (timer_interrupt) {
2538 uint32_t limits_us = thread_default_quantum_ms * 1000;
2539
2540 if (th->priority > 0)
2541 limits_us <<= th->priority;
2542 else
2543 limits_us >>= -th->priority;
2544
2545 if (th->status == THREAD_RUNNABLE)
2546 th->running_time_us += 10 * 1000; // 10ms = 10_000us // TODO: use macro
2547
2548 VM_ASSERT(th->ec->cfp);
2549 EXEC_EVENT_HOOK(th->ec, RUBY_INTERNAL_EVENT_SWITCH, th->ec->cfp->self,
2550 0, 0, 0, Qundef);
2551
2552 rb_thread_schedule_limits(limits_us);
2553 }
2554 }
2555 return ret;
2556}
2557
2558void
2559rb_thread_execute_interrupts(VALUE thval)
2560{
2561 rb_threadptr_execute_interrupts(rb_thread_ptr(thval), 1);
2562}
2563
2564static void
2565rb_threadptr_ready(rb_thread_t *th)
2566{
2567 rb_threadptr_interrupt(th);
2568}
2569
2570static VALUE
2571rb_threadptr_raise(rb_thread_t *target_th, int argc, VALUE *argv)
2572{
2573 VALUE exc;
2574
2575 if (rb_threadptr_dead(target_th)) {
2576 return Qnil;
2577 }
2578
2579 if (argc == 0) {
2580 exc = rb_exc_new(rb_eRuntimeError, 0, 0);
2581 }
2582 else {
2583 exc = rb_make_exception(argc, argv);
2584 }
2585
2586 /* making an exception object can switch thread,
2587 so we need to check thread deadness again */
2588 if (rb_threadptr_dead(target_th)) {
2589 return Qnil;
2590 }
2591
2592 rb_ec_setup_exception(GET_EC(), exc, Qundef);
2593 rb_threadptr_pending_interrupt_enque(target_th, exc);
2594 rb_threadptr_interrupt(target_th);
2595 return Qnil;
2596}
2597
2598void
2599rb_threadptr_signal_raise(rb_thread_t *th, int sig)
2600{
2601 VALUE argv[2];
2602
2603 argv[0] = rb_eSignal;
2604 argv[1] = INT2FIX(sig);
2605 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2606}
2607
2608void
2609rb_threadptr_signal_exit(rb_thread_t *th)
2610{
2611 VALUE argv[2];
2612
2613 argv[0] = rb_eSystemExit;
2614 argv[1] = rb_str_new2("exit");
2615
2616 // TODO: check signal raise delivery
2617 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2618}
2619
2620int
2621rb_ec_set_raised(rb_execution_context_t *ec)
2622{
2623 if (ec->raised_flag & RAISED_EXCEPTION) {
2624 return 1;
2625 }
2626 ec->raised_flag |= RAISED_EXCEPTION;
2627 return 0;
2628}
2629
2630int
2631rb_ec_reset_raised(rb_execution_context_t *ec)
2632{
2633 if (!(ec->raised_flag & RAISED_EXCEPTION)) {
2634 return 0;
2635 }
2636 ec->raised_flag &= ~RAISED_EXCEPTION;
2637 return 1;
2638}
2639
2640int
2641rb_notify_fd_close(int fd, struct rb_io_close_wait_list *busy)
2642{
2643 rb_vm_t *vm = GET_THREAD()->vm;
2644 struct waiting_fd *wfd = 0, *next;
2645 ccan_list_head_init(&busy->pending_fd_users);
2646 int has_any;
2647 VALUE wakeup_mutex;
2648
2649 RB_VM_LOCK_ENTER();
2650 {
2651 ccan_list_for_each_safe(&vm->waiting_fds, wfd, next, wfd_node) {
2652 if (wfd->fd == fd) {
2653 rb_thread_t *th = wfd->th;
2654 VALUE err;
2655
2656 ccan_list_del(&wfd->wfd_node);
2657 ccan_list_add(&busy->pending_fd_users, &wfd->wfd_node);
2658
2659 wfd->busy = busy;
2660 err = th->vm->special_exceptions[ruby_error_stream_closed];
2661 rb_threadptr_pending_interrupt_enque(th, err);
2662 rb_threadptr_interrupt(th);
2663 }
2664 }
2665 }
2666
2667 has_any = !ccan_list_empty(&busy->pending_fd_users);
2668 busy->closing_thread = rb_thread_current();
2669 busy->closing_fiber = rb_fiber_current();
2670 wakeup_mutex = Qnil;
2671 if (has_any) {
2672 wakeup_mutex = rb_mutex_new();
2673 RBASIC_CLEAR_CLASS(wakeup_mutex); /* hide from ObjectSpace */
2674 }
2675 busy->wakeup_mutex = wakeup_mutex;
2676
2677 RB_VM_LOCK_LEAVE();
2678
2679 /* If the caller didn't pass *busy as a pointer to something on the stack,
2680 we need to guard this mutex object on _our_ C stack for the duration
2681 of this function. */
2682 RB_GC_GUARD(wakeup_mutex);
2683 return has_any;
2684}
2685
2686void
2687rb_notify_fd_close_wait(struct rb_io_close_wait_list *busy)
2688{
2689 if (!RB_TEST(busy->wakeup_mutex)) {
2690 /* There was nobody else using this file when we closed it, so we
2691 never bothered to allocate a mutex */
2692 return;
2693 }
2694
2695 rb_mutex_lock(busy->wakeup_mutex);
2696 while (!ccan_list_empty(&busy->pending_fd_users)) {
2697 rb_mutex_sleep(busy->wakeup_mutex, Qnil);
2698 }
2699 rb_mutex_unlock(busy->wakeup_mutex);
2700}
2701
2702void
2703rb_thread_fd_close(int fd)
2704{
2705 struct rb_io_close_wait_list busy;
2706
2707 if (rb_notify_fd_close(fd, &busy)) {
2708 rb_notify_fd_close_wait(&busy);
2709 }
2710}
2711
2712/*
2713 * call-seq:
2714 * thr.raise
2715 * thr.raise(string)
2716 * thr.raise(exception [, string [, array]])
2717 *
2718 * Raises an exception from the given thread. The caller does not have to be
2719 * +thr+. See Kernel#raise for more information.
2720 *
2721 * Thread.abort_on_exception = true
2722 * a = Thread.new { sleep(200) }
2723 * a.raise("Gotcha")
2724 *
2725 * This will produce:
2726 *
2727 * prog.rb:3: Gotcha (RuntimeError)
2728 * from prog.rb:2:in `initialize'
2729 * from prog.rb:2:in `new'
2730 * from prog.rb:2
2731 */
2732
2733static VALUE
2734thread_raise_m(int argc, VALUE *argv, VALUE self)
2735{
2736 rb_thread_t *target_th = rb_thread_ptr(self);
2737 const rb_thread_t *current_th = GET_THREAD();
2738
2739 threadptr_check_pending_interrupt_queue(target_th);
2740 rb_threadptr_raise(target_th, argc, argv);
2741
2742 /* To perform Thread.current.raise as Kernel.raise */
2743 if (current_th == target_th) {
2744 RUBY_VM_CHECK_INTS(target_th->ec);
2745 }
2746 return Qnil;
2747}
2748
2749
2750/*
2751 * call-seq:
2752 * thr.exit -> thr
2753 * thr.kill -> thr
2754 * thr.terminate -> thr
2755 *
2756 * Terminates +thr+ and schedules another thread to be run, returning
2757 * the terminated Thread. If this is the main thread, or the last
2758 * thread, exits the process.
2759 */
2760
2761VALUE
2762rb_thread_kill(VALUE thread)
2763{
2764 rb_thread_t *target_th = rb_thread_ptr(thread);
2765
2766 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2767 return thread;
2768 }
2769 if (target_th == target_th->vm->ractor.main_thread) {
2770 rb_exit(EXIT_SUCCESS);
2771 }
2772
2773 RUBY_DEBUG_LOG("target_th:%u", rb_th_serial(target_th));
2774
2775 if (target_th == GET_THREAD()) {
2776 /* kill myself immediately */
2777 rb_threadptr_to_kill(target_th);
2778 }
2779 else {
2780 threadptr_check_pending_interrupt_queue(target_th);
2781 rb_threadptr_pending_interrupt_enque(target_th, RUBY_FATAL_THREAD_KILLED);
2782 rb_threadptr_interrupt(target_th);
2783 }
2784
2785 return thread;
2786}
2787
2788int
2789rb_thread_to_be_killed(VALUE thread)
2790{
2791 rb_thread_t *target_th = rb_thread_ptr(thread);
2792
2793 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2794 return TRUE;
2795 }
2796 return FALSE;
2797}
2798
2799/*
2800 * call-seq:
2801 * Thread.kill(thread) -> thread
2802 *
2803 * Causes the given +thread+ to exit; see also Thread::exit.
2804 *
2805 * count = 0
2806 * a = Thread.new { loop { count += 1 } }
2807 * sleep(0.1) #=> 0
2808 * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
2809 * count #=> 93947
2810 * a.alive? #=> false
2811 */
2812
2813static VALUE
2814rb_thread_s_kill(VALUE obj, VALUE th)
2815{
2816 return rb_thread_kill(th);
2817}
2818
2819
2820/*
2821 * call-seq:
2822 * Thread.exit -> thread
2823 *
2824 * Terminates the currently running thread and schedules another thread to be
2825 * run.
2826 *
2827 * If this thread is already marked to be killed, ::exit returns the Thread.
2828 *
2829 * If this is the main thread, or the last thread, exits the process.
2830 */
2831
2832static VALUE
2833rb_thread_exit(VALUE _)
2834{
2835 rb_thread_t *th = GET_THREAD();
2836 return rb_thread_kill(th->self);
2837}
2838
2839
2840/*
2841 * call-seq:
2842 * thr.wakeup -> thr
2843 *
2844 * Marks a given thread as eligible for scheduling; however, it may still
2845 * remain blocked on I/O.
2846 *
2847 * *Note:* This does not invoke the scheduler, see #run for more information.
2848 *
2849 * c = Thread.new { Thread.stop; puts "hey!" }
2850 * sleep 0.1 while c.status!='sleep'
2851 * c.wakeup
2852 * c.join
2853 * #=> "hey!"
2854 */
2855
2856VALUE
2857rb_thread_wakeup(VALUE thread)
2858{
2859 if (!RTEST(rb_thread_wakeup_alive(thread))) {
2860 rb_raise(rb_eThreadError, "killed thread");
2861 }
2862 return thread;
2863}
2864
2865VALUE
2866rb_thread_wakeup_alive(VALUE thread)
2867{
2868 rb_thread_t *target_th = rb_thread_ptr(thread);
2869 if (target_th->status == THREAD_KILLED) return Qnil;
2870
2871 rb_threadptr_ready(target_th);
2872
2873 if (target_th->status == THREAD_STOPPED ||
2874 target_th->status == THREAD_STOPPED_FOREVER) {
2875 target_th->status = THREAD_RUNNABLE;
2876 }
2877
2878 return thread;
2879}
2880
2881
2882/*
2883 * call-seq:
2884 * thr.run -> thr
2885 *
2886 * Wakes up +thr+, making it eligible for scheduling.
2887 *
2888 * a = Thread.new { puts "a"; Thread.stop; puts "c" }
2889 * sleep 0.1 while a.status!='sleep'
2890 * puts "Got here"
2891 * a.run
2892 * a.join
2893 *
2894 * This will produce:
2895 *
2896 * a
2897 * Got here
2898 * c
2899 *
2900 * See also the instance method #wakeup.
2901 */
2902
2903VALUE
2904rb_thread_run(VALUE thread)
2905{
2906 rb_thread_wakeup(thread);
2907 rb_thread_schedule();
2908 return thread;
2909}
2910
2911
2912VALUE
2913rb_thread_stop(void)
2914{
2915 if (rb_thread_alone()) {
2916 rb_raise(rb_eThreadError,
2917 "stopping only thread\n\tnote: use sleep to stop forever");
2918 }
2919 rb_thread_sleep_deadly();
2920 return Qnil;
2921}
2922
2923/*
2924 * call-seq:
2925 * Thread.stop -> nil
2926 *
2927 * Stops execution of the current thread, putting it into a ``sleep'' state,
2928 * and schedules execution of another thread.
2929 *
2930 * a = Thread.new { print "a"; Thread.stop; print "c" }
2931 * sleep 0.1 while a.status!='sleep'
2932 * print "b"
2933 * a.run
2934 * a.join
2935 * #=> "abc"
2936 */
2937
2938static VALUE
2939thread_stop(VALUE _)
2940{
2941 return rb_thread_stop();
2942}
2943
2944/********************************************************************/
2945
2946VALUE
2947rb_thread_list(void)
2948{
2949 // TODO
2950 return rb_ractor_thread_list();
2951}
2952
2953/*
2954 * call-seq:
2955 * Thread.list -> array
2956 *
2957 * Returns an array of Thread objects for all threads that are either runnable
2958 * or stopped.
2959 *
2960 * Thread.new { sleep(200) }
2961 * Thread.new { 1000000.times {|i| i*i } }
2962 * Thread.new { Thread.stop }
2963 * Thread.list.each {|t| p t}
2964 *
2965 * This will produce:
2966 *
2967 * #<Thread:0x401b3e84 sleep>
2968 * #<Thread:0x401b3f38 run>
2969 * #<Thread:0x401b3fb0 sleep>
2970 * #<Thread:0x401bdf4c run>
2971 */
2972
2973static VALUE
2974thread_list(VALUE _)
2975{
2976 return rb_thread_list();
2977}
2978
2979VALUE
2980rb_thread_current(void)
2981{
2982 return GET_THREAD()->self;
2983}
2984
2985/*
2986 * call-seq:
2987 * Thread.current -> thread
2988 *
2989 * Returns the currently executing thread.
2990 *
2991 * Thread.current #=> #<Thread:0x401bdf4c run>
2992 */
2993
2994static VALUE
2995thread_s_current(VALUE klass)
2996{
2997 return rb_thread_current();
2998}
2999
3000VALUE
3001rb_thread_main(void)
3002{
3003 return GET_RACTOR()->threads.main->self;
3004}
3005
3006/*
3007 * call-seq:
3008 * Thread.main -> thread
3009 *
3010 * Returns the main thread.
3011 */
3012
3013static VALUE
3014rb_thread_s_main(VALUE klass)
3015{
3016 return rb_thread_main();
3017}
3018
3019
3020/*
3021 * call-seq:
3022 * Thread.abort_on_exception -> true or false
3023 *
3024 * Returns the status of the global ``abort on exception'' condition.
3025 *
3026 * The default is +false+.
3027 *
3028 * When set to +true+, if any thread is aborted by an exception, the
3029 * raised exception will be re-raised in the main thread.
3030 *
3031 * Can also be specified by the global $DEBUG flag or command line option
3032 * +-d+.
3033 *
3034 * See also ::abort_on_exception=.
3035 *
3036 * There is also an instance level method to set this for a specific thread,
3037 * see #abort_on_exception.
3038 */
3039
3040static VALUE
3041rb_thread_s_abort_exc(VALUE _)
3042{
3043 return RBOOL(GET_THREAD()->vm->thread_abort_on_exception);
3044}
3045
3046
3047/*
3048 * call-seq:
3049 * Thread.abort_on_exception= boolean -> true or false
3050 *
3051 * When set to +true+, if any thread is aborted by an exception, the
3052 * raised exception will be re-raised in the main thread.
3053 * Returns the new state.
3054 *
3055 * Thread.abort_on_exception = true
3056 * t1 = Thread.new do
3057 * puts "In new thread"
3058 * raise "Exception from thread"
3059 * end
3060 * sleep(1)
3061 * puts "not reached"
3062 *
3063 * This will produce:
3064 *
3065 * In new thread
3066 * prog.rb:4: Exception from thread (RuntimeError)
3067 * from prog.rb:2:in `initialize'
3068 * from prog.rb:2:in `new'
3069 * from prog.rb:2
3070 *
3071 * See also ::abort_on_exception.
3072 *
3073 * There is also an instance level method to set this for a specific thread,
3074 * see #abort_on_exception=.
3075 */
3076
3077static VALUE
3078rb_thread_s_abort_exc_set(VALUE self, VALUE val)
3079{
3080 GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
3081 return val;
3082}
3083
3084
3085/*
3086 * call-seq:
3087 * thr.abort_on_exception -> true or false
3088 *
3089 * Returns the status of the thread-local ``abort on exception'' condition for
3090 * this +thr+.
3091 *
3092 * The default is +false+.
3093 *
3094 * See also #abort_on_exception=.
3095 *
3096 * There is also a class level method to set this for all threads, see
3097 * ::abort_on_exception.
3098 */
3099
3100static VALUE
3101rb_thread_abort_exc(VALUE thread)
3102{
3103 return RBOOL(rb_thread_ptr(thread)->abort_on_exception);
3104}
3105
3106
3107/*
3108 * call-seq:
3109 * thr.abort_on_exception= boolean -> true or false
3110 *
3111 * When set to +true+, if this +thr+ is aborted by an exception, the
3112 * raised exception will be re-raised in the main thread.
3113 *
3114 * See also #abort_on_exception.
3115 *
3116 * There is also a class level method to set this for all threads, see
3117 * ::abort_on_exception=.
3118 */
3119
3120static VALUE
3121rb_thread_abort_exc_set(VALUE thread, VALUE val)
3122{
3123 rb_thread_ptr(thread)->abort_on_exception = RTEST(val);
3124 return val;
3125}
3126
3127
3128/*
3129 * call-seq:
3130 * Thread.report_on_exception -> true or false
3131 *
3132 * Returns the status of the global ``report on exception'' condition.
3133 *
3134 * The default is +true+ since Ruby 2.5.
3135 *
3136 * All threads created when this flag is true will report
3137 * a message on $stderr if an exception kills the thread.
3138 *
3139 * Thread.new { 1.times { raise } }
3140 *
3141 * will produce this output on $stderr:
3142 *
3143 * #<Thread:...> terminated with exception (report_on_exception is true):
3144 * Traceback (most recent call last):
3145 * 2: from -e:1:in `block in <main>'
3146 * 1: from -e:1:in `times'
3147 *
3148 * This is done to catch errors in threads early.
3149 * In some cases, you might not want this output.
3150 * There are multiple ways to avoid the extra output:
3151 *
3152 * * If the exception is not intended, the best is to fix the cause of
3153 * the exception so it does not happen anymore.
3154 * * If the exception is intended, it might be better to rescue it closer to
3155 * where it is raised rather than let it kill the Thread.
3156 * * If it is guaranteed the Thread will be joined with Thread#join or
3157 * Thread#value, then it is safe to disable this report with
3158 * <code>Thread.current.report_on_exception = false</code>
3159 * when starting the Thread.
3160 * However, this might handle the exception much later, or not at all
3161 * if the Thread is never joined due to the parent thread being blocked, etc.
3162 *
3163 * See also ::report_on_exception=.
3164 *
3165 * There is also an instance level method to set this for a specific thread,
3166 * see #report_on_exception=.
3167 *
3168 */
3169
3170static VALUE
3171rb_thread_s_report_exc(VALUE _)
3172{
3173 return RBOOL(GET_THREAD()->vm->thread_report_on_exception);
3174}
3175
3176
3177/*
3178 * call-seq:
3179 * Thread.report_on_exception= boolean -> true or false
3180 *
3181 * Returns the new state.
3182 * When set to +true+, all threads created afterwards will inherit the
3183 * condition and report a message on $stderr if an exception kills a thread:
3184 *
3185 * Thread.report_on_exception = true
3186 * t1 = Thread.new do
3187 * puts "In new thread"
3188 * raise "Exception from thread"
3189 * end
3190 * sleep(1)
3191 * puts "In the main thread"
3192 *
3193 * This will produce:
3194 *
3195 * In new thread
3196 * #<Thread:...prog.rb:2> terminated with exception (report_on_exception is true):
3197 * Traceback (most recent call last):
3198 * prog.rb:4:in `block in <main>': Exception from thread (RuntimeError)
3199 * In the main thread
3200 *
3201 * See also ::report_on_exception.
3202 *
3203 * There is also an instance level method to set this for a specific thread,
3204 * see #report_on_exception=.
3205 */
3206
3207static VALUE
3208rb_thread_s_report_exc_set(VALUE self, VALUE val)
3209{
3210 GET_THREAD()->vm->thread_report_on_exception = RTEST(val);
3211 return val;
3212}
3213
3214
3215/*
3216 * call-seq:
3217 * Thread.ignore_deadlock -> true or false
3218 *
3219 * Returns the status of the global ``ignore deadlock'' condition.
3220 * The default is +false+, so that deadlock conditions are not ignored.
3221 *
3222 * See also ::ignore_deadlock=.
3223 *
3224 */
3225
3226static VALUE
3227rb_thread_s_ignore_deadlock(VALUE _)
3228{
3229 return RBOOL(GET_THREAD()->vm->thread_ignore_deadlock);
3230}
3231
3232
3233/*
3234 * call-seq:
3235 * Thread.ignore_deadlock = boolean -> true or false
3236 *
3237 * Returns the new state.
3238 * When set to +true+, the VM will not check for deadlock conditions.
3239 * It is only useful to set this if your application can break a
3240 * deadlock condition via some other means, such as a signal.
3241 *
3242 * Thread.ignore_deadlock = true
3243 * queue = Thread::Queue.new
3244 *
3245 * trap(:SIGUSR1){queue.push "Received signal"}
3246 *
3247 * # raises fatal error unless ignoring deadlock
3248 * puts queue.pop
3249 *
3250 * See also ::ignore_deadlock.
3251 */
3252
3253static VALUE
3254rb_thread_s_ignore_deadlock_set(VALUE self, VALUE val)
3255{
3256 GET_THREAD()->vm->thread_ignore_deadlock = RTEST(val);
3257 return val;
3258}
3259
3260
3261/*
3262 * call-seq:
3263 * thr.report_on_exception -> true or false
3264 *
3265 * Returns the status of the thread-local ``report on exception'' condition for
3266 * this +thr+.
3267 *
3268 * The default value when creating a Thread is the value of
3269 * the global flag Thread.report_on_exception.
3270 *
3271 * See also #report_on_exception=.
3272 *
3273 * There is also a class level method to set this for all new threads, see
3274 * ::report_on_exception=.
3275 */
3276
3277static VALUE
3278rb_thread_report_exc(VALUE thread)
3279{
3280 return RBOOL(rb_thread_ptr(thread)->report_on_exception);
3281}
3282
3283
3284/*
3285 * call-seq:
3286 * thr.report_on_exception= boolean -> true or false
3287 *
3288 * When set to +true+, a message is printed on $stderr if an exception
3289 * kills this +thr+. See ::report_on_exception for details.
3290 *
3291 * See also #report_on_exception.
3292 *
3293 * There is also a class level method to set this for all new threads, see
3294 * ::report_on_exception=.
3295 */
3296
3297static VALUE
3298rb_thread_report_exc_set(VALUE thread, VALUE val)
3299{
3300 rb_thread_ptr(thread)->report_on_exception = RTEST(val);
3301 return val;
3302}
3303
3304
3305/*
3306 * call-seq:
3307 * thr.group -> thgrp or nil
3308 *
3309 * Returns the ThreadGroup which contains the given thread.
3310 *
3311 * Thread.main.group #=> #<ThreadGroup:0x4029d914>
3312 */
3313
3314VALUE
3315rb_thread_group(VALUE thread)
3316{
3317 return rb_thread_ptr(thread)->thgroup;
3318}
3319
3320static const char *
3321thread_status_name(rb_thread_t *th, int detail)
3322{
3323 switch (th->status) {
3324 case THREAD_RUNNABLE:
3325 return th->to_kill ? "aborting" : "run";
3326 case THREAD_STOPPED_FOREVER:
3327 if (detail) return "sleep_forever";
3328 case THREAD_STOPPED:
3329 return "sleep";
3330 case THREAD_KILLED:
3331 return "dead";
3332 default:
3333 return "unknown";
3334 }
3335}
3336
3337static int
3338rb_threadptr_dead(rb_thread_t *th)
3339{
3340 return th->status == THREAD_KILLED;
3341}
3342
3343
3344/*
3345 * call-seq:
3346 * thr.status -> string, false or nil
3347 *
3348 * Returns the status of +thr+.
3349 *
3350 * [<tt>"sleep"</tt>]
3351 * Returned if this thread is sleeping or waiting on I/O
3352 * [<tt>"run"</tt>]
3353 * When this thread is executing
3354 * [<tt>"aborting"</tt>]
3355 * If this thread is aborting
3356 * [+false+]
3357 * When this thread is terminated normally
3358 * [+nil+]
3359 * If terminated with an exception.
3360 *
3361 * a = Thread.new { raise("die now") }
3362 * b = Thread.new { Thread.stop }
3363 * c = Thread.new { Thread.exit }
3364 * d = Thread.new { sleep }
3365 * d.kill #=> #<Thread:0x401b3678 aborting>
3366 * a.status #=> nil
3367 * b.status #=> "sleep"
3368 * c.status #=> false
3369 * d.status #=> "aborting"
3370 * Thread.current.status #=> "run"
3371 *
3372 * See also the instance methods #alive? and #stop?
3373 */
3374
3375static VALUE
3376rb_thread_status(VALUE thread)
3377{
3378 rb_thread_t *target_th = rb_thread_ptr(thread);
3379
3380 if (rb_threadptr_dead(target_th)) {
3381 if (!NIL_P(target_th->ec->errinfo) &&
3382 !FIXNUM_P(target_th->ec->errinfo)) {
3383 return Qnil;
3384 }
3385 else {
3386 return Qfalse;
3387 }
3388 }
3389 else {
3390 return rb_str_new2(thread_status_name(target_th, FALSE));
3391 }
3392}
3393
3394
3395/*
3396 * call-seq:
3397 * thr.alive? -> true or false
3398 *
3399 * Returns +true+ if +thr+ is running or sleeping.
3400 *
3401 * thr = Thread.new { }
3402 * thr.join #=> #<Thread:0x401b3fb0 dead>
3403 * Thread.current.alive? #=> true
3404 * thr.alive? #=> false
3405 *
3406 * See also #stop? and #status.
3407 */
3408
3409static VALUE
3410rb_thread_alive_p(VALUE thread)
3411{
3412 return RBOOL(!thread_finished(rb_thread_ptr(thread)));
3413}
3414
3415/*
3416 * call-seq:
3417 * thr.stop? -> true or false
3418 *
3419 * Returns +true+ if +thr+ is dead or sleeping.
3420 *
3421 * a = Thread.new { Thread.stop }
3422 * b = Thread.current
3423 * a.stop? #=> true
3424 * b.stop? #=> false
3425 *
3426 * See also #alive? and #status.
3427 */
3428
3429static VALUE
3430rb_thread_stop_p(VALUE thread)
3431{
3432 rb_thread_t *th = rb_thread_ptr(thread);
3433
3434 if (rb_threadptr_dead(th)) {
3435 return Qtrue;
3436 }
3437 return RBOOL(th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER);
3438}
3439
3440/*
3441 * call-seq:
3442 * thr.name -> string or nil
3443 *
3444 * Show the name of the thread, or +nil+ if no name has been set.
3445 */
3446
3447static VALUE
3448rb_thread_getname(VALUE thread)
3449{
3450 return rb_thread_ptr(thread)->name;
3451}
3452
3453/*
3454 * call-seq:
3455 * thr.name=(name) -> string
3456 *
3457 * Sets the given name for the Ruby thread.
3458 * On some platforms, the name may also be applied to the underlying pthread
3459 * and/or be visible to the kernel.
3459 */
3460
3461static VALUE
3462rb_thread_setname(VALUE thread, VALUE name)
3463{
3464 rb_thread_t *target_th = rb_thread_ptr(thread);
3465
3466 if (!NIL_P(name)) {
3467 rb_encoding *enc;
3468 StringValueCStr(name);
3469 enc = rb_enc_get(name);
3470 if (!rb_enc_asciicompat(enc)) {
3471 rb_raise(rb_eArgError, "ASCII incompatible encoding (%s)",
3472 rb_enc_name(enc));
3473 }
3474 name = rb_str_new_frozen(name);
3475 }
3476 target_th->name = name;
3477 if (threadptr_initialized(target_th) && target_th->has_dedicated_nt) {
3478 native_set_another_thread_name(target_th->nt->thread_id, name);
3479 }
3480 return name;
3481}
3482
3483#if USE_NATIVE_THREAD_NATIVE_THREAD_ID
3484/*
3485 * call-seq:
3486 * thr.native_thread_id -> integer
3487 *
3488 * Return the native thread ID which is used by the Ruby thread.
3489 *
3490 * The ID depends on the OS. (It is not the POSIX thread ID returned by pthread_self(3).)
3491 * * On Linux it is TID returned by gettid(2).
3492 * * On macOS it is the system-wide unique integral ID of thread returned
3493 * by pthread_threadid_np(3).
3494 * * On FreeBSD it is the unique integral ID of the thread returned by
3495 * pthread_getthreadid_np(3).
3496 * * On Windows it is the thread identifier returned by GetThreadId().
3497 * * On other platforms, it raises NotImplementedError.
3498 *
3499 * NOTE:
3500 * If the thread is not yet associated with, or has already been dissociated
3501 * from, a native thread, it returns _nil_.
3502 * If the Ruby implementation uses an M:N thread model, the ID may change
3503 * depending on the timing.
3504 */
3505
3506static VALUE
3507rb_thread_native_thread_id(VALUE thread)
3508{
3509 rb_thread_t *target_th = rb_thread_ptr(thread);
3510 if (rb_threadptr_dead(target_th)) return Qnil;
3511 return native_thread_native_thread_id(target_th);
3512}
3513#else
3514# define rb_thread_native_thread_id rb_f_notimplement
3515#endif
3516
3517/*
3518 * call-seq:
3519 * thr.to_s -> string
3520 *
3521 * Dump the name, id, and status of _thr_ to a string.
3522 */
3523
3524static VALUE
3525rb_thread_to_s(VALUE thread)
3526{
3527 VALUE cname = rb_class_path(rb_obj_class(thread));
3528 rb_thread_t *target_th = rb_thread_ptr(thread);
3529 const char *status;
3530 VALUE str, loc;
3531
3532 status = thread_status_name(target_th, TRUE);
3533 str = rb_sprintf("#<%"PRIsVALUE":%p", cname, (void *)thread);
3534 if (!NIL_P(target_th->name)) {
3535 rb_str_catf(str, "@%"PRIsVALUE, target_th->name);
3536 }
3537 if ((loc = threadptr_invoke_proc_location(target_th)) != Qnil) {
3538 rb_str_catf(str, " %"PRIsVALUE":%"PRIsVALUE,
3539 RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
3540 }
3541 rb_str_catf(str, " %s>", status);
3542
3543 return str;
3544}
3545
3546/* variables for recursive traversals */
3547#define recursive_key id__recursive_key__
3548
3549static VALUE
3550threadptr_local_aref(rb_thread_t *th, ID id)
3551{
3552 if (id == recursive_key) {
3553 return th->ec->local_storage_recursive_hash;
3554 }
3555 else {
3556 VALUE val;
3557 struct rb_id_table *local_storage = th->ec->local_storage;
3558
3559 if (local_storage != NULL && rb_id_table_lookup(local_storage, id, &val)) {
3560 return val;
3561 }
3562 else {
3563 return Qnil;
3564 }
3565 }
3566}
3567
3568VALUE
3569rb_thread_local_aref(VALUE thread, ID id)
3570{
3571 return threadptr_local_aref(rb_thread_ptr(thread), id);
3572}
3573
3574/*
3575 * call-seq:
3576 * thr[sym] -> obj or nil
3577 *
3578 * Attribute Reference---Returns the value of a fiber-local variable (current thread's root fiber
3579 * if not explicitly inside a Fiber), using either a symbol or a string name.
3580 * If the specified variable does not exist, returns +nil+.
3581 *
3582 * [
3583 * Thread.new { Thread.current["name"] = "A" },
3584 * Thread.new { Thread.current[:name] = "B" },
3585 * Thread.new { Thread.current["name"] = "C" }
3586 * ].each do |th|
3587 * th.join
3588 * puts "#{th.inspect}: #{th[:name]}"
3589 * end
3590 *
3591 * This will produce:
3592 *
3593 * #<Thread:0x00000002a54220 dead>: A
3594 * #<Thread:0x00000002a541a8 dead>: B
3595 * #<Thread:0x00000002a54130 dead>: C
3596 *
3597 * Thread#[] and Thread#[]= are not thread-local but fiber-local.
3598 * This confusion did not exist in Ruby 1.8 because
3599 * fibers were only introduced in Ruby 1.9.
3600 * Ruby 1.9 made these methods fiber-local in order to preserve the
3601 * following idiom for dynamic scope.
3602 *
3603 * def meth(newvalue)
3604 * begin
3605 * oldvalue = Thread.current[:name]
3606 * Thread.current[:name] = newvalue
3607 * yield
3608 * ensure
3609 * Thread.current[:name] = oldvalue
3610 * end
3611 * end
3612 *
3613 * The idiom may not work as a dynamic scope if the methods are thread-local
3614 * and a given block switches fibers.
3615 *
3616 * f = Fiber.new {
3617 * meth(1) {
3618 * Fiber.yield
3619 * }
3620 * }
3621 * meth(2) {
3622 * f.resume
3623 * }
3624 * f.resume
3625 * p Thread.current[:name]
3626 * #=> nil if fiber-local
3627 * #=> 2 if thread-local (The value 2 is leaked to outside of meth method.)
3628 *
3629 * For thread-local variables, please see #thread_variable_get and
3630 * #thread_variable_set.
3631 *
3632 */
3633
3634static VALUE
3635rb_thread_aref(VALUE thread, VALUE key)
3636{
3637 ID id = rb_check_id(&key);
3638 if (!id) return Qnil;
3639 return rb_thread_local_aref(thread, id);
3640}
3641
3642/*
3643 * call-seq:
3644 * thr.fetch(sym) -> obj
3645 * thr.fetch(sym) { } -> obj
3646 * thr.fetch(sym, default) -> obj
3647 *
3648 * Returns a fiber-local for the given key. If the key can't be
3649 * found, there are several options: With no other arguments, it will
3650 * raise a KeyError exception; if <i>default</i> is given, then that
3651 * will be returned; if the optional code block is specified, then
3652 * that will be run and its result returned. See Thread#[] and
3653 * Hash#fetch.
3654 */
3655static VALUE
3656rb_thread_fetch(int argc, VALUE *argv, VALUE self)
3657{
3658 VALUE key, val;
3659 ID id;
3660 rb_thread_t *target_th = rb_thread_ptr(self);
3661 int block_given;
3662
3663 rb_check_arity(argc, 1, 2);
3664 key = argv[0];
3665
3666 block_given = rb_block_given_p();
3667 if (block_given && argc == 2) {
3668 rb_warn("block supersedes default value argument");
3669 }
3670
3671 id = rb_check_id(&key);
3672
3673 if (id == recursive_key) {
3674 return target_th->ec->local_storage_recursive_hash;
3675 }
3676 else if (id && target_th->ec->local_storage &&
3677 rb_id_table_lookup(target_th->ec->local_storage, id, &val)) {
3678 return val;
3679 }
3680 else if (block_given) {
3681 return rb_yield(key);
3682 }
3683 else if (argc == 1) {
3684 rb_key_err_raise(rb_sprintf("key not found: %+"PRIsVALUE, key), self, key);
3685 }
3686 else {
3687 return argv[1];
3688 }
3689}
3690
3691static VALUE
3692threadptr_local_aset(rb_thread_t *th, ID id, VALUE val)
3693{
3694 if (id == recursive_key) {
3695 th->ec->local_storage_recursive_hash = val;
3696 return val;
3697 }
3698 else {
3699 struct rb_id_table *local_storage = th->ec->local_storage;
3700
3701 if (NIL_P(val)) {
3702 if (!local_storage) return Qnil;
3703 rb_id_table_delete(local_storage, id);
3704 return Qnil;
3705 }
3706 else {
3707 if (local_storage == NULL) {
3708 th->ec->local_storage = local_storage = rb_id_table_create(0);
3709 }
3710 rb_id_table_insert(local_storage, id, val);
3711 return val;
3712 }
3713 }
3714}
3715
3716VALUE
3717rb_thread_local_aset(VALUE thread, ID id, VALUE val)
3718{
3719 if (OBJ_FROZEN(thread)) {
3720 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3721 }
3722
3723 return threadptr_local_aset(rb_thread_ptr(thread), id, val);
3724}
3725
3726/*
3727 * call-seq:
3728 * thr[sym] = obj -> obj
3729 *
3730 * Attribute Assignment---Sets or creates the value of a fiber-local variable,
3731 * using either a symbol or a string.
3732 *
3733 * See also Thread#[].
3734 *
3735 * For thread-local variables, please see #thread_variable_set and
3736 * #thread_variable_get.
3737 */
3738
3739static VALUE
3740rb_thread_aset(VALUE self, VALUE id, VALUE val)
3741{
3742 return rb_thread_local_aset(self, rb_to_id(id), val);
3743}
3744
3745/*
3746 * call-seq:
3747 * thr.thread_variable_get(key) -> obj or nil
3748 *
3749 * Returns the value of a thread-local variable that has been set. Note that
3750 * these are different from fiber-local values. For fiber-local values,
3751 * please see Thread#[] and Thread#[]=.
3752 *
3753 * Thread local values are carried along with threads, and do not respect
3754 * fibers. For example:
3755 *
3756 * Thread.new {
3757 * Thread.current.thread_variable_set("foo", "bar") # set a thread local
3758 * Thread.current["foo"] = "bar" # set a fiber local
3759 *
3760 * Fiber.new {
3761 * Fiber.yield [
3762 * Thread.current.thread_variable_get("foo"), # get the thread local
3763 * Thread.current["foo"], # get the fiber local
3764 * ]
3765 * }.resume
3766 * }.join.value # => ['bar', nil]
3767 *
3768 * The value "bar" is returned for the thread local, where nil is returned
3769 * for the fiber local. The fiber is executed in the same thread, so the
3770 * thread local values are available.
3771 */
3772
3773static VALUE
3774rb_thread_variable_get(VALUE thread, VALUE key)
3775{
3776 VALUE locals;
3777 VALUE symbol = rb_to_symbol(key);
3778
3779 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3780 return Qnil;
3781 }
3782 locals = rb_thread_local_storage(thread);
3783 return rb_hash_aref(locals, symbol);
3784}
3785
3786/*
3787 * call-seq:
3788 * thr.thread_variable_set(key, value)
3789 *
3790 * Sets a thread local with +key+ to +value+. Note that these are local to
3791 * threads, and not to fibers. Please see Thread#thread_variable_get and
3792 * Thread#[] for more information.
3793 */
3794
3795static VALUE
3796rb_thread_variable_set(VALUE thread, VALUE key, VALUE val)
3797{
3798 VALUE locals;
3799
3800 if (OBJ_FROZEN(thread)) {
3801 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3802 }
3803
3804 locals = rb_thread_local_storage(thread);
3805 return rb_hash_aset(locals, rb_to_symbol(key), val);
3806}
3807
3808/*
3809 * call-seq:
3810 * thr.key?(sym) -> true or false
3811 *
3812 * Returns +true+ if the given string (or symbol) exists as a fiber-local
3813 * variable.
3814 *
3815 * me = Thread.current
3816 * me[:oliver] = "a"
3817 * me.key?(:oliver) #=> true
3818 * me.key?(:stanley) #=> false
3819 */
3820
3821static VALUE
3822rb_thread_key_p(VALUE self, VALUE key)
3823{
3824 VALUE val;
3825 ID id = rb_check_id(&key);
3826 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3827
3828 if (!id || local_storage == NULL) {
3829 return Qfalse;
3830 }
3831 return RBOOL(rb_id_table_lookup(local_storage, id, &val));
3832}
3833
3834static enum rb_id_table_iterator_result
3835thread_keys_i(ID key, VALUE value, void *ary)
3836{
3837 rb_ary_push((VALUE)ary, ID2SYM(key));
3838 return ID_TABLE_CONTINUE;
3839}
3840
3841int
3842rb_thread_alone(void)
3843{
3844 // TODO
3845 return rb_ractor_living_thread_num(GET_RACTOR()) == 1;
3846}
3847
3848/*
3849 * call-seq:
3850 * thr.keys -> array
3851 *
3852 * Returns an array of the names of the fiber-local variables (as Symbols).
3853 *
3854 * thr = Thread.new do
3855 * Thread.current[:cat] = 'meow'
3856 * Thread.current["dog"] = 'woof'
3857 * end
3858 * thr.join #=> #<Thread:0x401b3f10 dead>
3859 * thr.keys #=> [:dog, :cat]
3860 */
3861
3862static VALUE
3863rb_thread_keys(VALUE self)
3864{
3865 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3866 VALUE ary = rb_ary_new();
3867
3868 if (local_storage) {
3869 rb_id_table_foreach(local_storage, thread_keys_i, (void *)ary);
3870 }
3871 return ary;
3872}
3873
3874static int
3875keys_i(VALUE key, VALUE value, VALUE ary)
3876{
3877 rb_ary_push(ary, key);
3878 return ST_CONTINUE;
3879}
3880
3881/*
3882 * call-seq:
3883 * thr.thread_variables -> array
3884 *
3885 * Returns an array of the names of the thread-local variables (as Symbols).
3886 *
3887 * thr = Thread.new do
3888 * Thread.current.thread_variable_set(:cat, 'meow')
3889 * Thread.current.thread_variable_set("dog", 'woof')
3890 * end
3891 * thr.join #=> #<Thread:0x401b3f10 dead>
3892 * thr.thread_variables #=> [:dog, :cat]
3893 *
3894 * Note that these are not fiber local variables. Please see Thread#[] and
3895 * Thread#thread_variable_get for more details.
3896 */
3897
3898static VALUE
3899rb_thread_variables(VALUE thread)
3900{
3901 VALUE locals;
3902 VALUE ary;
3903
3904 ary = rb_ary_new();
3905 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3906 return ary;
3907 }
3908 locals = rb_thread_local_storage(thread);
3909 rb_hash_foreach(locals, keys_i, ary);
3910
3911 return ary;
3912}
3913
3914/*
3915 * call-seq:
3916 * thr.thread_variable?(key) -> true or false
3917 *
3918 * Returns +true+ if the given string (or symbol) exists as a thread-local
3919 * variable.
3920 *
3921 * me = Thread.current
3922 * me.thread_variable_set(:oliver, "a")
3923 * me.thread_variable?(:oliver) #=> true
3924 * me.thread_variable?(:stanley) #=> false
3925 *
3926 * Note that these are not fiber local variables. Please see Thread#[] and
3927 * Thread#thread_variable_get for more details.
3928 */
3929
3930static VALUE
3931rb_thread_variable_p(VALUE thread, VALUE key)
3932{
3933 VALUE locals;
3934 VALUE symbol = rb_to_symbol(key);
3935
3936 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3937 return Qfalse;
3938 }
3939 locals = rb_thread_local_storage(thread);
3940
3941 return RBOOL(rb_hash_lookup(locals, symbol) != Qnil);
3942}
3943
3944/*
3945 * call-seq:
3946 * thr.priority -> integer
3947 *
3948 * Returns the priority of <i>thr</i>. The default is inherited from the
3949 * thread that created the new thread, or zero for the
3950 * initial main thread; higher-priority threads will run more frequently
3951 * than lower-priority threads (but lower-priority threads can also run).
3952 *
3953 * This is just a hint for the Ruby thread scheduler. It may be ignored on
3954 * some platforms.
3955 *
3956 * Thread.current.priority #=> 0
3957 */
3958
3959static VALUE
3960rb_thread_priority(VALUE thread)
3961{
3962 return INT2NUM(rb_thread_ptr(thread)->priority);
3963}
3964
3965
3966/*
3967 * call-seq:
3968 * thr.priority= integer -> thr
3969 *
3970 * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
3971 * will run more frequently than lower-priority threads (but lower-priority
3972 * threads can also run).
3973 *
3974 * This is just a hint for the Ruby thread scheduler. It may be ignored on
3975 * some platforms.
3976 *
3977 * count1 = count2 = 0
3978 * a = Thread.new do
3979 * loop { count1 += 1 }
3980 * end
3981 * a.priority = -1
3982 *
3983 * b = Thread.new do
3984 * loop { count2 += 1 }
3985 * end
3986 * b.priority = -2
3987 * sleep 1 #=> 1
3988 * count1 #=> 622504
3989 * count2 #=> 5832
3990 */
3991
3992static VALUE
3993rb_thread_priority_set(VALUE thread, VALUE prio)
3994{
3995 rb_thread_t *target_th = rb_thread_ptr(thread);
3996 int priority;
3997
3998#if USE_NATIVE_THREAD_PRIORITY
3999 target_th->priority = NUM2INT(prio);
4000 native_thread_apply_priority(target_th);
4001#else
4002 priority = NUM2INT(prio);
4003 if (priority > RUBY_THREAD_PRIORITY_MAX) {
4004 priority = RUBY_THREAD_PRIORITY_MAX;
4005 }
4006 else if (priority < RUBY_THREAD_PRIORITY_MIN) {
4007 priority = RUBY_THREAD_PRIORITY_MIN;
4008 }
4009 target_th->priority = (int8_t)priority;
4010#endif
4011 return INT2NUM(target_th->priority);
4012}
4013
4014/* for IO */
4015
4016#if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
4017
4018/*
4019 * several Unix platforms support file descriptors bigger than FD_SETSIZE
4020 * in select(2) system call.
4021 *
4022 * - Linux 2.2.12 (?)
4023 * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
4024 * select(2) documents how to allocate fd_set dynamically.
4025 * http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
4026 * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
4027 * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
4028 * select(2) documents how to allocate fd_set dynamically.
4029 * http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
4030 * - Solaris 8 has select_large_fdset
4031 * - Mac OS X 10.7 (Lion)
4032 * select(2) returns EINVAL if nfds is greater than FD_SET_SIZE and
4033 * _DARWIN_UNLIMITED_SELECT (or _DARWIN_C_SOURCE) isn't defined.
4034 * https://developer.apple.com/library/archive/releasenotes/Darwin/SymbolVariantsRelNotes/index.html
4035 *
4036 * When fd_set is not big enough to hold big file descriptors,
4037 * it should be allocated dynamically.
4038 * Note that this assumes fd_set is structured as a bitmap.
4039 *
4040 * rb_fd_init allocates the memory.
4041 * rb_fd_term frees the memory.
4042 * rb_fd_set may re-allocate the bitmap.
4043 *
4044 * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
4045 */
4046
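/*
 * A minimal usage sketch (not part of the original source; `fd' stands for
 * any open descriptor, possibly >= FD_SETSIZE):
 *
 *   rb_fdset_t set;
 *   struct timeval tv = { 1, 0 };        // 1-second timeout
 *
 *   rb_fd_init(&set);
 *   rb_fd_set(fd, &set);                 // grows the bitmap as needed
 *   if (rb_fd_select(fd + 1, &set, NULL, NULL, &tv) > 0 &&
 *       rb_fd_isset(fd, &set)) {
 *       // fd is readable
 *   }
 *   rb_fd_term(&set);                    // releases the bitmap
 */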
4047void
4048rb_fd_init(rb_fdset_t *fds)
4049{
4050 fds->maxfd = 0;
4051 fds->fdset = ALLOC(fd_set);
4052 FD_ZERO(fds->fdset);
4053}
4054
4055void
4056rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
4057{
4058 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
4059
4060 if (size < sizeof(fd_set))
4061 size = sizeof(fd_set);
4062 dst->maxfd = src->maxfd;
4063 dst->fdset = xmalloc(size);
4064 memcpy(dst->fdset, src->fdset, size);
4065}
4066
4067void
4068rb_fd_term(rb_fdset_t *fds)
4069{
4070 xfree(fds->fdset);
4071 fds->maxfd = 0;
4072 fds->fdset = 0;
4073}
4074
4075void
4076rb_fd_zero(rb_fdset_t *fds)
4077{
4078 if (fds->fdset)
4079 MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
4080}
4081
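/* Grow fds->fdset so that descriptor n fits, zero-filling the newly
 * allocated tail of the bitmap; the set never shrinks. Also keeps
 * fds->maxfd one past the largest descriptor seen. */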
4082static void
4083rb_fd_resize(int n, rb_fdset_t *fds)
4084{
4085 size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
4086 size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
4087
4088 if (m < sizeof(fd_set)) m = sizeof(fd_set);
4089 if (o < sizeof(fd_set)) o = sizeof(fd_set);
4090
4091 if (m > o) {
4092 fds->fdset = xrealloc(fds->fdset, m);
4093 memset((char *)fds->fdset + o, 0, m - o);
4094 }
4095 if (n >= fds->maxfd) fds->maxfd = n + 1;
4096}
4097
4098void
4099rb_fd_set(int n, rb_fdset_t *fds)
4100{
4101 rb_fd_resize(n, fds);
4102 FD_SET(n, fds->fdset);
4103}
4104
4105void
4106rb_fd_clr(int n, rb_fdset_t *fds)
4107{
4108 if (n >= fds->maxfd) return;
4109 FD_CLR(n, fds->fdset);
4110}
4111
4112int
4113rb_fd_isset(int n, const rb_fdset_t *fds)
4114{
4115 if (n >= fds->maxfd) return 0;
4116 return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
4117}
4118
4119void
4120rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
4121{
4122 size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);
4123
4124 if (size < sizeof(fd_set)) size = sizeof(fd_set);
4125 dst->maxfd = max;
4126 dst->fdset = xrealloc(dst->fdset, size);
4127 memcpy(dst->fdset, src, size);
4128}
4129
4130void
4131rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
4132{
4133 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
4134
4135 if (size < sizeof(fd_set))
4136 size = sizeof(fd_set);
4137 dst->maxfd = src->maxfd;
4138 dst->fdset = xrealloc(dst->fdset, size);
4139 memcpy(dst->fdset, src->fdset, size);
4140}
4141
4142int
4143rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
4144{
4145 fd_set *r = NULL, *w = NULL, *e = NULL;
4146 if (readfds) {
4147 rb_fd_resize(n - 1, readfds);
4148 r = rb_fd_ptr(readfds);
4149 }
4150 if (writefds) {
4151 rb_fd_resize(n - 1, writefds);
4152 w = rb_fd_ptr(writefds);
4153 }
4154 if (exceptfds) {
4155 rb_fd_resize(n - 1, exceptfds);
4156 e = rb_fd_ptr(exceptfds);
4157 }
4158 return select(n, r, w, e, timeout);
4159}
4160
4161#define rb_fd_no_init(fds) ((void)((fds)->fdset = 0), (void)((fds)->maxfd = 0))
4162
4163#undef FD_ZERO
4164#undef FD_SET
4165#undef FD_CLR
4166#undef FD_ISSET
4167
4168#define FD_ZERO(f) rb_fd_zero(f)
4169#define FD_SET(i, f) rb_fd_set((i), (f))
4170#define FD_CLR(i, f) rb_fd_clr((i), (f))
4171#define FD_ISSET(i, f) rb_fd_isset((i), (f))
4172
4173#elif defined(_WIN32)
4174
4175void
4176rb_fd_init(rb_fdset_t *set)
4177{
4178 set->capa = FD_SETSIZE;
4179 set->fdset = ALLOC(fd_set);
4180 FD_ZERO(set->fdset);
4181}
4182
4183void
4184rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
4185{
4186 rb_fd_init(dst);
4187 rb_fd_dup(dst, src);
4188}
4189
4190void
4191rb_fd_term(rb_fdset_t *set)
4192{
4193 xfree(set->fdset);
4194 set->fdset = NULL;
4195 set->capa = 0;
4196}
4197
4198void
4199rb_fd_set(int fd, rb_fdset_t *set)
4200{
4201 unsigned int i;
4202 SOCKET s = rb_w32_get_osfhandle(fd);
4203
4204 for (i = 0; i < set->fdset->fd_count; i++) {
4205 if (set->fdset->fd_array[i] == s) {
4206 return;
4207 }
4208 }
4209 if (set->fdset->fd_count >= (unsigned)set->capa) {
4210 set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
4211 set->fdset =
4212 rb_xrealloc_mul_add(
4213 set->fdset, set->capa, sizeof(SOCKET), sizeof(unsigned int));
4214 }
4215 set->fdset->fd_array[set->fdset->fd_count++] = s;
4216}
4217
4218#undef FD_ZERO
4219#undef FD_SET
4220#undef FD_CLR
4221#undef FD_ISSET
4222
4223#define FD_ZERO(f) rb_fd_zero(f)
4224#define FD_SET(i, f) rb_fd_set((i), (f))
4225#define FD_CLR(i, f) rb_fd_clr((i), (f))
4226#define FD_ISSET(i, f) rb_fd_isset((i), (f))
4227
4228#define rb_fd_no_init(fds) (void)((fds)->fdset = 0)
4229
4230#endif
4231
4232#ifndef rb_fd_no_init
4233#define rb_fd_no_init(fds) (void)(fds)
4234#endif
4235
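/*
 * Decides whether a wait should be retried: a wait interrupted by
 * EINTR/ERESTART is always retried with the remaining time (clamped to
 * zero once the deadline passes), and a zero result before the deadline
 * is treated as a spurious wakeup and retried as well.  Anything else is
 * final and reported to the caller.
 */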
4236static int
4237wait_retryable(volatile int *result, int errnum, rb_hrtime_t *rel, rb_hrtime_t end)
4238{
4239 int r = *result;
4240 if (r < 0) {
4241 switch (errnum) {
4242 case EINTR:
4243#ifdef ERESTART
4244 case ERESTART:
4245#endif
4246 *result = 0;
4247 if (rel && hrtime_update_expire(rel, end)) {
4248 *rel = 0;
4249 }
4250 return TRUE;
4251 }
4252 return FALSE;
4253 }
4254 else if (r == 0) {
4255 /* check for spurious wakeup */
4256 if (rel) {
4257 return !hrtime_update_expire(rel, end);
4258 }
4259 return TRUE;
4260 }
4261 return FALSE;
4262}
4264struct select_set {
4265 int max;
4266 rb_thread_t *th;
4267 rb_fdset_t *rset;
4268 rb_fdset_t *wset;
4269 rb_fdset_t *eset;
4270 rb_fdset_t orig_rset;
4271 rb_fdset_t orig_wset;
4272 rb_fdset_t orig_eset;
4273 struct timeval *timeout;
4274};
4275
4276static VALUE
4277select_set_free(VALUE p)
4278{
4279 struct select_set *set = (struct select_set *)p;
4280
4281 rb_fd_term(&set->orig_rset);
4282 rb_fd_term(&set->orig_wset);
4283 rb_fd_term(&set->orig_eset);
4284
4285 return Qfalse;
4286}
4287
4288static VALUE
4289do_select(VALUE p)
4290{
4291 struct select_set *set = (struct select_set *)p;
4292 volatile int result = 0;
4293 int lerrno;
4294 rb_hrtime_t *to, rel, end = 0;
4295
4296 timeout_prepare(&to, &rel, &end, set->timeout);
4297 volatile rb_hrtime_t endtime = end;
4298#define restore_fdset(dst, src) \
4299 ((dst) ? rb_fd_dup(dst, src) : (void)0)
4300#define do_select_update() \
4301 (restore_fdset(set->rset, &set->orig_rset), \
4302 restore_fdset(set->wset, &set->orig_wset), \
4303 restore_fdset(set->eset, &set->orig_eset), \
4304 TRUE)
4305
4306 do {
4307 lerrno = 0;
4308
4309 BLOCKING_REGION(set->th, {
4310 struct timeval tv;
4311
4312 if (!RUBY_VM_INTERRUPTED(set->th->ec)) {
4313 result = native_fd_select(set->max,
4314 set->rset, set->wset, set->eset,
4315 rb_hrtime2timeval(&tv, to), set->th);
4316 if (result < 0) lerrno = errno;
4317 }
4318 }, ubf_select, set->th, TRUE);
4319
4320 RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec); /* may raise */
4321 } while (wait_retryable(&result, lerrno, to, endtime) && do_select_update());
4322
4323 if (result < 0) {
4324 errno = lerrno;
4325 }
4326
4327 return (VALUE)result;
4328}
4329
4330int
4331rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
4332 struct timeval *timeout)
4333{
4334 struct select_set set;
4335
4336 set.th = GET_THREAD();
4337 RUBY_VM_CHECK_INTS_BLOCKING(set.th->ec);
4338 set.max = max;
4339 set.rset = read;
4340 set.wset = write;
4341 set.eset = except;
4342 set.timeout = timeout;
4343
4344 if (!set.rset && !set.wset && !set.eset) {
4345 if (!timeout) {
4346 rb_thread_sleep_forever();
4347 return 0;
4348 }
4349 rb_thread_wait_for(*timeout);
4350 return 0;
4351 }
4352
4353#define fd_init_copy(f) do { \
4354 if (set.f) { \
4355 rb_fd_resize(set.max - 1, set.f); \
4356 if (&set.orig_##f != set.f) { /* sigwait_fd */ \
4357 rb_fd_init_copy(&set.orig_##f, set.f); \
4358 } \
4359 } \
4360 else { \
4361 rb_fd_no_init(&set.orig_##f); \
4362 } \
4363 } while (0)
4364 fd_init_copy(rset);
4365 fd_init_copy(wset);
4366 fd_init_copy(eset);
4367#undef fd_init_copy
4368
4369 return (int)rb_ensure(do_select, (VALUE)&set, select_set_free, (VALUE)&set);
4370}
4371
4372#ifdef USE_POLL
4373
4374/* The same as the Linux kernel. TODO: make a platform-independent definition. */
4375#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
4376#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
4377#define POLLEX_SET (POLLPRI)
4378
4379#ifndef POLLERR_SET /* defined for FreeBSD for now */
4380# define POLLERR_SET (0)
4381#endif
4382
4383static int
4384wait_for_single_fd_blocking_region(rb_thread_t *th, struct pollfd *fds, nfds_t nfds,
4385 rb_hrtime_t *const to, volatile int *lerrno)
4386{
4387 struct timespec ts;
4388 volatile int result = 0;
4389
4390 *lerrno = 0;
4391 BLOCKING_REGION(th, {
4392 if (!RUBY_VM_INTERRUPTED(th->ec)) {
4393 result = ppoll(fds, nfds, rb_hrtime2timespec(&ts, to), 0);
4394 if (result < 0) *lerrno = errno;
4395 }
4396 }, ubf_select, th, TRUE);
4397 return result;
4398}
4399
4400/*
4401 * returns a mask of events
4402 */
4403int
4404rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
4405{
4406 struct pollfd fds[1] = {{
4407 .fd = fd,
4408 .events = (short)events,
4409 .revents = 0,
4410 }};
4411 volatile int result = 0;
4412 nfds_t nfds;
4413 struct waiting_fd wfd;
4414 enum ruby_tag_type state;
4415 volatile int lerrno;
4416
4417 rb_execution_context_t *ec = GET_EC();
4418 rb_thread_t *th = rb_ec_thread_ptr(ec);
4419
4420 thread_io_setup_wfd(th, fd, &wfd);
4421
4422 if (timeout == NULL && thread_io_wait_events(th, fd, events, NULL)) {
4423 // the requested events are already ready
4424 state = 0;
4425 fds[0].revents = events;
4426 errno = 0;
4427 }
4428 else {
4429 EC_PUSH_TAG(wfd.th->ec);
4430 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
4431 rb_hrtime_t *to, rel, end = 0;
4432 RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
4433 timeout_prepare(&to, &rel, &end, timeout);
4434 do {
4435 nfds = numberof(fds);
4436 result = wait_for_single_fd_blocking_region(wfd.th, fds, nfds, to, &lerrno);
4437
4438 RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
4439 } while (wait_retryable(&result, lerrno, to, end));
4440 }
4441 EC_POP_TAG();
4442 }
4443
4444 thread_io_wake_pending_closer(&wfd);
4445
4446 if (state) {
4447 EC_JUMP_TAG(wfd.th->ec, state);
4448 }
4449
4450 if (result < 0) {
4451 errno = lerrno;
4452 return -1;
4453 }
4454
4455 if (fds[0].revents & POLLNVAL) {
4456 errno = EBADF;
4457 return -1;
4458 }
4459
4460 /*
4461 * POLLIN and POLLOUT have different meanings from select(2)'s read/write bits.
4462 * Therefore we need to fix them up.
4463 */
4464 result = 0;
4465 if (fds[0].revents & POLLIN_SET)
4466 result |= RB_WAITFD_IN;
4467 if (fds[0].revents & POLLOUT_SET)
4468 result |= RB_WAITFD_OUT;
4469 if (fds[0].revents & POLLEX_SET)
4470 result |= RB_WAITFD_PRI;
4471
4472 /* all requested events are ready if there is an error */
4473 if (fds[0].revents & POLLERR_SET)
4474 result |= events;
4475
4476 return result;
4477}
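
/*
 * Illustrative sketch (hypothetical extension code): wait up to 500ms for
 * fd to become readable and inspect the returned event mask:
 *
 *   struct timeval tv = {0, 500000};
 *   int ret = rb_thread_wait_for_single_fd(fd, RB_WAITFD_IN, &tv);
 *
 *   if (ret < 0)                 rb_sys_fail(0);  // errno is set
 *   else if (ret == 0)           { ... }          // timed out
 *   else if (ret & RB_WAITFD_IN) { ... }          // readable
 */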
4478#else /* ! USE_POLL - implement rb_io_poll_fd() using select() */
4479struct select_args {
4480 union {
4481 int fd;
4482 int error;
4483 } as;
4484 rb_fdset_t *read;
4485 rb_fdset_t *write;
4486 rb_fdset_t *except;
4487 struct waiting_fd wfd;
4488 struct timeval *tv;
4489};
4490
4491static VALUE
4492select_single(VALUE ptr)
4493{
4494 struct select_args *args = (struct select_args *)ptr;
4495 int r;
4496
4497 r = rb_thread_fd_select(args->as.fd + 1,
4498 args->read, args->write, args->except, args->tv);
4499 if (r == -1)
4500 args->as.error = errno;
4501 if (r > 0) {
4502 r = 0;
4503 if (args->read && rb_fd_isset(args->as.fd, args->read))
4504 r |= RB_WAITFD_IN;
4505 if (args->write && rb_fd_isset(args->as.fd, args->write))
4506 r |= RB_WAITFD_OUT;
4507 if (args->except && rb_fd_isset(args->as.fd, args->except))
4508 r |= RB_WAITFD_PRI;
4509 }
4510 return (VALUE)r;
4511}
4512
4513static VALUE
4514select_single_cleanup(VALUE ptr)
4515{
4516 struct select_args *args = (struct select_args *)ptr;
4517
4518 thread_io_wake_pending_closer(&args->wfd);
4519 if (args->read) rb_fd_term(args->read);
4520 if (args->write) rb_fd_term(args->write);
4521 if (args->except) rb_fd_term(args->except);
4522
4523 return (VALUE)-1;
4524}
4525
4526static rb_fdset_t *
4527init_set_fd(int fd, rb_fdset_t *fds)
4528{
4529 if (fd < 0) {
4530 return 0;
4531 }
4532 rb_fd_init(fds);
4533 rb_fd_set(fd, fds);
4534
4535 return fds;
4536}
4537
4538int
4539rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
4540{
4541 rb_fdset_t rfds, wfds, efds;
4542 struct select_args args;
4543 int r;
4544 VALUE ptr = (VALUE)&args;
4545 rb_execution_context_t *ec = GET_EC();
4546 rb_thread_t *th = rb_ec_thread_ptr(ec);
4547
4548 args.as.fd = fd;
4549 args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
4550 args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
4551 args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
4552 args.tv = timeout;
4553 thread_io_setup_wfd(th, fd, &args.wfd);
4554
4555 r = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
4556 if (r == -1)
4557 errno = args.as.error;
4558
4559 return r;
4560}
4561#endif /* ! USE_POLL */
4562
4563/*
4564 * for GC
4565 */
4566
4567#ifdef USE_CONSERVATIVE_STACK_END
4568void
4569rb_gc_set_stack_end(VALUE **stack_end_p)
4570{
4571 VALUE stack_end;
4572COMPILER_WARNING_PUSH
4573#if __has_warning("-Wdangling-pointer")
4574COMPILER_WARNING_IGNORED(-Wdangling-pointer);
4575#endif
4576 *stack_end_p = &stack_end;
4577COMPILER_WARNING_POP
4578}
4579#endif
4580
4581/*
4582 *
4583 */
4584
4585void
4586rb_threadptr_check_signal(rb_thread_t *mth)
4587{
4588 /* mth must be main_thread */
4589 if (rb_signal_buff_size() > 0) {
4590 /* wakeup main thread */
4591 threadptr_trap_interrupt(mth);
4592 }
4593}
4594
4595static void
4596async_bug_fd(const char *mesg, int errno_arg, int fd)
4597{
4598 char buff[64];
4599 size_t n = strlcpy(buff, mesg, sizeof(buff));
4600 if (n < sizeof(buff)-3) {
4601 ruby_snprintf(buff+n, sizeof(buff)-n, "(%d)", fd);
4602 }
4603 rb_async_bug_errno(buff, errno_arg);
4604}
4605
4606/* VM-dependent API is not available for this function */
4607static int
4608consume_communication_pipe(int fd)
4609{
4610#if USE_EVENTFD
4611 uint64_t buff[1];
4612#else
4613 /* the buffer can be shared because no one else refers to it. */
4614 static char buff[1024];
4615#endif
4616 ssize_t result;
4617 int ret = FALSE; /* for rb_sigwait_sleep */
4618
4619 while (1) {
4620 result = read(fd, buff, sizeof(buff));
4621#if USE_EVENTFD
4622 RUBY_DEBUG_LOG("resultf:%d buff:%lu", (int)result, (unsigned long)buff[0]);
4623#else
4624 RUBY_DEBUG_LOG("result:%d", (int)result);
4625#endif
4626 if (result > 0) {
4627 ret = TRUE;
4628 if (USE_EVENTFD || result < (ssize_t)sizeof(buff)) {
4629 return ret;
4630 }
4631 }
4632 else if (result == 0) {
4633 return ret;
4634 }
4635 else if (result < 0) {
4636 int e = errno;
4637 switch (e) {
4638 case EINTR:
4639 continue; /* retry */
4640 case EAGAIN:
4641#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
4642 case EWOULDBLOCK:
4643#endif
4644 return ret;
4645 default:
4646 async_bug_fd("consume_communication_pipe: read", e, fd);
4647 }
4648 }
4649 }
4650}
4651
4652void
4653rb_thread_stop_timer_thread(void)
4654{
4655 if (TIMER_THREAD_CREATED_P() && native_stop_timer_thread()) {
4656 native_reset_timer_thread();
4657 }
4658}
4659
4660void
4661rb_thread_reset_timer_thread(void)
4662{
4663 native_reset_timer_thread();
4664}
4665
4666void
4667rb_thread_start_timer_thread(void)
4668{
4669 system_working = 1;
4670 rb_thread_create_timer_thread();
4671}
4672
4673static int
4674clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
4675{
4676 int i;
4677 VALUE coverage = (VALUE)val;
4678 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
4679 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
4680
4681 if (lines) {
4682 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
4683 rb_ary_clear(lines);
4684 }
4685 else {
4686 int i;
4687 for (i = 0; i < RARRAY_LEN(lines); i++) {
4688 if (RARRAY_AREF(lines, i) != Qnil)
4689 RARRAY_ASET(lines, i, INT2FIX(0));
4690 }
4691 }
4692 }
4693 if (branches) {
4694 VALUE counters = RARRAY_AREF(branches, 1);
4695 for (i = 0; i < RARRAY_LEN(counters); i++) {
4696 RARRAY_ASET(counters, i, INT2FIX(0));
4697 }
4698 }
4699
4700 return ST_CONTINUE;
4701}
4702
4703void
4704rb_clear_coverages(void)
4705{
4706 VALUE coverages = rb_get_coverages();
4707 if (RTEST(coverages)) {
4708 rb_hash_foreach(coverages, clear_coverage_i, 0);
4709 }
4710}
4711
4712#if defined(HAVE_WORKING_FORK)
4713
4714static void
4715rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const rb_thread_t *))
4716{
4717 rb_thread_t *i = 0;
4718 rb_vm_t *vm = th->vm;
4719 rb_ractor_t *r = th->ractor;
4720 vm->ractor.main_ractor = r;
4721 vm->ractor.main_thread = th;
4722 r->threads.main = th;
4723 r->status_ = ractor_created;
4724
4725 thread_sched_atfork(TH_SCHED(th));
4726 ubf_list_atfork();
4727
4728 // OK. Only this thread accesses:
4729 ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
4730 ccan_list_for_each(&r->threads.set, i, lt_node) {
4731 atfork(i, th);
4732 }
4733 }
4734 rb_vm_living_threads_init(vm);
4735
4736 rb_ractor_atfork(vm, th);
4737 rb_vm_postponed_job_atfork();
4738
4739 /* may be held by any thread in parent */
4740 rb_native_mutex_initialize(&th->interrupt_lock);
4741 ccan_list_head_init(&th->interrupt_exec_tasks);
4742
4743 vm->fork_gen++;
4744 rb_ractor_sleeper_threads_clear(th->ractor);
4745 rb_clear_coverages();
4746
4747 // restart timer thread (timer threads access `vm->waitpid_lock` and so on).
4748 rb_thread_reset_timer_thread();
4749 rb_thread_start_timer_thread();
4750
4751 VM_ASSERT(vm->ractor.blocking_cnt == 0);
4752 VM_ASSERT(vm->ractor.cnt == 1);
4753}
4754
4755static void
4756terminate_atfork_i(rb_thread_t *th, const rb_thread_t *current_th)
4757{
4758 if (th != current_th) {
4759 rb_mutex_abandon_keeping_mutexes(th);
4760 rb_mutex_abandon_locking_mutex(th);
4761 thread_cleanup_func(th, TRUE);
4762 }
4763}
4764
4765void rb_fiber_atfork(rb_thread_t *);
4766void
4767rb_thread_atfork(void)
4768{
4769 rb_thread_t *th = GET_THREAD();
4770 rb_threadptr_pending_interrupt_clear(th);
4771 rb_thread_atfork_internal(th, terminate_atfork_i);
4772 th->join_list = NULL;
4773 rb_fiber_atfork(th);
4774
4775 /* We don't want to reproduce CVE-2003-0900. */
4776 rb_reset_random_seed();
4777}
4778
4779static void
4780terminate_atfork_before_exec_i(rb_thread_t *th, const rb_thread_t *current_th)
4781{
4782 if (th != current_th) {
4783 thread_cleanup_func_before_exec(th);
4784 }
4785}
4786
4787void
4788rb_thread_atfork_before_exec(void)
4789{
4790 rb_thread_t *th = GET_THREAD();
4791 rb_thread_atfork_internal(th, terminate_atfork_before_exec_i);
4792}
4793#else
4794void
4795rb_thread_atfork(void)
4796{
4797}
4798
4799void
4800rb_thread_atfork_before_exec(void)
4801{
4802}
4803#endif
4805struct thgroup {
4806 int enclosed;
4807};
4808
4809static const rb_data_type_t thgroup_data_type = {
4810 "thgroup",
4811 {
4812 0,
4813 0,
4814 NULL, // No external memory to report
4815 },
4816 0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
4817};
4818
4819/*
4820 * Document-class: ThreadGroup
4821 *
4822 * ThreadGroup provides a means of keeping track of a number of threads as a
4823 * group.
4824 *
4825 * A given Thread object can only belong to one ThreadGroup at a time; adding
4826 * a thread to a new group will remove it from any previous group.
4827 *
4828 * Newly created threads belong to the same group as the thread from which they
4829 * were created.
4830 */
4831
4832/*
4833 * Document-const: Default
4834 *
4835 * The default ThreadGroup created when Ruby starts; all Threads belong to it
4836 * by default.
4837 */
4838static VALUE
4839thgroup_s_alloc(VALUE klass)
4840{
4841 VALUE group;
4842 struct thgroup *data;
4843
4844 group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
4845 data->enclosed = 0;
4846
4847 return group;
4848}
4849
4850/*
4851 * call-seq:
4852 * thgrp.list -> array
4853 *
4854 * Returns an array of all existing Thread objects that belong to this group.
4855 *
4856 * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
4857 */
4858
4859static VALUE
4860thgroup_list(VALUE group)
4861{
4862 VALUE ary = rb_ary_new();
4863 rb_thread_t *th = 0;
4864 rb_ractor_t *r = GET_RACTOR();
4865
4866 ccan_list_for_each(&r->threads.set, th, lt_node) {
4867 if (th->thgroup == group) {
4868 rb_ary_push(ary, th->self);
4869 }
4870 }
4871 return ary;
4872}
4873
4874
4875/*
4876 * call-seq:
4877 * thgrp.enclose -> thgrp
4878 *
4879 * Prevents threads from being added to or removed from the receiving
4880 * ThreadGroup.
4881 *
4882 * New threads can still be started in an enclosed ThreadGroup.
4883 *
4884 * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
4885 * thr = Thread.new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
4886 * tg = ThreadGroup.new #=> #<ThreadGroup:0x402752d4>
4887 * tg.add thr
4888 * #=> ThreadError: can't move from the enclosed thread group
4889 */
4890
4891static VALUE
4892thgroup_enclose(VALUE group)
4893{
4894 struct thgroup *data;
4895
4896 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4897 data->enclosed = 1;
4898
4899 return group;
4900}
4901
4902
4903/*
4904 * call-seq:
4905 * thgrp.enclosed? -> true or false
4906 *
4907 * Returns +true+ if the +thgrp+ is enclosed. See also ThreadGroup#enclose.
4908 */
4909
4910static VALUE
4911thgroup_enclosed_p(VALUE group)
4912{
4913 struct thgroup *data;
4914
4915 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4916 return RBOOL(data->enclosed);
4917}
4918
4919
4920/*
4921 * call-seq:
4922 * thgrp.add(thread) -> thgrp
4923 *
4924 * Adds the given +thread+ to this group, removing it from any other
4925 * group to which it may have previously been a member.
4926 *
4927 * puts "Initial group is #{ThreadGroup::Default.list}"
4928 * tg = ThreadGroup.new
4929 * t1 = Thread.new { sleep }
4930 * t2 = Thread.new { sleep }
4931 * puts "t1 is #{t1}"
4932 * puts "t2 is #{t2}"
4933 * tg.add(t1)
4934 * puts "Initial group now #{ThreadGroup::Default.list}"
4935 * puts "tg group now #{tg.list}"
4936 *
4937 * This will produce:
4938 *
4939 * Initial group is #<Thread:0x401bdf4c>
4940 * t1 is #<Thread:0x401b3c90>
4941 * t2 is #<Thread:0x401b3c18>
4942 * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
4943 * tg group now #<Thread:0x401b3c90>
4944 */
4945
4946static VALUE
4947thgroup_add(VALUE group, VALUE thread)
4948{
4949 rb_thread_t *target_th = rb_thread_ptr(thread);
4950 struct thgroup *data;
4951
4952 if (OBJ_FROZEN(group)) {
4953 rb_raise(rb_eThreadError, "can't move to the frozen thread group");
4954 }
4955 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4956 if (data->enclosed) {
4957 rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
4958 }
4959
4960 if (OBJ_FROZEN(target_th->thgroup)) {
4961 rb_raise(rb_eThreadError, "can't move from the frozen thread group");
4962 }
4963 TypedData_Get_Struct(target_th->thgroup, struct thgroup, &thgroup_data_type, data);
4964 if (data->enclosed) {
4965 rb_raise(rb_eThreadError,
4966 "can't move from the enclosed thread group");
4967 }
4968
4969 target_th->thgroup = group;
4970 return group;
4971}
4972
4973/*
4974 * Document-class: ThreadShield
4975 */
4976static void
4977thread_shield_mark(void *ptr)
4978{
4979 rb_gc_mark((VALUE)ptr);
4980}
4981
4982static const rb_data_type_t thread_shield_data_type = {
4983 "thread_shield",
4984 {thread_shield_mark, 0, 0,},
4985 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
4986};
4987
4988static VALUE
4989thread_shield_alloc(VALUE klass)
4990{
4991 return TypedData_Wrap_Struct(klass, &thread_shield_data_type, (void *)mutex_alloc(0));
4992}
4993
4994#define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
4995#define THREAD_SHIELD_WAITING_MASK (((FL_USER19-1)&~(FL_USER0-1))|FL_USER19)
4996#define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
4997#define THREAD_SHIELD_WAITING_MAX (THREAD_SHIELD_WAITING_MASK>>THREAD_SHIELD_WAITING_SHIFT)
4998STATIC_ASSERT(THREAD_SHIELD_WAITING_MAX, THREAD_SHIELD_WAITING_MAX <= UINT_MAX);
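
/*
 * The waiting count is packed into the shield object's flag bits
 * FL_USER0..FL_USER19: a count of w is stored as (w << FL_USHIFT) within
 * THREAD_SHIELD_WAITING_MASK, so the shield needs no extra field.
 */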
4999static inline unsigned int
5000rb_thread_shield_waiting(VALUE b)
5001{
5002 return ((RBASIC(b)->flags&THREAD_SHIELD_WAITING_MASK)>>THREAD_SHIELD_WAITING_SHIFT);
5003}
5004
5005static inline void
5006rb_thread_shield_waiting_inc(VALUE b)
5007{
5008 unsigned int w = rb_thread_shield_waiting(b);
5009 w++;
5010 if (w > THREAD_SHIELD_WAITING_MAX)
5011 rb_raise(rb_eRuntimeError, "waiting count overflow");
5012 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
5013 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
5014}
5015
5016static inline void
5017rb_thread_shield_waiting_dec(VALUE b)
5018{
5019 unsigned int w = rb_thread_shield_waiting(b);
5020 if (!w) rb_raise(rb_eRuntimeError, "waiting count underflow");
5021 w--;
5022 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
5023 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
5024}
5025
5026VALUE
5027rb_thread_shield_new(void)
5028{
5029 VALUE thread_shield = thread_shield_alloc(rb_cThreadShield);
5030 rb_mutex_lock((VALUE)DATA_PTR(thread_shield));
5031 return thread_shield;
5032}
5033
5034bool
5035rb_thread_shield_owned(VALUE self)
5036{
5037 VALUE mutex = GetThreadShieldPtr(self);
5038 if (!mutex) return false;
5039
5040 rb_mutex_t *m = mutex_ptr(mutex);
5041
5042 return m->fiber == GET_EC()->fiber_ptr;
5043}
5044
5045/*
5046 * Wait on a thread shield.
5047 *
5048 * Returns
5049 * true: acquired the thread shield
5050 * false: the thread shield was destroyed and no other threads are waiting
5051 * nil: the thread shield was destroyed but it is still in use
5052 */
5053VALUE
5054rb_thread_shield_wait(VALUE self)
5055{
5056 VALUE mutex = GetThreadShieldPtr(self);
5057 rb_mutex_t *m;
5058
5059 if (!mutex) return Qfalse;
5060 m = mutex_ptr(mutex);
5061 if (m->fiber == GET_EC()->fiber_ptr) return Qnil;
5062 rb_thread_shield_waiting_inc(self);
5063 rb_mutex_lock(mutex);
5064 rb_thread_shield_waiting_dec(self);
5065 if (DATA_PTR(self)) return Qtrue;
5066 rb_mutex_unlock(mutex);
5067 return rb_thread_shield_waiting(self) > 0 ? Qnil : Qfalse;
5068}
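
/*
 * Illustrative sketch of the shield protocol (hypothetical users): the
 * creating thread holds the shield's mutex from the start; other threads
 * block in rb_thread_shield_wait() until the owner releases or destroys it:
 *
 *   VALUE shield = rb_thread_shield_new();   // owner: created locked
 *   // other threads: rb_thread_shield_wait(shield) blocks here...
 *   rb_thread_shield_destroy(shield);        // waiters wake with Qnil/Qfalse
 *   // after a plain release, a waiter would wake with Qtrue instead
 */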
5069
5070static VALUE
5071thread_shield_get_mutex(VALUE self)
5072{
5073 VALUE mutex = GetThreadShieldPtr(self);
5074 if (!mutex)
5075 rb_raise(rb_eThreadError, "destroyed thread shield - %p", (void *)self);
5076 return mutex;
5077}
5078
5079/*
5080 * Release a thread shield, and return true if it has waiting threads.
5081 */
5082VALUE
5083rb_thread_shield_release(VALUE self)
5084{
5085 VALUE mutex = thread_shield_get_mutex(self);
5086 rb_mutex_unlock(mutex);
5087 return RBOOL(rb_thread_shield_waiting(self) > 0);
5088}
5089
5090/*
5091 * Release and destroy a thread shield, and return true if it has waiting threads.
5092 */
5093VALUE
5094rb_thread_shield_destroy(VALUE self)
5095{
5096 VALUE mutex = thread_shield_get_mutex(self);
5097 DATA_PTR(self) = 0;
5098 rb_mutex_unlock(mutex);
5099 return RBOOL(rb_thread_shield_waiting(self) > 0);
5100}
5101
5102static VALUE
5103threadptr_recursive_hash(rb_thread_t *th)
5104{
5105 return th->ec->local_storage_recursive_hash;
5106}
5107
5108static void
5109threadptr_recursive_hash_set(rb_thread_t *th, VALUE hash)
5110{
5111 th->ec->local_storage_recursive_hash = hash;
5112}
5113
5115
5116/*
5117 * Returns the current "recursive list" used to detect recursion.
5118 * This list is a hash table, unique for the current thread and for
5119 * the current __callee__.
5120 */
5121
5122static VALUE
5123recursive_list_access(VALUE sym)
5124{
5125 rb_thread_t *th = GET_THREAD();
5126 VALUE hash = threadptr_recursive_hash(th);
5127 VALUE list;
5128 if (NIL_P(hash) || !RB_TYPE_P(hash, T_HASH)) {
5129 hash = rb_ident_hash_new();
5130 threadptr_recursive_hash_set(th, hash);
5131 list = Qnil;
5132 }
5133 else {
5134 list = rb_hash_aref(hash, sym);
5135 }
5136 if (NIL_P(list) || !RB_TYPE_P(list, T_HASH)) {
5137 list = rb_ident_hash_new();
5138 rb_hash_aset(hash, sym, list);
5139 }
5140 return list;
5141}
5142
5143/*
5144 * Returns true if and only if obj (or the pair <obj, paired_obj>) is already
5145 * in the recursion list.
5146 * Assumes the recursion list is valid.
5147 */
5148
5149static bool
5150recursive_check(VALUE list, VALUE obj, VALUE paired_obj_id)
5151{
5152#if SIZEOF_LONG == SIZEOF_VOIDP
5153 #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
5154#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
5155 #define OBJ_ID_EQL(obj_id, other) (RB_BIGNUM_TYPE_P((obj_id)) ? \
5156 rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
5157#endif
5158
5159 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5160 if (UNDEF_P(pair_list))
5161 return false;
5162 if (paired_obj_id) {
5163 if (!RB_TYPE_P(pair_list, T_HASH)) {
5164 if (!OBJ_ID_EQL(paired_obj_id, pair_list))
5165 return false;
5166 }
5167 else {
5168 if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
5169 return false;
5170 }
5171 }
5172 return true;
5173}
5174
5175/*
5176 * Pushes obj (or the pair <obj, paired_obj>) in the recursion list.
5177 * For a single obj, it sets list[obj] to Qtrue.
5178 * For a pair, it sets list[obj] to paired_obj_id if possible,
5179 * otherwise list[obj] becomes a hash like:
5180 * {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
5181 * Assumes the recursion list is valid.
5182 */
5183
5184static void
5185recursive_push(VALUE list, VALUE obj, VALUE paired_obj)
5186{
5187 VALUE pair_list;
5188
5189 if (!paired_obj) {
5190 rb_hash_aset(list, obj, Qtrue);
5191 }
5192 else if (UNDEF_P(pair_list = rb_hash_lookup2(list, obj, Qundef))) {
5193 rb_hash_aset(list, obj, paired_obj);
5194 }
5195 else {
5196 if (!RB_TYPE_P(pair_list, T_HASH)){
5197 VALUE other_paired_obj = pair_list;
5198 pair_list = rb_hash_new();
5199 rb_hash_aset(pair_list, other_paired_obj, Qtrue);
5200 rb_hash_aset(list, obj, pair_list);
5201 }
5202 rb_hash_aset(pair_list, paired_obj, Qtrue);
5203 }
5204}
5205
5206/*
5207 * Pops obj (or the pair <obj, paired_obj>) from the recursion list.
5208 * For a pair, if list[obj] is a hash, then paired_obj_id is
5209 * removed from the hash and no attempt is made to simplify
5210 * list[obj] from {only_one_paired_id => true} to only_one_paired_id.
5211 * Assumes the recursion list is valid.
5212 */
5213
5214static int
5215recursive_pop(VALUE list, VALUE obj, VALUE paired_obj)
5216{
5217 if (paired_obj) {
5218 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5219 if (UNDEF_P(pair_list)) {
5220 return 0;
5221 }
5222 if (RB_TYPE_P(pair_list, T_HASH)) {
5223 rb_hash_delete_entry(pair_list, paired_obj);
5224 if (!RHASH_EMPTY_P(pair_list)) {
5225 return 1; /* keep the hash until it is empty */
5226 }
5227 }
5228 }
5229 rb_hash_delete_entry(list, obj);
5230 return 1;
5231}
5233struct exec_recursive_params {
5234 VALUE (*func) (VALUE, VALUE, int);
5235 VALUE list;
5236 VALUE obj;
5237 VALUE pairid;
5238 VALUE arg;
5239};
5240
5241static VALUE
5242exec_recursive_i(RB_BLOCK_CALL_FUNC_ARGLIST(tag, data))
5243{
5244 struct exec_recursive_params *p = (void *)data;
5245 return (*p->func)(p->obj, p->arg, FALSE);
5246}
5247
5248/*
5249 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5250 * current method is called recursively on obj, or on the pair <obj, pairid>.
5251 * If outer is 0, then the innermost func will be called with recursive set
5252 * to true, otherwise the outermost func will be called. In the latter case,
5253 * all inner func are short-circuited by throw.
5254 * Implementation details: the value thrown is the recursive list which is
5255 * proper to the current method and unlikely to be caught anywhere else.
5256 * list[recursive_key] is used as a flag for the outermost call.
5257 */
5258
5259static VALUE
5260exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer, ID mid)
5261{
5262 VALUE result = Qundef;
5263 const VALUE sym = mid ? ID2SYM(mid) : ID2SYM(idNULL);
5264 struct exec_recursive_params p;
5265 int outermost;
5266 p.list = recursive_list_access(sym);
5267 p.obj = obj;
5268 p.pairid = pairid;
5269 p.arg = arg;
5270 outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);
5271
5272 if (recursive_check(p.list, p.obj, pairid)) {
5273 if (outer && !outermost) {
5274 rb_throw_obj(p.list, p.list);
5275 }
5276 return (*func)(obj, arg, TRUE);
5277 }
5278 else {
5279 enum ruby_tag_type state;
5280
5281 p.func = func;
5282
5283 if (outermost) {
5284 recursive_push(p.list, ID2SYM(recursive_key), 0);
5285 recursive_push(p.list, p.obj, p.pairid);
5286 result = rb_catch_protect(p.list, exec_recursive_i, (VALUE)&p, &state);
5287 if (!recursive_pop(p.list, p.obj, p.pairid)) goto invalid;
5288 if (!recursive_pop(p.list, ID2SYM(recursive_key), 0)) goto invalid;
5289 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5290 if (result == p.list) {
5291 result = (*func)(obj, arg, TRUE);
5292 }
5293 }
5294 else {
5295 volatile VALUE ret = Qundef;
5296 recursive_push(p.list, p.obj, p.pairid);
5297 EC_PUSH_TAG(GET_EC());
5298 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
5299 ret = (*func)(obj, arg, FALSE);
5300 }
5301 EC_POP_TAG();
5302 if (!recursive_pop(p.list, p.obj, p.pairid)) {
5303 goto invalid;
5304 }
5305 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5306 result = ret;
5307 }
5308 }
5309 *(volatile struct exec_recursive_params *)&p;
5310 return result;
5311
5312 invalid:
5313 rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list "
5314 "for %+"PRIsVALUE" in %+"PRIsVALUE,
5315 sym, rb_thread_current());
5316 UNREACHABLE_RETURN(Qundef);
5317}
5318
5319/*
5320 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5321 * current method is called recursively on obj
5322 */
5323
5324VALUE
5325rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5326{
5327 return exec_recursive(func, obj, 0, arg, 0, rb_frame_last_func());
5328}
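
/*
 * Illustrative sketch (hypothetical callback): a C-level inspect that
 * detects self-referential containers via rb_exec_recursive:
 *
 *   static VALUE
 *   my_inspect_body(VALUE obj, VALUE arg, int recursive)
 *   {
 *       if (recursive) return rb_str_new_cstr("[...]");
 *       // ... build and return the full representation ...
 *   }
 *
 *   // recursion on the same obj yields "[...]" instead of looping forever
 *   VALUE str = rb_exec_recursive(my_inspect_body, obj, 0);
 */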
5329
5330/*
5331 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5332 * current method is called recursively on the ordered pair <obj, paired_obj>
5333 */
5334
5335VALUE
5336rb_exec_recursive_paired(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5337{
5338 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 0, rb_frame_last_func());
5339}
5340
5341/*
5342 * If recursion is detected on the current method and obj, the outermost
5343 * func will be called with (obj, arg, true). All inner func will be
5344 * short-circuited using throw.
5345 */
5346
5347VALUE
5348rb_exec_recursive_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5349{
5350 return exec_recursive(func, obj, 0, arg, 1, rb_frame_last_func());
5351}
5352
5353VALUE
5354rb_exec_recursive_outer_mid(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg, ID mid)
5355{
5356 return exec_recursive(func, obj, 0, arg, 1, mid);
5357}
5358
5359/*
5360 * If recursion is detected on the current method, obj and paired_obj,
5361 * the outermost func will be called with (obj, arg, true). All inner
5362 * func will be short-circuited using throw.
5363 */
5364
5365VALUE
5366rb_exec_recursive_paired_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5367{
5368 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 1, rb_frame_last_func());
5369}
5370
5371/*
5372 * call-seq:
5373 * thread.backtrace -> array or nil
5374 *
5375 * Returns the current backtrace of the target thread.
5376 *
5377 */
5378
5379static VALUE
5380rb_thread_backtrace_m(int argc, VALUE *argv, VALUE thval)
5381{
5382 return rb_vm_thread_backtrace(argc, argv, thval);
5383}
5384
5385/* call-seq:
5386 * thread.backtrace_locations(*args) -> array or nil
5387 *
5388 * Returns the execution stack for the target thread---an array containing
5389 * backtrace location objects.
5390 *
5391 * See Thread::Backtrace::Location for more information.
5392 *
5393 * This method behaves similarly to Kernel#caller_locations except it applies
5394 * to a specific thread.
5395 */
5396static VALUE
5397rb_thread_backtrace_locations_m(int argc, VALUE *argv, VALUE thval)
5398{
5399 return rb_vm_thread_backtrace_locations(argc, argv, thval);
5400}
5401
5402void
5403Init_Thread_Mutex(void)
5404{
5405 rb_thread_t *th = GET_THREAD();
5406
5407 rb_native_mutex_initialize(&th->vm->workqueue_lock);
5408 rb_native_mutex_initialize(&th->interrupt_lock);
5409}
5410
5411/*
5412 * Document-class: ThreadError
5413 *
5414 * Raised when an invalid operation is attempted on a thread.
5415 *
5416 * For example, when no other thread has been started:
5417 *
5418 * Thread.stop
5419 *
5420 * This will raise the following exception:
5421 *
5422 * ThreadError: stopping only thread
5423 * note: use sleep to stop forever
5424 */
5425
5426void
5427Init_Thread(void)
5428{
5429 VALUE cThGroup;
5430 rb_thread_t *th = GET_THREAD();
5431
5432 sym_never = ID2SYM(rb_intern_const("never"));
5433 sym_immediate = ID2SYM(rb_intern_const("immediate"));
5434 sym_on_blocking = ID2SYM(rb_intern_const("on_blocking"));
5435
5436 rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
5437 rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
5438 rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
5439 rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
5440 rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
5441 rb_define_singleton_method(rb_cThread, "stop", thread_stop, 0);
5442 rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
5443 rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
5444 rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
5445 rb_define_singleton_method(rb_cThread, "list", thread_list, 0);
5446 rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
5447 rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
5448 rb_define_singleton_method(rb_cThread, "report_on_exception", rb_thread_s_report_exc, 0);
5449 rb_define_singleton_method(rb_cThread, "report_on_exception=", rb_thread_s_report_exc_set, 1);
5450 rb_define_singleton_method(rb_cThread, "ignore_deadlock", rb_thread_s_ignore_deadlock, 0);
5451 rb_define_singleton_method(rb_cThread, "ignore_deadlock=", rb_thread_s_ignore_deadlock_set, 1);
5452 rb_define_singleton_method(rb_cThread, "handle_interrupt", rb_thread_s_handle_interrupt, 1);
5453 rb_define_singleton_method(rb_cThread, "pending_interrupt?", rb_thread_s_pending_interrupt_p, -1);
5454 rb_define_method(rb_cThread, "pending_interrupt?", rb_thread_pending_interrupt_p, -1);
5455
5456 rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
5457 rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
5458 rb_define_method(rb_cThread, "join", thread_join_m, -1);
5459 rb_define_method(rb_cThread, "value", thread_value, 0);
5460 rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
5461 rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
5462 rb_define_method(rb_cThread, "exit", rb_thread_kill, 0);
5463 rb_define_method(rb_cThread, "run", rb_thread_run, 0);
5464 rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
5465 rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
5466 rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
5467 rb_define_method(rb_cThread, "fetch", rb_thread_fetch, -1);
5468 rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
5469 rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
5470 rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
5471 rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
5472 rb_define_method(rb_cThread, "status", rb_thread_status, 0);
5473 rb_define_method(rb_cThread, "thread_variable_get", rb_thread_variable_get, 1);
5474 rb_define_method(rb_cThread, "thread_variable_set", rb_thread_variable_set, 2);
5475 rb_define_method(rb_cThread, "thread_variables", rb_thread_variables, 0);
5476 rb_define_method(rb_cThread, "thread_variable?", rb_thread_variable_p, 1);
5477 rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
5478 rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
5479 rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
5480 rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
5481 rb_define_method(rb_cThread, "report_on_exception", rb_thread_report_exc, 0);
5482 rb_define_method(rb_cThread, "report_on_exception=", rb_thread_report_exc_set, 1);
5483 rb_define_method(rb_cThread, "group", rb_thread_group, 0);
5484 rb_define_method(rb_cThread, "backtrace", rb_thread_backtrace_m, -1);
5485 rb_define_method(rb_cThread, "backtrace_locations", rb_thread_backtrace_locations_m, -1);
5486
5487 rb_define_method(rb_cThread, "name", rb_thread_getname, 0);
5488 rb_define_method(rb_cThread, "name=", rb_thread_setname, 1);
5489 rb_define_method(rb_cThread, "native_thread_id", rb_thread_native_thread_id, 0);
5490 rb_define_method(rb_cThread, "to_s", rb_thread_to_s, 0);
5491 rb_define_alias(rb_cThread, "inspect", "to_s");
5492
5493 rb_vm_register_special_exception(ruby_error_stream_closed, rb_eIOError,
5494 "stream closed in another thread");
5495
5496 cThGroup = rb_define_class("ThreadGroup", rb_cObject);
5497 rb_define_alloc_func(cThGroup, thgroup_s_alloc);
5498 rb_define_method(cThGroup, "list", thgroup_list, 0);
5499 rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
5500 rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
5501 rb_define_method(cThGroup, "add", thgroup_add, 1);
5502
5503 const char * ptr = getenv("RUBY_THREAD_TIMESLICE");
5504
5505 if (ptr) {
5506 long quantum = strtol(ptr, NULL, 0);
5507 if (quantum > 0 && !(SIZEOF_LONG > 4 && quantum > UINT32_MAX)) {
5508 thread_default_quantum_ms = (uint32_t)quantum;
5509 }
5510 else if (0) {
5511 fprintf(stderr, "Ignored RUBY_THREAD_TIMESLICE=%s\n", ptr);
5512 }
5513 }
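
    /*
     * e.g. `RUBY_THREAD_TIMESLICE=50 ruby app.rb` requests a 50ms scheduling
     * quantum; values outside (0, UINT32_MAX] are ignored.
     */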
5514
5515 {
5516 th->thgroup = th->ractor->thgroup_default = rb_obj_alloc(cThGroup);
5517 rb_define_const(cThGroup, "Default", th->thgroup);
5518 }
5519
5521
5522 /* init thread core */
5523 {
5524 /* main thread setting */
5525 {
5526 /* acquire global vm lock */
5527#ifdef HAVE_PTHREAD_NP_H
5528 VM_ASSERT(TH_SCHED(th)->running == th);
5529#endif
5530 // thread_sched_to_running() should not be called because
5531 // it assumes blocked by thread_sched_to_waiting().
5532 // thread_sched_to_running(sched, th);
5533
5534 th->pending_interrupt_queue = rb_ary_hidden_new(0);
5535 th->pending_interrupt_queue_checked = 0;
5536 th->pending_interrupt_mask_stack = rb_ary_hidden_new(0);
5537 }
5538 }
5539
5540 rb_thread_create_timer_thread();
5541
5542 Init_thread_sync();
5543
5544 // TODO: Suppress unused function warning for now
5545 // if (0) rb_thread_sched_destroy(NULL);
5546}
5547
5548int
5549ruby_native_thread_p(void)
5550{
5551 rb_thread_t *th = ruby_thread_from_native();
5552
5553 return th != 0;
5554}
5555
5556#ifdef NON_SCALAR_THREAD_ID
5557 #define thread_id_str(th) (NULL)
5558#else
5559 #define thread_id_str(th) ((void *)(uintptr_t)(th)->nt->thread_id)
5560#endif
5561
5562static void
5563debug_deadlock_check(rb_ractor_t *r, VALUE msg)
5564{
5565 rb_thread_t *th = 0;
5566 VALUE sep = rb_str_new_cstr("\n ");
5567
5568 rb_str_catf(msg, "\n%d threads, %d sleeps current:%p main thread:%p\n",
5569 rb_ractor_living_thread_num(r), rb_ractor_sleeper_thread_num(r),
5570 (void *)GET_THREAD(), (void *)r->threads.main);
5571
5572 ccan_list_for_each(&r->threads.set, th, lt_node) {
5573 rb_str_catf(msg, "* %+"PRIsVALUE"\n rb_thread_t:%p "
5574 "native:%p int:%u",
5575 th->self, (void *)th, th->nt ? thread_id_str(th) : "N/A", th->ec->interrupt_flag);
5576
5577 if (th->locking_mutex) {
5578 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5579 rb_str_catf(msg, " mutex:%p cond:%"PRIuSIZE,
5580 (void *)mutex->fiber, rb_mutex_num_waiting(mutex));
5581 }
5582
5583 {
5584 struct rb_waiting_list *list = th->join_list;
5585 while (list) {
5586 rb_str_catf(msg, "\n depended by: rb_thread_id:%p", (void *)list->thread);
5587 list = list->next;
5588 }
5589 }
5590 rb_str_catf(msg, "\n ");
5591 rb_str_concat(msg, rb_ary_join(rb_ec_backtrace_str_ary(th->ec, RUBY_BACKTRACE_START, RUBY_ALL_BACKTRACE_LINES), sep));
5592 rb_str_catf(msg, "\n");
5593 }
5594}
5595
5596static void
5597rb_check_deadlock(rb_ractor_t *r)
5598{
5599 if (GET_THREAD()->vm->thread_ignore_deadlock) return;
5600
5601#ifdef RUBY_THREAD_PTHREAD_H
5602 if (r->threads.sched.readyq_cnt > 0) return;
5603#endif
5604
5605 int sleeper_num = rb_ractor_sleeper_thread_num(r);
5606 int ltnum = rb_ractor_living_thread_num(r);
5607
5608 if (ltnum > sleeper_num) return;
5609 if (ltnum < sleeper_num) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
5610
5611 int found = 0;
5612 rb_thread_t *th = NULL;
5613
5614 ccan_list_for_each(&r->threads.set, th, lt_node) {
5615 if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th->ec)) {
5616 found = 1;
5617 }
5618 else if (th->locking_mutex) {
5619 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5620 if (mutex->fiber == th->ec->fiber_ptr || (!mutex->fiber && !ccan_list_empty(&mutex->waitq))) {
5621 found = 1;
5622 }
5623 }
5624 if (found)
5625 break;
5626 }
5627
5628 if (!found) {
5629 VALUE argv[2];
5630 argv[0] = rb_eFatal;
5631 argv[1] = rb_str_new2("No live threads left. Deadlock?");
5632 debug_deadlock_check(r, argv[1]);
5633 rb_ractor_sleeper_threads_dec(GET_RACTOR());
5634 rb_threadptr_raise(r->threads.main, 2, argv);
5635 }
5636}
5637
5638// Used for VM memsize reporting. Returns the size of a list of waiting_fd
5639// structs. Defined here because the struct definition lives here as well.
5640size_t
5641rb_vm_memsize_waiting_fds(struct ccan_list_head *waiting_fds)
5642{
5643 struct waiting_fd *waitfd = 0;
5644 size_t size = 0;
5645
5646 ccan_list_for_each(waiting_fds, waitfd, wfd_node) {
5647 size += sizeof(struct waiting_fd);
5648 }
5649
5650 return size;
5651}
5652
5653static void
5654update_line_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5655{
5656 const rb_control_frame_t *cfp = GET_EC()->cfp;
5657 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5658 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5659 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
5660 if (lines) {
5661 long line = rb_sourceline() - 1;
5662 long count;
5663 VALUE num;
5664 void rb_iseq_clear_event_flags(const rb_iseq_t *iseq, size_t pos, rb_event_flag_t reset);
5665 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
5666 rb_iseq_clear_event_flags(cfp->iseq, cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1, RUBY_EVENT_COVERAGE_LINE);
5667 rb_ary_push(lines, LONG2FIX(line + 1));
5668 return;
5669 }
5670 if (line >= RARRAY_LEN(lines)) { /* no longer tracked */
5671 return;
5672 }
5673 num = RARRAY_AREF(lines, line);
5674 if (!FIXNUM_P(num)) return;
5675 count = FIX2LONG(num) + 1;
5676 if (POSFIXABLE(count)) {
5677 RARRAY_ASET(lines, line, LONG2FIX(count));
5678 }
5679 }
5680 }
5681}
5682
5683static void
5684update_branch_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5685{
5686 const rb_control_frame_t *cfp = GET_EC()->cfp;
5687 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5688 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5689 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
5690 if (branches) {
5691 long pc = cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1;
5692 long idx = FIX2INT(RARRAY_AREF(ISEQ_PC2BRANCHINDEX(cfp->iseq), pc)), count;
5693 VALUE counters = RARRAY_AREF(branches, 1);
5694 VALUE num = RARRAY_AREF(counters, idx);
5695 count = FIX2LONG(num) + 1;
5696 if (POSFIXABLE(count)) {
5697 RARRAY_ASET(counters, idx, LONG2FIX(count));
5698 }
5699 }
5700 }
5701}
5702
5703const rb_method_entry_t *
5704rb_resolve_me_location(const rb_method_entry_t *me, VALUE resolved_location[5])
5705{
5706 VALUE path, beg_pos_lineno, beg_pos_column, end_pos_lineno, end_pos_column;
5707
5708 if (!me->def) return NULL; // negative cme
5709
5710 retry:
5711 switch (me->def->type) {
5712 case VM_METHOD_TYPE_ISEQ: {
5713 const rb_iseq_t *iseq = me->def->body.iseq.iseqptr;
5714 rb_iseq_location_t *loc = &ISEQ_BODY(iseq)->location;
5715 path = rb_iseq_path(iseq);
5716 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5717 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5718 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5719 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5720 break;
5721 }
5722 case VM_METHOD_TYPE_BMETHOD: {
5723 const rb_iseq_t *iseq = rb_proc_get_iseq(me->def->body.bmethod.proc, 0);
5724 if (iseq) {
5725 rb_iseq_location_t *loc;
5726 rb_iseq_check(iseq);
5727 path = rb_iseq_path(iseq);
5728 loc = &ISEQ_BODY(iseq)->location;
5729 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5730 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5731 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5732 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5733 break;
5734 }
5735 return NULL;
5736 }
5737 case VM_METHOD_TYPE_ALIAS:
5738 me = me->def->body.alias.original_me;
5739 goto retry;
5740 case VM_METHOD_TYPE_REFINED:
5741 me = me->def->body.refined.orig_me;
5742 if (!me) return NULL;
5743 goto retry;
5744 default:
5745 return NULL;
5746 }
5747
5748 /* found */
5749 if (RB_TYPE_P(path, T_ARRAY)) {
5750 path = rb_ary_entry(path, 1);
5751 if (!RB_TYPE_P(path, T_STRING)) return NULL; /* just for the case... */
5752 }
5753 if (resolved_location) {
5754 resolved_location[0] = path;
5755 resolved_location[1] = beg_pos_lineno;
5756 resolved_location[2] = beg_pos_column;
5757 resolved_location[3] = end_pos_lineno;
5758 resolved_location[4] = end_pos_column;
5759 }
5760 return me;
5761}
5762
5763static void
5764update_method_coverage(VALUE me2counter, rb_trace_arg_t *trace_arg)
5765{
5766 const rb_control_frame_t *cfp = GET_EC()->cfp;
5767 const rb_callable_method_entry_t *cme = rb_vm_frame_method_entry(cfp);
5768 const rb_method_entry_t *me = (const rb_method_entry_t *)cme;
5769 VALUE rcount;
5770 long count;
5771
5772 me = rb_resolve_me_location(me, 0);
5773 if (!me) return;
5774
5775 rcount = rb_hash_aref(me2counter, (VALUE) me);
5776 count = FIXNUM_P(rcount) ? FIX2LONG(rcount) + 1 : 1;
5777 if (POSFIXABLE(count)) {
5778 rb_hash_aset(me2counter, (VALUE) me, LONG2FIX(count));
5779 }
5780}
5781
5782VALUE
5783rb_get_coverages(void)
5784{
5785 return GET_VM()->coverages;
5786}
5787
5788int
5789rb_get_coverage_mode(void)
5790{
5791 return GET_VM()->coverage_mode;
5792}
5793
5794void
5795rb_set_coverages(VALUE coverages, int mode, VALUE me2counter)
5796{
5797 GET_VM()->coverages = coverages;
5798 GET_VM()->me2counter = me2counter;
5799 GET_VM()->coverage_mode = mode;
5800}
5801
5802void
5803rb_resume_coverages(void)
5804{
5805 int mode = GET_VM()->coverage_mode;
5806 VALUE me2counter = GET_VM()->me2counter;
5807 rb_add_event_hook2((rb_event_hook_func_t) update_line_coverage, RUBY_EVENT_COVERAGE_LINE, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5808 if (mode & COVERAGE_TARGET_BRANCHES) {
5809 rb_add_event_hook2((rb_event_hook_func_t) update_branch_coverage, RUBY_EVENT_COVERAGE_BRANCH, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5810 }
5811 if (mode & COVERAGE_TARGET_METHODS) {
5812 rb_add_event_hook2((rb_event_hook_func_t) update_method_coverage, RUBY_EVENT_CALL, me2counter, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5813 }
5814}
5815
5816void
5817rb_suspend_coverages(void)
5818{
5819 rb_remove_event_hook((rb_event_hook_func_t) update_line_coverage);
5820 if (GET_VM()->coverage_mode & COVERAGE_TARGET_BRANCHES) {
5821 rb_remove_event_hook((rb_event_hook_func_t) update_branch_coverage);
5822 }
5823 if (GET_VM()->coverage_mode & COVERAGE_TARGET_METHODS) {
5824 rb_remove_event_hook((rb_event_hook_func_t) update_method_coverage);
5825 }
5826}
5827
5828/* Make coverage arrays empty so old covered files are no longer tracked. */
5829void
5830rb_reset_coverages(void)
5831{
5832 rb_clear_coverages();
5833 rb_iseq_remove_coverage_all();
5834 GET_VM()->coverages = Qfalse;
5835}
5836
5837VALUE
5838rb_default_coverage(int n)
5839{
5840 VALUE coverage = rb_ary_hidden_new_fill(3);
5841 VALUE lines = Qfalse, branches = Qfalse;
5842 int mode = GET_VM()->coverage_mode;
5843
5844 if (mode & COVERAGE_TARGET_LINES) {
5845 lines = n > 0 ? rb_ary_hidden_new_fill(n) : rb_ary_hidden_new(0);
5846 }
5847 RARRAY_ASET(coverage, COVERAGE_INDEX_LINES, lines);
5848
5849 if (mode & COVERAGE_TARGET_BRANCHES) {
5850 branches = rb_ary_hidden_new_fill(2);
5851 /* internal data structures for branch coverage:
5852 *
5853 * { branch base node =>
5854 * [base_type, base_first_lineno, base_first_column, base_last_lineno, base_last_column, {
5855 * branch target id =>
5856 * [target_type, target_first_lineno, target_first_column, target_last_lineno, target_last_column, target_counter_index],
5857 * ...
5858 * }],
5859 * ...
5860 * }
5861 *
5862 * Example:
5863 * { NODE_CASE =>
5864 * [1, 0, 4, 3, {
5865 * NODE_WHEN => [2, 8, 2, 9, 0],
5866 * NODE_WHEN => [3, 8, 3, 9, 1],
5867 * ...
5868 * }],
5869 * ...
5870 * }
5871 */
5872 VALUE structure = rb_hash_new();
5873 rb_obj_hide(structure);
5874 RARRAY_ASET(branches, 0, structure);
5875 /* branch execution counters */
5876 RARRAY_ASET(branches, 1, rb_ary_hidden_new(0));
5877 }
5878 RARRAY_ASET(coverage, COVERAGE_INDEX_BRANCHES, branches);
5879
5880 return coverage;
5881}
5882
5883static VALUE
5884uninterruptible_exit(VALUE v)
5885{
5886 rb_thread_t *cur_th = GET_THREAD();
5887 rb_ary_pop(cur_th->pending_interrupt_mask_stack);
5888
5889 cur_th->pending_interrupt_queue_checked = 0;
5890 if (!rb_threadptr_pending_interrupt_empty_p(cur_th)) {
5891 RUBY_VM_SET_INTERRUPT(cur_th->ec);
5892 }
5893 return Qnil;
5894}
5895
5896VALUE
5897rb_uninterruptible(VALUE (*b_proc)(VALUE), VALUE data)
5898{
5899 VALUE interrupt_mask = rb_ident_hash_new();
5900 rb_thread_t *cur_th = GET_THREAD();
5901
5902 rb_hash_aset(interrupt_mask, rb_cObject, sym_never);
5903 OBJ_FREEZE(interrupt_mask);
5904 rb_ary_push(cur_th->pending_interrupt_mask_stack, interrupt_mask);
5905
5906 VALUE ret = rb_ensure(b_proc, data, uninterruptible_exit, Qnil);
5907
5908 RUBY_VM_CHECK_INTS(cur_th->ec);
5909 return ret;
5910}
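
/*
 * Illustrative sketch (hypothetical callback): run a section with all
 * pending-interrupt handling deferred (mask :never for Object):
 *
 *   static VALUE
 *   critical_body(VALUE arg)
 *   {
 *       // Thread#raise / Thread#kill style interrupts are deferred here
 *       return Qnil;
 *   }
 *
 *   rb_uninterruptible(critical_body, Qnil);  // interrupts re-checked on exit
 */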
5911
5912static void
5913thread_specific_storage_alloc(rb_thread_t *th)
5914{
5915 VM_ASSERT(th->specific_storage == NULL);
5916
5917 if (UNLIKELY(specific_key_count > 0)) {
5918 th->specific_storage = ZALLOC_N(void *, RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5919 }
5920}
5921
5922rb_internal_thread_specific_key_t
5923rb_internal_thread_specific_key_create(void)
5924{
5925 rb_vm_t *vm = GET_VM();
5926
5927 if (specific_key_count == 0 && vm->ractor.cnt > 1) {
5928 rb_raise(rb_eThreadError, "The first rb_internal_thread_specific_key_create() is called with multiple ractors");
5929 }
5930 else if (specific_key_count > RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX) {
5931 rb_raise(rb_eThreadError, "rb_internal_thread_specific_key_create() is called more than %d times", RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5932 }
5933 else {
5934 rb_internal_thread_specific_key_t key = specific_key_count++;
5935
5936 if (key == 0) {
5937 // allocate
5938 rb_ractor_t *cr = GET_RACTOR();
5939 rb_thread_t *th;
5940
5941 ccan_list_for_each(&cr->threads.set, th, lt_node) {
5942 thread_specific_storage_alloc(th);
5943 }
5944 }
5945 return key;
5946 }
5947}
5948
5949// async and native thread safe.
5950void *
5951rb_internal_thread_specific_get(VALUE thread_val, rb_internal_thread_specific_key_t key)
5952{
5953 rb_thread_t *th = DATA_PTR(thread_val);
5954
5955 VM_ASSERT(rb_thread_ptr(thread_val) == th);
5956 VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5957 VM_ASSERT(th->specific_storage);
5958
5959 return th->specific_storage[key];
5960}
5961
5962// async and native thread safe.
5963void
5964rb_internal_thread_specific_set(VALUE thread_val, rb_internal_thread_specific_key_t key, void *data)
5965{
5966 rb_thread_t *th = DATA_PTR(thread_val);
5967
5968 VM_ASSERT(rb_thread_ptr(thread_val) == th);
5969 VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5970 VM_ASSERT(th->specific_storage);
5971
5972 th->specific_storage[key] = data;
5973}
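
/*
 * Illustrative sketch (hypothetical profiler): create one key at extension
 * init time, then attach per-thread data, possibly from a native thread:
 *
 *   static rb_internal_thread_specific_key_t g_key;
 *
 *   // at Init_...(): must happen while only the main ractor exists
 *   g_key = rb_internal_thread_specific_key_create();
 *
 *   // later, e.g. from an internal thread event hook:
 *   rb_internal_thread_specific_set(thread_val, g_key, some_ptr);
 *   void *p = rb_internal_thread_specific_get(thread_val, g_key);
 */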
5974
5975// interrupt_exec
5976
5977struct rb_interrupt_exec_task {
5978 struct ccan_list_node node;
5979
5980 rb_interrupt_exec_func_t *func;
5981 void *data;
5982 enum rb_interrupt_exec_flag flags;
5983};
5984
5985void
5986rb_threadptr_interrupt_exec_task_mark(rb_thread_t *th)
5987{
5988 struct rb_interrupt_exec_task *task;
5989
5990 ccan_list_for_each(&th->interrupt_exec_tasks, task, node) {
5991 if (task->flags & rb_interrupt_exec_flag_value_data) {
5992 rb_gc_mark((VALUE)task->data);
5993 }
5994 }
5995}
5996
5997// native thread safe
5998// th should be available
5999void
6000rb_threadptr_interrupt_exec(rb_thread_t *th, rb_interrupt_exec_func_t *func, void *data, enum rb_interrupt_exec_flag flags)
6001{
6002 // should not use ALLOC
6003 struct rb_interrupt_exec_task *task = ALLOC(struct rb_interrupt_exec_task);
6004 *task = (struct rb_interrupt_exec_task) {
6005 .flags = flags,
6006 .func = func,
6007 .data = data,
6008 };
6009
6010 rb_native_mutex_lock(&th->interrupt_lock);
6011 {
6012 ccan_list_add_tail(&th->interrupt_exec_tasks, &task->node);
6013 threadptr_interrupt_locked(th, true);
6014 }
6015 rb_native_mutex_unlock(&th->interrupt_lock);
6016}
6017
6018static void
6019threadptr_interrupt_exec_exec(rb_thread_t *th)
6020{
6021 while (1) {
6022 struct rb_interrupt_exec_task *task;
6023
6024 rb_native_mutex_lock(&th->interrupt_lock);
6025 {
6026 task = ccan_list_pop(&th->interrupt_exec_tasks, struct rb_interrupt_exec_task, node);
6027 }
6028 rb_native_mutex_unlock(&th->interrupt_lock);
6029
6030 if (task) {
6031 (*task->func)(task->data);
6032 ruby_xfree(task);
6033 }
6034 else {
6035 break;
6036 }
6037 }
6038}
6039
6040static void
6041threadptr_interrupt_exec_cleanup(rb_thread_t *th)
6042{
6043 rb_native_mutex_lock(&th->interrupt_lock);
6044 {
6045 struct rb_interrupt_exec_task *task;
6046
6047 while ((task = ccan_list_pop(&th->interrupt_exec_tasks, struct rb_interrupt_exec_task, node)) != NULL) {
6048 ruby_xfree(task);
6049 }
6050 }
6051 rb_native_mutex_unlock(&th->interrupt_lock);
6052}
6053
6054struct interrupt_ractor_new_thread_func_data {
6055 rb_interrupt_exec_func_t *func;
6056 void *data;
6057};
6058
6059static VALUE
6060interrupt_ractor_new_thread_func(void *data)
6061{
6062 struct interrupt_ractor_new_thread_data d = *(struct interrupt_ractor_new_thread_data *)data; // copy before freeing the box
6063 ruby_xfree(data);
6064
6065 d.func(d.data);
6066 return Qnil;
6067}
6068
6069static VALUE
6070interrupt_ractor_func(void *data)
6071{
6072 rb_thread_create(interrupt_ractor_new_thread_func, data);
6073 return Qnil;
6074}
6075
6076// native thread safe
6077// func/data should be native thread safe
6078void
6079rb_ractor_interrupt_exec(struct rb_ractor_struct *target_r,
6080 rb_interrupt_exec_func_t *func, void *data, enum rb_interrupt_exec_flag flags)
6081{
6082 struct interrupt_ractor_new_thread_data *d = ALLOC(struct interrupt_ractor_new_thread_data);
6083
6084 d->func = func;
6085 d->data = data;
6086 rb_thread_t *main_th = target_r->threads.main;
6087 rb_threadptr_interrupt_exec(main_th, interrupt_ractor_func, d, flags);
6088
6089 // TODO MEMO: we can create a new thread in a ractor, but not sure how to do that now.
6090}
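
/* Sketch of the ractor-level entry point above: ask `target_r` to run a
 * callback on a fresh thread inside that ractor. Assumes VM-tree compilation
 * and <stdio.h>; `ping` and `ping_ractor` are hypothetical, and `ping` must
 * itself be native thread safe, per the comment above. The flag value
 * rb_interrupt_exec_flag_none (0) is assumed from the enum in vm_core.h.
 */
static void
ping(void *data)
{
    fprintf(stderr, "pinged: %s\n", (const char *)data);
}

static void
ping_ractor(struct rb_ractor_struct *target_r)
{
    // Bounced through target_r's main thread, which spawns a new thread
    // (interrupt_ractor_func -> rb_thread_create) to run the callback.
    rb_ractor_interrupt_exec(target_r, ping, (void *)"hello",
                             rb_interrupt_exec_flag_none);
}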