Ruby 3.5.0dev (2025-04-03 revision 1dddc6c78b5f6dc6ae18ee04ebe44abfce3b0433)
thread.c (1dddc6c78b5f6dc6ae18ee04ebe44abfce3b0433)
/**********************************************************************

  thread.c -

  $Author$

  Copyright (C) 2004-2007 Koichi Sasada

**********************************************************************/

/*
  YARV Thread Design

  model 1: Userlevel Thread
    Same as the traditional ruby thread.

  model 2: Native Thread with Global VM lock
    Using pthreads (or Windows threads); Ruby threads run concurrently.

  model 3: Native Thread with fine grain lock
    Using pthreads; Ruby threads run concurrently or in parallel.

  model 4: M:N User:Native threads with Global VM lock
    Combination of models 1 and 2.

  model 5: M:N User:Native threads with fine grain lock
    Combination of models 1 and 3.

------------------------------------------------------------------------

  model 2:
    Only a thread that holds the mutex (GVL: Global VM Lock, or Giant
    VM Lock) can run.  At a thread-scheduling point the running thread
    releases the GVL.  If the running thread performs a blocking
    operation, it must release the GVL so that another thread can
    continue running.  After the blocking operation completes, the
    thread must check for interrupts (RUBY_VM_CHECK_INTS).

    Every VM can run in parallel.

    Ruby threads are scheduled by the OS thread scheduler.

------------------------------------------------------------------------

  model 3:
    All threads run concurrently or in parallel, so exclusive access
    control is needed to access shared objects.  For example, to access
    a String or Array object, a fine grain lock must be taken every
    time.
 */
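
/*
 * Illustrative sketch (not part of thread.c): under model 2, a C
 * extension lets other Ruby threads run during a slow system call by
 * releasing the GVL around it.  `slow_read' and `struct buf' are
 * hypothetical names; rb_thread_call_without_gvl() is the public API
 * declared in ruby/thread.h and implemented later in this file.
 *
 *   static void *
 *   slow_read(void *arg)
 *   {
 *       struct buf *b = arg;
 *       b->n = read(b->fd, b->ptr, b->capa); // GVL is released here
 *       return NULL;
 *   }
 *
 *   // rb_thread_call_without_gvl(slow_read, &b, RUBY_UBF_IO, NULL);
 *   // On return the GVL is held again and interrupts were checked.
 */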


/*
 * FD_SET, FD_CLR and FD_ISSET have a small sanity check when using glibc
 * 2.15 or later with _FORTIFY_SOURCE > 0.
 * However, the implementation is wrong. Even though Linux's select(2)
 * supports large fd sizes (> FD_SETSIZE), it wrongly assumes fd is always
 * less than FD_SETSIZE (i.e. 1024). And then, when HAVE_RB_FD_INIT is
 * enabled, it doesn't work correctly and makes the program abort. Therefore
 * we need to disable _FORTIFY_SOURCE until glibc fixes it.
 */
#undef _FORTIFY_SOURCE
#undef __USE_FORTIFY_LEVEL
#define __USE_FORTIFY_LEVEL 0

/* for model 2 */

#include "ruby/internal/config.h"

#ifdef __linux__
// Normally, gcc(1) translates calls to alloca() with inlined code. This is not done when either the -ansi, -std=c89, -std=c99, or the -std=c11 option is given and the header <alloca.h> is not included.
# include <alloca.h>
#endif

#define TH_SCHED(th) (&(th)->ractor->threads.sched)

#include "eval_intern.h"
#include "hrtime.h"
#include "internal.h"
#include "internal/class.h"
#include "internal/cont.h"
#include "internal/error.h"
#include "internal/gc.h"
#include "internal/hash.h"
#include "internal/io.h"
#include "internal/object.h"
#include "internal/proc.h"
#include "ruby/fiber/scheduler.h"
#include "internal/signal.h"
#include "internal/thread.h"
#include "internal/time.h"
#include "internal/warnings.h"
#include "iseq.h"
#include "ruby/debug.h"
#include "ruby/io.h"
#include "ruby/thread.h"
#include "ruby/thread_native.h"
#include "timev.h"
#include "vm_core.h"
#include "ractor_core.h"
#include "vm_debug.h"
#include "vm_sync.h"

#ifndef USE_NATIVE_THREAD_PRIORITY
#define USE_NATIVE_THREAD_PRIORITY 0
#define RUBY_THREAD_PRIORITY_MAX 3
#define RUBY_THREAD_PRIORITY_MIN -3
#endif

static VALUE rb_cThreadShield;
static VALUE cThGroup;

static VALUE sym_immediate;
static VALUE sym_on_blocking;
static VALUE sym_never;

static uint32_t thread_default_quantum_ms = 100;

#define THREAD_LOCAL_STORAGE_INITIALISED FL_USER13
#define THREAD_LOCAL_STORAGE_INITIALISED_P(th) RB_FL_TEST_RAW((th), THREAD_LOCAL_STORAGE_INITIALISED)

static inline VALUE
rb_thread_local_storage(VALUE thread)
{
    if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
        rb_ivar_set(thread, idLocals, rb_hash_new());
        RB_FL_SET_RAW(thread, THREAD_LOCAL_STORAGE_INITIALISED);
    }
    return rb_ivar_get(thread, idLocals);
}
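
/*
 * Ruby-level effect (illustrative sketch): this lazily allocated Hash
 * backs Thread#thread_variable_get/set, so the storage only exists once
 * a thread variable is first touched:
 *
 *   t = Thread.new { sleep }
 *   t.thread_variable_set(:answer, 42)  # allocates the locals Hash
 *   t.thread_variable_get(:answer)      # => 42
 */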

enum SLEEP_FLAGS {
    SLEEP_DEADLOCKABLE   = 0x01,
    SLEEP_SPURIOUS_CHECK = 0x02,
    SLEEP_ALLOW_SPURIOUS = 0x04,
    SLEEP_NO_CHECKINTS   = 0x08,
};
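
/*
 * How callers combine these flags (see later in this file):
 * thread_join_sleep() sleeps with SLEEP_DEADLOCKABLE |
 * SLEEP_ALLOW_SPURIOUS | SLEEP_NO_CHECKINTS and performs its own
 * interrupt check each iteration, while rb_thread_sleep_forever() uses
 * SLEEP_SPURIOUS_CHECK so that sleep_forever() itself retries after a
 * spurious wakeup.
 */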

static void sleep_forever(rb_thread_t *th, unsigned int fl);
static int sleep_hrtime(rb_thread_t *, rb_hrtime_t, unsigned int fl);

static void rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end);
static int rb_threadptr_dead(rb_thread_t *th);
static void rb_check_deadlock(rb_ractor_t *r);
static int rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th);
static const char *thread_status_name(rb_thread_t *th, int detail);
static int hrtime_update_expire(rb_hrtime_t *, const rb_hrtime_t);
NORETURN(static void async_bug_fd(const char *mesg, int errno_arg, int fd));
MAYBE_UNUSED(static int consume_communication_pipe(int fd));

static volatile int system_working = 1;
static rb_internal_thread_specific_key_t specific_key_count;

struct waiting_fd {
    struct ccan_list_node wfd_node; /* <=> vm.waiting_fds */
    rb_thread_t *th;
    int fd;
    struct rb_io_close_wait_list *busy;
};

/********************************************************************************/

#define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

struct rb_blocking_region_buffer {
    enum rb_thread_status prev_status;
};

static int unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted);
static void unblock_function_clear(rb_thread_t *th);

static inline int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
                                        rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted);
static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);

#define THREAD_BLOCKING_BEGIN(th) do { \
  struct rb_thread_sched * const sched = TH_SCHED(th); \
  RB_VM_SAVE_MACHINE_CONTEXT(th); \
  thread_sched_to_waiting((sched), (th));

#define THREAD_BLOCKING_END(th) \
  thread_sched_to_running((sched), (th)); \
  rb_ractor_thread_switch(th->ractor, th); \
} while(0)

#ifdef __GNUC__
#ifdef HAVE_BUILTIN___BUILTIN_CHOOSE_EXPR_CONSTANT_P
#define only_if_constant(expr, notconst) __builtin_choose_expr(__builtin_constant_p(expr), (expr), (notconst))
#else
#define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
#endif
#else
#define only_if_constant(expr, notconst) notconst
#endif
#define BLOCKING_REGION(th, exec, ubf, ubfarg, fail_if_interrupted) do { \
    struct rb_blocking_region_buffer __region; \
    if (blocking_region_begin(th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
        /* always return true unless fail_if_interrupted */ \
        !only_if_constant(fail_if_interrupted, TRUE)) { \
        /* Important that this is inlined into the macro, and not part of \
         * blocking_region_begin - see bug #20493 */ \
        RB_VM_SAVE_MACHINE_CONTEXT(th); \
        thread_sched_to_waiting(TH_SCHED(th), th); \
        exec; \
        blocking_region_end(th, &__region); \
    }; \
} while(0)
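
/*
 * Usage shape (sketch): the `exec' statement list runs with the GVL
 * released, and `ubf' is invoked if the thread must be interrupted
 * meanwhile.  For example, rb_thread_io_blocking_call() below wraps an
 * I/O callback roughly as:
 *
 *   BLOCKING_REGION(th, {
 *       val = func(data1);     // runs without the GVL
 *       saved_errno = errno;
 *   }, ubf_select, th, FALSE);
 */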

/*
 * returns true if this thread was spuriously interrupted, false otherwise
 * (e.g. hit by Thread#run or ran a Ruby-level Signal.trap handler)
 */
#define RUBY_VM_CHECK_INTS_BLOCKING(ec) vm_check_ints_blocking(ec)
static inline int
vm_check_ints_blocking(rb_execution_context_t *ec)
{
    rb_thread_t *th = rb_ec_thread_ptr(ec);

    if (LIKELY(rb_threadptr_pending_interrupt_empty_p(th))) {
        if (LIKELY(!RUBY_VM_INTERRUPTED_ANY(ec))) return FALSE;
    }
    else {
        th->pending_interrupt_queue_checked = 0;
        RUBY_VM_SET_INTERRUPT(ec);
    }
    return rb_threadptr_execute_interrupts(th, 1);
}

int
rb_vm_check_ints_blocking(rb_execution_context_t *ec)
{
    return vm_check_ints_blocking(ec);
}

/*
 * poll() is supported by many OSes, but so far Linux is the only
 * one we know of that supports using poll() in all places select()
 * would work.
 */
#if defined(HAVE_POLL)
#  if defined(__linux__)
#    define USE_POLL
#  endif
#  if defined(__FreeBSD_version) && __FreeBSD_version >= 1100000
#    define USE_POLL
     /* FreeBSD does not set POLLOUT when POLLHUP happens */
#    define POLLERR_SET (POLLHUP | POLLERR)
#  endif
#endif

static void
timeout_prepare(rb_hrtime_t **to, rb_hrtime_t *rel, rb_hrtime_t *end,
                const struct timeval *timeout)
{
    if (timeout) {
        *rel = rb_timeval2hrtime(timeout);
        *end = rb_hrtime_add(rb_hrtime_now(), *rel);
        *to = rel;
    }
    else {
        *to = 0;
    }
}
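
/*
 * Caller-side pattern (sketch; the wait step is hypothetical): callers
 * pass stack-allocated `rel'/`end' buffers, then loop and shrink the
 * relative timeout until the deadline passes:
 *
 *   rb_hrtime_t rel, end, *to;
 *   timeout_prepare(&to, &rel, &end, timeout);
 *   do {
 *       // ... wait for up to *to (or forever if to == NULL) ...
 *   } while (to && !hrtime_update_expire(to, end));
 */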

MAYBE_UNUSED(NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start)));
MAYBE_UNUSED(static bool th_has_dedicated_nt(const rb_thread_t *th));
MAYBE_UNUSED(static int waitfd_to_waiting_flag(int wfd_event));

#include THREAD_IMPL_SRC

/*
 * TODO: somebody with win32 knowledge should be able to get rid of the
 * timer-thread by busy-waiting on signals. And it should be possible
 * to make the GVL in thread_pthread.c platform-independent.
 */
#ifndef BUSY_WAIT_SIGNALS
# define BUSY_WAIT_SIGNALS (0)
#endif

#ifndef USE_EVENTFD
# define USE_EVENTFD (0)
#endif

#include "thread_sync.c"

void
rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
{
    rb_native_mutex_initialize(lock);
}

void
rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
{
    rb_native_mutex_destroy(lock);
}

void
rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
{
    rb_native_mutex_lock(lock);
}

void
rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
{
    rb_native_mutex_unlock(lock);
}

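/*
 * Register (func, arg) as th's unblock function while holding
 * interrupt_lock.  If interrupts are already pending, either fail
 * (fail_if_interrupted) or service them first, so a ubf is never
 * installed while an interrupt is outstanding.
 */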
static int
unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted)
{
    do {
        if (fail_if_interrupted) {
            if (RUBY_VM_INTERRUPTED_ANY(th->ec)) {
                return FALSE;
            }
        }
        else {
            RUBY_VM_CHECK_INTS(th->ec);
        }

        rb_native_mutex_lock(&th->interrupt_lock);
    } while (!th->ec->raised_flag && RUBY_VM_INTERRUPTED_ANY(th->ec) &&
             (rb_native_mutex_unlock(&th->interrupt_lock), TRUE));

    VM_ASSERT(th->unblock.func == NULL);

    th->unblock.func = func;
    th->unblock.arg = arg;
    rb_native_mutex_unlock(&th->interrupt_lock);

    return TRUE;
}

static void
unblock_function_clear(rb_thread_t *th)
{
    rb_native_mutex_lock(&th->interrupt_lock);
    th->unblock.func = 0;
    rb_native_mutex_unlock(&th->interrupt_lock);
}

static void
threadptr_interrupt_locked(rb_thread_t *th, bool trap)
{
    // th->interrupt_lock should be acquired here

    RUBY_DEBUG_LOG("th:%u trap:%d", rb_th_serial(th), trap);

    if (trap) {
        RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
    }
    else {
        RUBY_VM_SET_INTERRUPT(th->ec);
    }

    if (th->unblock.func != NULL) {
        (th->unblock.func)(th->unblock.arg);
    }
    else {
        /* none */
    }
}

static void
threadptr_interrupt(rb_thread_t *th, int trap)
{
    rb_native_mutex_lock(&th->interrupt_lock);
    {
        threadptr_interrupt_locked(th, trap);
    }
    rb_native_mutex_unlock(&th->interrupt_lock);
}

void
rb_threadptr_interrupt(rb_thread_t *th)
{
    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
    threadptr_interrupt(th, false);
}

static void
threadptr_trap_interrupt(rb_thread_t *th)
{
    threadptr_interrupt(th, true);
}

static void
terminate_all(rb_ractor_t *r, const rb_thread_t *main_thread)
{
    rb_thread_t *th = 0;

    ccan_list_for_each(&r->threads.set, th, lt_node) {
        if (th != main_thread) {
            RUBY_DEBUG_LOG("terminate start th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));

            rb_threadptr_pending_interrupt_enque(th, RUBY_FATAL_THREAD_TERMINATED);
            rb_threadptr_interrupt(th);

            RUBY_DEBUG_LOG("terminate done th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));
        }
        else {
            RUBY_DEBUG_LOG("main thread th:%u", rb_th_serial(th));
        }
    }
}

static void
rb_threadptr_join_list_wakeup(rb_thread_t *thread)
{
    while (thread->join_list) {
        struct rb_waiting_list *join_list = thread->join_list;

        // Consume the entry from the join list:
        thread->join_list = join_list->next;

        rb_thread_t *target_thread = join_list->thread;

        if (target_thread->scheduler != Qnil && join_list->fiber) {
            rb_fiber_scheduler_unblock(target_thread->scheduler, target_thread->self, rb_fiberptr_self(join_list->fiber));
        }
        else {
            rb_threadptr_interrupt(target_thread);

            switch (target_thread->status) {
              case THREAD_STOPPED:
              case THREAD_STOPPED_FOREVER:
                target_thread->status = THREAD_RUNNABLE;
                break;
              default:
                break;
            }
        }
    }
}

void
rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
{
    while (th->keeping_mutexes) {
        rb_mutex_t *mutex = th->keeping_mutexes;
        th->keeping_mutexes = mutex->next_mutex;

        // rb_warn("mutex #<%p> was not unlocked by thread #<%p>", (void *)mutex, (void*)th);

        const char *error_message = rb_mutex_unlock_th(mutex, th, mutex->fiber);
        if (error_message) rb_bug("invalid keeping_mutexes: %s", error_message);
    }
}

void
rb_thread_terminate_all(rb_thread_t *th)
{
    rb_ractor_t *cr = th->ractor;
    rb_execution_context_t * volatile ec = th->ec;
    volatile int sleeping = 0;

    if (cr->threads.main != th) {
        rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
               (void *)cr->threads.main, (void *)th);
    }

    /* unlock all locking mutexes */
    rb_threadptr_unlock_all_locking_mutexes(th);

    EC_PUSH_TAG(ec);
    if (EC_EXEC_TAG() == TAG_NONE) {
      retry:
        RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));

        terminate_all(cr, th);

        while (rb_ractor_living_thread_num(cr) > 1) {
            rb_hrtime_t rel = RB_HRTIME_PER_SEC;
            /*
             * The thread-exiting routine in thread_start_func_2 notifies
             * me when the last sub-thread exits.
             */
            sleeping = 1;
            native_sleep(th, &rel);
            RUBY_VM_CHECK_INTS_BLOCKING(ec);
            sleeping = 0;
        }
    }
    else {
        /*
         * If an exception was caught (e.g. Ctrl+C), broadcast the kill
         * request again to ensure that all threads are killed even if
         * they are blocked on sleep, mutex, etc.
         */
        if (sleeping) {
            sleeping = 0;
            goto retry;
        }
    }
    EC_POP_TAG();
}

void rb_threadptr_root_fiber_terminate(rb_thread_t *th);
static void threadptr_interrupt_exec_cleanup(rb_thread_t *th);

static void
thread_cleanup_func_before_exec(void *th_ptr)
{
    rb_thread_t *th = th_ptr;
    th->status = THREAD_KILLED;

    // The thread stack doesn't exist in the forked process:
    th->ec->machine.stack_start = th->ec->machine.stack_end = NULL;

    threadptr_interrupt_exec_cleanup(th);
    rb_threadptr_root_fiber_terminate(th);
}

static void
thread_cleanup_func(void *th_ptr, int atfork)
{
    rb_thread_t *th = th_ptr;

    th->locking_mutex = Qfalse;
    thread_cleanup_func_before_exec(th_ptr);

    /*
     * Unfortunately, we can't release the native threading resource at
     * fork because libc may be in an unstable locking state; touching a
     * threading resource may therefore cause a deadlock.
     */
    if (atfork) {
        th->nt = NULL;
        return;
    }

    rb_native_mutex_destroy(&th->interrupt_lock);
}

static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
static VALUE rb_thread_to_s(VALUE thread);

void
ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame)
{
    native_thread_init_stack(th, local_in_parent_frame);
}

const VALUE *
rb_vm_proc_local_ep(VALUE proc)
{
    const VALUE *ep = vm_proc_ep(proc);

    if (ep) {
        return rb_vm_ep_local_ep(ep);
    }
    else {
        return NULL;
    }
}

// for ractor, defined in vm.c
VALUE rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
                                  int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler);

static VALUE
thread_do_start_proc(rb_thread_t *th)
{
    VALUE args = th->invoke_arg.proc.args;
    const VALUE *args_ptr;
    int args_len;
    VALUE procval = th->invoke_arg.proc.proc;
    rb_proc_t *proc;
    GetProcPtr(procval, proc);

    th->ec->errinfo = Qnil;
    th->ec->root_lep = rb_vm_proc_local_ep(procval);
    th->ec->root_svar = Qfalse;

    vm_check_ints_blocking(th->ec);

    if (th->invoke_type == thread_invoke_type_ractor_proc) {
        VALUE self = rb_ractor_self(th->ractor);
        th->thgroup = th->ractor->thgroup_default = rb_obj_alloc(cThGroup);

        VM_ASSERT(FIXNUM_P(args));
        args_len = FIX2INT(args);
        args_ptr = ALLOCA_N(VALUE, args_len);
        rb_ractor_receive_parameters(th->ec, th->ractor, args_len, (VALUE *)args_ptr);
        vm_check_ints_blocking(th->ec);

        return rb_vm_invoke_proc_with_self(
            th->ec, proc, self,
            args_len, args_ptr,
            th->invoke_arg.proc.kw_splat,
            VM_BLOCK_HANDLER_NONE
        );
    }
    else {
        args_len = RARRAY_LENINT(args);
        if (args_len < 8) {
            /* free proc.args if the length is small enough */
            args_ptr = ALLOCA_N(VALUE, args_len);
            MEMCPY((VALUE *)args_ptr, RARRAY_CONST_PTR(args), VALUE, args_len);
            th->invoke_arg.proc.args = Qnil;
        }
        else {
            args_ptr = RARRAY_CONST_PTR(args);
        }

        vm_check_ints_blocking(th->ec);

        return rb_vm_invoke_proc(
            th->ec, proc,
            args_len, args_ptr,
            th->invoke_arg.proc.kw_splat,
            VM_BLOCK_HANDLER_NONE
        );
    }
}

static VALUE
thread_do_start(rb_thread_t *th)
{
    native_set_thread_name(th);
    VALUE result = Qundef;

    switch (th->invoke_type) {
      case thread_invoke_type_proc:
        result = thread_do_start_proc(th);
        break;

      case thread_invoke_type_ractor_proc:
        result = thread_do_start_proc(th);
        rb_ractor_atexit(th->ec, result);
        break;

      case thread_invoke_type_func:
        result = (*th->invoke_arg.func.func)(th->invoke_arg.func.arg);
        break;

      case thread_invoke_type_none:
        rb_bug("unreachable");
    }

    return result;
}

void rb_ec_clear_current_thread_trace_func(const rb_execution_context_t *ec);

static int
thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
{
    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
    VM_ASSERT(th != th->vm->ractor.main_thread);

    enum ruby_tag_type state;
    VALUE errinfo = Qnil;
    rb_thread_t *ractor_main_th = th->ractor->threads.main;

    // setup ractor
    if (rb_ractor_status_p(th->ractor, ractor_blocking)) {
        RB_VM_LOCK();
        {
            rb_vm_ractor_blocking_cnt_dec(th->vm, th->ractor, __FILE__, __LINE__);
            rb_ractor_t *r = th->ractor;
            r->r_stdin = rb_io_prep_stdin();
            r->r_stdout = rb_io_prep_stdout();
            r->r_stderr = rb_io_prep_stderr();
        }
        RB_VM_UNLOCK();
    }

    // Ensure that we are not joinable.
    VM_ASSERT(UNDEF_P(th->value));

    int fiber_scheduler_closed = 0, event_thread_end_hooked = 0;
    VALUE result = Qundef;

    EC_PUSH_TAG(th->ec);

    if ((state = EC_EXEC_TAG()) == TAG_NONE) {
        EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_BEGIN, th->self, 0, 0, 0, Qundef);

        result = thread_do_start(th);
    }

    if (!fiber_scheduler_closed) {
        fiber_scheduler_closed = 1;
        rb_fiber_scheduler_set(Qnil);
    }

    if (!event_thread_end_hooked) {
        event_thread_end_hooked = 1;
        EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_END, th->self, 0, 0, 0, Qundef);
    }

    if (state == TAG_NONE) {
        // This must be set AFTER doing all user-level code. At this point, the thread is effectively finished and calls to `Thread#join` will succeed.
        th->value = result;
    }
    else {
        errinfo = th->ec->errinfo;

        VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef);
        if (!NIL_P(exc)) errinfo = exc;

        if (state == TAG_FATAL) {
            if (th->invoke_type == thread_invoke_type_ractor_proc) {
                rb_ractor_atexit(th->ec, Qnil);
            }
            /* fatal error within this thread, need to stop whole script */
        }
        else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
            /* exit on main_thread. */
        }
        else {
            if (th->report_on_exception) {
                VALUE mesg = rb_thread_to_s(th->self);
                rb_str_cat_cstr(mesg, " terminated with exception (report_on_exception is true):\n");
                rb_write_error_str(mesg);
                rb_ec_error_print(th->ec, errinfo);
            }

            if (th->invoke_type == thread_invoke_type_ractor_proc) {
                rb_ractor_atexit_exception(th->ec);
            }

            if (th->vm->thread_abort_on_exception ||
                th->abort_on_exception || RTEST(ruby_debug)) {
                /* exit on main_thread */
            }
            else {
                errinfo = Qnil;
            }
        }
        th->value = Qnil;
    }

    // The thread is effectively finished and can be joined.
    VM_ASSERT(!UNDEF_P(th->value));

    rb_threadptr_join_list_wakeup(th);
    rb_threadptr_unlock_all_locking_mutexes(th);

    if (th->invoke_type == thread_invoke_type_ractor_proc) {
        rb_thread_terminate_all(th);
        rb_ractor_teardown(th->ec);
    }

    th->status = THREAD_KILLED;
    RUBY_DEBUG_LOG("killed th:%u", rb_th_serial(th));

    if (th->vm->ractor.main_thread == th) {
        ruby_stop(0);
    }

    if (RB_TYPE_P(errinfo, T_OBJECT)) {
        /* treat with normal error object */
        rb_threadptr_raise(ractor_main_th, 1, &errinfo);
    }

    EC_POP_TAG();

    rb_ec_clear_current_thread_trace_func(th->ec);

    /* locking_mutex must be Qfalse */
    if (th->locking_mutex != Qfalse) {
        rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
               (void *)th, th->locking_mutex);
    }

    if (ractor_main_th->status == THREAD_KILLED &&
        th->ractor->threads.cnt <= 2 /* main thread and this thread */) {
        /* I'm the last thread. Wake up the main thread from rb_thread_terminate_all. */
        rb_threadptr_interrupt(ractor_main_th);
    }

    rb_check_deadlock(th->ractor);

    rb_fiber_close(th->ec->fiber_ptr);

    thread_cleanup_func(th, FALSE);
    VM_ASSERT(th->ec->vm_stack == NULL);

    if (th->invoke_type == thread_invoke_type_ractor_proc) {
        // after rb_ractor_living_threads_remove()
        // GC will happen anytime and this ractor can be collected (and destroy GVL).
        // So gvl_release() should be before it.
        thread_sched_to_dead(TH_SCHED(th), th);
        rb_ractor_living_threads_remove(th->ractor, th);
    }
    else {
        rb_ractor_living_threads_remove(th->ractor, th);
        thread_sched_to_dead(TH_SCHED(th), th);
    }

    return 0;
}

struct thread_create_params {
    enum thread_invoke_type type;

    // for normal proc thread
    VALUE args;
    VALUE proc;

    // for ractor
    rb_ractor_t *g;

    // for func
    VALUE (*fn)(void *);
};

static void thread_specific_storage_alloc(rb_thread_t *th);

static VALUE
thread_create_core(VALUE thval, struct thread_create_params *params)
{
    rb_execution_context_t *ec = GET_EC();
    rb_thread_t *th = rb_thread_ptr(thval), *current_th = rb_ec_thread_ptr(ec);
    int err;

    thread_specific_storage_alloc(th);

    if (OBJ_FROZEN(current_th->thgroup)) {
        rb_raise(rb_eThreadError,
                 "can't start a new thread (frozen ThreadGroup)");
    }

    rb_fiber_inherit_storage(ec, th->ec->fiber_ptr);

    switch (params->type) {
      case thread_invoke_type_proc:
        th->invoke_type = thread_invoke_type_proc;
        th->invoke_arg.proc.args = params->args;
        th->invoke_arg.proc.proc = params->proc;
        th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
        break;

      case thread_invoke_type_ractor_proc:
#if RACTOR_CHECK_MODE > 0
        rb_ractor_setup_belonging_to(thval, rb_ractor_id(params->g));
#endif
        th->invoke_type = thread_invoke_type_ractor_proc;
        th->ractor = params->g;
        th->ractor->threads.main = th;
        th->invoke_arg.proc.proc = rb_proc_isolate_bang(params->proc);
        th->invoke_arg.proc.args = INT2FIX(RARRAY_LENINT(params->args));
        th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
        rb_ractor_send_parameters(ec, params->g, params->args);
        break;

      case thread_invoke_type_func:
        th->invoke_type = thread_invoke_type_func;
        th->invoke_arg.func.func = params->fn;
        th->invoke_arg.func.arg = (void *)params->args;
        break;

      default:
        rb_bug("unreachable");
    }

    th->priority = current_th->priority;
    th->thgroup = current_th->thgroup;

    th->pending_interrupt_queue = rb_ary_hidden_new(0);
    th->pending_interrupt_queue_checked = 0;
    th->pending_interrupt_mask_stack = rb_ary_dup(current_th->pending_interrupt_mask_stack);
    RBASIC_CLEAR_CLASS(th->pending_interrupt_mask_stack);

    rb_native_mutex_initialize(&th->interrupt_lock);

    RUBY_DEBUG_LOG("r:%u th:%u", rb_ractor_id(th->ractor), rb_th_serial(th));

    rb_ractor_living_threads_insert(th->ractor, th);

    /* kick thread */
    err = native_thread_create(th);
    if (err) {
        th->status = THREAD_KILLED;
        rb_ractor_living_threads_remove(th->ractor, th);
        rb_raise(rb_eThreadError, "can't create Thread: %s", strerror(err));
    }
    return thval;
}

#define threadptr_initialized(th) ((th)->invoke_type != thread_invoke_type_none)

/*
 * call-seq:
 *    Thread.new { ... } -> thread
 *    Thread.new(*args, &proc) -> thread
 *    Thread.new(*args) { |args| ... } -> thread
 *
 *  Creates a new thread executing the given block.
 *
 *  Any +args+ given to ::new will be passed to the block:
 *
 *     arr = []
 *     a, b, c = 1, 2, 3
 *     Thread.new(a,b,c) { |d,e,f| arr << d << e << f }.join
 *     arr #=> [1, 2, 3]
 *
 *  A ThreadError exception is raised if ::new is called without a block.
 *
 *  If you're going to subclass Thread, be sure to call super in your
 *  +initialize+ method, otherwise a ThreadError will be raised.
 */
static VALUE
thread_s_new(int argc, VALUE *argv, VALUE klass)
{
    rb_thread_t *th;
    VALUE thread = rb_thread_alloc(klass);

    if (GET_RACTOR()->threads.main->status == THREAD_KILLED) {
        rb_raise(rb_eThreadError, "can't alloc thread");
    }

    rb_obj_call_init_kw(thread, argc, argv, RB_PASS_CALLED_KEYWORDS);
    th = rb_thread_ptr(thread);
    if (!threadptr_initialized(th)) {
        rb_raise(rb_eThreadError, "uninitialized thread - check '%"PRIsVALUE"#initialize'",
                 klass);
    }
    return thread;
}

/*
 * call-seq:
 *    Thread.start([args]*) {|args| block } -> thread
 *    Thread.fork([args]*) {|args| block } -> thread
 *
 * Basically the same as ::new. However, if class Thread is subclassed, then
 * calling +start+ in that subclass will not invoke the subclass's
 * +initialize+ method.
 */

static VALUE
thread_start(VALUE klass, VALUE args)
{
    struct thread_create_params params = {
        .type = thread_invoke_type_proc,
        .args = args,
        .proc = rb_block_proc(),
    };
    return thread_create_core(rb_thread_alloc(klass), &params);
}

static VALUE
threadptr_invoke_proc_location(rb_thread_t *th)
{
    if (th->invoke_type == thread_invoke_type_proc) {
        return rb_proc_location(th->invoke_arg.proc.proc);
    }
    else {
        return Qnil;
    }
}

/* :nodoc: */
static VALUE
thread_initialize(VALUE thread, VALUE args)
{
    rb_thread_t *th = rb_thread_ptr(thread);

    if (!rb_block_given_p()) {
        rb_raise(rb_eThreadError, "must be called with a block");
    }
    else if (th->invoke_type != thread_invoke_type_none) {
        VALUE loc = threadptr_invoke_proc_location(th);
        if (!NIL_P(loc)) {
            rb_raise(rb_eThreadError,
                     "already initialized thread - %"PRIsVALUE":%"PRIsVALUE,
                     RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
        }
        else {
            rb_raise(rb_eThreadError, "already initialized thread");
        }
    }
    else {
        struct thread_create_params params = {
            .type = thread_invoke_type_proc,
            .args = args,
            .proc = rb_block_proc(),
        };
        return thread_create_core(thread, &params);
    }
}

VALUE
rb_thread_create(VALUE (*fn)(void *), void *arg)
{
    struct thread_create_params params = {
        .type = thread_invoke_type_func,
        .fn = fn,
        .args = (VALUE)arg,
    };
    return thread_create_core(rb_thread_alloc(rb_cThread), &params);
}
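
/*
 * Sketch (illustrative; `run_callable' and `callable' are hypothetical
 * names): spawning a Ruby thread from C with rb_thread_create():
 *
 *   static VALUE
 *   run_callable(void *arg)
 *   {
 *       return rb_funcall((VALUE)arg, rb_intern("call"), 0);
 *   }
 *
 *   // VALUE thread = rb_thread_create(run_callable, (void *)callable);
 */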

VALUE
rb_thread_create_ractor(rb_ractor_t *r, VALUE args, VALUE proc)
{
    struct thread_create_params params = {
        .type = thread_invoke_type_ractor_proc,
        .g = r,
        .args = args,
        .proc = proc,
    };
    return thread_create_core(rb_thread_alloc(rb_cThread), &params);
}

struct join_arg {
    struct rb_waiting_list *waiter;
    rb_thread_t *target;
    VALUE timeout;
    rb_hrtime_t *limit;
};

static VALUE
remove_from_join_list(VALUE arg)
{
    struct join_arg *p = (struct join_arg *)arg;
    rb_thread_t *target_thread = p->target;

    if (target_thread->status != THREAD_KILLED) {
        struct rb_waiting_list **join_list = &target_thread->join_list;

        while (*join_list) {
            if (*join_list == p->waiter) {
                *join_list = (*join_list)->next;
                break;
            }

            join_list = &(*join_list)->next;
        }
    }

    return Qnil;
}

static int
thread_finished(rb_thread_t *th)
{
    return th->status == THREAD_KILLED || !UNDEF_P(th->value);
}

static VALUE
thread_join_sleep(VALUE arg)
{
    struct join_arg *p = (struct join_arg *)arg;
    rb_thread_t *target_th = p->target, *th = p->waiter->thread;
    rb_hrtime_t end = 0, *limit = p->limit;

    if (limit) {
        end = rb_hrtime_add(*limit, rb_hrtime_now());
    }

    while (!thread_finished(target_th)) {
        VALUE scheduler = rb_fiber_scheduler_current();

        if (scheduler != Qnil) {
            rb_fiber_scheduler_block(scheduler, target_th->self, p->timeout);
            // Check if the target thread is finished after blocking:
            if (thread_finished(target_th)) break;
            // Otherwise, a timeout occurred:
            else return Qfalse;
        }
        else if (!limit) {
            sleep_forever(th, SLEEP_DEADLOCKABLE | SLEEP_ALLOW_SPURIOUS | SLEEP_NO_CHECKINTS);
        }
        else {
            if (hrtime_update_expire(limit, end)) {
                RUBY_DEBUG_LOG("timeout target_th:%u", rb_th_serial(target_th));
                return Qfalse;
            }
            th->status = THREAD_STOPPED;
            native_sleep(th, limit);
        }
        RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
        th->status = THREAD_RUNNABLE;

        RUBY_DEBUG_LOG("interrupted target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
    }

    return Qtrue;
}

static VALUE
thread_join(rb_thread_t *target_th, VALUE timeout, rb_hrtime_t *limit)
{
    rb_execution_context_t *ec = GET_EC();
    rb_thread_t *th = ec->thread_ptr;
    rb_fiber_t *fiber = ec->fiber_ptr;

    if (th == target_th) {
        rb_raise(rb_eThreadError, "Target thread must not be current thread");
    }

    if (th->ractor->threads.main == target_th) {
        rb_raise(rb_eThreadError, "Target thread must not be main thread");
    }

    RUBY_DEBUG_LOG("target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));

    if (target_th->status != THREAD_KILLED) {
        struct rb_waiting_list waiter;
        waiter.next = target_th->join_list;
        waiter.thread = th;
        waiter.fiber = rb_fiberptr_blocking(fiber) ? NULL : fiber;
        target_th->join_list = &waiter;

        struct join_arg arg;
        arg.waiter = &waiter;
        arg.target = target_th;
        arg.timeout = timeout;
        arg.limit = limit;

        if (!rb_ensure(thread_join_sleep, (VALUE)&arg, remove_from_join_list, (VALUE)&arg)) {
            return Qnil;
        }
    }

    RUBY_DEBUG_LOG("success target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));

    if (target_th->ec->errinfo != Qnil) {
        VALUE err = target_th->ec->errinfo;

        if (FIXNUM_P(err)) {
            switch (err) {
              case INT2FIX(TAG_FATAL):
                RUBY_DEBUG_LOG("terminated target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));

                /* OK. killed. */
                break;
              default:
                rb_bug("thread_join: Fixnum (%d) should not reach here.", FIX2INT(err));
            }
        }
        else if (THROW_DATA_P(target_th->ec->errinfo)) {
            rb_bug("thread_join: THROW_DATA should not reach here.");
        }
        else {
            /* normal exception */
            rb_exc_raise(err);
        }
    }
    return target_th->self;
}

/*
 *  call-seq:
 *     thr.join          -> thr
 *     thr.join(limit)   -> thr
 *
 *  The calling thread will suspend execution and run this +thr+.
 *
 *  Does not return until +thr+ exits or until the given +limit+ seconds have
 *  passed.
 *
 *  If the time limit expires, +nil+ will be returned, otherwise +thr+ is
 *  returned.
 *
 *  Any threads not joined will be killed when the main program exits.
 *
 *  If +thr+ had previously raised an exception and the ::abort_on_exception or
 *  $DEBUG flags are not set, (so the exception has not yet been processed), it
 *  will be processed at this time.
 *
 *     a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
 *     x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
 *     x.join # Let thread x finish, thread a will be killed on exit.
 *     #=> "axyz"
 *
 *  The following example illustrates the +limit+ parameter.
 *
 *     y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
 *     puts "Waiting" until y.join(0.15)
 *
 *  This will produce:
 *
 *     tick...
 *     Waiting
 *     tick...
 *     Waiting
 *     tick...
 *     tick...
 */

static VALUE
thread_join_m(int argc, VALUE *argv, VALUE self)
{
    VALUE timeout = Qnil;
    rb_hrtime_t rel = 0, *limit = 0;

    if (rb_check_arity(argc, 0, 1)) {
        timeout = argv[0];
    }

    // Convert the timeout eagerly, so it's always converted and deterministic
    /*
     * This supports INFINITY and negative values, so we can't use
     * rb_time_interval right now...
     */
    if (NIL_P(timeout)) {
        /* unlimited */
    }
    else if (FIXNUM_P(timeout)) {
        rel = rb_sec2hrtime(NUM2TIMET(timeout));
        limit = &rel;
    }
    else {
        limit = double2hrtime(&rel, rb_num2dbl(timeout));
    }

    return thread_join(rb_thread_ptr(self), timeout, limit);
}

/*
 *  call-seq:
 *     thr.value   -> obj
 *
 *  Waits for +thr+ to complete, using #join, and returns its value or raises
 *  the exception which terminated the thread.
 *
 *     a = Thread.new { 2 + 2 }
 *     a.value   #=> 4
 *
 *     b = Thread.new { raise 'something went wrong' }
 *     b.value   #=> RuntimeError: something went wrong
 */

static VALUE
thread_value(VALUE self)
{
    rb_thread_t *th = rb_thread_ptr(self);
    thread_join(th, Qnil, 0);
    if (UNDEF_P(th->value)) {
        // If the thread is dead because we forked, th->value is still Qundef.
        return Qnil;
    }
    return th->value;
}

/*
 * Thread Scheduling
 */

static void
getclockofday(struct timespec *ts)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    if (clock_gettime(CLOCK_MONOTONIC, ts) == 0)
        return;
#endif
    rb_timespec_now(ts);
}

/*
 * Don't inline this, since the library call is already time consuming
 * and we don't want "struct timespec" on the stack too long for GC.
 */
NOINLINE(rb_hrtime_t rb_hrtime_now(void));
rb_hrtime_t
rb_hrtime_now(void)
{
    struct timespec ts;

    getclockofday(&ts);
    return rb_timespec2hrtime(&ts);
}

/*
 * At least gcc 7.2 and 7.3 complain about "rb_hrtime_t end"
 * being uninitialized, maybe other versions, too.
 */
COMPILER_WARNING_PUSH
#if defined(__GNUC__) && __GNUC__ == 7 && __GNUC_MINOR__ <= 3
COMPILER_WARNING_IGNORED(-Wmaybe-uninitialized)
#endif
#ifndef PRIu64
#define PRIu64 PRI_64_PREFIX "u"
#endif
/*
 * @end is the absolute time when @timeout is set to expire.
 * Returns true if @end has passed.
 * Updates @timeout and returns false otherwise.
 */
static int
hrtime_update_expire(rb_hrtime_t *timeout, const rb_hrtime_t end)
{
    rb_hrtime_t now = rb_hrtime_now();

    if (now > end) return 1;

    RUBY_DEBUG_LOG("%"PRIu64" > %"PRIu64"", (uint64_t)end, (uint64_t)now);

    *timeout = end - now;
    return 0;
}
COMPILER_WARNING_POP

static int
sleep_hrtime(rb_thread_t *th, rb_hrtime_t rel, unsigned int fl)
{
    enum rb_thread_status prev_status = th->status;
    int woke;
    rb_hrtime_t end = rb_hrtime_add(rb_hrtime_now(), rel);

    th->status = THREAD_STOPPED;
    RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
    while (th->status == THREAD_STOPPED) {
        native_sleep(th, &rel);
        woke = vm_check_ints_blocking(th->ec);
        if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
            break;
        if (hrtime_update_expire(&rel, end))
            break;
        woke = 1;
    }
    th->status = prev_status;
    return woke;
}

static int
sleep_hrtime_until(rb_thread_t *th, rb_hrtime_t end, unsigned int fl)
{
    enum rb_thread_status prev_status = th->status;
    int woke;
    rb_hrtime_t rel = rb_hrtime_sub(end, rb_hrtime_now());

    th->status = THREAD_STOPPED;
    RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
    while (th->status == THREAD_STOPPED) {
        native_sleep(th, &rel);
        woke = vm_check_ints_blocking(th->ec);
        if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
            break;
        if (hrtime_update_expire(&rel, end))
            break;
        woke = 1;
    }
    th->status = prev_status;
    return woke;
}

static void
sleep_forever(rb_thread_t *th, unsigned int fl)
{
    enum rb_thread_status prev_status = th->status;
    enum rb_thread_status status;
    int woke;

    status = fl & SLEEP_DEADLOCKABLE ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
    th->status = status;

    if (!(fl & SLEEP_NO_CHECKINTS)) RUBY_VM_CHECK_INTS_BLOCKING(th->ec);

    while (th->status == status) {
        if (fl & SLEEP_DEADLOCKABLE) {
            rb_ractor_sleeper_threads_inc(th->ractor);
            rb_check_deadlock(th->ractor);
        }
        {
            native_sleep(th, 0);
        }
        if (fl & SLEEP_DEADLOCKABLE) {
            rb_ractor_sleeper_threads_dec(th->ractor);
        }
        if (fl & SLEEP_ALLOW_SPURIOUS) {
            break;
        }

        woke = vm_check_ints_blocking(th->ec);

        if (woke && !(fl & SLEEP_SPURIOUS_CHECK)) {
            break;
        }
    }
    th->status = prev_status;
}

void
rb_thread_sleep_forever(void)
{
    RUBY_DEBUG_LOG("forever");
    sleep_forever(GET_THREAD(), SLEEP_SPURIOUS_CHECK);
}

void
rb_thread_sleep_deadly(void)
{
    RUBY_DEBUG_LOG("deadly");
    sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE|SLEEP_SPURIOUS_CHECK);
}

static void
rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end)
{
    VALUE scheduler = rb_fiber_scheduler_current();
    if (scheduler != Qnil) {
        rb_fiber_scheduler_block(scheduler, blocker, timeout);
    }
    else {
        RUBY_DEBUG_LOG("...");
        if (end) {
            sleep_hrtime_until(GET_THREAD(), end, SLEEP_SPURIOUS_CHECK);
        }
        else {
            sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE);
        }
    }
}

void
rb_thread_wait_for(struct timeval time)
{
    rb_thread_t *th = GET_THREAD();

    sleep_hrtime(th, rb_timeval2hrtime(&time), SLEEP_SPURIOUS_CHECK);
}

void
rb_ec_check_ints(rb_execution_context_t *ec)
{
    RUBY_VM_CHECK_INTS_BLOCKING(ec);
}

/*
 * CAUTION: This function causes thread switching.
 * rb_thread_check_ints() checks ruby's interrupts.
 * Some interrupts need thread switching, some invoke handlers,
 * and so on.
 */

void
rb_thread_check_ints(void)
{
    rb_ec_check_ints(GET_EC());
}

/*
 * Hidden API for the tcl/tk wrapper.
 * There is no guarantee that it will be kept.
 */
int
rb_thread_check_trap_pending(void)
{
    return rb_signal_buff_size() != 0;
}

/* This function can be called in blocking region. */
int
rb_thread_interrupted(VALUE thval)
{
    return (int)RUBY_VM_INTERRUPTED(rb_thread_ptr(thval)->ec);
}

void
rb_thread_sleep(int sec)
{
    rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
}

static void
rb_thread_schedule_limits(uint32_t limits_us)
{
    if (!rb_thread_alone()) {
        rb_thread_t *th = GET_THREAD();
        RUBY_DEBUG_LOG("us:%u", (unsigned int)limits_us);

        if (th->running_time_us >= limits_us) {
            RUBY_DEBUG_LOG("switch %s", "start");

            RB_VM_SAVE_MACHINE_CONTEXT(th);
            thread_sched_yield(TH_SCHED(th), th);
            rb_ractor_thread_switch(th->ractor, th);

            RUBY_DEBUG_LOG("switch %s", "done");
        }
    }
}

void
rb_thread_schedule(void)
{
    rb_thread_schedule_limits(0);
    RUBY_VM_CHECK_INTS(GET_EC());
}

/* blocking region */

static inline int
blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
                      rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
{
#ifdef RUBY_ASSERT_CRITICAL_SECTION
    VM_ASSERT(ruby_assert_critical_section_entered == 0);
#endif
    VM_ASSERT(th == GET_THREAD());

    region->prev_status = th->status;
    if (unblock_function_set(th, ubf, arg, fail_if_interrupted)) {
        th->blocking_region_buffer = region;
        th->status = THREAD_STOPPED;
        rb_ractor_blocking_threads_inc(th->ractor, __FILE__, __LINE__);

        RUBY_DEBUG_LOG("thread_id:%p", (void *)th->nt->thread_id);
        return TRUE;
    }
    else {
        return FALSE;
    }
}

static inline void
blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
{
    /* entry to ubf_list still permitted at this point, make it impossible: */
    unblock_function_clear(th);
    /* entry to ubf_list impossible at this point, so unregister is safe: */
    unregister_ubf_list(th);

    thread_sched_to_running(TH_SCHED(th), th);
    rb_ractor_thread_switch(th->ractor, th);

    th->blocking_region_buffer = 0;
    rb_ractor_blocking_threads_dec(th->ractor, __FILE__, __LINE__);
    if (th->status == THREAD_STOPPED) {
        th->status = region->prev_status;
    }

    RUBY_DEBUG_LOG("end");

#ifndef _WIN32
    // GET_THREAD() clears WSAGetLastError()
    VM_ASSERT(th == GET_THREAD());
#endif
}

void *
rb_nogvl(void *(*func)(void *), void *data1,
         rb_unblock_function_t *ubf, void *data2,
         int flags)
{
    if (flags & RB_NOGVL_OFFLOAD_SAFE) {
        VALUE scheduler = rb_fiber_scheduler_current();
        if (scheduler != Qnil) {
            struct rb_fiber_scheduler_blocking_operation_state state;

            VALUE result = rb_fiber_scheduler_blocking_operation_wait(scheduler, func, data1, ubf, data2, flags, &state);

            if (!UNDEF_P(result)) {
                rb_errno_set(state.saved_errno);
                return state.result;
            }
        }
    }

    void *val = 0;
    rb_execution_context_t *ec = GET_EC();
    rb_thread_t *th = rb_ec_thread_ptr(ec);
    rb_vm_t *vm = rb_ec_vm_ptr(ec);
    bool is_main_thread = vm->ractor.main_thread == th;
    int saved_errno = 0;

    if ((ubf == RUBY_UBF_IO) || (ubf == RUBY_UBF_PROCESS)) {
        ubf = ubf_select;
        data2 = th;
    }
    else if (ubf && rb_ractor_living_thread_num(th->ractor) == 1 && is_main_thread) {
        if (flags & RB_NOGVL_UBF_ASYNC_SAFE) {
            vm->ubf_async_safe = 1;
        }
    }

    rb_vm_t *volatile saved_vm = vm;
    BLOCKING_REGION(th, {
        val = func(data1);
        saved_errno = rb_errno();
    }, ubf, data2, flags & RB_NOGVL_INTR_FAIL);
    vm = saved_vm;

    if (is_main_thread) vm->ubf_async_safe = 0;

    if ((flags & RB_NOGVL_INTR_FAIL) == 0) {
        RUBY_VM_CHECK_INTS_BLOCKING(ec);
    }

    rb_errno_set(saved_errno);

    return val;
}

/*
 * rb_thread_call_without_gvl - permit concurrent/parallel execution.
 * rb_thread_call_without_gvl2 - permit concurrent/parallel execution
 *                               without interrupt processing.
 *
 * rb_thread_call_without_gvl() does:
 *   (1) Check interrupts.
 *   (2) release GVL.
 *       Other Ruby threads may run in parallel.
 *   (3) call func with data1
 *   (4) acquire GVL.
 *       Other Ruby threads can not run in parallel any more.
 *   (5) Check interrupts.
 *
 * rb_thread_call_without_gvl2() does:
 *   (1) Check interrupt and return if interrupted.
 *   (2) release GVL.
 *   (3) call func with data1 and a pointer to the flags.
 *   (4) acquire GVL.
 *
 * If another thread interrupts this thread (Thread#kill, signal delivery,
 * VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
 * "un-blocking function"). `ubf()' should interrupt `func()' execution by
 * toggling a cancellation flag, canceling the invocation of a call inside
 * `func()' or similar. Note that `ubf()' may not be called with the GVL.
 *
 * There are built-in ubfs and you can specify these ubfs:
 *
 * * RUBY_UBF_IO: ubf for IO operation
 * * RUBY_UBF_PROCESS: ubf for process operation
 *
 * However, we can not guarantee our built-in ubfs interrupt your `func()'
 * correctly. Be careful when using rb_thread_call_without_gvl(). If you don't
 * provide a proper ubf(), your program will not stop for Control+C or other
 * shutdown events.
 *
 * "Check interrupts" in the above list means checking asynchronous
 * interrupt events (such as Thread#kill, signal delivery, VM-shutdown
 * request, and so on) and calling the corresponding procedures
 * (such as `trap' for signals, raising an exception for Thread#raise).
 * If `func()' finished and received interrupts, you may skip interrupt
 * checking. For example, assume the following func(), which reads data
 * from a file.
 *
 *   read_func(...) {
 *                   // (a) before read
 *     read(buffer); // (b) reading
 *                   // (c) after read
 *   }
 *
 * If an interrupt occurs at (a) or (b), then `ubf()' cancels this
 * `read_func()' and interrupts are checked. However, if an interrupt occurs
 * at (c), after the *read* operation is completed, checking interrupts is
 * harmful because it causes an irrevocable side effect: the read data will
 * vanish. To avoid such a problem, the `read_func()' should be used with
 * `rb_thread_call_without_gvl2()'.
 *
 * If `rb_thread_call_without_gvl2()' detects an interrupt, it returns
 * immediately. This function does not show when the execution was interrupted.
 * For example, there are 4 possible timings: (a), (b), (c) and before calling
 * read_func(). You need to record the progress of read_func() and check
 * the progress after `rb_thread_call_without_gvl2()'. You may need to call
 * `rb_thread_check_ints()' correctly, or your program can not handle events
 * such as `trap' properly.
 *
 * NOTE: You can not execute most of the Ruby C API and touch Ruby
 *       objects in `func()' and `ubf()', including raising an
 *       exception, because the current thread doesn't acquire the GVL
 *       (it causes synchronization problems). If you need to
 *       call ruby functions, either use rb_thread_call_with_gvl()
 *       or read the source code of the C APIs and confirm safety by
 *       yourself.
 *
 * NOTE: In short, this API is difficult to use safely. I recommend you
 *       use other ways if you have them. We lack experience using this API.
 *       Please report any problems related to it.
 *
 * NOTE: Releasing and re-acquiring the GVL may be expensive operations
 *       for a short-running `func()'. Be sure to benchmark, and use this
 *       mechanism only when `func()' consumes enough time.
 *
 * Safe C API:
 * * rb_thread_interrupted() - check interrupt flag
 * * ruby_xmalloc(), ruby_xrealloc(), ruby_xfree() -
 *   they will work without GVL, and may acquire GVL when GC is needed.
 */
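
/*
 * Minimal usage sketch (illustrative only; `struct sleep_args',
 * `do_nanosleep' and `cancel_sleep' are hypothetical names, not part of
 * this file):
 *
 *   struct sleep_args { struct timespec req; volatile int cancelled; };
 *
 *   static void *
 *   do_nanosleep(void *p)
 *   {
 *       struct sleep_args *args = p;
 *       while (!args->cancelled && nanosleep(&args->req, &args->req))
 *           ; // retry on EINTR until cancelled or done
 *       return NULL;
 *   }
 *
 *   static void
 *   cancel_sleep(void *p) // ubf: may run without the GVL
 *   {
 *       ((struct sleep_args *)p)->cancelled = 1;
 *       // a real ubf must also interrupt the blocking call itself,
 *       // e.g. by sending a signal to the sleeping native thread
 *   }
 *
 *   // rb_thread_call_without_gvl(do_nanosleep, &args, cancel_sleep, &args);
 */
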
void *
rb_thread_call_without_gvl2(void *(*func)(void *), void *data1,
                            rb_unblock_function_t *ubf, void *data2)
{
    return rb_nogvl(func, data1, ubf, data2, RB_NOGVL_INTR_FAIL);
}

void *
rb_thread_call_without_gvl(void *(*func)(void *data), void *data1,
                           rb_unblock_function_t *ubf, void *data2)
{
    return rb_nogvl(func, data1, ubf, data2, 0);
}

static int
waitfd_to_waiting_flag(int wfd_event)
{
    return wfd_event << 1;
}

static void
thread_io_setup_wfd(rb_thread_t *th, int fd, struct waiting_fd *wfd)
{
    wfd->fd = fd;
    wfd->th = th;
    wfd->busy = NULL;

    RB_VM_LOCK_ENTER();
    {
        ccan_list_add(&th->vm->waiting_fds, &wfd->wfd_node);
    }
    RB_VM_LOCK_LEAVE();
}

static void
thread_io_wake_pending_closer(struct waiting_fd *wfd)
{
    bool has_waiter = wfd->busy && RB_TEST(wfd->busy->wakeup_mutex);
    if (has_waiter) {
        rb_mutex_lock(wfd->busy->wakeup_mutex);
    }

    /* Needs to be protected with RB_VM_LOCK because we don't know if
       wfd is on the global list of pending FD ops or if it's on a
       struct rb_io_close_wait_list close-waiter. */
    RB_VM_LOCK_ENTER();
    ccan_list_del(&wfd->wfd_node);
    RB_VM_LOCK_LEAVE();

    if (has_waiter) {
        rb_thread_t *th = rb_thread_ptr(wfd->busy->closing_thread);
        if (th->scheduler != Qnil) {
            rb_fiber_scheduler_unblock(th->scheduler, wfd->busy->closing_thread, wfd->busy->closing_fiber);
        }
        else {
            rb_thread_wakeup(wfd->busy->closing_thread);
        }
        rb_mutex_unlock(wfd->busy->wakeup_mutex);
    }
}

static bool
thread_io_mn_schedulable(rb_thread_t *th, int events, const struct timeval *timeout)
{
#if defined(USE_MN_THREADS) && USE_MN_THREADS
    return !th_has_dedicated_nt(th) && (events || timeout) && th->blocking;
#else
    return false;
#endif
}

// true if need retry
static bool
thread_io_wait_events(rb_thread_t *th, int fd, int events, const struct timeval *timeout)
{
#if defined(USE_MN_THREADS) && USE_MN_THREADS
    if (thread_io_mn_schedulable(th, events, timeout)) {
        rb_hrtime_t rel, *prel;

        if (timeout) {
            rel = rb_timeval2hrtime(timeout);
            prel = &rel;
        }
        else {
            prel = NULL;
        }

        VM_ASSERT(prel || (events & (RB_WAITFD_IN | RB_WAITFD_OUT)));

        if (thread_sched_wait_events(TH_SCHED(th), th, fd, waitfd_to_waiting_flag(events), prel)) {
            // timeout
            return false;
        }
        else {
            return true;
        }
    }
#endif // defined(USE_MN_THREADS) && USE_MN_THREADS
    return false;
}

// assume read/write
static bool
blocking_call_retryable_p(int r, int eno)
{
    if (r != -1) return false;

    switch (eno) {
      case EAGAIN:
#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
      case EWOULDBLOCK:
#endif
        return true;
      default:
        return false;
    }
}

bool
rb_thread_mn_schedulable(VALUE thval)
{
    rb_thread_t *th = rb_thread_ptr(thval);
    return th->mn_schedulable;
}

VALUE
rb_thread_io_blocking_call(rb_blocking_function_t *func, void *data1, int fd, int events)
{
    rb_execution_context_t *volatile ec = GET_EC();
    rb_thread_t *volatile th = rb_ec_thread_ptr(ec);

    RUBY_DEBUG_LOG("th:%u fd:%d ev:%d", rb_th_serial(th), fd, events);

    struct waiting_fd waiting_fd;
    volatile VALUE val = Qundef; /* shouldn't be used */
    volatile int saved_errno = 0;
    enum ruby_tag_type state;
    volatile bool prev_mn_schedulable = th->mn_schedulable;
    th->mn_schedulable = thread_io_mn_schedulable(th, events, NULL);

    // `errno` is only valid when there is an actual error - but we can't
    // extract that from the return value of `func` alone, so we clear any
    // prior `errno` value here so that we can later check if it was set by
    // `func` or not (as opposed to some previously set value).
    errno = 0;

    thread_io_setup_wfd(th, fd, &waiting_fd);
    {
        EC_PUSH_TAG(ec);
        if ((state = EC_EXEC_TAG()) == TAG_NONE) {
            volatile enum ruby_tag_type saved_state = state; /* for BLOCKING_REGION */
          retry:
            BLOCKING_REGION(waiting_fd.th, {
                val = func(data1);
                saved_errno = errno;
            }, ubf_select, waiting_fd.th, FALSE);

            th = rb_ec_thread_ptr(ec);
            if (events &&
                blocking_call_retryable_p((int)val, saved_errno) &&
                thread_io_wait_events(th, fd, events, NULL)) {
                RUBY_VM_CHECK_INTS_BLOCKING(ec);
                goto retry;
            }
            state = saved_state;
        }
        EC_POP_TAG();

        th = rb_ec_thread_ptr(ec);
        th->mn_schedulable = prev_mn_schedulable;
    }
    /*
     * must be deleted before jump
     * this will delete either from waiting_fds or on-stack struct rb_io_close_wait_list
     */
    thread_io_wake_pending_closer(&waiting_fd);

    if (state) {
        EC_JUMP_TAG(ec, state);
    }
    /* TODO: check func() */
    RUBY_VM_CHECK_INTS_BLOCKING(ec);

    // If the error was a timeout, we raise a specific exception for that:
    if (saved_errno == ETIMEDOUT) {
        rb_raise(rb_eIOTimeoutError, "Blocking operation timed out!");
    }

    errno = saved_errno;

    return val;
}

VALUE
rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
{
    return rb_thread_io_blocking_call(func, data1, fd, 0);
}

/*
 * rb_thread_call_with_gvl - re-enter the Ruby world after GVL release.
 *
 * After releasing the GVL using
 * rb_thread_call_without_gvl() you can not access Ruby values or invoke
 * methods. If you need to access Ruby you must use this function
 * rb_thread_call_with_gvl().
 *
 * This function rb_thread_call_with_gvl() does:
 * (1) acquire GVL.
 * (2) call passed function `func'.
 * (3) release GVL.
 * (4) return a value which is returned at (2).
 *
 * NOTE: You should not return a Ruby object at (2) because such an object
 *       will not be marked.
 *
 * NOTE: If an exception is raised in `func', this function DOES NOT
 *       protect (catch) the exception. If you have any resources
 *       which should be freed before throwing an exception, you need to use
 *       rb_protect() in `func' and return a value which indicates that an
 *       exception was raised.
 *
 * NOTE: This function should not be called by a thread which was not
 *       created as a Ruby thread (created by Thread.new or so). In other
 *       words, this function *DOES NOT* associate or convert a NON-Ruby
 *       thread to a Ruby thread.
 */
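
/*
 * Sketch (illustrative; `log_progress' is a hypothetical name): calling
 * back into Ruby from inside a func() that runs without the GVL:
 *
 *   static void *
 *   log_progress(void *msg)
 *   {
 *       rb_funcall(rb_stderr, rb_intern("puts"), 1, rb_str_new_cstr(msg));
 *       return NULL;
 *   }
 *
 *   // inside func() running without the GVL:
 *   rb_thread_call_with_gvl(log_progress, (void *)"halfway done");
 */
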
void *
rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
{
    rb_thread_t *th = ruby_thread_from_native();
    struct rb_blocking_region_buffer *brb;
    struct rb_unblock_callback prev_unblock;
    void *r;

    if (th == 0) {
        /* Error has occurred, but we can't use rb_bug()
         * because this thread is not Ruby's thread.
         * What should we do?
         */
        bp();
        fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
        exit(EXIT_FAILURE);
    }

    brb = (struct rb_blocking_region_buffer *)th->blocking_region_buffer;
    prev_unblock = th->unblock;

    if (brb == 0) {
        rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
    }

    blocking_region_end(th, brb);
    /* enter to Ruby world: You can access Ruby values, methods and so on. */
    r = (*func)(data1);
    /* leave from Ruby world: You can not access Ruby values, etc. */
    int released = blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg, FALSE);
    RUBY_ASSERT_ALWAYS(released);
    RB_VM_SAVE_MACHINE_CONTEXT(th);
    thread_sched_to_waiting(TH_SCHED(th), th);
    return r;
}
1937
1938/*
1939 * ruby_thread_has_gvl_p - check if current native thread has GVL.
1940 *
1941 ***
1942 *** This API is EXPERIMENTAL!
1943 *** We do not guarantee that this API remains in ruby 1.9.2 or later.
1944 ***
1945 */
1946
1947int
1948ruby_thread_has_gvl_p(void)
1949{
1950 rb_thread_t *th = ruby_thread_from_native();
1951
1952 if (th && th->blocking_region_buffer == 0) {
1953 return 1;
1954 }
1955 else {
1956 return 0;
1957 }
1958}
1959
1960/*
1961 * call-seq:
1962 * Thread.pass -> nil
1963 *
1964 * Give the thread scheduler a hint to pass execution to another thread.
1965 * A running thread may or may not switch, it depends on OS and processor.
1966 */
1967
1968static VALUE
1969thread_s_pass(VALUE klass)
1970{
1971     rb_thread_schedule();
1972     return Qnil;
1973}
1974
1975/*****************************************************/
1976
1977/*
1978 * rb_threadptr_pending_interrupt_* - manage asynchronous error queue
1979 *
1980 * Async events such as an exception thrown by Thread#raise,
1981 * Thread#kill and thread termination (after main thread termination)
1982 * will be queued to th->pending_interrupt_queue.
1983 * - clear: clear the queue.
1984 * - enque: enqueue err object into queue.
1985 * - deque: dequeue err object from queue.
1986 * - active_p: return 1 if the queue should be checked.
1987 *
1988  * All rb_threadptr_pending_interrupt_* functions are, of course,
1989  * called by a thread that has acquired the GVL.
1990  * Note that all "rb_"-prefixed APIs need the GVL to call.
1991 */
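/*
 * Typical flow, as implemented below (an orienting note, not new behavior):
 * Thread#raise calls rb_threadptr_raise(), which enqueues the exception via
 * rb_threadptr_pending_interrupt_enque() and pokes the target thread with
 * rb_threadptr_interrupt(). The target eventually reaches an interrupt
 * check point (RUBY_VM_CHECK_INTS), where rb_threadptr_execute_interrupts()
 * dequeues the error, honoring the Thread.handle_interrupt mask stack, and
 * raises it.
 */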
1992
1993void
1994rb_threadptr_pending_interrupt_clear(rb_thread_t *th)
1995{
1996 rb_ary_clear(th->pending_interrupt_queue);
1997}
1998
1999void
2000rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
2001{
2002 rb_ary_push(th->pending_interrupt_queue, v);
2003 th->pending_interrupt_queue_checked = 0;
2004}
2005
2006static void
2007threadptr_check_pending_interrupt_queue(rb_thread_t *th)
2008{
2009 if (!th->pending_interrupt_queue) {
2010 rb_raise(rb_eThreadError, "uninitialized thread");
2011 }
2012}
2013
2014enum handle_interrupt_timing {
2015 INTERRUPT_NONE,
2016 INTERRUPT_IMMEDIATE,
2017 INTERRUPT_ON_BLOCKING,
2018 INTERRUPT_NEVER
2019};
2020
2021static enum handle_interrupt_timing
2022rb_threadptr_pending_interrupt_from_symbol(rb_thread_t *th, VALUE sym)
2023{
2024 if (sym == sym_immediate) {
2025 return INTERRUPT_IMMEDIATE;
2026 }
2027 else if (sym == sym_on_blocking) {
2028 return INTERRUPT_ON_BLOCKING;
2029 }
2030 else if (sym == sym_never) {
2031 return INTERRUPT_NEVER;
2032 }
2033 else {
2034 rb_raise(rb_eThreadError, "unknown mask signature");
2035 }
2036}
2037
2038static enum handle_interrupt_timing
2039rb_threadptr_pending_interrupt_check_mask(rb_thread_t *th, VALUE err)
2040{
2041 VALUE mask;
2042 long mask_stack_len = RARRAY_LEN(th->pending_interrupt_mask_stack);
2043 const VALUE *mask_stack = RARRAY_CONST_PTR(th->pending_interrupt_mask_stack);
2044 VALUE mod;
2045 long i;
2046
2047 for (i=0; i<mask_stack_len; i++) {
2048 mask = mask_stack[mask_stack_len-(i+1)];
2049
2050 if (SYMBOL_P(mask)) {
2051 /* do not match RUBY_FATAL_THREAD_KILLED etc */
2052 if (err != rb_cInteger) {
2053 return rb_threadptr_pending_interrupt_from_symbol(th, mask);
2054 }
2055 else {
2056 continue;
2057 }
2058 }
2059
2060 for (mod = err; mod; mod = RCLASS_SUPER(mod)) {
2061 VALUE klass = mod;
2062 VALUE sym;
2063
2064 if (BUILTIN_TYPE(mod) == T_ICLASS) {
2065 klass = RBASIC(mod)->klass;
2066 }
2067 else if (mod != RCLASS_ORIGIN(mod)) {
2068 continue;
2069 }
2070
2071 if ((sym = rb_hash_aref(mask, klass)) != Qnil) {
2072 return rb_threadptr_pending_interrupt_from_symbol(th, sym);
2073 }
2074 }
2075         /* try the next mask */
2076 }
2077 return INTERRUPT_NONE;
2078}
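/*
 * Note: the loop in rb_threadptr_pending_interrupt_check_mask() above walks
 * the mask stack from the innermost (most recently pushed)
 * Thread.handle_interrupt mask outward, and the first entry whose key
 * matches the error class (or one of its ancestors) decides the timing, so
 * nested handle_interrupt blocks override outer ones.
 */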
2079
2080static int
2081rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th)
2082{
2083 return RARRAY_LEN(th->pending_interrupt_queue) == 0;
2084}
2085
2086static int
2087rb_threadptr_pending_interrupt_include_p(rb_thread_t *th, VALUE err)
2088{
2089 int i;
2090 for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
2091 VALUE e = RARRAY_AREF(th->pending_interrupt_queue, i);
2092 if (rb_obj_is_kind_of(e, err)) {
2093 return TRUE;
2094 }
2095 }
2096 return FALSE;
2097}
2098
2099static VALUE
2100rb_threadptr_pending_interrupt_deque(rb_thread_t *th, enum handle_interrupt_timing timing)
2101{
2102#if 1 /* 1 to enable Thread#handle_interrupt, 0 to ignore it */
2103 int i;
2104
2105 for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
2106 VALUE err = RARRAY_AREF(th->pending_interrupt_queue, i);
2107
2108 enum handle_interrupt_timing mask_timing = rb_threadptr_pending_interrupt_check_mask(th, CLASS_OF(err));
2109
2110 switch (mask_timing) {
2111 case INTERRUPT_ON_BLOCKING:
2112 if (timing != INTERRUPT_ON_BLOCKING) {
2113 break;
2114 }
2115 /* fall through */
2116 case INTERRUPT_NONE: /* default: IMMEDIATE */
2117 case INTERRUPT_IMMEDIATE:
2118 rb_ary_delete_at(th->pending_interrupt_queue, i);
2119 return err;
2120 case INTERRUPT_NEVER:
2121 break;
2122 }
2123 }
2124
2125 th->pending_interrupt_queue_checked = 1;
2126 return Qundef;
2127#else
2128 VALUE err = rb_ary_shift(th->pending_interrupt_queue);
2129 if (rb_threadptr_pending_interrupt_empty_p(th)) {
2130 th->pending_interrupt_queue_checked = 1;
2131 }
2132 return err;
2133#endif
2134}
2135
2136static int
2137threadptr_pending_interrupt_active_p(rb_thread_t *th)
2138{
2139     /*
2140      * For optimization, we don't check the async errinfo queue
2141      * if neither the queue nor the thread interrupt mask has changed
2142      * since the last check.
2143      */
2144 if (th->pending_interrupt_queue_checked) {
2145 return 0;
2146 }
2147
2148 if (rb_threadptr_pending_interrupt_empty_p(th)) {
2149 return 0;
2150 }
2151
2152 return 1;
2153}
2154
2155static int
2156handle_interrupt_arg_check_i(VALUE key, VALUE val, VALUE args)
2157{
2158 VALUE *maskp = (VALUE *)args;
2159
2160 if (val != sym_immediate && val != sym_on_blocking && val != sym_never) {
2161 rb_raise(rb_eArgError, "unknown mask signature");
2162 }
2163
2164 if (key == rb_eException && (UNDEF_P(*maskp) || NIL_P(*maskp))) {
2165 *maskp = val;
2166 return ST_CONTINUE;
2167 }
2168
2169 if (RTEST(*maskp)) {
2170 if (!RB_TYPE_P(*maskp, T_HASH)) {
2171 VALUE prev = *maskp;
2172 *maskp = rb_ident_hash_new();
2173 if (SYMBOL_P(prev)) {
2174 rb_hash_aset(*maskp, rb_eException, prev);
2175 }
2176 }
2177 rb_hash_aset(*maskp, key, val);
2178 }
2179 else {
2180 *maskp = Qfalse;
2181 }
2182
2183 return ST_CONTINUE;
2184}
2185
2186/*
2187 * call-seq:
2188 * Thread.handle_interrupt(hash) { ... } -> result of the block
2189 *
2190 * Changes asynchronous interrupt timing.
2191 *
2192  * _interrupt_ means an asynchronous event and its corresponding procedure,
2193  * triggered by Thread#raise, Thread#kill, signal trap (not supported yet)
2194  * and main thread termination (if the main thread terminates, then all
2195  * other threads will be killed).
2196 *
2197  * The given +hash+ has pairs like <code>ExceptionClass =>
2198  * :TimingSymbol</code>, where the ExceptionClass is the interrupt handled by
2199 * the given block. The TimingSymbol can be one of the following symbols:
2200 *
2201 * [+:immediate+] Invoke interrupts immediately.
2202 * [+:on_blocking+] Invoke interrupts while _BlockingOperation_.
2203  * [+:never+] Never invoke any interrupts.
2204 *
2205  * _BlockingOperation_ means an operation that will block the calling thread,
2206  * such as read and write. On the CRuby implementation, _BlockingOperation_ is
2207  * any operation executed without the GVL.
2208 *
2209 * Masked asynchronous interrupts are delayed until they are enabled.
2210 * This method is similar to sigprocmask(3).
2211 *
2212 * === NOTE
2213 *
2214 * Asynchronous interrupts are difficult to use.
2215 *
2216  * If you need to communicate between threads, please consider using another way, such as Queue.
2217  *
2218  * Or use them only with a deep understanding of this method.
2219 *
2220 * === Usage
2221 *
2222  * In this example, we can guard against Thread#raise exceptions.
2223 *
2224  * Using the +:never+ TimingSymbol, the RuntimeError exception will always be
2225 * ignored in the first block of the main thread. In the second
2226 * ::handle_interrupt block we can purposefully handle RuntimeError exceptions.
2227 *
2228 * th = Thread.new do
2229 * Thread.handle_interrupt(RuntimeError => :never) {
2230 * begin
2231 * # You can write resource allocation code safely.
2232 * Thread.handle_interrupt(RuntimeError => :immediate) {
2233 * # ...
2234 * }
2235 * ensure
2236 * # You can write resource deallocation code safely.
2237 * end
2238 * }
2239 * end
2240 * Thread.pass
2241 * # ...
2242 * th.raise "stop"
2243 *
2244 * While we are ignoring the RuntimeError exception, it's safe to write our
2245 * resource allocation code. Then, the ensure block is where we can safely
2246  * deallocate our resources.
2247 *
2248 * ==== Stack control settings
2249 *
2250 * It's possible to stack multiple levels of ::handle_interrupt blocks in order
2251 * to control more than one ExceptionClass and TimingSymbol at a time.
2252 *
2253 * Thread.handle_interrupt(FooError => :never) {
2254 * Thread.handle_interrupt(BarError => :never) {
2255 * # FooError and BarError are prohibited.
2256 * }
2257 * }
2258 *
2259 * ==== Inheritance with ExceptionClass
2260 *
2261 * All exceptions inherited from the ExceptionClass parameter will be considered.
2262 *
2263 * Thread.handle_interrupt(Exception => :never) {
2264 * # all exceptions inherited from Exception are prohibited.
2265 * }
2266 *
2267 * For handling all interrupts, use +Object+ and not +Exception+
2268 * as the ExceptionClass, as kill/terminate interrupts are not handled by +Exception+.
2269 */
2270static VALUE
2271rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg)
2272{
2273 VALUE mask = Qundef;
2274 rb_execution_context_t * volatile ec = GET_EC();
2275 rb_thread_t * volatile th = rb_ec_thread_ptr(ec);
2276 volatile VALUE r = Qnil;
2277 enum ruby_tag_type state;
2278
2279 if (!rb_block_given_p()) {
2280 rb_raise(rb_eArgError, "block is needed.");
2281 }
2282
2283 mask_arg = rb_to_hash_type(mask_arg);
2284
2285 if (OBJ_FROZEN(mask_arg) && rb_hash_compare_by_id_p(mask_arg)) {
2286 mask = Qnil;
2287 }
2288
2289 rb_hash_foreach(mask_arg, handle_interrupt_arg_check_i, (VALUE)&mask);
2290
2291 if (UNDEF_P(mask)) {
2292 return rb_yield(Qnil);
2293 }
2294
2295 if (!RTEST(mask)) {
2296 mask = mask_arg;
2297 }
2298 else if (RB_TYPE_P(mask, T_HASH)) {
2299 OBJ_FREEZE(mask);
2300 }
2301
2302 rb_ary_push(th->pending_interrupt_mask_stack, mask);
2303 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2304 th->pending_interrupt_queue_checked = 0;
2305 RUBY_VM_SET_INTERRUPT(th->ec);
2306 }
2307
2308 EC_PUSH_TAG(th->ec);
2309 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
2310 r = rb_yield(Qnil);
2311 }
2312 EC_POP_TAG();
2313
2314 rb_ary_pop(th->pending_interrupt_mask_stack);
2315 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2316 th->pending_interrupt_queue_checked = 0;
2317 RUBY_VM_SET_INTERRUPT(th->ec);
2318 }
2319
2320 RUBY_VM_CHECK_INTS(th->ec);
2321
2322 if (state) {
2323 EC_JUMP_TAG(th->ec, state);
2324 }
2325
2326 return r;
2327}
2328
2329/*
2330 * call-seq:
2331 * target_thread.pending_interrupt?(error = nil) -> true/false
2332 *
2333 * Returns whether or not the asynchronous queue is empty for the target thread.
2334 *
2335 * If +error+ is given, then check only for +error+ type deferred events.
2336 *
2337 * See ::pending_interrupt? for more information.
2338 */
2339static VALUE
2340rb_thread_pending_interrupt_p(int argc, VALUE *argv, VALUE target_thread)
2341{
2342 rb_thread_t *target_th = rb_thread_ptr(target_thread);
2343
2344 if (!target_th->pending_interrupt_queue) {
2345 return Qfalse;
2346 }
2347 if (rb_threadptr_pending_interrupt_empty_p(target_th)) {
2348 return Qfalse;
2349 }
2350 if (rb_check_arity(argc, 0, 1)) {
2351 VALUE err = argv[0];
2352 if (!rb_obj_is_kind_of(err, rb_cModule)) {
2353 rb_raise(rb_eTypeError, "class or module required for rescue clause");
2354 }
2355 return RBOOL(rb_threadptr_pending_interrupt_include_p(target_th, err));
2356 }
2357 else {
2358 return Qtrue;
2359 }
2360}
2361
2362/*
2363 * call-seq:
2364 * Thread.pending_interrupt?(error = nil) -> true/false
2365 *
2366 * Returns whether or not the asynchronous queue is empty.
2367 *
2368 * Since Thread::handle_interrupt can be used to defer asynchronous events,
2369 * this method can be used to determine if there are any deferred events.
2370 *
2371  * If this method returns true, then you may finish the +:never+ block.
2372 *
2373 * For example, the following method processes deferred asynchronous events
2374 * immediately.
2375 *
2376 * def Thread.kick_interrupt_immediately
2377 * Thread.handle_interrupt(Object => :immediate) {
2378 * Thread.pass
2379 * }
2380 * end
2381 *
2382 * If +error+ is given, then check only for +error+ type deferred events.
2383 *
2384 * === Usage
2385 *
2386 * th = Thread.new{
2387 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2388 * while true
2389 * ...
2390 * # reach safe point to invoke interrupt
2391 * if Thread.pending_interrupt?
2392 * Thread.handle_interrupt(Object => :immediate){}
2393 * end
2394 * ...
2395 * end
2396 * }
2397 * }
2398 * ...
2399 * th.raise # stop thread
2400 *
2401  * This example can also be written as follows, which you should use to
2402 * avoid asynchronous interrupts.
2403 *
2404 * flag = true
2405 * th = Thread.new{
2406 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2407 * while true
2408 * ...
2409 * # reach safe point to invoke interrupt
2410 * break if flag == false
2411 * ...
2412 * end
2413 * }
2414 * }
2415 * ...
2416 * flag = false # stop thread
2417 */
2418
2419static VALUE
2420rb_thread_s_pending_interrupt_p(int argc, VALUE *argv, VALUE self)
2421{
2422 return rb_thread_pending_interrupt_p(argc, argv, GET_THREAD()->self);
2423}
2424
2425NORETURN(static void rb_threadptr_to_kill(rb_thread_t *th));
2426
2427static void
2428rb_threadptr_to_kill(rb_thread_t *th)
2429{
2430 rb_threadptr_pending_interrupt_clear(th);
2431 th->status = THREAD_RUNNABLE;
2432 th->to_kill = 1;
2433 th->ec->errinfo = INT2FIX(TAG_FATAL);
2434 EC_JUMP_TAG(th->ec, TAG_FATAL);
2435}
2436
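/* Atomically consume the deliverable interrupt bits: the CAS loop below
 * clears from ec->interrupt_flag exactly those bits not covered by
 * ec->interrupt_mask, and returns the bits it consumed (masked bits are
 * left pending). */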
2437static inline rb_atomic_t
2438threadptr_get_interrupts(rb_thread_t *th)
2439{
2440 rb_execution_context_t *ec = th->ec;
2441 rb_atomic_t interrupt;
2442 rb_atomic_t old;
2443
2444 do {
2445 interrupt = ec->interrupt_flag;
2446 old = ATOMIC_CAS(ec->interrupt_flag, interrupt, interrupt & ec->interrupt_mask);
2447 } while (old != interrupt);
2448 return interrupt & (rb_atomic_t)~ec->interrupt_mask;
2449}
2450
2451static void threadptr_interrupt_exec_exec(rb_thread_t *th);
2452
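/*
 * Central interrupt dispatcher: drains the consumable interrupt bits and
 * services them in turn -- VM barrier synchronization, postponed jobs,
 * trap handlers and interrupt_exec tasks, pending cross-thread exceptions
 * (subject to the handle_interrupt mask), thread termination, and finally
 * the timer tick that drives time-slice scheduling. Returns nonzero if a
 * signal handler ran or a pending interrupt was dequeued.
 */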
2453int
2454rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
2455{
2456 rb_atomic_t interrupt;
2457 int postponed_job_interrupt = 0;
2458 int ret = FALSE;
2459
2460 if (th->ec->raised_flag) return ret;
2461
2462 while ((interrupt = threadptr_get_interrupts(th)) != 0) {
2463 int sig;
2464 int timer_interrupt;
2465 int pending_interrupt;
2466 int trap_interrupt;
2467 int terminate_interrupt;
2468
2469 timer_interrupt = interrupt & TIMER_INTERRUPT_MASK;
2470 pending_interrupt = interrupt & PENDING_INTERRUPT_MASK;
2471 postponed_job_interrupt = interrupt & POSTPONED_JOB_INTERRUPT_MASK;
2472 trap_interrupt = interrupt & TRAP_INTERRUPT_MASK;
2473 terminate_interrupt = interrupt & TERMINATE_INTERRUPT_MASK; // request from other ractors
2474
2475 if (interrupt & VM_BARRIER_INTERRUPT_MASK) {
2476 RB_VM_LOCK_ENTER();
2477 RB_VM_LOCK_LEAVE();
2478 }
2479
2480 if (postponed_job_interrupt) {
2481 rb_postponed_job_flush(th->vm);
2482 }
2483
2484 if (trap_interrupt) {
2485 /* signal handling */
2486 if (th == th->vm->ractor.main_thread) {
2487 enum rb_thread_status prev_status = th->status;
2488
2489 th->status = THREAD_RUNNABLE;
2490 {
2491 while ((sig = rb_get_next_signal()) != 0) {
2492 ret |= rb_signal_exec(th, sig);
2493 }
2494 }
2495 th->status = prev_status;
2496 }
2497
2498 if (!ccan_list_empty(&th->interrupt_exec_tasks)) {
2499 enum rb_thread_status prev_status = th->status;
2500
2501 th->status = THREAD_RUNNABLE;
2502 {
2503 threadptr_interrupt_exec_exec(th);
2504 }
2505 th->status = prev_status;
2506 }
2507 }
2508
2509 /* exception from another thread */
2510 if (pending_interrupt && threadptr_pending_interrupt_active_p(th)) {
2511 VALUE err = rb_threadptr_pending_interrupt_deque(th, blocking_timing ? INTERRUPT_ON_BLOCKING : INTERRUPT_NONE);
2512 RUBY_DEBUG_LOG("err:%"PRIdVALUE, err);
2513 ret = TRUE;
2514
2515 if (UNDEF_P(err)) {
2516 /* no error */
2517 }
2518 else if (err == RUBY_FATAL_THREAD_KILLED /* Thread#kill received */ ||
2519 err == RUBY_FATAL_THREAD_TERMINATED /* Terminate thread */ ||
2520 err == INT2FIX(TAG_FATAL) /* Thread.exit etc. */ ) {
2521 terminate_interrupt = 1;
2522 }
2523 else {
2524 if (err == th->vm->special_exceptions[ruby_error_stream_closed]) {
2525                 /* the only special exception to be queued across threads */
2526 err = ruby_vm_special_exception_copy(err);
2527 }
2528             /* set runnable if th was sleeping. */
2529 if (th->status == THREAD_STOPPED ||
2530 th->status == THREAD_STOPPED_FOREVER)
2531 th->status = THREAD_RUNNABLE;
2532 rb_exc_raise(err);
2533 }
2534 }
2535
2536 if (terminate_interrupt) {
2537 rb_threadptr_to_kill(th);
2538 }
2539
2540 if (timer_interrupt) {
2541 uint32_t limits_us = thread_default_quantum_ms * 1000;
2542
2543 if (th->priority > 0)
2544 limits_us <<= th->priority;
2545 else
2546 limits_us >>= -th->priority;
2547
2548 if (th->status == THREAD_RUNNABLE)
2549 th->running_time_us += 10 * 1000; // 10ms = 10_000us // TODO: use macro
2550
2551 VM_ASSERT(th->ec->cfp);
2552 EXEC_EVENT_HOOK(th->ec, RUBY_INTERNAL_EVENT_SWITCH, th->ec->cfp->self,
2553 0, 0, 0, Qundef);
2554
2555 rb_thread_schedule_limits(limits_us);
2556 }
2557 }
2558 return ret;
2559}
2560
2561void
2562rb_thread_execute_interrupts(VALUE thval)
2563{
2564 rb_threadptr_execute_interrupts(rb_thread_ptr(thval), 1);
2565}
2566
2567static void
2568rb_threadptr_ready(rb_thread_t *th)
2569{
2570 rb_threadptr_interrupt(th);
2571}
2572
2573static VALUE
2574rb_threadptr_raise(rb_thread_t *target_th, int argc, VALUE *argv)
2575{
2576 VALUE exc;
2577
2578 if (rb_threadptr_dead(target_th)) {
2579 return Qnil;
2580 }
2581
2582 if (argc == 0) {
2583 exc = rb_exc_new(rb_eRuntimeError, 0, 0);
2584 }
2585 else {
2586 exc = rb_make_exception(argc, argv);
2587 }
2588
2589     /* making an exception object can switch threads,
2590        so we need to check thread deadness again */
2591 if (rb_threadptr_dead(target_th)) {
2592 return Qnil;
2593 }
2594
2595 rb_ec_setup_exception(GET_EC(), exc, Qundef);
2596 rb_threadptr_pending_interrupt_enque(target_th, exc);
2597 rb_threadptr_interrupt(target_th);
2598 return Qnil;
2599}
2600
2601void
2602rb_threadptr_signal_raise(rb_thread_t *th, int sig)
2603{
2604 VALUE argv[2];
2605
2606 argv[0] = rb_eSignal;
2607 argv[1] = INT2FIX(sig);
2608 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2609}
2610
2611void
2612rb_threadptr_signal_exit(rb_thread_t *th)
2613{
2614 VALUE argv[2];
2615
2616 argv[0] = rb_eSystemExit;
2617 argv[1] = rb_str_new2("exit");
2618
2619     // TODO: check signal raise delivery
2620 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2621}
2622
2623int
2624rb_ec_set_raised(rb_execution_context_t *ec)
2625{
2626 if (ec->raised_flag & RAISED_EXCEPTION) {
2627 return 1;
2628 }
2629 ec->raised_flag |= RAISED_EXCEPTION;
2630 return 0;
2631}
2632
2633int
2634rb_ec_reset_raised(rb_execution_context_t *ec)
2635{
2636 if (!(ec->raised_flag & RAISED_EXCEPTION)) {
2637 return 0;
2638 }
2639 ec->raised_flag &= ~RAISED_EXCEPTION;
2640 return 1;
2641}
2642
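/*
 * Notify every thread blocked on `fd' that the descriptor is about to be
 * closed: each waiter is moved onto busy->pending_fd_users, gets the shared
 * stream-closed special exception queued, and is interrupted so it wakes up.
 * Returns nonzero if any waiter was found; in that case a hidden wakeup
 * mutex is allocated so that rb_notify_fd_close_wait() can sleep until the
 * last waiter has gone.
 */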
2643int
2644rb_notify_fd_close(int fd, struct rb_io_close_wait_list *busy)
2645{
2646 rb_vm_t *vm = GET_THREAD()->vm;
2647 struct waiting_fd *wfd = 0, *next;
2648 ccan_list_head_init(&busy->pending_fd_users);
2649 int has_any;
2650 VALUE wakeup_mutex;
2651
2652 RB_VM_LOCK_ENTER();
2653 {
2654 ccan_list_for_each_safe(&vm->waiting_fds, wfd, next, wfd_node) {
2655 if (wfd->fd == fd) {
2656 rb_thread_t *th = wfd->th;
2657 VALUE err;
2658
2659 ccan_list_del(&wfd->wfd_node);
2660 ccan_list_add(&busy->pending_fd_users, &wfd->wfd_node);
2661
2662 wfd->busy = busy;
2663 err = th->vm->special_exceptions[ruby_error_stream_closed];
2664 rb_threadptr_pending_interrupt_enque(th, err);
2665 rb_threadptr_interrupt(th);
2666 }
2667 }
2668 }
2669
2670 has_any = !ccan_list_empty(&busy->pending_fd_users);
2671 busy->closing_thread = rb_thread_current();
2672 busy->closing_fiber = rb_fiber_current();
2673 wakeup_mutex = Qnil;
2674 if (has_any) {
2675 wakeup_mutex = rb_mutex_new();
2676 RBASIC_CLEAR_CLASS(wakeup_mutex); /* hide from ObjectSpace */
2677 }
2678 busy->wakeup_mutex = wakeup_mutex;
2679
2680 RB_VM_LOCK_LEAVE();
2681
2682 /* If the caller didn't pass *busy as a pointer to something on the stack,
2683 we need to guard this mutex object on _our_ C stack for the duration
2684 of this function. */
2685 RB_GC_GUARD(wakeup_mutex);
2686 return has_any;
2687}
2688
2689void
2690rb_notify_fd_close_wait(struct rb_io_close_wait_list *busy)
2691{
2692 if (!RB_TEST(busy->wakeup_mutex)) {
2693         /* There was nobody else using this file when we closed it, so we
2694            never bothered to allocate a mutex */
2695 return;
2696 }
2697
2698 rb_mutex_lock(busy->wakeup_mutex);
2699 while (!ccan_list_empty(&busy->pending_fd_users)) {
2700 rb_mutex_sleep(busy->wakeup_mutex, Qnil);
2701 }
2702 rb_mutex_unlock(busy->wakeup_mutex);
2703}
2704
2705void
2706rb_thread_fd_close(int fd)
2707{
2708 struct rb_io_close_wait_list busy;
2709
2710 if (rb_notify_fd_close(fd, &busy)) {
2711 rb_notify_fd_close_wait(&busy);
2712 }
2713}
2714
2715/*
2716 * call-seq:
2717 * thr.raise
2718 * thr.raise(string)
2719 * thr.raise(exception [, string [, array]])
2720 *
2721 * Raises an exception from the given thread. The caller does not have to be
2722 * +thr+. See Kernel#raise for more information.
2723 *
2724 * Thread.abort_on_exception = true
2725 * a = Thread.new { sleep(200) }
2726 * a.raise("Gotcha")
2727 *
2728 * This will produce:
2729 *
2730 * prog.rb:3: Gotcha (RuntimeError)
2731 * from prog.rb:2:in `initialize'
2732 * from prog.rb:2:in `new'
2733 * from prog.rb:2
2734 */
2735
2736static VALUE
2737thread_raise_m(int argc, VALUE *argv, VALUE self)
2738{
2739 rb_thread_t *target_th = rb_thread_ptr(self);
2740 const rb_thread_t *current_th = GET_THREAD();
2741
2742 threadptr_check_pending_interrupt_queue(target_th);
2743 rb_threadptr_raise(target_th, argc, argv);
2744
2745 /* To perform Thread.current.raise as Kernel.raise */
2746 if (current_th == target_th) {
2747 RUBY_VM_CHECK_INTS(target_th->ec);
2748 }
2749 return Qnil;
2750}
2751
2752
2753/*
2754 * call-seq:
2755 * thr.exit -> thr
2756 * thr.kill -> thr
2757 * thr.terminate -> thr
2758 *
2759 * Terminates +thr+ and schedules another thread to be run, returning
2760 * the terminated Thread. If this is the main thread, or the last
2761 * thread, exits the process.
2762 */
2763
2764 VALUE
2765 rb_thread_kill(VALUE thread)
2766{
2767 rb_thread_t *target_th = rb_thread_ptr(thread);
2768
2769 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2770 return thread;
2771 }
2772 if (target_th == target_th->vm->ractor.main_thread) {
2773 rb_exit(EXIT_SUCCESS);
2774 }
2775
2776 RUBY_DEBUG_LOG("target_th:%u", rb_th_serial(target_th));
2777
2778 if (target_th == GET_THREAD()) {
2779 /* kill myself immediately */
2780 rb_threadptr_to_kill(target_th);
2781 }
2782 else {
2783 threadptr_check_pending_interrupt_queue(target_th);
2784 rb_threadptr_pending_interrupt_enque(target_th, RUBY_FATAL_THREAD_KILLED);
2785 rb_threadptr_interrupt(target_th);
2786 }
2787
2788 return thread;
2789}
2790
2791int
2792rb_thread_to_be_killed(VALUE thread)
2793{
2794 rb_thread_t *target_th = rb_thread_ptr(thread);
2795
2796 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2797 return TRUE;
2798 }
2799 return FALSE;
2800}
2801
2802/*
2803 * call-seq:
2804 * Thread.kill(thread) -> thread
2805 *
2806  * Causes the given +thread+ to exit; see also Thread::exit.
2807 *
2808 * count = 0
2809 * a = Thread.new { loop { count += 1 } }
2810 * sleep(0.1) #=> 0
2811 * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
2812 * count #=> 93947
2813 * a.alive? #=> false
2814 */
2815
2816static VALUE
2817rb_thread_s_kill(VALUE obj, VALUE th)
2818{
2819 return rb_thread_kill(th);
2820}
2821
2822
2823/*
2824 * call-seq:
2825 * Thread.exit -> thread
2826 *
2827 * Terminates the currently running thread and schedules another thread to be
2828 * run.
2829 *
2830 * If this thread is already marked to be killed, ::exit returns the Thread.
2831 *
2832  * If this is the main thread, or the last thread, exits the process.
2833 */
2834
2835static VALUE
2836rb_thread_exit(VALUE _)
2837{
2838 rb_thread_t *th = GET_THREAD();
2839 return rb_thread_kill(th->self);
2840}
2841
2842
2843/*
2844 * call-seq:
2845 * thr.wakeup -> thr
2846 *
2847  * Marks a given thread as eligible for scheduling; however, it may still
2848  * remain blocked on I/O.
2849 *
2850 * *Note:* This does not invoke the scheduler, see #run for more information.
2851 *
2852 * c = Thread.new { Thread.stop; puts "hey!" }
2853 * sleep 0.1 while c.status!='sleep'
2854 * c.wakeup
2855 * c.join
2856 * #=> "hey!"
2857 */
2858
2859 VALUE
2860 rb_thread_wakeup(VALUE thread)
2861{
2862 if (!RTEST(rb_thread_wakeup_alive(thread))) {
2863 rb_raise(rb_eThreadError, "killed thread");
2864 }
2865 return thread;
2866}
2867
2868 VALUE
2869 rb_thread_wakeup_alive(VALUE thread)
2870 {
2871 rb_thread_t *target_th = rb_thread_ptr(thread);
2872 if (target_th->status == THREAD_KILLED) return Qnil;
2873
2874 rb_threadptr_ready(target_th);
2875
2876 if (target_th->status == THREAD_STOPPED ||
2877 target_th->status == THREAD_STOPPED_FOREVER) {
2878 target_th->status = THREAD_RUNNABLE;
2879 }
2880
2881 return thread;
2882}
2883
2884
2885/*
2886 * call-seq:
2887 * thr.run -> thr
2888 *
2889 * Wakes up +thr+, making it eligible for scheduling.
2890 *
2891 * a = Thread.new { puts "a"; Thread.stop; puts "c" }
2892 * sleep 0.1 while a.status!='sleep'
2893 * puts "Got here"
2894 * a.run
2895 * a.join
2896 *
2897 * This will produce:
2898 *
2899 * a
2900 * Got here
2901 * c
2902 *
2903 * See also the instance method #wakeup.
2904 */
2905
2906 VALUE
2907 rb_thread_run(VALUE thread)
2908{
2909 rb_thread_wakeup(thread);
2910     rb_thread_schedule();
2911     return thread;
2912}
2913
2914
2915 VALUE
2916 rb_thread_stop(void)
2917{
2918 if (rb_thread_alone()) {
2919 rb_raise(rb_eThreadError,
2920 "stopping only thread\n\tnote: use sleep to stop forever");
2921 }
2922     rb_thread_sleep_deadly();
2923     return Qnil;
2924}
2925
2926/*
2927 * call-seq:
2928 * Thread.stop -> nil
2929 *
2930 * Stops execution of the current thread, putting it into a ``sleep'' state,
2931 * and schedules execution of another thread.
2932 *
2933 * a = Thread.new { print "a"; Thread.stop; print "c" }
2934 * sleep 0.1 while a.status!='sleep'
2935 * print "b"
2936 * a.run
2937 * a.join
2938 * #=> "abc"
2939 */
2940
2941static VALUE
2942thread_stop(VALUE _)
2943{
2944 return rb_thread_stop();
2945}
2946
2947/********************************************************************/
2948
2949VALUE
2950rb_thread_list(void)
2951{
2952 // TODO
2953 return rb_ractor_thread_list();
2954}
2955
2956/*
2957 * call-seq:
2958 * Thread.list -> array
2959 *
2960 * Returns an array of Thread objects for all threads that are either runnable
2961 * or stopped.
2962 *
2963 * Thread.new { sleep(200) }
2964 * Thread.new { 1000000.times {|i| i*i } }
2965 * Thread.new { Thread.stop }
2966 * Thread.list.each {|t| p t}
2967 *
2968 * This will produce:
2969 *
2970 * #<Thread:0x401b3e84 sleep>
2971 * #<Thread:0x401b3f38 run>
2972 * #<Thread:0x401b3fb0 sleep>
2973 * #<Thread:0x401bdf4c run>
2974 */
2975
2976static VALUE
2977thread_list(VALUE _)
2978{
2979 return rb_thread_list();
2980}
2981
2982 VALUE
2983 rb_thread_current(void)
2984 {
2985 return GET_THREAD()->self;
2986}
2987
2988/*
2989 * call-seq:
2990 * Thread.current -> thread
2991 *
2992 * Returns the currently executing thread.
2993 *
2994 * Thread.current #=> #<Thread:0x401bdf4c run>
2995 */
2996
2997static VALUE
2998thread_s_current(VALUE klass)
2999{
3000 return rb_thread_current();
3001}
3002
3003 VALUE
3004 rb_thread_main(void)
3005{
3006 return GET_RACTOR()->threads.main->self;
3007}
3008
3009/*
3010 * call-seq:
3011 * Thread.main -> thread
3012 *
3013 * Returns the main thread.
3014 */
3015
3016static VALUE
3017rb_thread_s_main(VALUE klass)
3018{
3019 return rb_thread_main();
3020}
3021
3022
3023/*
3024 * call-seq:
3025 * Thread.abort_on_exception -> true or false
3026 *
3027 * Returns the status of the global ``abort on exception'' condition.
3028 *
3029 * The default is +false+.
3030 *
3031 * When set to +true+, if any thread is aborted by an exception, the
3032 * raised exception will be re-raised in the main thread.
3033 *
3034 * Can also be specified by the global $DEBUG flag or command line option
3035 * +-d+.
3036 *
3037 * See also ::abort_on_exception=.
3038 *
3039 * There is also an instance level method to set this for a specific thread,
3040 * see #abort_on_exception.
3041 */
3042
3043static VALUE
3044rb_thread_s_abort_exc(VALUE _)
3045{
3046 return RBOOL(GET_THREAD()->vm->thread_abort_on_exception);
3047}
3048
3049
3050/*
3051 * call-seq:
3052 * Thread.abort_on_exception= boolean -> true or false
3053 *
3054 * When set to +true+, if any thread is aborted by an exception, the
3055 * raised exception will be re-raised in the main thread.
3056 * Returns the new state.
3057 *
3058 * Thread.abort_on_exception = true
3059 * t1 = Thread.new do
3060 * puts "In new thread"
3061 * raise "Exception from thread"
3062 * end
3063 * sleep(1)
3064 * puts "not reached"
3065 *
3066 * This will produce:
3067 *
3068 * In new thread
3069 * prog.rb:4: Exception from thread (RuntimeError)
3070 * from prog.rb:2:in `initialize'
3071 * from prog.rb:2:in `new'
3072 * from prog.rb:2
3073 *
3074 * See also ::abort_on_exception.
3075 *
3076 * There is also an instance level method to set this for a specific thread,
3077 * see #abort_on_exception=.
3078 */
3079
3080static VALUE
3081rb_thread_s_abort_exc_set(VALUE self, VALUE val)
3082{
3083 GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
3084 return val;
3085}
3086
3087
3088/*
3089 * call-seq:
3090 * thr.abort_on_exception -> true or false
3091 *
3092 * Returns the status of the thread-local ``abort on exception'' condition for
3093 * this +thr+.
3094 *
3095 * The default is +false+.
3096 *
3097 * See also #abort_on_exception=.
3098 *
3099 * There is also a class level method to set this for all threads, see
3100 * ::abort_on_exception.
3101 */
3102
3103static VALUE
3104rb_thread_abort_exc(VALUE thread)
3105{
3106 return RBOOL(rb_thread_ptr(thread)->abort_on_exception);
3107}
3108
3109
3110/*
3111 * call-seq:
3112 * thr.abort_on_exception= boolean -> true or false
3113 *
3114 * When set to +true+, if this +thr+ is aborted by an exception, the
3115 * raised exception will be re-raised in the main thread.
3116 *
3117 * See also #abort_on_exception.
3118 *
3119 * There is also a class level method to set this for all threads, see
3120 * ::abort_on_exception=.
3121 */
3122
3123static VALUE
3124rb_thread_abort_exc_set(VALUE thread, VALUE val)
3125{
3126 rb_thread_ptr(thread)->abort_on_exception = RTEST(val);
3127 return val;
3128}
3129
3130
3131/*
3132 * call-seq:
3133 * Thread.report_on_exception -> true or false
3134 *
3135 * Returns the status of the global ``report on exception'' condition.
3136 *
3137 * The default is +true+ since Ruby 2.5.
3138 *
3139 * All threads created when this flag is true will report
3140 * a message on $stderr if an exception kills the thread.
3141 *
3142 * Thread.new { 1.times { raise } }
3143 *
3144 * will produce this output on $stderr:
3145 *
3146 * #<Thread:...> terminated with exception (report_on_exception is true):
3147 * Traceback (most recent call last):
3148 * 2: from -e:1:in `block in <main>'
3149 * 1: from -e:1:in `times'
3150 *
3151 * This is done to catch errors in threads early.
3152 * In some cases, you might not want this output.
3153 * There are multiple ways to avoid the extra output:
3154 *
3155 * * If the exception is not intended, the best is to fix the cause of
3156 * the exception so it does not happen anymore.
3157 * * If the exception is intended, it might be better to rescue it closer to
3158  *   where it is raised rather than let it kill the Thread.
3159 * * If it is guaranteed the Thread will be joined with Thread#join or
3160 * Thread#value, then it is safe to disable this report with
3161 * <code>Thread.current.report_on_exception = false</code>
3162 * when starting the Thread.
3163 * However, this might handle the exception much later, or not at all
3164 * if the Thread is never joined due to the parent thread being blocked, etc.
3165 *
3166 * See also ::report_on_exception=.
3167 *
3168 * There is also an instance level method to set this for a specific thread,
3169 * see #report_on_exception=.
3170 *
3171 */
3172
3173static VALUE
3174rb_thread_s_report_exc(VALUE _)
3175{
3176 return RBOOL(GET_THREAD()->vm->thread_report_on_exception);
3177}
3178
3179
3180/*
3181 * call-seq:
3182 * Thread.report_on_exception= boolean -> true or false
3183 *
3184 * Returns the new state.
3185 * When set to +true+, all threads created afterwards will inherit the
3186 * condition and report a message on $stderr if an exception kills a thread:
3187 *
3188 * Thread.report_on_exception = true
3189 * t1 = Thread.new do
3190 * puts "In new thread"
3191 * raise "Exception from thread"
3192 * end
3193 * sleep(1)
3194 * puts "In the main thread"
3195 *
3196 * This will produce:
3197 *
3198 * In new thread
3199 * #<Thread:...prog.rb:2> terminated with exception (report_on_exception is true):
3200 * Traceback (most recent call last):
3201 * prog.rb:4:in `block in <main>': Exception from thread (RuntimeError)
3202 * In the main thread
3203 *
3204 * See also ::report_on_exception.
3205 *
3206 * There is also an instance level method to set this for a specific thread,
3207 * see #report_on_exception=.
3208 */
3209
3210static VALUE
3211rb_thread_s_report_exc_set(VALUE self, VALUE val)
3212{
3213 GET_THREAD()->vm->thread_report_on_exception = RTEST(val);
3214 return val;
3215}
3216
3217
3218/*
3219 * call-seq:
3220 * Thread.ignore_deadlock -> true or false
3221 *
3222 * Returns the status of the global ``ignore deadlock'' condition.
3223 * The default is +false+, so that deadlock conditions are not ignored.
3224 *
3225 * See also ::ignore_deadlock=.
3226 *
3227 */
3228
3229static VALUE
3230rb_thread_s_ignore_deadlock(VALUE _)
3231{
3232 return RBOOL(GET_THREAD()->vm->thread_ignore_deadlock);
3233}
3234
3235
3236/*
3237 * call-seq:
3238 * Thread.ignore_deadlock = boolean -> true or false
3239 *
3240 * Returns the new state.
3241 * When set to +true+, the VM will not check for deadlock conditions.
3242 * It is only useful to set this if your application can break a
3243 * deadlock condition via some other means, such as a signal.
3244 *
3245 * Thread.ignore_deadlock = true
3246 * queue = Thread::Queue.new
3247 *
3248 * trap(:SIGUSR1){queue.push "Received signal"}
3249 *
3250 * # raises fatal error unless ignoring deadlock
3251 * puts queue.pop
3252 *
3253 * See also ::ignore_deadlock.
3254 */
3255
3256static VALUE
3257rb_thread_s_ignore_deadlock_set(VALUE self, VALUE val)
3258{
3259 GET_THREAD()->vm->thread_ignore_deadlock = RTEST(val);
3260 return val;
3261}
3262
3263
3264/*
3265 * call-seq:
3266 * thr.report_on_exception -> true or false
3267 *
3268 * Returns the status of the thread-local ``report on exception'' condition for
3269 * this +thr+.
3270 *
3271 * The default value when creating a Thread is the value of
3272 * the global flag Thread.report_on_exception.
3273 *
3274 * See also #report_on_exception=.
3275 *
3276 * There is also a class level method to set this for all new threads, see
3277 * ::report_on_exception=.
3278 */
3279
3280static VALUE
3281rb_thread_report_exc(VALUE thread)
3282{
3283 return RBOOL(rb_thread_ptr(thread)->report_on_exception);
3284}
3285
3286
3287/*
3288 * call-seq:
3289 * thr.report_on_exception= boolean -> true or false
3290 *
3291 * When set to +true+, a message is printed on $stderr if an exception
3292 * kills this +thr+. See ::report_on_exception for details.
3293 *
3294 * See also #report_on_exception.
3295 *
3296 * There is also a class level method to set this for all new threads, see
3297 * ::report_on_exception=.
3298 */
3299
3300static VALUE
3301rb_thread_report_exc_set(VALUE thread, VALUE val)
3302{
3303 rb_thread_ptr(thread)->report_on_exception = RTEST(val);
3304 return val;
3305}
3306
3307
3308/*
3309 * call-seq:
3310 * thr.group -> thgrp or nil
3311 *
3312 * Returns the ThreadGroup which contains the given thread.
3313 *
3314 * Thread.main.group #=> #<ThreadGroup:0x4029d914>
3315 */
3316
3317VALUE
3318rb_thread_group(VALUE thread)
3319{
3320 return rb_thread_ptr(thread)->thgroup;
3321}
3322
3323static const char *
3324thread_status_name(rb_thread_t *th, int detail)
3325{
3326 switch (th->status) {
3327 case THREAD_RUNNABLE:
3328 return th->to_kill ? "aborting" : "run";
3329 case THREAD_STOPPED_FOREVER:
3330 if (detail) return "sleep_forever";
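        /* fall through */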
3331 case THREAD_STOPPED:
3332 return "sleep";
3333 case THREAD_KILLED:
3334 return "dead";
3335 default:
3336 return "unknown";
3337 }
3338}
3339
3340static int
3341rb_threadptr_dead(rb_thread_t *th)
3342{
3343 return th->status == THREAD_KILLED;
3344}
3345
3346
3347/*
3348 * call-seq:
3349 * thr.status -> string, false or nil
3350 *
3351 * Returns the status of +thr+.
3352 *
3353 * [<tt>"sleep"</tt>]
3354 * Returned if this thread is sleeping or waiting on I/O
3355 * [<tt>"run"</tt>]
3356 * When this thread is executing
3357 * [<tt>"aborting"</tt>]
3358 * If this thread is aborting
3359 * [+false+]
3360 * When this thread is terminated normally
3361 * [+nil+]
3362 * If terminated with an exception.
3363 *
3364 * a = Thread.new { raise("die now") }
3365 * b = Thread.new { Thread.stop }
3366 * c = Thread.new { Thread.exit }
3367 * d = Thread.new { sleep }
3368 * d.kill #=> #<Thread:0x401b3678 aborting>
3369 * a.status #=> nil
3370 * b.status #=> "sleep"
3371 * c.status #=> false
3372 * d.status #=> "aborting"
3373 * Thread.current.status #=> "run"
3374 *
3375 * See also the instance methods #alive? and #stop?
3376 */
3377
3378static VALUE
3379rb_thread_status(VALUE thread)
3380{
3381 rb_thread_t *target_th = rb_thread_ptr(thread);
3382
3383 if (rb_threadptr_dead(target_th)) {
3384 if (!NIL_P(target_th->ec->errinfo) &&
3385 !FIXNUM_P(target_th->ec->errinfo)) {
3386 return Qnil;
3387 }
3388 else {
3389 return Qfalse;
3390 }
3391 }
3392 else {
3393 return rb_str_new2(thread_status_name(target_th, FALSE));
3394 }
3395}
3396
3397
3398/*
3399 * call-seq:
3400 * thr.alive? -> true or false
3401 *
3402 * Returns +true+ if +thr+ is running or sleeping.
3403 *
3404 * thr = Thread.new { }
3405 * thr.join #=> #<Thread:0x401b3fb0 dead>
3406 * Thread.current.alive? #=> true
3407 * thr.alive? #=> false
3408 *
3409 * See also #stop? and #status.
3410 */
3411
3412static VALUE
3413rb_thread_alive_p(VALUE thread)
3414{
3415 return RBOOL(!thread_finished(rb_thread_ptr(thread)));
3416}
3417
3418/*
3419 * call-seq:
3420 * thr.stop? -> true or false
3421 *
3422 * Returns +true+ if +thr+ is dead or sleeping.
3423 *
3424 * a = Thread.new { Thread.stop }
3425 * b = Thread.current
3426 * a.stop? #=> true
3427 * b.stop? #=> false
3428 *
3429 * See also #alive? and #status.
3430 */
3431
3432static VALUE
3433rb_thread_stop_p(VALUE thread)
3434{
3435 rb_thread_t *th = rb_thread_ptr(thread);
3436
3437 if (rb_threadptr_dead(th)) {
3438 return Qtrue;
3439 }
3440 return RBOOL(th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER);
3441}
3442
3443/*
3444 * call-seq:
3445 * thr.name -> string
3446 *
3447  * Returns the name of the thread.
3448 */
3449
3450static VALUE
3451rb_thread_getname(VALUE thread)
3452{
3453 return rb_thread_ptr(thread)->name;
3454}
3455
3456/*
3457 * call-seq:
3458 * thr.name=(name) -> string
3459 *
3460  * Sets the given name to the Ruby thread.
3461  * On some platforms, it may also set the name of the underlying native thread in the kernel.
3462 */
3463
3464static VALUE
3465rb_thread_setname(VALUE thread, VALUE name)
3466{
3467 rb_thread_t *target_th = rb_thread_ptr(thread);
3468
3469 if (!NIL_P(name)) {
3470 rb_encoding *enc;
3471 StringValueCStr(name);
3472 enc = rb_enc_get(name);
3473 if (!rb_enc_asciicompat(enc)) {
3474 rb_raise(rb_eArgError, "ASCII incompatible encoding (%s)",
3475 rb_enc_name(enc));
3476 }
3477 name = rb_str_new_frozen(name);
3478 }
3479 target_th->name = name;
3480 if (threadptr_initialized(target_th) && target_th->has_dedicated_nt) {
3481 native_set_another_thread_name(target_th->nt->thread_id, name);
3482 }
3483 return name;
3484}
3485
3486#if USE_NATIVE_THREAD_NATIVE_THREAD_ID
3487/*
3488 * call-seq:
3489 * thr.native_thread_id -> integer
3490 *
3491 * Return the native thread ID which is used by the Ruby thread.
3492 *
3493  * The ID depends on the OS (it is not the POSIX thread ID returned by pthread_self(3)).
3494 * * On Linux it is TID returned by gettid(2).
3495 * * On macOS it is the system-wide unique integral ID of thread returned
3496 * by pthread_threadid_np(3).
3497 * * On FreeBSD it is the unique integral ID of the thread returned by
3498 * pthread_getthreadid_np(3).
3499 * * On Windows it is the thread identifier returned by GetThreadId().
3500 * * On other platforms, it raises NotImplementedError.
3501 *
3502 * NOTE:
3503  * If the thread is not yet associated with, or has already been dissociated
3504  * from, a native thread, it returns _nil_.
3505  * If the Ruby implementation uses an M:N thread model, the ID may change
3506 * depending on the timing.
3507 */
3508
3509static VALUE
3510rb_thread_native_thread_id(VALUE thread)
3511{
3512 rb_thread_t *target_th = rb_thread_ptr(thread);
3513 if (rb_threadptr_dead(target_th)) return Qnil;
3514 return native_thread_native_thread_id(target_th);
3515}
3516#else
3517# define rb_thread_native_thread_id rb_f_notimplement
3518#endif
3519
3520/*
3521 * call-seq:
3522 * thr.to_s -> string
3523 *
3524 * Dump the name, id, and status of _thr_ to a string.
3525 */
3526
3527static VALUE
3528rb_thread_to_s(VALUE thread)
3529{
3530 VALUE cname = rb_class_path(rb_obj_class(thread));
3531 rb_thread_t *target_th = rb_thread_ptr(thread);
3532 const char *status;
3533 VALUE str, loc;
3534
3535 status = thread_status_name(target_th, TRUE);
3536 str = rb_sprintf("#<%"PRIsVALUE":%p", cname, (void *)thread);
3537 if (!NIL_P(target_th->name)) {
3538 rb_str_catf(str, "@%"PRIsVALUE, target_th->name);
3539 }
3540 if ((loc = threadptr_invoke_proc_location(target_th)) != Qnil) {
3541 rb_str_catf(str, " %"PRIsVALUE":%"PRIsVALUE,
3542 RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
3543 }
3544 rb_str_catf(str, " %s>", status);
3545
3546 return str;
3547}
3548
3549/* variables for recursive traversals */
3550#define recursive_key id__recursive_key__
3551
3552static VALUE
3553threadptr_local_aref(rb_thread_t *th, ID id)
3554{
3555 if (id == recursive_key) {
3556 return th->ec->local_storage_recursive_hash;
3557 }
3558 else {
3559 VALUE val;
3560 struct rb_id_table *local_storage = th->ec->local_storage;
3561
3562 if (local_storage != NULL && rb_id_table_lookup(local_storage, id, &val)) {
3563 return val;
3564 }
3565 else {
3566 return Qnil;
3567 }
3568 }
3569}
3570
3571 VALUE
3572 rb_thread_local_aref(VALUE thread, ID id)
3573{
3574 return threadptr_local_aref(rb_thread_ptr(thread), id);
3575}
3576
3577/*
3578 * call-seq:
3579 * thr[sym] -> obj or nil
3580 *
3581 * Attribute Reference---Returns the value of a fiber-local variable (current thread's root fiber
3582 * if not explicitly inside a Fiber), using either a symbol or a string name.
3583 * If the specified variable does not exist, returns +nil+.
3584 *
3585 * [
3586 * Thread.new { Thread.current["name"] = "A" },
3587 * Thread.new { Thread.current[:name] = "B" },
3588 * Thread.new { Thread.current["name"] = "C" }
3589 * ].each do |th|
3590 * th.join
3591 * puts "#{th.inspect}: #{th[:name]}"
3592 * end
3593 *
3594 * This will produce:
3595 *
3596 * #<Thread:0x00000002a54220 dead>: A
3597 * #<Thread:0x00000002a541a8 dead>: B
3598 * #<Thread:0x00000002a54130 dead>: C
3599 *
3600 * Thread#[] and Thread#[]= are not thread-local but fiber-local.
3601 * This confusion did not exist in Ruby 1.8 because
3602  * fibers have only been available since Ruby 1.9.
3603  * Ruby 1.9 made these methods fiber-local in order to preserve the
3604  * following idiom for dynamic scope.
3605 *
3606 * def meth(newvalue)
3607 * begin
3608 * oldvalue = Thread.current[:name]
3609 * Thread.current[:name] = newvalue
3610 * yield
3611 * ensure
3612 * Thread.current[:name] = oldvalue
3613 * end
3614 * end
3615 *
3616 * The idiom may not work as dynamic scope if the methods are thread-local
3617  * and a given block switches fibers.
3618 *
3619 * f = Fiber.new {
3620 * meth(1) {
3621 * Fiber.yield
3622 * }
3623 * }
3624 * meth(2) {
3625 * f.resume
3626 * }
3627 * f.resume
3628 * p Thread.current[:name]
3629 * #=> nil if fiber-local
3630  *   #=> 2 if thread-local (The value 2 leaks outside of the meth method.)
3631 *
3632 * For thread-local variables, please see #thread_variable_get and
3633 * #thread_variable_set.
3634 *
3635 */
3636
3637static VALUE
3638rb_thread_aref(VALUE thread, VALUE key)
3639{
3640 ID id = rb_check_id(&key);
3641 if (!id) return Qnil;
3642 return rb_thread_local_aref(thread, id);
3643}
3644
3645/*
3646 * call-seq:
3647 * thr.fetch(sym) -> obj
3648 * thr.fetch(sym) { } -> obj
3649 * thr.fetch(sym, default) -> obj
3650 *
3651 * Returns a fiber-local for the given key. If the key can't be
3652 * found, there are several options: With no other arguments, it will
3653 * raise a KeyError exception; if <i>default</i> is given, then that
3654 * will be returned; if the optional code block is specified, then
3655 * that will be run and its result returned. See Thread#[] and
3656 * Hash#fetch.
3657 */
3658static VALUE
3659rb_thread_fetch(int argc, VALUE *argv, VALUE self)
3660{
3661 VALUE key, val;
3662 ID id;
3663 rb_thread_t *target_th = rb_thread_ptr(self);
3664 int block_given;
3665
3666 rb_check_arity(argc, 1, 2);
3667 key = argv[0];
3668
3669 block_given = rb_block_given_p();
3670 if (block_given && argc == 2) {
3671 rb_warn("block supersedes default value argument");
3672 }
3673
3674 id = rb_check_id(&key);
3675
3676 if (id == recursive_key) {
3677 return target_th->ec->local_storage_recursive_hash;
3678 }
3679 else if (id && target_th->ec->local_storage &&
3680 rb_id_table_lookup(target_th->ec->local_storage, id, &val)) {
3681 return val;
3682 }
3683 else if (block_given) {
3684 return rb_yield(key);
3685 }
3686 else if (argc == 1) {
3687 rb_key_err_raise(rb_sprintf("key not found: %+"PRIsVALUE, key), self, key);
3688 }
3689 else {
3690 return argv[1];
3691 }
3692}
3693
3694static VALUE
3695threadptr_local_aset(rb_thread_t *th, ID id, VALUE val)
3696{
3697 if (id == recursive_key) {
3698 th->ec->local_storage_recursive_hash = val;
3699 return val;
3700 }
3701 else {
3702 struct rb_id_table *local_storage = th->ec->local_storage;
3703
3704 if (NIL_P(val)) {
3705 if (!local_storage) return Qnil;
3706 rb_id_table_delete(local_storage, id);
3707 return Qnil;
3708 }
3709 else {
3710 if (local_storage == NULL) {
3711 th->ec->local_storage = local_storage = rb_id_table_create(0);
3712 }
3713 rb_id_table_insert(local_storage, id, val);
3714 return val;
3715 }
3716 }
3717}
3718
3719 VALUE
3720 rb_thread_local_aset(VALUE thread, ID id, VALUE val)
3721{
3722 if (OBJ_FROZEN(thread)) {
3723 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3724 }
3725
3726 return threadptr_local_aset(rb_thread_ptr(thread), id, val);
3727}
3728
3729/*
3730 * call-seq:
3731 * thr[sym] = obj -> obj
3732 *
3733 * Attribute Assignment---Sets or creates the value of a fiber-local variable,
3734 * using either a symbol or a string.
3735 *
3736 * See also Thread#[].
3737 *
3738 * For thread-local variables, please see #thread_variable_set and
3739 * #thread_variable_get.
3740 */
3741
3742static VALUE
3743rb_thread_aset(VALUE self, VALUE id, VALUE val)
3744{
3745 return rb_thread_local_aset(self, rb_to_id(id), val);
3746}
3747
3748/*
3749 * call-seq:
3750 * thr.thread_variable_get(key) -> obj or nil
3751 *
3752 * Returns the value of a thread local variable that has been set. Note that
3753  * these are different from fiber-local values. For fiber-local values,
3754 * please see Thread#[] and Thread#[]=.
3755 *
3756 * Thread local values are carried along with threads, and do not respect
3757 * fibers. For example:
3758 *
3759 * Thread.new {
3760 * Thread.current.thread_variable_set("foo", "bar") # set a thread local
3761 * Thread.current["foo"] = "bar" # set a fiber local
3762 *
3763 * Fiber.new {
3764 * Fiber.yield [
3765 * Thread.current.thread_variable_get("foo"), # get the thread local
3766 * Thread.current["foo"], # get the fiber local
3767 * ]
3768 * }.resume
3769 * }.join.value # => ['bar', nil]
3770 *
3771 * The value "bar" is returned for the thread local, where nil is returned
3772 * for the fiber local. The fiber is executed in the same thread, so the
3773 * thread local values are available.
3774 */
3775
3776static VALUE
3777rb_thread_variable_get(VALUE thread, VALUE key)
3778{
3779 VALUE locals;
3780 VALUE symbol = rb_to_symbol(key);
3781
3782 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3783 return Qnil;
3784 }
3785 locals = rb_thread_local_storage(thread);
3786 return rb_hash_aref(locals, symbol);
3787}
3788
3789/*
3790 * call-seq:
3791 * thr.thread_variable_set(key, value)
3792 *
3793 * Sets a thread local with +key+ to +value+. Note that these are local to
3794 * threads, and not to fibers. Please see Thread#thread_variable_get and
3795 * Thread#[] for more information.
3796 */
3797
3798static VALUE
3799rb_thread_variable_set(VALUE thread, VALUE key, VALUE val)
3800{
3801 VALUE locals;
3802
3803 if (OBJ_FROZEN(thread)) {
3804 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3805 }
3806
3807 locals = rb_thread_local_storage(thread);
3808 return rb_hash_aset(locals, rb_to_symbol(key), val);
3809}
3810
3811/*
3812 * call-seq:
3813 * thr.key?(sym) -> true or false
3814 *
3815 * Returns +true+ if the given string (or symbol) exists as a fiber-local
3816 * variable.
3817 *
3818 * me = Thread.current
3819 * me[:oliver] = "a"
3820 * me.key?(:oliver) #=> true
3821 * me.key?(:stanley) #=> false
3822 */
3823
3824static VALUE
3825rb_thread_key_p(VALUE self, VALUE key)
3826{
3827 VALUE val;
3828 ID id = rb_check_id(&key);
3829 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3830
3831 if (!id || local_storage == NULL) {
3832 return Qfalse;
3833 }
3834 return RBOOL(rb_id_table_lookup(local_storage, id, &val));
3835}
3836
3837static enum rb_id_table_iterator_result
3838thread_keys_i(ID key, VALUE value, void *ary)
3839{
3840 rb_ary_push((VALUE)ary, ID2SYM(key));
3841 return ID_TABLE_CONTINUE;
3842}
3843
3844 int
3845 rb_thread_alone(void)
3846{
3847 // TODO
3848 return rb_ractor_living_thread_num(GET_RACTOR()) == 1;
3849}
3850
3851/*
3852 * call-seq:
3853 * thr.keys -> array
3854 *
3855 * Returns an array of the names of the fiber-local variables (as Symbols).
3856 *
3857 * thr = Thread.new do
3858 * Thread.current[:cat] = 'meow'
3859 * Thread.current["dog"] = 'woof'
3860 * end
3861 * thr.join #=> #<Thread:0x401b3f10 dead>
3862 * thr.keys #=> [:dog, :cat]
3863 */
3864
3865static VALUE
3866rb_thread_keys(VALUE self)
3867{
3868 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3869 VALUE ary = rb_ary_new();
3870
3871 if (local_storage) {
3872 rb_id_table_foreach(local_storage, thread_keys_i, (void *)ary);
3873 }
3874 return ary;
3875}
3876
3877static int
3878keys_i(VALUE key, VALUE value, VALUE ary)
3879{
3880 rb_ary_push(ary, key);
3881 return ST_CONTINUE;
3882}
3883
3884/*
3885 * call-seq:
3886 * thr.thread_variables -> array
3887 *
3888 * Returns an array of the names of the thread-local variables (as Symbols).
3889 *
3890 * thr = Thread.new do
3891 * Thread.current.thread_variable_set(:cat, 'meow')
3892 * Thread.current.thread_variable_set("dog", 'woof')
3893 * end
3894 * thr.join #=> #<Thread:0x401b3f10 dead>
3895 * thr.thread_variables #=> [:dog, :cat]
3896 *
3897 * Note that these are not fiber local variables. Please see Thread#[] and
3898 * Thread#thread_variable_get for more details.
3899 */
3900
3901static VALUE
3902rb_thread_variables(VALUE thread)
3903{
3904 VALUE locals;
3905 VALUE ary;
3906
3907 ary = rb_ary_new();
3908 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3909 return ary;
3910 }
3911 locals = rb_thread_local_storage(thread);
3912 rb_hash_foreach(locals, keys_i, ary);
3913
3914 return ary;
3915}
3916
3917/*
3918 * call-seq:
3919 * thr.thread_variable?(key) -> true or false
3920 *
3921 * Returns +true+ if the given string (or symbol) exists as a thread-local
3922 * variable.
3923 *
3924 * me = Thread.current
3925 * me.thread_variable_set(:oliver, "a")
3926 * me.thread_variable?(:oliver) #=> true
3927 * me.thread_variable?(:stanley) #=> false
3928 *
3929 * Note that these are not fiber local variables. Please see Thread#[] and
3930 * Thread#thread_variable_get for more details.
3931 */
3932
3933static VALUE
3934rb_thread_variable_p(VALUE thread, VALUE key)
3935{
3936 VALUE locals;
3937 VALUE symbol = rb_to_symbol(key);
3938
3939 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3940 return Qfalse;
3941 }
3942 locals = rb_thread_local_storage(thread);
3943
3944 return RBOOL(rb_hash_lookup(locals, symbol) != Qnil);
3945}
3946
3947/*
3948 * call-seq:
3949 * thr.priority -> integer
3950 *
3951  * Returns the priority of <i>thr</i>. The default is inherited from the
3952  * current thread which created the new thread, or zero for the
3953  * initial main thread; higher-priority threads will run more frequently
3954  * than lower-priority threads (but lower-priority threads can also run).
3955 *
3956  * This is just a hint for the Ruby thread scheduler. It may be ignored on
3957  * some platforms.
3958 *
3959 * Thread.current.priority #=> 0
3960 */
3961
3962static VALUE
3963rb_thread_priority(VALUE thread)
3964{
3965 return INT2NUM(rb_thread_ptr(thread)->priority);
3966}
3967
3968
3969/*
3970 * call-seq:
3971 * thr.priority= integer -> thr
3972 *
3973 * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
3974 * will run more frequently than lower-priority threads (but lower-priority
3975 * threads can also run).
3976 *
3977  * This is just a hint for the Ruby thread scheduler. It may be ignored on
3978  * some platforms.
3979 *
3980 * count1 = count2 = 0
3981 * a = Thread.new do
3982 * loop { count1 += 1 }
3983 * end
3984 * a.priority = -1
3985 *
3986 * b = Thread.new do
3987 * loop { count2 += 1 }
3988 * end
3989 * b.priority = -2
3990 * sleep 1 #=> 1
3991 * count1 #=> 622504
3992 * count2 #=> 5832
3993 */
3994
3995static VALUE
3996rb_thread_priority_set(VALUE thread, VALUE prio)
3997{
3998 rb_thread_t *target_th = rb_thread_ptr(thread);
3999 int priority;
4000
4001#if USE_NATIVE_THREAD_PRIORITY
4002 target_th->priority = NUM2INT(prio);
4003 native_thread_apply_priority(th);
4004#else
4005 priority = NUM2INT(prio);
4006 if (priority > RUBY_THREAD_PRIORITY_MAX) {
4007 priority = RUBY_THREAD_PRIORITY_MAX;
4008 }
4009 else if (priority < RUBY_THREAD_PRIORITY_MIN) {
4010 priority = RUBY_THREAD_PRIORITY_MIN;
4011 }
4012 target_th->priority = (int8_t)priority;
4013#endif
4014 return INT2NUM(target_th->priority);
4015}
4016
4017/* for IO */
4018
4019#if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
4020
4021/*
4022 * several Unix platforms support file descriptors bigger than FD_SETSIZE
4023 * in select(2) system call.
4024 *
4025 * - Linux 2.2.12 (?)
4026 * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
4027 * select(2) documents how to allocate fd_set dynamically.
4028 * http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
4029 * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
4030 * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
4031 * select(2) documents how to allocate fd_set dynamically.
4032 * http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
4033 * - Solaris 8 has select_large_fdset
4034 * - Mac OS X 10.7 (Lion)
4035  *   select(2) returns EINVAL if nfds is greater than FD_SETSIZE and
4036 * _DARWIN_UNLIMITED_SELECT (or _DARWIN_C_SOURCE) isn't defined.
4037 * https://developer.apple.com/library/archive/releasenotes/Darwin/SymbolVariantsRelNotes/index.html
4038 *
4039 * When fd_set is not big enough to hold big file descriptors,
4040 * it should be allocated dynamically.
4041  * Note that this assumes fd_set is structured as a bitmap.
4042  *
4043  * rb_fd_init allocates the memory.
4044  * rb_fd_term frees the memory.
4045  * rb_fd_set may re-allocate the bitmap.
4046 *
4047 * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
4048 */
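/*
 * A minimal usage sketch of the rb_fdset_t API defined below (illustrative
 * only; the descriptor and timeout values are hypothetical):
 *
 *   rb_fdset_t fds;
 *   struct timeval tv = { 1, 0 };  // one-second timeout
 *   int fd = 0;                    // e.g. stdin; may be >= FD_SETSIZE
 *
 *   rb_fd_init(&fds);
 *   rb_fd_set(fd, &fds);           // grows the bitmap as needed
 *   if (rb_fd_select(fd + 1, &fds, NULL, NULL, &tv) > 0 &&
 *       rb_fd_isset(fd, &fds)) {
 *       // fd became readable within the timeout
 *   }
 *   rb_fd_term(&fds);              // release the heap-allocated bitmap
 */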
4049
4050void
4051 rb_fd_init(rb_fdset_t *fds)
4052 {
4053 fds->maxfd = 0;
4054 fds->fdset = ALLOC(fd_set);
4055 FD_ZERO(fds->fdset);
4056}
4057
4058void
4059rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
4060{
4061 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
4062
4063 if (size < sizeof(fd_set))
4064 size = sizeof(fd_set);
4065 dst->maxfd = src->maxfd;
4066 dst->fdset = xmalloc(size);
4067 memcpy(dst->fdset, src->fdset, size);
4068}
4069
4070void
4071 rb_fd_term(rb_fdset_t *fds)
4072 {
4073 xfree(fds->fdset);
4074 fds->maxfd = 0;
4075 fds->fdset = 0;
4076}
4077
4078void
4079 rb_fd_zero(rb_fdset_t *fds)
4080 {
4081 if (fds->fdset)
4082 MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
4083}
4084
4085static void
4086rb_fd_resize(int n, rb_fdset_t *fds)
4087{
4088 size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
4089 size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
4090
4091 if (m < sizeof(fd_set)) m = sizeof(fd_set);
4092 if (o < sizeof(fd_set)) o = sizeof(fd_set);
4093
4094 if (m > o) {
4095 fds->fdset = xrealloc(fds->fdset, m);
4096 memset((char *)fds->fdset + o, 0, m - o);
4097 }
4098 if (n >= fds->maxfd) fds->maxfd = n + 1;
4099}
4100
4101void
4102rb_fd_set(int n, rb_fdset_t *fds)
4103{
4104 rb_fd_resize(n, fds);
4105 FD_SET(n, fds->fdset);
4106}
4107
4108void
4109rb_fd_clr(int n, rb_fdset_t *fds)
4110{
4111 if (n >= fds->maxfd) return;
4112 FD_CLR(n, fds->fdset);
4113}
4114
4115int
4116rb_fd_isset(int n, const rb_fdset_t *fds)
4117{
4118 if (n >= fds->maxfd) return 0;
4119 return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
4120}
4121
4122void
4123rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
4124{
4125 size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);
4126
4127 if (size < sizeof(fd_set)) size = sizeof(fd_set);
4128 dst->maxfd = max;
4129 dst->fdset = xrealloc(dst->fdset, size);
4130 memcpy(dst->fdset, src, size);
4131}
4132
4133void
4134rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
4135{
4136 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
4137
4138 if (size < sizeof(fd_set))
4139 size = sizeof(fd_set);
4140 dst->maxfd = src->maxfd;
4141 dst->fdset = xrealloc(dst->fdset, size);
4142 memcpy(dst->fdset, src->fdset, size);
4143}
4144
4145int
4146rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
4147{
4148 fd_set *r = NULL, *w = NULL, *e = NULL;
4149 if (readfds) {
4150 rb_fd_resize(n - 1, readfds);
4151 r = rb_fd_ptr(readfds);
4152 }
4153 if (writefds) {
4154 rb_fd_resize(n - 1, writefds);
4155 w = rb_fd_ptr(writefds);
4156 }
4157 if (exceptfds) {
4158 rb_fd_resize(n - 1, exceptfds);
4159 e = rb_fd_ptr(exceptfds);
4160 }
4161 return select(n, r, w, e, timeout);
4162}
4163
4164#define rb_fd_no_init(fds) ((void)((fds)->fdset = 0), (void)((fds)->maxfd = 0))
4165
4166#undef FD_ZERO
4167#undef FD_SET
4168#undef FD_CLR
4169#undef FD_ISSET
4170
4171#define FD_ZERO(f) rb_fd_zero(f)
4172#define FD_SET(i, f) rb_fd_set((i), (f))
4173#define FD_CLR(i, f) rb_fd_clr((i), (f))
4174#define FD_ISSET(i, f) rb_fd_isset((i), (f))
4175
4176#elif defined(_WIN32)
4177
4178void
4179rb_fd_init(rb_fdset_t *set)
4180{
4181 set->capa = FD_SETSIZE;
4182 set->fdset = ALLOC(fd_set);
4183 FD_ZERO(set->fdset);
4184}
4185
4186void
4187rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
4188{
4189 rb_fd_init(dst);
4190 rb_fd_dup(dst, src);
4191}
4192
4193void
4194rb_fd_term(rb_fdset_t *set)
4195{
4196 xfree(set->fdset);
4197 set->fdset = NULL;
4198 set->capa = 0;
4199}
4200
4201void
4202rb_fd_set(int fd, rb_fdset_t *set)
4203{
4204 unsigned int i;
4205 SOCKET s = rb_w32_get_osfhandle(fd);
4206
4207 for (i = 0; i < set->fdset->fd_count; i++) {
4208 if (set->fdset->fd_array[i] == s) {
4209 return;
4210 }
4211 }
4212 if (set->fdset->fd_count >= (unsigned)set->capa) {
4213 set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
4214 set->fdset =
4215 rb_xrealloc_mul_add(
4216 set->fdset, set->capa, sizeof(SOCKET), sizeof(unsigned int));
4217 }
4218 set->fdset->fd_array[set->fdset->fd_count++] = s;
4219}
4220
4221#undef FD_ZERO
4222#undef FD_SET
4223#undef FD_CLR
4224#undef FD_ISSET
4225
4226#define FD_ZERO(f) rb_fd_zero(f)
4227#define FD_SET(i, f) rb_fd_set((i), (f))
4228#define FD_CLR(i, f) rb_fd_clr((i), (f))
4229#define FD_ISSET(i, f) rb_fd_isset((i), (f))
4230
4231#define rb_fd_no_init(fds) (void)((fds)->fdset = 0)
4232
4233#endif
4234
4235#ifndef rb_fd_no_init
4236#define rb_fd_no_init(fds) (void)(fds)
4237#endif
4238
4239static int
4240wait_retryable(volatile int *result, int errnum, rb_hrtime_t *rel, rb_hrtime_t end)
4241{
4242 int r = *result;
4243 if (r < 0) {
4244 switch (errnum) {
4245 case EINTR:
4246#ifdef ERESTART
4247 case ERESTART:
4248#endif
4249 *result = 0;
4250 if (rel && hrtime_update_expire(rel, end)) {
4251 *rel = 0;
4252 }
4253 return TRUE;
4254 }
4255 return FALSE;
4256 }
4257 else if (r == 0) {
4258 /* check for spurious wakeup */
4259 if (rel) {
4260 return !hrtime_update_expire(rel, end);
4261 }
4262 return TRUE;
4263 }
4264 return FALSE;
4265}
4267struct select_set {
4268 int max;
4269 rb_thread_t *th;
4270 rb_fdset_t *rset;
4271 rb_fdset_t *wset;
4272 rb_fdset_t *eset;
4273 rb_fdset_t orig_rset;
4274 rb_fdset_t orig_wset;
4275 rb_fdset_t orig_eset;
4276 struct timeval *timeout;
4277};
4278
4279static VALUE
4280select_set_free(VALUE p)
4281{
4282 struct select_set *set = (struct select_set *)p;
4283
4284 rb_fd_term(&set->orig_rset);
4285 rb_fd_term(&set->orig_wset);
4286 rb_fd_term(&set->orig_eset);
4287
4288 return Qfalse;
4289}
4290
4291static VALUE
4292do_select(VALUE p)
4293{
4294 struct select_set *set = (struct select_set *)p;
4295 volatile int result = 0;
4296 int lerrno;
4297 rb_hrtime_t *to, rel, end = 0;
4298
4299 timeout_prepare(&to, &rel, &end, set->timeout);
4300 volatile rb_hrtime_t endtime = end;
4301#define restore_fdset(dst, src) \
4302 ((dst) ? rb_fd_dup(dst, src) : (void)0)
4303#define do_select_update() \
4304 (restore_fdset(set->rset, &set->orig_rset), \
4305 restore_fdset(set->wset, &set->orig_wset), \
4306 restore_fdset(set->eset, &set->orig_eset), \
4307 TRUE)
4308
4309 do {
4310 lerrno = 0;
4311
4312 BLOCKING_REGION(set->th, {
4313 struct timeval tv;
4314
4315 if (!RUBY_VM_INTERRUPTED(set->th->ec)) {
4316 result = native_fd_select(set->max,
4317 set->rset, set->wset, set->eset,
4318 rb_hrtime2timeval(&tv, to), set->th);
4319 if (result < 0) lerrno = errno;
4320 }
4321 }, ubf_select, set->th, TRUE);
4322
4323 RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec); /* may raise */
4324 } while (wait_retryable(&result, lerrno, to, endtime) && do_select_update());
4325
4326 if (result < 0) {
4327 errno = lerrno;
4328 }
4329
4330 return (VALUE)result;
4331}
4332
4333int
4334rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
4335 struct timeval *timeout)
4336{
4337 struct select_set set;
4338
4339 set.th = GET_THREAD();
4340 RUBY_VM_CHECK_INTS_BLOCKING(set.th->ec);
4341 set.max = max;
4342 set.rset = read;
4343 set.wset = write;
4344 set.eset = except;
4345 set.timeout = timeout;
4346
4347 if (!set.rset && !set.wset && !set.eset) {
4348 if (!timeout) {
4349            rb_thread_sleep_forever();
4350            return 0;
4351 }
4352 rb_thread_wait_for(*timeout);
4353 return 0;
4354 }
4355
4356#define fd_init_copy(f) do { \
4357 if (set.f) { \
4358 rb_fd_resize(set.max - 1, set.f); \
4359 if (&set.orig_##f != set.f) { /* sigwait_fd */ \
4360 rb_fd_init_copy(&set.orig_##f, set.f); \
4361 } \
4362 } \
4363 else { \
4364 rb_fd_no_init(&set.orig_##f); \
4365 } \
4366 } while (0)
4367 fd_init_copy(rset);
4368 fd_init_copy(wset);
4369 fd_init_copy(eset);
4370#undef fd_init_copy
4371
4372 return (int)rb_ensure(do_select, (VALUE)&set, select_set_free, (VALUE)&set);
4373}
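/*
 * Usage sketch (illustrative only): waiting for one of two descriptors to
 * become readable, with a one-second timeout. Unlike a raw select(2),
 * rb_thread_fd_select() releases the GVL while blocking, checks interrupts,
 * and retries on EINTR (see wait_retryable() above):
 *
 *     rb_fdset_t rfds;
 *     rb_fd_init(&rfds);
 *     rb_fd_set(fd1, &rfds);
 *     rb_fd_set(fd2, &rfds);
 *     struct timeval tv = { 1, 0 };
 *     int max = (fd1 > fd2 ? fd1 : fd2) + 1;
 *     int n = rb_thread_fd_select(max, &rfds, NULL, NULL, &tv);
 *     if (n > 0 && rb_fd_isset(fd1, &rfds)) { ... }
 *     rb_fd_term(&rfds);
 */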
4374
4375#ifdef USE_POLL
4376
4377/* The same as the Linux kernel. TODO: make a platform-independent definition. */
4378#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
4379#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
4380#define POLLEX_SET (POLLPRI)
4381
4382#ifndef POLLERR_SET /* defined for FreeBSD for now */
4383# define POLLERR_SET (0)
4384#endif
4385
4386static int
4387wait_for_single_fd_blocking_region(rb_thread_t *th, struct pollfd *fds, nfds_t nfds,
4388 rb_hrtime_t *const to, volatile int *lerrno)
4389{
4390 struct timespec ts;
4391 volatile int result = 0;
4392
4393 *lerrno = 0;
4394 BLOCKING_REGION(th, {
4395 if (!RUBY_VM_INTERRUPTED(th->ec)) {
4396 result = ppoll(fds, nfds, rb_hrtime2timespec(&ts, to), 0);
4397 if (result < 0) *lerrno = errno;
4398 }
4399 }, ubf_select, th, TRUE);
4400 return result;
4401}
4402
4403/*
4404 * returns a mask of events
4405 */
4406int
4407rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
4408{
4409 struct pollfd fds[1] = {{
4410 .fd = fd,
4411 .events = (short)events,
4412 .revents = 0,
4413 }};
4414 volatile int result = 0;
4415 nfds_t nfds;
4416 struct waiting_fd wfd;
4417 enum ruby_tag_type state;
4418 volatile int lerrno;
4419
4420 rb_execution_context_t *ec = GET_EC();
4421 rb_thread_t *th = rb_ec_thread_ptr(ec);
4422
4423 thread_io_setup_wfd(th, fd, &wfd);
4424
4425 if (timeout == NULL && thread_io_wait_events(th, fd, events, NULL)) {
4426        // the requested events are already ready
4427 state = 0;
4428 fds[0].revents = events;
4429 errno = 0;
4430 }
4431 else {
4432 EC_PUSH_TAG(wfd.th->ec);
4433 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
4434 rb_hrtime_t *to, rel, end = 0;
4435 RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
4436 timeout_prepare(&to, &rel, &end, timeout);
4437 do {
4438 nfds = numberof(fds);
4439 result = wait_for_single_fd_blocking_region(wfd.th, fds, nfds, to, &lerrno);
4440
4441 RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
4442 } while (wait_retryable(&result, lerrno, to, end));
4443 }
4444 EC_POP_TAG();
4445 }
4446
4447 thread_io_wake_pending_closer(&wfd);
4448
4449 if (state) {
4450 EC_JUMP_TAG(wfd.th->ec, state);
4451 }
4452
4453 if (result < 0) {
4454 errno = lerrno;
4455 return -1;
4456 }
4457
4458 if (fds[0].revents & POLLNVAL) {
4459 errno = EBADF;
4460 return -1;
4461 }
4462
4463 /*
4464 * POLLIN and POLLOUT have different meanings from select(2)'s read/write bits.
4465 * Therefore we need to fix it up.
4466 */
4467 result = 0;
4468 if (fds[0].revents & POLLIN_SET)
4469 result |= RB_WAITFD_IN;
4470 if (fds[0].revents & POLLOUT_SET)
4471 result |= RB_WAITFD_OUT;
4472 if (fds[0].revents & POLLEX_SET)
4473 result |= RB_WAITFD_PRI;
4474
4475 /* all requested events are ready if there is an error */
4476 if (fds[0].revents & POLLERR_SET)
4477 result |= events;
4478
4479 return result;
4480}
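/*
 * Usage sketch (illustrative only): the return value is a mask of
 * RB_WAITFD_* bits, or -1 with errno set on failure. handle_read() and
 * handle_write() are hypothetical callbacks:
 *
 *     int revents = rb_thread_wait_for_single_fd(fd, RB_WAITFD_IN | RB_WAITFD_OUT, NULL);
 *     if (revents < 0) rb_sys_fail("rb_thread_wait_for_single_fd");
 *     if (revents & RB_WAITFD_IN)  handle_read(fd);
 *     if (revents & RB_WAITFD_OUT) handle_write(fd);
 */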
4481#else /* ! USE_POLL - implement rb_io_poll_fd() using select() */
4482struct select_args {
4483 union {
4484 int fd;
4485 int error;
4486 } as;
4487 rb_fdset_t *read;
4488 rb_fdset_t *write;
4489 rb_fdset_t *except;
4490 struct waiting_fd wfd;
4491 struct timeval *tv;
4492};
4493
4494static VALUE
4495select_single(VALUE ptr)
4496{
4497 struct select_args *args = (struct select_args *)ptr;
4498 int r;
4499
4500 r = rb_thread_fd_select(args->as.fd + 1,
4501 args->read, args->write, args->except, args->tv);
4502 if (r == -1)
4503 args->as.error = errno;
4504 if (r > 0) {
4505 r = 0;
4506 if (args->read && rb_fd_isset(args->as.fd, args->read))
4507 r |= RB_WAITFD_IN;
4508 if (args->write && rb_fd_isset(args->as.fd, args->write))
4509 r |= RB_WAITFD_OUT;
4510 if (args->except && rb_fd_isset(args->as.fd, args->except))
4511 r |= RB_WAITFD_PRI;
4512 }
4513 return (VALUE)r;
4514}
4515
4516static VALUE
4517select_single_cleanup(VALUE ptr)
4518{
4519 struct select_args *args = (struct select_args *)ptr;
4520
4521 thread_io_wake_pending_closer(&args->wfd);
4522 if (args->read) rb_fd_term(args->read);
4523 if (args->write) rb_fd_term(args->write);
4524 if (args->except) rb_fd_term(args->except);
4525
4526 return (VALUE)-1;
4527}
4528
4529static rb_fdset_t *
4530init_set_fd(int fd, rb_fdset_t *fds)
4531{
4532 if (fd < 0) {
4533 return 0;
4534 }
4535 rb_fd_init(fds);
4536 rb_fd_set(fd, fds);
4537
4538 return fds;
4539}
4540
4541int
4542rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
4543{
4544 rb_fdset_t rfds, wfds, efds;
4545 struct select_args args;
4546 int r;
4547 VALUE ptr = (VALUE)&args;
4548 rb_execution_context_t *ec = GET_EC();
4549 rb_thread_t *th = rb_ec_thread_ptr(ec);
4550
4551 args.as.fd = fd;
4552 args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
4553 args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
4554 args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
4555 args.tv = timeout;
4556 thread_io_setup_wfd(th, fd, &args.wfd);
4557
4558 r = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
4559 if (r == -1)
4560 errno = args.as.error;
4561
4562 return r;
4563}
4564#endif /* ! USE_POLL */
4565
4566/*
4567 * for GC
4568 */
4569
4570#ifdef USE_CONSERVATIVE_STACK_END
4571void
4572rb_gc_set_stack_end(VALUE **stack_end_p)
4573{
4574 VALUE stack_end;
4575COMPILER_WARNING_PUSH
4576#if __has_warning("-Wdangling-pointer")
4577COMPILER_WARNING_IGNORED(-Wdangling-pointer);
4578#endif
4579 *stack_end_p = &stack_end;
4580COMPILER_WARNING_POP
4581}
4582#endif
4583
4584/*
4585 *
4586 */
4587
4588void
4589rb_threadptr_check_signal(rb_thread_t *mth)
4590{
4591 /* mth must be main_thread */
4592 if (rb_signal_buff_size() > 0) {
4593 /* wakeup main thread */
4594 threadptr_trap_interrupt(mth);
4595 }
4596}
4597
4598static void
4599async_bug_fd(const char *mesg, int errno_arg, int fd)
4600{
4601 char buff[64];
4602 size_t n = strlcpy(buff, mesg, sizeof(buff));
4603 if (n < sizeof(buff)-3) {
4604 ruby_snprintf(buff+n, sizeof(buff)-n, "(%d)", fd);
4605 }
4606 rb_async_bug_errno(buff, errno_arg);
4607}
4608
4609/* VM-dependent API is not available for this function */
4610static int
4611consume_communication_pipe(int fd)
4612{
4613#if USE_EVENTFD
4614 uint64_t buff[1];
4615#else
4616    /* the buffer can be shared because nothing else refers to it. */
4617 static char buff[1024];
4618#endif
4619 ssize_t result;
4620 int ret = FALSE; /* for rb_sigwait_sleep */
4621
4622 while (1) {
4623 result = read(fd, buff, sizeof(buff));
4624#if USE_EVENTFD
4625        RUBY_DEBUG_LOG("result:%d buff:%lu", (int)result, (unsigned long)buff[0]);
4626#else
4627 RUBY_DEBUG_LOG("result:%d", (int)result);
4628#endif
4629 if (result > 0) {
4630 ret = TRUE;
4631 if (USE_EVENTFD || result < (ssize_t)sizeof(buff)) {
4632 return ret;
4633 }
4634 }
4635 else if (result == 0) {
4636 return ret;
4637 }
4638 else if (result < 0) {
4639 int e = errno;
4640 switch (e) {
4641 case EINTR:
4642 continue; /* retry */
4643 case EAGAIN:
4644#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
4645 case EWOULDBLOCK:
4646#endif
4647 return ret;
4648 default:
4649 async_bug_fd("consume_communication_pipe: read", e, fd);
4650 }
4651 }
4652 }
4653}
4654
4655void
4656rb_thread_stop_timer_thread(void)
4657{
4658 if (TIMER_THREAD_CREATED_P() && native_stop_timer_thread()) {
4659 native_reset_timer_thread();
4660 }
4661}
4662
4663void
4664rb_thread_reset_timer_thread(void)
4665{
4666 native_reset_timer_thread();
4667}
4668
4669void
4670rb_thread_start_timer_thread(void)
4671{
4672 system_working = 1;
4673 rb_thread_create_timer_thread();
4674}
4675
4676static int
4677clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
4678{
4679 int i;
4680 VALUE coverage = (VALUE)val;
4681 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
4682 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
4683
4684 if (lines) {
4685 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
4686 rb_ary_clear(lines);
4687 }
4688 else {
4689 int i;
4690 for (i = 0; i < RARRAY_LEN(lines); i++) {
4691 if (RARRAY_AREF(lines, i) != Qnil)
4692 RARRAY_ASET(lines, i, INT2FIX(0));
4693 }
4694 }
4695 }
4696 if (branches) {
4697 VALUE counters = RARRAY_AREF(branches, 1);
4698 for (i = 0; i < RARRAY_LEN(counters); i++) {
4699 RARRAY_ASET(counters, i, INT2FIX(0));
4700 }
4701 }
4702
4703 return ST_CONTINUE;
4704}
4705
4706void
4707rb_clear_coverages(void)
4708{
4709 VALUE coverages = rb_get_coverages();
4710 if (RTEST(coverages)) {
4711 rb_hash_foreach(coverages, clear_coverage_i, 0);
4712 }
4713}
4714
4715#if defined(HAVE_WORKING_FORK)
4716
4717static void
4718rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const rb_thread_t *))
4719{
4720 rb_thread_t *i = 0;
4721 rb_vm_t *vm = th->vm;
4722 rb_ractor_t *r = th->ractor;
4723 vm->ractor.main_ractor = r;
4724 vm->ractor.main_thread = th;
4725 r->threads.main = th;
4726 r->status_ = ractor_created;
4727
4728 thread_sched_atfork(TH_SCHED(th));
4729 ubf_list_atfork();
4730
4731    // OK. Only this thread accesses the thread lists now:
4732 ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
4733 ccan_list_for_each(&r->threads.set, i, lt_node) {
4734 atfork(i, th);
4735 }
4736 }
4737 rb_vm_living_threads_init(vm);
4738
4739 rb_ractor_atfork(vm, th);
4740 rb_vm_postponed_job_atfork();
4741
4742 /* may be held by any thread in parent */
4743 rb_native_mutex_initialize(&th->interrupt_lock);
4744 ccan_list_head_init(&th->interrupt_exec_tasks);
4745
4746 vm->fork_gen++;
4747 rb_ractor_sleeper_threads_clear(th->ractor);
4748 rb_clear_coverages();
4749
4750    // restart timer thread (timer threads access `vm->waitpid_lock` and so on).
4751 rb_thread_reset_timer_thread();
4752 rb_thread_start_timer_thread();
4753
4754 VM_ASSERT(vm->ractor.blocking_cnt == 0);
4755 VM_ASSERT(vm->ractor.cnt == 1);
4756}
4757
4758static void
4759terminate_atfork_i(rb_thread_t *th, const rb_thread_t *current_th)
4760{
4761 if (th != current_th) {
4762 rb_native_mutex_initialize(&th->interrupt_lock);
4763 rb_mutex_abandon_keeping_mutexes(th);
4764 rb_mutex_abandon_locking_mutex(th);
4765 thread_cleanup_func(th, TRUE);
4766 }
4767}
4768
4769void rb_fiber_atfork(rb_thread_t *);
4770void
4771rb_thread_atfork(void)
4772{
4773 rb_thread_t *th = GET_THREAD();
4774 rb_threadptr_pending_interrupt_clear(th);
4775 rb_thread_atfork_internal(th, terminate_atfork_i);
4776 th->join_list = NULL;
4777 rb_fiber_atfork(th);
4778
4779    /* We don't want to reproduce CVE-2003-0900. */
4780    rb_reset_random_seed();
4781}
4782
4783static void
4784terminate_atfork_before_exec_i(rb_thread_t *th, const rb_thread_t *current_th)
4785{
4786 if (th != current_th) {
4787 thread_cleanup_func_before_exec(th);
4788 }
4789}
4790
4791void
4792rb_thread_atfork_before_exec(void)
4793{
4794 rb_thread_t *th = GET_THREAD();
4795 rb_thread_atfork_internal(th, terminate_atfork_before_exec_i);
4796}
4797#else
4798void
4799rb_thread_atfork(void)
4800{
4801}
4802
4803void
4804rb_thread_atfork_before_exec(void)
4805{
4806}
4807#endif
4809struct thgroup {
4810 int enclosed;
4811};
4812
4813static const rb_data_type_t thgroup_data_type = {
4814 "thgroup",
4815 {
4816 0,
4817        RUBY_TYPED_DEFAULT_FREE,
4818        NULL, // No external memory to report
4819 },
4820 0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
4821};
4822
4823/*
4824 * Document-class: ThreadGroup
4825 *
4826 * ThreadGroup provides a means of keeping track of a number of threads as a
4827 * group.
4828 *
4829 * A given Thread object can only belong to one ThreadGroup at a time; adding
4830 * a thread to a new group will remove it from any previous group.
4831 *
4832 * Newly created threads belong to the same group as the thread from which they
4833 * were created.
4834 */
4835
4836/*
4837 * Document-const: Default
4838 *
4839 * The default ThreadGroup created when Ruby starts; all Threads belong to it
4840 * by default.
4841 */
4842static VALUE
4843thgroup_s_alloc(VALUE klass)
4844{
4845 VALUE group;
4846 struct thgroup *data;
4847
4848 group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
4849 data->enclosed = 0;
4850
4851 return group;
4852}
4853
4854/*
4855 * call-seq:
4856 * thgrp.list -> array
4857 *
4858 * Returns an array of all existing Thread objects that belong to this group.
4859 *
4860 * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
4861 */
4862
4863static VALUE
4864thgroup_list(VALUE group)
4865{
4866 VALUE ary = rb_ary_new();
4867 rb_thread_t *th = 0;
4868 rb_ractor_t *r = GET_RACTOR();
4869
4870 ccan_list_for_each(&r->threads.set, th, lt_node) {
4871 if (th->thgroup == group) {
4872 rb_ary_push(ary, th->self);
4873 }
4874 }
4875 return ary;
4876}
4877
4878
4879/*
4880 * call-seq:
4881 * thgrp.enclose -> thgrp
4882 *
4883 * Prevents threads from being added to or removed from the receiving
4884 * ThreadGroup.
4885 *
4886 * New threads can still be started in an enclosed ThreadGroup.
4887 *
4888 * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
4889 * thr = Thread.new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
4890 * tg = ThreadGroup.new #=> #<ThreadGroup:0x402752d4>
4891 * tg.add thr
4892 * #=> ThreadError: can't move from the enclosed thread group
4893 */
4894
4895static VALUE
4896thgroup_enclose(VALUE group)
4897{
4898 struct thgroup *data;
4899
4900 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4901 data->enclosed = 1;
4902
4903 return group;
4904}
4905
4906
4907/*
4908 * call-seq:
4909 * thgrp.enclosed? -> true or false
4910 *
4911 * Returns +true+ if +thgrp+ is enclosed. See also ThreadGroup#enclose.
4912 */
4913
4914static VALUE
4915thgroup_enclosed_p(VALUE group)
4916{
4917 struct thgroup *data;
4918
4919 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4920 return RBOOL(data->enclosed);
4921}
4922
4923
4924/*
4925 * call-seq:
4926 * thgrp.add(thread) -> thgrp
4927 *
4928 * Adds the given +thread+ to this group, removing it from any other
4929 * group to which it may have previously been a member.
4930 *
4931 * puts "Initial group is #{ThreadGroup::Default.list}"
4932 * tg = ThreadGroup.new
4933 * t1 = Thread.new { sleep }
4934 * t2 = Thread.new { sleep }
4935 * puts "t1 is #{t1}"
4936 * puts "t2 is #{t2}"
4937 * tg.add(t1)
4938 * puts "Initial group now #{ThreadGroup::Default.list}"
4939 * puts "tg group now #{tg.list}"
4940 *
4941 * This will produce:
4942 *
4943 * Initial group is #<Thread:0x401bdf4c>
4944 * t1 is #<Thread:0x401b3c90>
4945 * t2 is #<Thread:0x401b3c18>
4946 * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
4947 * tg group now #<Thread:0x401b3c90>
4948 */
4949
4950static VALUE
4951thgroup_add(VALUE group, VALUE thread)
4952{
4953 rb_thread_t *target_th = rb_thread_ptr(thread);
4954 struct thgroup *data;
4955
4956 if (OBJ_FROZEN(group)) {
4957 rb_raise(rb_eThreadError, "can't move to the frozen thread group");
4958 }
4959 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4960 if (data->enclosed) {
4961 rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
4962 }
4963
4964 if (OBJ_FROZEN(target_th->thgroup)) {
4965 rb_raise(rb_eThreadError, "can't move from the frozen thread group");
4966 }
4967 TypedData_Get_Struct(target_th->thgroup, struct thgroup, &thgroup_data_type, data);
4968 if (data->enclosed) {
4969 rb_raise(rb_eThreadError,
4970 "can't move from the enclosed thread group");
4971 }
4972
4973 target_th->thgroup = group;
4974 return group;
4975}
4976
4977/*
4978 * Document-class: ThreadShield
4979 */
4980static void
4981thread_shield_mark(void *ptr)
4982{
4983 rb_gc_mark((VALUE)ptr);
4984}
4985
4986static const rb_data_type_t thread_shield_data_type = {
4987 "thread_shield",
4988 {thread_shield_mark, 0, 0,},
4989 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
4990};
4991
4992static VALUE
4993thread_shield_alloc(VALUE klass)
4994{
4995 return TypedData_Wrap_Struct(klass, &thread_shield_data_type, (void *)mutex_alloc(0));
4996}
4997
4998#define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
4999#define THREAD_SHIELD_WAITING_MASK (((FL_USER19-1)&~(FL_USER0-1))|FL_USER19)
5000#define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
5001#define THREAD_SHIELD_WAITING_MAX (THREAD_SHIELD_WAITING_MASK>>THREAD_SHIELD_WAITING_SHIFT)
5002STATIC_ASSERT(THREAD_SHIELD_WAITING_MAX, THREAD_SHIELD_WAITING_MAX <= UINT_MAX);
5003static inline unsigned int
5004rb_thread_shield_waiting(VALUE b)
5005{
5006 return ((RBASIC(b)->flags&THREAD_SHIELD_WAITING_MASK)>>THREAD_SHIELD_WAITING_SHIFT);
5007}
5008
5009static inline void
5010rb_thread_shield_waiting_inc(VALUE b)
5011{
5012 unsigned int w = rb_thread_shield_waiting(b);
5013 w++;
5014 if (w > THREAD_SHIELD_WAITING_MAX)
5015 rb_raise(rb_eRuntimeError, "waiting count overflow");
5016 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
5017 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
5018}
5019
5020static inline void
5021rb_thread_shield_waiting_dec(VALUE b)
5022{
5023 unsigned int w = rb_thread_shield_waiting(b);
5024 if (!w) rb_raise(rb_eRuntimeError, "waiting count underflow");
5025 w--;
5026 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
5027 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
5028}
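/*
 * Editor's note: the two helpers above keep the waiter count packed inside
 * the shield object's flag word. As a worked example (assuming the usual
 * FL_USHIFT of 12), a count of 3 is stored as (3 << 12) within the
 * FL_USER0..FL_USER19 bits selected by THREAD_SHIELD_WAITING_MASK, so the
 * counter needs no extra field or allocation.
 */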
5029
5030VALUE
5031rb_thread_shield_new(void)
5032{
5033 VALUE thread_shield = thread_shield_alloc(rb_cThreadShield);
5034 rb_mutex_lock((VALUE)DATA_PTR(thread_shield));
5035 return thread_shield;
5036}
5037
5038bool
5039rb_thread_shield_owned(VALUE self)
5040{
5041 VALUE mutex = GetThreadShieldPtr(self);
5042 if (!mutex) return false;
5043
5044 rb_mutex_t *m = mutex_ptr(mutex);
5045
5046 return m->fiber == GET_EC()->fiber_ptr;
5047}
5048
5049/*
5050 * Wait on a thread shield.
5051 *
5052 * Returns
5053 * true: acquired the thread shield
5054 * false: the thread shield was destroyed and no other threads waiting
5055 * nil: the thread shield was destroyed but still in use
5056 */
5057VALUE
5058rb_thread_shield_wait(VALUE self)
5059{
5060 VALUE mutex = GetThreadShieldPtr(self);
5061 rb_mutex_t *m;
5062
5063 if (!mutex) return Qfalse;
5064 m = mutex_ptr(mutex);
5065 if (m->fiber == GET_EC()->fiber_ptr) return Qnil;
5066 rb_thread_shield_waiting_inc(self);
5067 rb_mutex_lock(mutex);
5068 rb_thread_shield_waiting_dec(self);
5069 if (DATA_PTR(self)) return Qtrue;
5070 rb_mutex_unlock(mutex);
5071 return rb_thread_shield_waiting(self) > 0 ? Qnil : Qfalse;
5072}
5073
5074static VALUE
5075thread_shield_get_mutex(VALUE self)
5076{
5077 VALUE mutex = GetThreadShieldPtr(self);
5078 if (!mutex)
5079 rb_raise(rb_eThreadError, "destroyed thread shield - %p", (void *)self);
5080 return mutex;
5081}
5082
5083/*
5084 * Release a thread shield, and return true if it has waiting threads.
5085 */
5086VALUE
5087rb_thread_shield_release(VALUE self)
5088{
5089 VALUE mutex = thread_shield_get_mutex(self);
5090 rb_mutex_unlock(mutex);
5091 return RBOOL(rb_thread_shield_waiting(self) > 0);
5092}
5093
5094/*
5095 * Release and destroy a thread shield, and return true if it has waiting threads.
5096 */
5097VALUE
5098rb_thread_shield_destroy(VALUE self)
5099{
5100 VALUE mutex = thread_shield_get_mutex(self);
5101 DATA_PTR(self) = 0;
5102 rb_mutex_unlock(mutex);
5103 return RBOOL(rb_thread_shield_waiting(self) > 0);
5104}
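/*
 * Usage sketch (illustrative only): the creator holds the shield while
 * working; other threads calling rb_thread_shield_wait() block until the
 * owner releases or destroys it:
 *
 *     VALUE shield = rb_thread_shield_new();   // locked by the creator
 *     // ... publish `shield`; other threads call rb_thread_shield_wait(shield)
 *     rb_thread_shield_destroy(shield);        // wakes every waiter
 */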
5105
5106static VALUE
5107threadptr_recursive_hash(rb_thread_t *th)
5108{
5109 return th->ec->local_storage_recursive_hash;
5110}
5111
5112static void
5113threadptr_recursive_hash_set(rb_thread_t *th, VALUE hash)
5114{
5115 th->ec->local_storage_recursive_hash = hash;
5116}
5117
5119
5120/*
5121 * Returns the current "recursive list" used to detect recursion.
5122 * This list is a hash table, unique for the current thread and for
5123 * the current __callee__.
5124 */
5125
5126static VALUE
5127recursive_list_access(VALUE sym)
5128{
5129 rb_thread_t *th = GET_THREAD();
5130 VALUE hash = threadptr_recursive_hash(th);
5131 VALUE list;
5132 if (NIL_P(hash) || !RB_TYPE_P(hash, T_HASH)) {
5133 hash = rb_ident_hash_new();
5134 threadptr_recursive_hash_set(th, hash);
5135 list = Qnil;
5136 }
5137 else {
5138 list = rb_hash_aref(hash, sym);
5139 }
5140 if (NIL_P(list) || !RB_TYPE_P(list, T_HASH)) {
5141 list = rb_ident_hash_new();
5142 rb_hash_aset(hash, sym, list);
5143 }
5144 return list;
5145}
5146
5147/*
5148 * Returns true if and only if obj (or the pair <obj, paired_obj>) is already
5149 * in the recursion list.
5150 * Assumes the recursion list is valid.
5151 */
5152
5153static bool
5154recursive_check(VALUE list, VALUE obj, VALUE paired_obj_id)
5155{
5156#if SIZEOF_LONG == SIZEOF_VOIDP
5157 #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
5158#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
5159 #define OBJ_ID_EQL(obj_id, other) (RB_BIGNUM_TYPE_P((obj_id)) ? \
5160 rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
5161#endif
5162
5163 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5164 if (UNDEF_P(pair_list))
5165 return false;
5166 if (paired_obj_id) {
5167 if (!RB_TYPE_P(pair_list, T_HASH)) {
5168 if (!OBJ_ID_EQL(paired_obj_id, pair_list))
5169 return false;
5170 }
5171 else {
5172 if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
5173 return false;
5174 }
5175 }
5176 return true;
5177}
5178
5179/*
5180 * Pushes obj (or the pair <obj, paired_obj>) in the recursion list.
5181 * For a single obj, it sets list[obj] to Qtrue.
5182 * For a pair, it sets list[obj] to paired_obj_id if possible,
5183 * otherwise list[obj] becomes a hash like:
5184 * {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
5185 * Assumes the recursion list is valid.
5186 */
5187
5188static void
5189recursive_push(VALUE list, VALUE obj, VALUE paired_obj)
5190{
5191 VALUE pair_list;
5192
5193 if (!paired_obj) {
5194 rb_hash_aset(list, obj, Qtrue);
5195 }
5196 else if (UNDEF_P(pair_list = rb_hash_lookup2(list, obj, Qundef))) {
5197 rb_hash_aset(list, obj, paired_obj);
5198 }
5199 else {
5200 if (!RB_TYPE_P(pair_list, T_HASH)){
5201 VALUE other_paired_obj = pair_list;
5202 pair_list = rb_hash_new();
5203 rb_hash_aset(pair_list, other_paired_obj, Qtrue);
5204 rb_hash_aset(list, obj, pair_list);
5205 }
5206 rb_hash_aset(pair_list, paired_obj, Qtrue);
5207 }
5208}
5209
5210/*
5211 * Pops obj (or the pair <obj, paired_obj>) from the recursion list.
5212 * For a pair, if list[obj] is a hash, then paired_obj_id is
5213 * removed from the hash and no attempt is made to simplify
5214 * list[obj] from {only_one_paired_id => true} to only_one_paired_id
5215 * Assumes the recursion list is valid.
5216 */
5217
5218static int
5219recursive_pop(VALUE list, VALUE obj, VALUE paired_obj)
5220{
5221 if (paired_obj) {
5222 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5223 if (UNDEF_P(pair_list)) {
5224 return 0;
5225 }
5226 if (RB_TYPE_P(pair_list, T_HASH)) {
5227 rb_hash_delete_entry(pair_list, paired_obj);
5228 if (!RHASH_EMPTY_P(pair_list)) {
5229            return 1; /* keep the hash until it is empty */
5230 }
5231 }
5232 }
5233 rb_hash_delete_entry(list, obj);
5234 return 1;
5235}
5237struct exec_recursive_params {
5238 VALUE (*func) (VALUE, VALUE, int);
5239 VALUE list;
5240 VALUE obj;
5241 VALUE pairid;
5242 VALUE arg;
5243};
5244
5245static VALUE
5246exec_recursive_i(RB_BLOCK_CALL_FUNC_ARGLIST(tag, data))
5247{
5248 struct exec_recursive_params *p = (void *)data;
5249 return (*p->func)(p->obj, p->arg, FALSE);
5250}
5251
5252/*
5253 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5254 * current method is called recursively on obj, or on the pair <obj, pairid>.
5255 * If outer is 0, then the innermost func will be called with recursive set
5256 * to true; otherwise the outermost func will be called. In the latter case,
5257 * all inner funcs are short-circuited by throw.
5258 * Implementation detail: the value thrown is the recursive list, which is
5259 * specific to the current method and unlikely to be caught anywhere else.
5260 * list[recursive_key] is used as a flag for the outermost call.
5261 */
5262
5263static VALUE
5264exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer, ID mid)
5265{
5266 VALUE result = Qundef;
5267 const VALUE sym = mid ? ID2SYM(mid) : ID2SYM(idNULL);
5268 struct exec_recursive_params p;
5269 int outermost;
5270 p.list = recursive_list_access(sym);
5271 p.obj = obj;
5272 p.pairid = pairid;
5273 p.arg = arg;
5274 outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);
5275
5276 if (recursive_check(p.list, p.obj, pairid)) {
5277 if (outer && !outermost) {
5278 rb_throw_obj(p.list, p.list);
5279 }
5280 return (*func)(obj, arg, TRUE);
5281 }
5282 else {
5283 enum ruby_tag_type state;
5284
5285 p.func = func;
5286
5287 if (outermost) {
5288 recursive_push(p.list, ID2SYM(recursive_key), 0);
5289 recursive_push(p.list, p.obj, p.pairid);
5290 result = rb_catch_protect(p.list, exec_recursive_i, (VALUE)&p, &state);
5291 if (!recursive_pop(p.list, p.obj, p.pairid)) goto invalid;
5292 if (!recursive_pop(p.list, ID2SYM(recursive_key), 0)) goto invalid;
5293 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5294 if (result == p.list) {
5295 result = (*func)(obj, arg, TRUE);
5296 }
5297 }
5298 else {
5299 volatile VALUE ret = Qundef;
5300 recursive_push(p.list, p.obj, p.pairid);
5301 EC_PUSH_TAG(GET_EC());
5302 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
5303 ret = (*func)(obj, arg, FALSE);
5304 }
5305 EC_POP_TAG();
5306 if (!recursive_pop(p.list, p.obj, p.pairid)) {
5307 goto invalid;
5308 }
5309 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5310 result = ret;
5311 }
5312 }
5313 *(volatile struct exec_recursive_params *)&p;
5314 return result;
5315
5316 invalid:
5317 rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list "
5318 "for %+"PRIsVALUE" in %+"PRIsVALUE,
5319 sym, rb_thread_current());
5320    UNREACHABLE_RETURN(Qundef);
5321}
5322
5323/*
5324 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5325 * current method is called recursively on obj
5326 */
5327
5328VALUE
5329rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5330{
5331 return exec_recursive(func, obj, 0, arg, 0, rb_frame_last_func());
5332}
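/*
 * Usage sketch (illustrative only): the pattern behind methods such as
 * Array#inspect. The callback receives recursive == TRUE when `obj` is
 * reached again while an outer call is still running, so it can cut the
 * cycle; inspect_ary() is a hypothetical callback:
 *
 *     static VALUE
 *     inspect_ary(VALUE ary, VALUE arg, int recursive)
 *     {
 *         if (recursive) return rb_str_new_cstr("[...]");
 *         // ... inspect each element ...
 *     }
 *
 *     rb_exec_recursive(inspect_ary, ary, 0);
 */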
5333
5334/*
5335 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5336 * current method is called recursively on the ordered pair <obj, paired_obj>
5337 */
5338
5339VALUE
5340rb_exec_recursive_paired(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5341{
5342 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 0, rb_frame_last_func());
5343}
5344
5345/*
5346 * If recursion is detected on the current method and obj, the outermost
5347 * func will be called with (obj, arg, true). All inner func will be
5348 * short-circuited using throw.
5349 */
5350
5351VALUE
5352rb_exec_recursive_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5353{
5354 return exec_recursive(func, obj, 0, arg, 1, rb_frame_last_func());
5355}
5356
5357VALUE
5358rb_exec_recursive_outer_mid(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg, ID mid)
5359{
5360 return exec_recursive(func, obj, 0, arg, 1, mid);
5361}
5362
5363/*
5364 * If recursion is detected on the current method, obj and paired_obj,
5365 * the outermost func will be called with (obj, arg, true). All inner
5366 * func will be short-circuited using throw.
5367 */
5368
5369VALUE
5370rb_exec_recursive_paired_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5371{
5372 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 1, rb_frame_last_func());
5373}
5374
5375/*
5376 * call-seq:
5377 * thread.backtrace -> array or nil
5378 *
5379 * Returns the current backtrace of the target thread.
5380 *
5381 */
5382
5383static VALUE
5384rb_thread_backtrace_m(int argc, VALUE *argv, VALUE thval)
5385{
5386 return rb_vm_thread_backtrace(argc, argv, thval);
5387}
5388
5389/* call-seq:
5390 * thread.backtrace_locations(*args) -> array or nil
5391 *
5392 * Returns the execution stack for the target thread---an array containing
5393 * backtrace location objects.
5394 *
5395 * See Thread::Backtrace::Location for more information.
5396 *
5397 * This method behaves similarly to Kernel#caller_locations except it applies
5398 * to a specific thread.
5399 */
5400static VALUE
5401rb_thread_backtrace_locations_m(int argc, VALUE *argv, VALUE thval)
5402{
5403 return rb_vm_thread_backtrace_locations(argc, argv, thval);
5404}
5405
5406void
5407Init_Thread_Mutex(void)
5408{
5409 rb_thread_t *th = GET_THREAD();
5410
5411 rb_native_mutex_initialize(&th->vm->workqueue_lock);
5412 rb_native_mutex_initialize(&th->interrupt_lock);
5413}
5414
5415/*
5416 * Document-class: ThreadError
5417 *
5418 * Raised when an invalid operation is attempted on a thread.
5419 *
5420 * For example, when no other thread has been started:
5421 *
5422 * Thread.stop
5423 *
5424 *  This will raise the following exception:
5425 *
5426 * ThreadError: stopping only thread
5427 * note: use sleep to stop forever
5428 */
5429
5430void
5431Init_Thread(void)
5432{
5433 rb_thread_t *th = GET_THREAD();
5434
5435 sym_never = ID2SYM(rb_intern_const("never"));
5436 sym_immediate = ID2SYM(rb_intern_const("immediate"));
5437 sym_on_blocking = ID2SYM(rb_intern_const("on_blocking"));
5438
5439 rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
5440 rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
5441 rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
5442 rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
5443 rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
5444 rb_define_singleton_method(rb_cThread, "stop", thread_stop, 0);
5445 rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
5446 rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
5447 rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
5448 rb_define_singleton_method(rb_cThread, "list", thread_list, 0);
5449 rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
5450 rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
5451 rb_define_singleton_method(rb_cThread, "report_on_exception", rb_thread_s_report_exc, 0);
5452 rb_define_singleton_method(rb_cThread, "report_on_exception=", rb_thread_s_report_exc_set, 1);
5453 rb_define_singleton_method(rb_cThread, "ignore_deadlock", rb_thread_s_ignore_deadlock, 0);
5454 rb_define_singleton_method(rb_cThread, "ignore_deadlock=", rb_thread_s_ignore_deadlock_set, 1);
5455 rb_define_singleton_method(rb_cThread, "handle_interrupt", rb_thread_s_handle_interrupt, 1);
5456 rb_define_singleton_method(rb_cThread, "pending_interrupt?", rb_thread_s_pending_interrupt_p, -1);
5457 rb_define_method(rb_cThread, "pending_interrupt?", rb_thread_pending_interrupt_p, -1);
5458
5459 rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
5460 rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
5461 rb_define_method(rb_cThread, "join", thread_join_m, -1);
5462 rb_define_method(rb_cThread, "value", thread_value, 0);
5463 rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
5464 rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
5465 rb_define_method(rb_cThread, "exit", rb_thread_kill, 0);
5466 rb_define_method(rb_cThread, "run", rb_thread_run, 0);
5467 rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
5468 rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
5469 rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
5470 rb_define_method(rb_cThread, "fetch", rb_thread_fetch, -1);
5471 rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
5472 rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
5473 rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
5474 rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
5475 rb_define_method(rb_cThread, "status", rb_thread_status, 0);
5476 rb_define_method(rb_cThread, "thread_variable_get", rb_thread_variable_get, 1);
5477 rb_define_method(rb_cThread, "thread_variable_set", rb_thread_variable_set, 2);
5478 rb_define_method(rb_cThread, "thread_variables", rb_thread_variables, 0);
5479 rb_define_method(rb_cThread, "thread_variable?", rb_thread_variable_p, 1);
5480 rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
5481 rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
5482 rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
5483 rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
5484 rb_define_method(rb_cThread, "report_on_exception", rb_thread_report_exc, 0);
5485 rb_define_method(rb_cThread, "report_on_exception=", rb_thread_report_exc_set, 1);
5486 rb_define_method(rb_cThread, "group", rb_thread_group, 0);
5487 rb_define_method(rb_cThread, "backtrace", rb_thread_backtrace_m, -1);
5488 rb_define_method(rb_cThread, "backtrace_locations", rb_thread_backtrace_locations_m, -1);
5489
5490 rb_define_method(rb_cThread, "name", rb_thread_getname, 0);
5491 rb_define_method(rb_cThread, "name=", rb_thread_setname, 1);
5492 rb_define_method(rb_cThread, "native_thread_id", rb_thread_native_thread_id, 0);
5493 rb_define_method(rb_cThread, "to_s", rb_thread_to_s, 0);
5494 rb_define_alias(rb_cThread, "inspect", "to_s");
5495
5496 rb_vm_register_special_exception(ruby_error_stream_closed, rb_eIOError,
5497 "stream closed in another thread");
5498
5499 cThGroup = rb_define_class("ThreadGroup", rb_cObject);
5500 rb_define_alloc_func(cThGroup, thgroup_s_alloc);
5501 rb_define_method(cThGroup, "list", thgroup_list, 0);
5502 rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
5503 rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
5504 rb_define_method(cThGroup, "add", thgroup_add, 1);
5505
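    // Editor's note: RUBY_THREAD_TIMESLICE tunes the default thread quantum
    // in milliseconds (e.g. `RUBY_THREAD_TIMESLICE=50 ruby app.rb`); values
    // that are not positive or do not fit in uint32_t are ignored below.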
5506 const char * ptr = getenv("RUBY_THREAD_TIMESLICE");
5507
5508 if (ptr) {
5509 long quantum = strtol(ptr, NULL, 0);
5510 if (quantum > 0 && !(SIZEOF_LONG > 4 && quantum > UINT32_MAX)) {
5511 thread_default_quantum_ms = (uint32_t)quantum;
5512 }
5513 else if (0) {
5514 fprintf(stderr, "Ignored RUBY_THREAD_TIMESLICE=%s\n", ptr);
5515 }
5516 }
5517
5518 {
5519 th->thgroup = th->ractor->thgroup_default = rb_obj_alloc(cThGroup);
5520 rb_define_const(cThGroup, "Default", th->thgroup);
5521 }
5522
5524
5525 /* init thread core */
5526 {
5527 /* main thread setting */
5528 {
5529 /* acquire global vm lock */
5530#ifdef HAVE_PTHREAD_NP_H
5531 VM_ASSERT(TH_SCHED(th)->running == th);
5532#endif
5533 // thread_sched_to_running() should not be called because
5534 // it assumes blocked by thread_sched_to_waiting().
5535 // thread_sched_to_running(sched, th);
5536
5537 th->pending_interrupt_queue = rb_ary_hidden_new(0);
5538 th->pending_interrupt_queue_checked = 0;
5539 th->pending_interrupt_mask_stack = rb_ary_hidden_new(0);
5540 }
5541 }
5542
5543 rb_thread_create_timer_thread();
5544
5545 Init_thread_sync();
5546
5547 // TODO: Suppress unused function warning for now
5548 // if (0) rb_thread_sched_destroy(NULL);
5549}
5550
5551int
5552ruby_native_thread_p(void)
5553{
5554 rb_thread_t *th = ruby_thread_from_native();
5555
5556 return th != 0;
5557}
5558
5559#ifdef NON_SCALAR_THREAD_ID
5560 #define thread_id_str(th) (NULL)
5561#else
5562 #define thread_id_str(th) ((void *)(uintptr_t)(th)->nt->thread_id)
5563#endif
5564
5565static void
5566debug_deadlock_check(rb_ractor_t *r, VALUE msg)
5567{
5568 rb_thread_t *th = 0;
5569 VALUE sep = rb_str_new_cstr("\n ");
5570
5571 rb_str_catf(msg, "\n%d threads, %d sleeps current:%p main thread:%p\n",
5572 rb_ractor_living_thread_num(r), rb_ractor_sleeper_thread_num(r),
5573 (void *)GET_THREAD(), (void *)r->threads.main);
5574
5575 ccan_list_for_each(&r->threads.set, th, lt_node) {
5576 rb_str_catf(msg, "* %+"PRIsVALUE"\n rb_thread_t:%p "
5577 "native:%p int:%u",
5578 th->self, (void *)th, th->nt ? thread_id_str(th) : "N/A", th->ec->interrupt_flag);
5579
5580 if (th->locking_mutex) {
5581 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5582 rb_str_catf(msg, " mutex:%p cond:%"PRIuSIZE,
5583 (void *)mutex->fiber, rb_mutex_num_waiting(mutex));
5584 }
5585
5586 {
5587 struct rb_waiting_list *list = th->join_list;
5588 while (list) {
5589            rb_str_catf(msg, "\n   depended by: rb_thread_t:%p", (void *)list->thread);
5590 list = list->next;
5591 }
5592 }
5593 rb_str_catf(msg, "\n ");
5594 rb_str_concat(msg, rb_ary_join(rb_ec_backtrace_str_ary(th->ec, RUBY_BACKTRACE_START, RUBY_ALL_BACKTRACE_LINES), sep));
5595 rb_str_catf(msg, "\n");
5596 }
5597}
5598
5599static void
5600rb_check_deadlock(rb_ractor_t *r)
5601{
5602 if (GET_THREAD()->vm->thread_ignore_deadlock) return;
5603
5604#ifdef RUBY_THREAD_PTHREAD_H
5605 if (r->threads.sched.readyq_cnt > 0) return;
5606#endif
5607
5608 int sleeper_num = rb_ractor_sleeper_thread_num(r);
5609 int ltnum = rb_ractor_living_thread_num(r);
5610
5611 if (ltnum > sleeper_num) return;
5612 if (ltnum < sleeper_num) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
5613
5614 int found = 0;
5615 rb_thread_t *th = NULL;
5616
5617 ccan_list_for_each(&r->threads.set, th, lt_node) {
5618 if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th->ec)) {
5619 found = 1;
5620 }
5621 else if (th->locking_mutex) {
5622 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5623 if (mutex->fiber == th->ec->fiber_ptr || (!mutex->fiber && !ccan_list_empty(&mutex->waitq))) {
5624 found = 1;
5625 }
5626 }
5627 if (found)
5628 break;
5629 }
5630
5631 if (!found) {
5632 VALUE argv[2];
5633 argv[0] = rb_eFatal;
5634 argv[1] = rb_str_new2("No live threads left. Deadlock?");
5635 debug_deadlock_check(r, argv[1]);
5636 rb_ractor_sleeper_threads_dec(GET_RACTOR());
5637 rb_threadptr_raise(r->threads.main, 2, argv);
5638 }
5639}
5640
5641// Used for VM memsize reporting. Returns the size of a list of waiting_fd
5642// structs. Defined here because the struct definition lives here as well.
5643size_t
5644rb_vm_memsize_waiting_fds(struct ccan_list_head *waiting_fds)
5645{
5646 struct waiting_fd *waitfd = 0;
5647 size_t size = 0;
5648
5649 ccan_list_for_each(waiting_fds, waitfd, wfd_node) {
5650 size += sizeof(struct waiting_fd);
5651 }
5652
5653 return size;
5654}
5655
5656static void
5657update_line_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5658{
5659 const rb_control_frame_t *cfp = GET_EC()->cfp;
5660 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5661 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5662 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
5663 if (lines) {
5664 long line = rb_sourceline() - 1;
5665 long count;
5666 VALUE num;
5667 void rb_iseq_clear_event_flags(const rb_iseq_t *iseq, size_t pos, rb_event_flag_t reset);
5668 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
5669 rb_iseq_clear_event_flags(cfp->iseq, cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1, RUBY_EVENT_COVERAGE_LINE);
5670 rb_ary_push(lines, LONG2FIX(line + 1));
5671 return;
5672 }
5673 if (line >= RARRAY_LEN(lines)) { /* no longer tracked */
5674 return;
5675 }
5676 num = RARRAY_AREF(lines, line);
5677 if (!FIXNUM_P(num)) return;
5678 count = FIX2LONG(num) + 1;
5679 if (POSFIXABLE(count)) {
5680 RARRAY_ASET(lines, line, LONG2FIX(count));
5681 }
5682 }
5683 }
5684}
5685
5686static void
5687update_branch_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5688{
5689 const rb_control_frame_t *cfp = GET_EC()->cfp;
5690 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5691 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5692 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
5693 if (branches) {
5694 long pc = cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1;
5695 long idx = FIX2INT(RARRAY_AREF(ISEQ_PC2BRANCHINDEX(cfp->iseq), pc)), count;
5696 VALUE counters = RARRAY_AREF(branches, 1);
5697 VALUE num = RARRAY_AREF(counters, idx);
5698 count = FIX2LONG(num) + 1;
5699 if (POSFIXABLE(count)) {
5700 RARRAY_ASET(counters, idx, LONG2FIX(count));
5701 }
5702 }
5703 }
5704}
5705
5706const rb_method_entry_t *
5707rb_resolve_me_location(const rb_method_entry_t *me, VALUE resolved_location[5])
5708{
5709 VALUE path, beg_pos_lineno, beg_pos_column, end_pos_lineno, end_pos_column;
5710
5711 if (!me->def) return NULL; // negative cme
5712
5713 retry:
5714 switch (me->def->type) {
5715 case VM_METHOD_TYPE_ISEQ: {
5716 const rb_iseq_t *iseq = me->def->body.iseq.iseqptr;
5717 rb_iseq_location_t *loc = &ISEQ_BODY(iseq)->location;
5718 path = rb_iseq_path(iseq);
5719 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5720 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5721 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5722 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5723 break;
5724 }
5725 case VM_METHOD_TYPE_BMETHOD: {
5726 const rb_iseq_t *iseq = rb_proc_get_iseq(me->def->body.bmethod.proc, 0);
5727 if (iseq) {
5728 rb_iseq_location_t *loc;
5729 rb_iseq_check(iseq);
5730 path = rb_iseq_path(iseq);
5731 loc = &ISEQ_BODY(iseq)->location;
5732 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5733 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5734 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5735 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5736 break;
5737 }
5738 return NULL;
5739 }
5740 case VM_METHOD_TYPE_ALIAS:
5741 me = me->def->body.alias.original_me;
5742 goto retry;
5743 case VM_METHOD_TYPE_REFINED:
5744 me = me->def->body.refined.orig_me;
5745 if (!me) return NULL;
5746 goto retry;
5747 default:
5748 return NULL;
5749 }
5750
5751 /* found */
5752 if (RB_TYPE_P(path, T_ARRAY)) {
5753 path = rb_ary_entry(path, 1);
5754 if (!RB_TYPE_P(path, T_STRING)) return NULL; /* just for the case... */
5755 }
5756 if (resolved_location) {
5757 resolved_location[0] = path;
5758 resolved_location[1] = beg_pos_lineno;
5759 resolved_location[2] = beg_pos_column;
5760 resolved_location[3] = end_pos_lineno;
5761 resolved_location[4] = end_pos_column;
5762 }
5763 return me;
5764}
5765
5766static void
5767update_method_coverage(VALUE me2counter, rb_trace_arg_t *trace_arg)
5768{
5769 const rb_control_frame_t *cfp = GET_EC()->cfp;
5770 const rb_callable_method_entry_t *cme = rb_vm_frame_method_entry(cfp);
5771 const rb_method_entry_t *me = (const rb_method_entry_t *)cme;
5772 VALUE rcount;
5773 long count;
5774
5775 me = rb_resolve_me_location(me, 0);
5776 if (!me) return;
5777
5778 rcount = rb_hash_aref(me2counter, (VALUE) me);
5779 count = FIXNUM_P(rcount) ? FIX2LONG(rcount) + 1 : 1;
5780 if (POSFIXABLE(count)) {
5781 rb_hash_aset(me2counter, (VALUE) me, LONG2FIX(count));
5782 }
5783}
5784
5785VALUE
5786rb_get_coverages(void)
5787{
5788 return GET_VM()->coverages;
5789}
5790
5791int
5792rb_get_coverage_mode(void)
5793{
5794 return GET_VM()->coverage_mode;
5795}
5796
5797void
5798rb_set_coverages(VALUE coverages, int mode, VALUE me2counter)
5799{
5800 GET_VM()->coverages = coverages;
5801 GET_VM()->me2counter = me2counter;
5802 GET_VM()->coverage_mode = mode;
5803}
5804
5805void
5806rb_resume_coverages(void)
5807{
5808 int mode = GET_VM()->coverage_mode;
5809 VALUE me2counter = GET_VM()->me2counter;
5810 rb_add_event_hook2((rb_event_hook_func_t) update_line_coverage, RUBY_EVENT_COVERAGE_LINE, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5811 if (mode & COVERAGE_TARGET_BRANCHES) {
5812 rb_add_event_hook2((rb_event_hook_func_t) update_branch_coverage, RUBY_EVENT_COVERAGE_BRANCH, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5813 }
5814 if (mode & COVERAGE_TARGET_METHODS) {
5815 rb_add_event_hook2((rb_event_hook_func_t) update_method_coverage, RUBY_EVENT_CALL, me2counter, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5816 }
5817}
5818
5819void
5820rb_suspend_coverages(void)
5821{
5822 rb_remove_event_hook((rb_event_hook_func_t) update_line_coverage);
5823 if (GET_VM()->coverage_mode & COVERAGE_TARGET_BRANCHES) {
5824 rb_remove_event_hook((rb_event_hook_func_t) update_branch_coverage);
5825 }
5826 if (GET_VM()->coverage_mode & COVERAGE_TARGET_METHODS) {
5827 rb_remove_event_hook((rb_event_hook_func_t) update_method_coverage);
5828 }
5829}
5830
5831/* Make coverage arrays empty so old covered files are no longer tracked. */
5832void
5833rb_reset_coverages(void)
5834{
5835 rb_clear_coverages();
5836 rb_iseq_remove_coverage_all();
5837 GET_VM()->coverages = Qfalse;
5838}
5839
5840VALUE
5841rb_default_coverage(int n)
5842{
5843 VALUE coverage = rb_ary_hidden_new_fill(3);
5844 VALUE lines = Qfalse, branches = Qfalse;
5845 int mode = GET_VM()->coverage_mode;
5846
5847 if (mode & COVERAGE_TARGET_LINES) {
5848 lines = n > 0 ? rb_ary_hidden_new_fill(n) : rb_ary_hidden_new(0);
5849 }
5850 RARRAY_ASET(coverage, COVERAGE_INDEX_LINES, lines);
5851
5852 if (mode & COVERAGE_TARGET_BRANCHES) {
5853 branches = rb_ary_hidden_new_fill(2);
5854 /* internal data structures for branch coverage:
5855 *
5856 * { branch base node =>
5857 * [base_type, base_first_lineno, base_first_column, base_last_lineno, base_last_column, {
5858 * branch target id =>
5859 * [target_type, target_first_lineno, target_first_column, target_last_lineno, target_last_column, target_counter_index],
5860 * ...
5861 * }],
5862 * ...
5863 * }
5864 *
5865 * Example:
5866 * { NODE_CASE =>
5867 * [1, 0, 4, 3, {
5868 * NODE_WHEN => [2, 8, 2, 9, 0],
5869 * NODE_WHEN => [3, 8, 3, 9, 1],
5870 * ...
5871 * }],
5872 * ...
5873 * }
5874 */
5875 VALUE structure = rb_hash_new();
5876 rb_obj_hide(structure);
5877 RARRAY_ASET(branches, 0, structure);
5878 /* branch execution counters */
5879 RARRAY_ASET(branches, 1, rb_ary_hidden_new(0));
5880 }
5881 RARRAY_ASET(coverage, COVERAGE_INDEX_BRANCHES, branches);
5882
5883 return coverage;
5884}
5885
5886static VALUE
5887uninterruptible_exit(VALUE v)
5888{
5889 rb_thread_t *cur_th = GET_THREAD();
5890 rb_ary_pop(cur_th->pending_interrupt_mask_stack);
5891
5892 cur_th->pending_interrupt_queue_checked = 0;
5893 if (!rb_threadptr_pending_interrupt_empty_p(cur_th)) {
5894 RUBY_VM_SET_INTERRUPT(cur_th->ec);
5895 }
5896 return Qnil;
5897}
5898
5899VALUE
5900rb_uninterruptible(VALUE (*b_proc)(VALUE), VALUE data)
5901{
5902 VALUE interrupt_mask = rb_ident_hash_new();
5903 rb_thread_t *cur_th = GET_THREAD();
5904
5905 rb_hash_aset(interrupt_mask, rb_cObject, sym_never);
5906 OBJ_FREEZE(interrupt_mask);
5907 rb_ary_push(cur_th->pending_interrupt_mask_stack, interrupt_mask);
5908
5909 VALUE ret = rb_ensure(b_proc, data, uninterruptible_exit, Qnil);
5910
5911 RUBY_VM_CHECK_INTS(cur_th->ec);
5912 return ret;
5913}
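/*
 * Usage sketch (illustrative only): run a C callback with asynchronous
 * interrupts (Thread#raise, Thread#kill, signal handlers) deferred until it
 * returns; critical_body() is a hypothetical callback:
 *
 *     static VALUE critical_body(VALUE arg) { ...; return Qnil; }
 *
 *     rb_uninterruptible(critical_body, (VALUE)data);
 */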
5914
5915static void
5916thread_specific_storage_alloc(rb_thread_t *th)
5917{
5918 VM_ASSERT(th->specific_storage == NULL);
5919
5920 if (UNLIKELY(specific_key_count > 0)) {
5921 th->specific_storage = ZALLOC_N(void *, RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5922 }
5923}
5924
5925rb_internal_thread_specific_key_t
5926rb_internal_thread_specific_key_create(void)
5927{
5928 rb_vm_t *vm = GET_VM();
5929
5930 if (specific_key_count == 0 && vm->ractor.cnt > 1) {
5931 rb_raise(rb_eThreadError, "The first rb_internal_thread_specific_key_create() is called with multiple ractors");
5932 }
5933    else if (specific_key_count >= RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX) {
5934 rb_raise(rb_eThreadError, "rb_internal_thread_specific_key_create() is called more than %d times", RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5935 }
5936 else {
5937 rb_internal_thread_specific_key_t key = specific_key_count++;
5938
5939 if (key == 0) {
5940 // allocate
5941 rb_ractor_t *cr = GET_RACTOR();
5942 rb_thread_t *th;
5943
5944 ccan_list_for_each(&cr->threads.set, th, lt_node) {
5945 thread_specific_storage_alloc(th);
5946 }
5947 }
5948 return key;
5949 }
5950}
5951
5952// async and native thread safe.
5953void *
5954rb_internal_thread_specific_get(VALUE thread_val, rb_internal_thread_specific_key_t key)
5955{
5956 rb_thread_t *th = DATA_PTR(thread_val);
5957
5958 VM_ASSERT(rb_thread_ptr(thread_val) == th);
5959 VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5960 VM_ASSERT(th->specific_storage);
5961
5962 return th->specific_storage[key];
5963}
5964
5965// async and native thread safe.
5966void
5967rb_internal_thread_specific_set(VALUE thread_val, rb_internal_thread_specific_key_t key, void *data)
5968{
5969 rb_thread_t *th = DATA_PTR(thread_val);
5970
5971 VM_ASSERT(rb_thread_ptr(thread_val) == th);
5972 VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5973 VM_ASSERT(th->specific_storage);
5974
5975 th->specific_storage[key] = data;
5976}
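/*
 * Usage sketch (illustrative only): an extension creates one key at boot and
 * then attaches native data to individual threads; because no Ruby objects
 * are touched, the getter/setter stay async- and native-thread safe:
 *
 *     static rb_internal_thread_specific_key_t key;
 *     key = rb_internal_thread_specific_key_create();
 *     rb_internal_thread_specific_set(thread_val, key, my_data);   // my_data: hypothetical
 *     void *p = rb_internal_thread_specific_get(thread_val, key);
 */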
5977
5978// interrupt_exec
5979
5980struct rb_interrupt_exec_task {
5981    struct ccan_list_node node;
5982
5983 rb_interrupt_exec_func_t *func;
5984 void *data;
5985 enum rb_interrupt_exec_flag flags;
5986};
5987
5988void
5989rb_threadptr_interrupt_exec_task_mark(rb_thread_t *th)
5990{
5991 struct rb_interrupt_exec_task *task;
5992
5993 ccan_list_for_each(&th->interrupt_exec_tasks, task, node) {
5994 if (task->flags & rb_interrupt_exec_flag_value_data) {
5995 rb_gc_mark((VALUE)task->data);
5996 }
5997 }
5998}
5999
6000// native thread safe
6001// th should be available
6002void
6003rb_threadptr_interrupt_exec(rb_thread_t *th, rb_interrupt_exec_func_t *func, void *data, enum rb_interrupt_exec_flag flags)
6004{
6005    // TODO: should not use ALLOC here (it is not async/native-thread safe)
6006    struct rb_interrupt_exec_task *task = ALLOC(struct rb_interrupt_exec_task);
6007    *task = (struct rb_interrupt_exec_task) {
6008 .flags = flags,
6009 .func = func,
6010 .data = data,
6011 };
6012
6013 rb_native_mutex_lock(&th->interrupt_lock);
6014 {
6015 ccan_list_add_tail(&th->interrupt_exec_tasks, &task->node);
6016 threadptr_interrupt_locked(th, true);
6017 }
6018 rb_native_mutex_unlock(&th->interrupt_lock);
6019}
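/*
 * Illustrative sketch (internal API): schedule func(data) to run in the
 * target thread's context the next time it checks interrupts. Passing
 * rb_interrupt_exec_flag_value_data marks `data` as a VALUE so the pending
 * task keeps it GC-reachable (see rb_threadptr_interrupt_exec_task_mark
 * above); my_func is a hypothetical callback:
 *
 *     rb_threadptr_interrupt_exec(target_th, my_func, (void *)obj,
 *                                 rb_interrupt_exec_flag_value_data);
 */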
6020
6021static void
6022threadptr_interrupt_exec_exec(rb_thread_t *th)
6023{
6024 while (1) {
6025 struct rb_interrupt_exec_task *task;
6026
6027 rb_native_mutex_lock(&th->interrupt_lock);
6028 {
6029 task = ccan_list_pop(&th->interrupt_exec_tasks, struct rb_interrupt_exec_task, node);
6030 }
6031 rb_native_mutex_unlock(&th->interrupt_lock);
6032
6033 if (task) {
6034 (*task->func)(task->data);
6035 ruby_xfree(task);
6036 }
6037 else {
6038 break;
6039 }
6040 }
6041}
6042
6043static void
6044threadptr_interrupt_exec_cleanup(rb_thread_t *th)
6045{
6046 rb_native_mutex_lock(&th->interrupt_lock);
6047 {
6048 struct rb_interrupt_exec_task *task;
6049
6050 while ((task = ccan_list_pop(&th->interrupt_exec_tasks, struct rb_interrupt_exec_task, node)) != NULL) {
6051 ruby_xfree(task);
6052 }
6053 }
6054 rb_native_mutex_unlock(&th->interrupt_lock);
6055}
6056
6057struct interrupt_ractor_new_thread_data {
6058    rb_interrupt_exec_func_t *func;
6059 void *data;
6060};

// Runs on the new thread created inside the target ractor: copy the
// heap-allocated argument box, free it, then invoke the requested func.
static VALUE
interrupt_ractor_new_thread_func(void *data)
{
    struct interrupt_ractor_new_thread_data d = *(struct interrupt_ractor_new_thread_data *)data;
    ruby_xfree(data);

    d.func(d.data);
    return Qnil;
}

// Executed on the target ractor's main thread via interrupt_exec; spawns
// a new Ruby thread in that ractor to run the requested func.
static VALUE
interrupt_ractor_func(void *data)
{
    rb_thread_create(interrupt_ractor_new_thread_func, data);
    return Qnil;
}

// native thread safe
// func/data should be native thread safe
void
rb_ractor_interrupt_exec(struct rb_ractor_struct *target_r,
                         rb_interrupt_exec_func_t *func, void *data, enum rb_interrupt_exec_flag flags)
{
    struct interrupt_ractor_new_thread_data *d = ALLOC(struct interrupt_ractor_new_thread_data);

    d->func = func;
    d->data = data;
    rb_thread_t *main_th = target_r->threads.main;
    rb_threadptr_interrupt_exec(main_th, interrupt_ractor_func, d, flags);

    // TODO MEMO: we can create a new thread in a ractor, but not sure how to do that now.
}
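/*
  Sketch of the ractor-level entry point in use (illustrative, assumes
  VM-internal context; announce and poke_ractor are hypothetical, and
  rb_interrupt_exec_flag_none is assumed to be the zero flag value).
  Because the request is bounced through interrupt_ractor_func into
  rb_thread_create(), func ends up running on a fresh Ruby thread inside
  the target ractor instead of interrupting whatever its main thread is
  currently doing.

    static VALUE
    announce(void *data)
    {
        fprintf(stderr, "interrupt_exec in target ractor: %s\n",
                (const char *)data);
        return Qnil;
    }

    static void
    poke_ractor(rb_ractor_t *target_r)
    {
        // func/data must be native thread safe, per the comment above
        rb_ractor_interrupt_exec(target_r, announce, (void *)"ping",
                                 rb_interrupt_exec_flag_none);
    }
 */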