Ruby  3.4.0dev (2024-11-05 revision 348a53415339076afc4a02fcd09f3ae36e9c4c61)
thread.c (348a53415339076afc4a02fcd09f3ae36e9c4c61)
1 /**********************************************************************
2 
3  thread.c -
4 
5  $Author$
6 
7  Copyright (C) 2004-2007 Koichi Sasada
8 
9 **********************************************************************/
10 
11 /*
12  YARV Thread Design
13 
14  model 1: Userlevel Thread
15  Same as traditional ruby thread.
16 
17  model 2: Native Thread with Global VM lock
18  Using pthread (or Windows threads); Ruby threads run concurrently.
19 
20  model 3: Native Thread with fine grain lock
21  Using pthread; Ruby threads run concurrently or in parallel.
22 
23  model 4: M:N User:Native threads with Global VM lock
24  Combination of model 1 and 2
25 
26  model 5: M:N User:Native thread with fine grain lock
27  Combination of model 1 and 3
28 
29 ------------------------------------------------------------------------
30 
31  model 2:
32  Only a thread that holds the mutex (GVL: Global VM Lock or Giant VM Lock) can run.
33  On thread scheduling, the running thread releases the GVL. If the running thread
34  performs a blocking operation, it must release the GVL so that another
35  thread can continue. After the blocking operation, the thread
36  must check for interrupts (RUBY_VM_CHECK_INTS).
37 
38  Separate VMs can run in parallel.
39 
40  Ruby threads are scheduled by the OS thread scheduler.
41 
42 ------------------------------------------------------------------------
43 
44  model 3:
45  All threads run concurrently or in parallel, and exclusive access
46  control is needed to access shared objects. For example, to access a
47  String or Array object, a fine-grained lock must be taken every time.
48  */
49 
50 
51 /*
52  * FD_SET, FD_CLR and FD_ISSET have a small sanity check when using glibc
53  * 2.15 or later and set _FORTIFY_SOURCE > 0.
54  * However, the implementation is wrong. Even though Linux's select(2)
55  * supports large fd size (>FD_SETSIZE), it wrongly assumes fd is always
56  * less than FD_SETSIZE (i.e. 1024). And then when enabling HAVE_RB_FD_INIT,
57  * it doesn't work correctly and makes program abort. Therefore we need to
58  * disable FORTIFY_SOURCE until glibc fixes it.
59  */
60 #undef _FORTIFY_SOURCE
61 #undef __USE_FORTIFY_LEVEL
62 #define __USE_FORTIFY_LEVEL 0
63 
64 /* for model 2 */
65 
66 #include "ruby/internal/config.h"
67 
68 #ifdef __linux__
69 // Normally, gcc(1) translates calls to alloca() with inlined code. This is not done when either the -ansi, -std=c89, -std=c99, or the -std=c11 option is given and the header <alloca.h> is not included.
70 # include <alloca.h>
71 #endif
72 
73 #define TH_SCHED(th) (&(th)->ractor->threads.sched)
74 
75 #include "eval_intern.h"
76 #include "hrtime.h"
77 #include "internal.h"
78 #include "internal/class.h"
79 #include "internal/cont.h"
80 #include "internal/error.h"
81 #include "internal/gc.h"
82 #include "internal/hash.h"
83 #include "internal/io.h"
84 #include "internal/object.h"
85 #include "internal/proc.h"
86 #include "ruby/fiber/scheduler.h"
87 #include "internal/signal.h"
88 #include "internal/thread.h"
89 #include "internal/time.h"
90 #include "internal/warnings.h"
91 #include "iseq.h"
92 #include "rjit.h"
93 #include "ruby/debug.h"
94 #include "ruby/io.h"
95 #include "ruby/thread.h"
96 #include "ruby/thread_native.h"
97 #include "timev.h"
98 #include "vm_core.h"
99 #include "ractor_core.h"
100 #include "vm_debug.h"
101 #include "vm_sync.h"
102 
103 #if USE_RJIT && defined(HAVE_SYS_WAIT_H)
104 #include <sys/wait.h>
105 #endif
106 
107 #ifndef USE_NATIVE_THREAD_PRIORITY
108 #define USE_NATIVE_THREAD_PRIORITY 0
109 #define RUBY_THREAD_PRIORITY_MAX 3
110 #define RUBY_THREAD_PRIORITY_MIN -3
111 #endif
112 
113 static VALUE rb_cThreadShield;
114 
115 static VALUE sym_immediate;
116 static VALUE sym_on_blocking;
117 static VALUE sym_never;
118 
119 #define THREAD_LOCAL_STORAGE_INITIALISED FL_USER13
120 #define THREAD_LOCAL_STORAGE_INITIALISED_P(th) RB_FL_TEST_RAW((th), THREAD_LOCAL_STORAGE_INITIALISED)
121 
122 static inline VALUE
123 rb_thread_local_storage(VALUE thread)
124 {
125  if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
126  rb_ivar_set(thread, idLocals, rb_hash_new());
127  RB_FL_SET_RAW(thread, THREAD_LOCAL_STORAGE_INITIALISED);
128  }
129  return rb_ivar_get(thread, idLocals);
130 }
131 
132 enum SLEEP_FLAGS {
133  SLEEP_DEADLOCKABLE = 0x01,
134  SLEEP_SPURIOUS_CHECK = 0x02,
135  SLEEP_ALLOW_SPURIOUS = 0x04,
136  SLEEP_NO_CHECKINTS = 0x08,
137 };
138 
139 static void sleep_forever(rb_thread_t *th, unsigned int fl);
140 static int sleep_hrtime(rb_thread_t *, rb_hrtime_t, unsigned int fl);
141 
142 static void rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end);
143 static int rb_threadptr_dead(rb_thread_t *th);
144 static void rb_check_deadlock(rb_ractor_t *r);
145 static int rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th);
146 static const char *thread_status_name(rb_thread_t *th, int detail);
147 static int hrtime_update_expire(rb_hrtime_t *, const rb_hrtime_t);
148 NORETURN(static void async_bug_fd(const char *mesg, int errno_arg, int fd));
149 MAYBE_UNUSED(static int consume_communication_pipe(int fd));
150 
151 static volatile int system_working = 1;
152 static rb_internal_thread_specific_key_t specific_key_count;
153 
154 struct waiting_fd {
155  struct ccan_list_node wfd_node; /* <=> vm.waiting_fds */
156  rb_thread_t *th;
157  int fd;
158  struct rb_io_close_wait_list *busy;
159 };
160 
161 /********************************************************************************/
162 
163 #define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
164 
166  enum rb_thread_status prev_status;
167 };
168 
169 static int unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted);
170 static void unblock_function_clear(rb_thread_t *th);
171 
172 static inline int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
173  rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted);
174 static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);
175 
176 #define THREAD_BLOCKING_BEGIN(th) do { \
177  struct rb_thread_sched * const sched = TH_SCHED(th); \
178  RB_VM_SAVE_MACHINE_CONTEXT(th); \
179  thread_sched_to_waiting((sched), (th));
180 
181 #define THREAD_BLOCKING_END(th) \
182  thread_sched_to_running((sched), (th)); \
183  rb_ractor_thread_switch(th->ractor, th); \
184 } while(0)
185 
186 #ifdef __GNUC__
187 #ifdef HAVE_BUILTIN___BUILTIN_CHOOSE_EXPR_CONSTANT_P
188 #define only_if_constant(expr, notconst) __builtin_choose_expr(__builtin_constant_p(expr), (expr), (notconst))
189 #else
190 #define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
191 #endif
192 #else
193 #define only_if_constant(expr, notconst) notconst
194 #endif
195 #define BLOCKING_REGION(th, exec, ubf, ubfarg, fail_if_interrupted) do { \
196  struct rb_blocking_region_buffer __region; \
197  if (blocking_region_begin(th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
198  /* always return true unless fail_if_interrupted */ \
199  !only_if_constant(fail_if_interrupted, TRUE)) { \
200  /* Important that this is inlined into the macro, and not part of \
201  * blocking_region_begin - see bug #20493 */ \
202  RB_VM_SAVE_MACHINE_CONTEXT(th); \
203  thread_sched_to_waiting(TH_SCHED(th), th); \
204  exec; \
205  blocking_region_end(th, &__region); \
206  }; \
207 } while(0)
208 
209 /*
210  * returns true if this thread was spuriously interrupted, false otherwise
211  * (e.g. hit by Thread#run or ran a Ruby-level Signal.trap handler)
212  */
213 #define RUBY_VM_CHECK_INTS_BLOCKING(ec) vm_check_ints_blocking(ec)
214 static inline int
215 vm_check_ints_blocking(rb_execution_context_t *ec)
216 {
217  rb_thread_t *th = rb_ec_thread_ptr(ec);
218 
219  if (LIKELY(rb_threadptr_pending_interrupt_empty_p(th))) {
220  if (LIKELY(!RUBY_VM_INTERRUPTED_ANY(ec))) return FALSE;
221  }
222  else {
223  th->pending_interrupt_queue_checked = 0;
224  RUBY_VM_SET_INTERRUPT(ec);
225  }
226  return rb_threadptr_execute_interrupts(th, 1);
227 }
228 
229 int
230 rb_vm_check_ints_blocking(rb_execution_context_t *ec)
231 {
232  return vm_check_ints_blocking(ec);
233 }
234 
235 /*
236  * poll() is supported by many OSes, but so far Linux is the only
237  * one we know of that supports using poll() in all places select()
238  * would work.
239  */
240 #if defined(HAVE_POLL)
241 # if defined(__linux__)
242 # define USE_POLL
243 # endif
244 # if defined(__FreeBSD_version) && __FreeBSD_version >= 1100000
245 # define USE_POLL
246  /* FreeBSD does not set POLLOUT when POLLHUP happens */
247 # define POLLERR_SET (POLLHUP | POLLERR)
248 # endif
249 #endif
250 
251 static void
252 timeout_prepare(rb_hrtime_t **to, rb_hrtime_t *rel, rb_hrtime_t *end,
253  const struct timeval *timeout)
254 {
255  if (timeout) {
256  *rel = rb_timeval2hrtime(timeout);
257  *end = rb_hrtime_add(rb_hrtime_now(), *rel);
258  *to = rel;
259  }
260  else {
261  *to = 0;
262  }
263 }
264 
265 MAYBE_UNUSED(NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start)));
266 MAYBE_UNUSED(static bool th_has_dedicated_nt(const rb_thread_t *th));
267 MAYBE_UNUSED(static int waitfd_to_waiting_flag(int wfd_event));
268 
269 #include THREAD_IMPL_SRC
270 
271 /*
272  * TODO: somebody with win32 knowledge should be able to get rid of
273  * timer-thread by busy-waiting on signals. And it should be possible
274  * to make the GVL in thread_pthread.c be platform-independent.
275  */
276 #ifndef BUSY_WAIT_SIGNALS
277 # define BUSY_WAIT_SIGNALS (0)
278 #endif
279 
280 #ifndef USE_EVENTFD
281 # define USE_EVENTFD (0)
282 #endif
283 
284 #include "thread_sync.c"
285 
286 void
287 rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
288 {
289  rb_native_mutex_initialize(lock);
290 }
291 
292 void
293 rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
294 {
295  rb_native_mutex_destroy(lock);
296 }
297 
298 void
299 rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
300 {
301  rb_native_mutex_lock(lock);
302 }
303 
304 void
305 rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
306 {
307  rb_native_mutex_unlock(lock);
308 }
309 
310 static int
311 unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted)
312 {
313  do {
314  if (fail_if_interrupted) {
315  if (RUBY_VM_INTERRUPTED_ANY(th->ec)) {
316  return FALSE;
317  }
318  }
319  else {
320  RUBY_VM_CHECK_INTS(th->ec);
321  }
322 
323  rb_native_mutex_lock(&th->interrupt_lock);
324  } while (!th->ec->raised_flag && RUBY_VM_INTERRUPTED_ANY(th->ec) &&
325  (rb_native_mutex_unlock(&th->interrupt_lock), TRUE));
326 
327  VM_ASSERT(th->unblock.func == NULL);
328 
329  th->unblock.func = func;
330  th->unblock.arg = arg;
331  rb_native_mutex_unlock(&th->interrupt_lock);
332 
333  return TRUE;
334 }
335 
336 static void
337 unblock_function_clear(rb_thread_t *th)
338 {
339  rb_native_mutex_lock(&th->interrupt_lock);
340  th->unblock.func = 0;
341  rb_native_mutex_unlock(&th->interrupt_lock);
342 }
343 
344 static void
345 rb_threadptr_interrupt_common(rb_thread_t *th, int trap)
346 {
347  RUBY_DEBUG_LOG("th:%u trap:%d", rb_th_serial(th), trap);
348 
349  rb_native_mutex_lock(&th->interrupt_lock);
350  {
351  if (trap) {
352  RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
353  }
354  else {
355  RUBY_VM_SET_INTERRUPT(th->ec);
356  }
357 
358  if (th->unblock.func != NULL) {
359  (th->unblock.func)(th->unblock.arg);
360  }
361  else {
362  /* none */
363  }
364  }
365  rb_native_mutex_unlock(&th->interrupt_lock);
366 }
367 
368 void
369 rb_threadptr_interrupt(rb_thread_t *th)
370 {
371  RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
372  rb_threadptr_interrupt_common(th, 0);
373 }
374 
375 static void
376 threadptr_trap_interrupt(rb_thread_t *th)
377 {
378  rb_threadptr_interrupt_common(th, 1);
379 }
380 
381 static void
382 terminate_all(rb_ractor_t *r, const rb_thread_t *main_thread)
383 {
384  rb_thread_t *th = 0;
385 
386  ccan_list_for_each(&r->threads.set, th, lt_node) {
387  if (th != main_thread) {
388  RUBY_DEBUG_LOG("terminate start th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));
389 
390  rb_threadptr_pending_interrupt_enque(th, RUBY_FATAL_THREAD_TERMINATED);
391  rb_threadptr_interrupt(th);
392 
393  RUBY_DEBUG_LOG("terminate done th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));
394  }
395  else {
396  RUBY_DEBUG_LOG("main thread th:%u", rb_th_serial(th));
397  }
398  }
399 }
400 
401 static void
402 rb_threadptr_join_list_wakeup(rb_thread_t *thread)
403 {
404  while (thread->join_list) {
405  struct rb_waiting_list *join_list = thread->join_list;
406 
407  // Consume the entry from the join list:
408  thread->join_list = join_list->next;
409 
410  rb_thread_t *target_thread = join_list->thread;
411 
412  if (target_thread->scheduler != Qnil && join_list->fiber) {
413  rb_fiber_scheduler_unblock(target_thread->scheduler, target_thread->self, rb_fiberptr_self(join_list->fiber));
414  }
415  else {
416  rb_threadptr_interrupt(target_thread);
417 
418  switch (target_thread->status) {
419  case THREAD_STOPPED:
420  case THREAD_STOPPED_FOREVER:
421  target_thread->status = THREAD_RUNNABLE;
422  break;
423  default:
424  break;
425  }
426  }
427  }
428 }
429 
430 void
431 rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
432 {
433  while (th->keeping_mutexes) {
434  rb_mutex_t *mutex = th->keeping_mutexes;
435  th->keeping_mutexes = mutex->next_mutex;
436 
437  // rb_warn("mutex #<%p> was not unlocked by thread #<%p>", (void *)mutex, (void*)th);
438 
439  const char *error_message = rb_mutex_unlock_th(mutex, th, mutex->fiber);
440  if (error_message) rb_bug("invalid keeping_mutexes: %s", error_message);
441  }
442 }
443 
444 void
445 rb_thread_terminate_all(rb_thread_t *th)
446 {
447  rb_ractor_t *cr = th->ractor;
448  rb_execution_context_t * volatile ec = th->ec;
449  volatile int sleeping = 0;
450 
451  if (cr->threads.main != th) {
452  rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
453  (void *)cr->threads.main, (void *)th);
454  }
455 
456  /* unlock all locking mutexes */
457  rb_threadptr_unlock_all_locking_mutexes(th);
458 
459  EC_PUSH_TAG(ec);
460  if (EC_EXEC_TAG() == TAG_NONE) {
461  retry:
462  RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
463 
464  terminate_all(cr, th);
465 
466  while (rb_ractor_living_thread_num(cr) > 1) {
467  rb_hrtime_t rel = RB_HRTIME_PER_SEC;
468  /*
469  * The thread-exiting routine in thread_start_func_2 notifies
470  * me when the last sub-thread exits.
471  */
472  sleeping = 1;
473  native_sleep(th, &rel);
474  RUBY_VM_CHECK_INTS_BLOCKING(ec);
475  sleeping = 0;
476  }
477  }
478  else {
479  /*
480  * When an exception is caught (e.g. Ctrl+C), broadcast the
481  * kill request again to ensure all threads are killed even
482  * if they are blocked on sleep, mutex, etc.
483  */
484  if (sleeping) {
485  sleeping = 0;
486  goto retry;
487  }
488  }
489  EC_POP_TAG();
490 }
491 
492 void rb_threadptr_root_fiber_terminate(rb_thread_t *th);
493 
494 static void
495 thread_cleanup_func_before_exec(void *th_ptr)
496 {
497  rb_thread_t *th = th_ptr;
498  th->status = THREAD_KILLED;
499 
500  // The thread stack doesn't exist in the forked process:
501  th->ec->machine.stack_start = th->ec->machine.stack_end = NULL;
502 
503  rb_threadptr_root_fiber_terminate(th);
504 }
505 
506 static void
507 thread_cleanup_func(void *th_ptr, int atfork)
508 {
509  rb_thread_t *th = th_ptr;
510 
511  th->locking_mutex = Qfalse;
512  thread_cleanup_func_before_exec(th_ptr);
513 
514  /*
515  * Unfortunately, we can't release native threading resources at fork,
516  * because libc may be in an unstable locking state; therefore touching
517  * a threading resource may cause a deadlock.
518  */
519  if (atfork) {
520  th->nt = NULL;
521  return;
522  }
523 
524  rb_native_mutex_destroy(&th->interrupt_lock);
525 }
526 
527 static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
528 static VALUE rb_thread_to_s(VALUE thread);
529 
530 void
531 ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame)
532 {
533  native_thread_init_stack(th, local_in_parent_frame);
534 }
535 
536 const VALUE *
537 rb_vm_proc_local_ep(VALUE proc)
538 {
539  const VALUE *ep = vm_proc_ep(proc);
540 
541  if (ep) {
542  return rb_vm_ep_local_ep(ep);
543  }
544  else {
545  return NULL;
546  }
547 }
548 
549 // for ractor, defined in vm.c
550 VALUE rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
551  int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler);
552 
553 static VALUE
554 thread_do_start_proc(rb_thread_t *th)
555 {
556  VALUE args = th->invoke_arg.proc.args;
557  const VALUE *args_ptr;
558  int args_len;
559  VALUE procval = th->invoke_arg.proc.proc;
560  rb_proc_t *proc;
561  GetProcPtr(procval, proc);
562 
563  th->ec->errinfo = Qnil;
564  th->ec->root_lep = rb_vm_proc_local_ep(procval);
565  th->ec->root_svar = Qfalse;
566 
567  vm_check_ints_blocking(th->ec);
568 
569  if (th->invoke_type == thread_invoke_type_ractor_proc) {
570  VALUE self = rb_ractor_self(th->ractor);
571  VM_ASSERT(FIXNUM_P(args));
572  args_len = FIX2INT(args);
573  args_ptr = ALLOCA_N(VALUE, args_len);
574  rb_ractor_receive_parameters(th->ec, th->ractor, args_len, (VALUE *)args_ptr);
575  vm_check_ints_blocking(th->ec);
576 
577  return rb_vm_invoke_proc_with_self(
578  th->ec, proc, self,
579  args_len, args_ptr,
580  th->invoke_arg.proc.kw_splat,
581  VM_BLOCK_HANDLER_NONE
582  );
583  }
584  else {
585  args_len = RARRAY_LENINT(args);
586  if (args_len < 8) {
587  /* free proc.args if the length is small enough */
588  args_ptr = ALLOCA_N(VALUE, args_len);
589  MEMCPY((VALUE *)args_ptr, RARRAY_CONST_PTR(args), VALUE, args_len);
590  th->invoke_arg.proc.args = Qnil;
591  }
592  else {
593  args_ptr = RARRAY_CONST_PTR(args);
594  }
595 
596  vm_check_ints_blocking(th->ec);
597 
598  return rb_vm_invoke_proc(
599  th->ec, proc,
600  args_len, args_ptr,
601  th->invoke_arg.proc.kw_splat,
602  VM_BLOCK_HANDLER_NONE
603  );
604  }
605 }
606 
607 static VALUE
608 thread_do_start(rb_thread_t *th)
609 {
610  native_set_thread_name(th);
611  VALUE result = Qundef;
612 
613  switch (th->invoke_type) {
614  case thread_invoke_type_proc:
615  result = thread_do_start_proc(th);
616  break;
617 
618  case thread_invoke_type_ractor_proc:
619  result = thread_do_start_proc(th);
620  rb_ractor_atexit(th->ec, result);
621  break;
622 
623  case thread_invoke_type_func:
624  result = (*th->invoke_arg.func.func)(th->invoke_arg.func.arg);
625  break;
626 
627  case thread_invoke_type_none:
628  rb_bug("unreachable");
629  }
630 
631  return result;
632 }
633 
634 void rb_ec_clear_current_thread_trace_func(const rb_execution_context_t *ec);
635 
636 static int
637 thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
638 {
639  RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
640  VM_ASSERT(th != th->vm->ractor.main_thread);
641 
642  enum ruby_tag_type state;
643  VALUE errinfo = Qnil;
644  rb_thread_t *ractor_main_th = th->ractor->threads.main;
645 
646  // setup ractor
647  if (rb_ractor_status_p(th->ractor, ractor_blocking)) {
648  RB_VM_LOCK();
649  {
650  rb_vm_ractor_blocking_cnt_dec(th->vm, th->ractor, __FILE__, __LINE__);
651  rb_ractor_t *r = th->ractor;
652  r->r_stdin = rb_io_prep_stdin();
653  r->r_stdout = rb_io_prep_stdout();
654  r->r_stderr = rb_io_prep_stderr();
655  }
656  RB_VM_UNLOCK();
657  }
658 
659  // Ensure that we are not joinable.
660  VM_ASSERT(UNDEF_P(th->value));
661 
662  int fiber_scheduler_closed = 0, event_thread_end_hooked = 0;
663  VALUE result = Qundef;
664 
665  EC_PUSH_TAG(th->ec);
666 
667  if ((state = EC_EXEC_TAG()) == TAG_NONE) {
668  EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_BEGIN, th->self, 0, 0, 0, Qundef);
669 
670  result = thread_do_start(th);
671  }
672 
673  if (!fiber_scheduler_closed) {
674  fiber_scheduler_closed = 1;
675  rb_fiber_scheduler_set(Qnil);
676  }
677 
678  if (!event_thread_end_hooked) {
679  event_thread_end_hooked = 1;
680  EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_END, th->self, 0, 0, 0, Qundef);
681  }
682 
683  if (state == TAG_NONE) {
684  // This must be set AFTER doing all user-level code. At this point, the thread is effectively finished and calls to `Thread#join` will succeed.
685  th->value = result;
686  } else {
687  errinfo = th->ec->errinfo;
688 
689  VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef);
690  if (!NIL_P(exc)) errinfo = exc;
691 
692  if (state == TAG_FATAL) {
693  if (th->invoke_type == thread_invoke_type_ractor_proc) {
694  rb_ractor_atexit(th->ec, Qnil);
695  }
696  /* fatal error within this thread, need to stop whole script */
697  }
698  else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
699  /* exit on main_thread. */
700  }
701  else {
702  if (th->report_on_exception) {
703  VALUE mesg = rb_thread_to_s(th->self);
704  rb_str_cat_cstr(mesg, " terminated with exception (report_on_exception is true):\n");
705  rb_write_error_str(mesg);
706  rb_ec_error_print(th->ec, errinfo);
707  }
708 
709  if (th->invoke_type == thread_invoke_type_ractor_proc) {
710  rb_ractor_atexit_exception(th->ec);
711  }
712 
713  if (th->vm->thread_abort_on_exception ||
714  th->abort_on_exception || RTEST(ruby_debug)) {
715  /* exit on main_thread */
716  }
717  else {
718  errinfo = Qnil;
719  }
720  }
721  th->value = Qnil;
722  }
723 
724  // The thread is effectively finished and can be joined.
725  VM_ASSERT(!UNDEF_P(th->value));
726 
727  rb_threadptr_join_list_wakeup(th);
728  rb_threadptr_unlock_all_locking_mutexes(th);
729 
730  if (th->invoke_type == thread_invoke_type_ractor_proc) {
731  rb_thread_terminate_all(th);
732  rb_ractor_teardown(th->ec);
733  }
734 
735  th->status = THREAD_KILLED;
736  RUBY_DEBUG_LOG("killed th:%u", rb_th_serial(th));
737 
738  if (th->vm->ractor.main_thread == th) {
739  ruby_stop(0);
740  }
741 
742  if (RB_TYPE_P(errinfo, T_OBJECT)) {
743  /* treat with normal error object */
744  rb_threadptr_raise(ractor_main_th, 1, &errinfo);
745  }
746 
747  EC_POP_TAG();
748 
749  rb_ec_clear_current_thread_trace_func(th->ec);
750 
751  /* locking_mutex must be Qfalse */
752  if (th->locking_mutex != Qfalse) {
753  rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
754  (void *)th, th->locking_mutex);
755  }
756 
757  if (ractor_main_th->status == THREAD_KILLED &&
758  th->ractor->threads.cnt <= 2 /* main thread and this thread */) {
759  /* I'm the last thread; wake up the main thread from rb_thread_terminate_all */
760  rb_threadptr_interrupt(ractor_main_th);
761  }
762 
763  rb_check_deadlock(th->ractor);
764 
765  rb_fiber_close(th->ec->fiber_ptr);
766 
767  thread_cleanup_func(th, FALSE);
768  VM_ASSERT(th->ec->vm_stack == NULL);
769 
770  if (th->invoke_type == thread_invoke_type_ractor_proc) {
771  // After rb_ractor_living_threads_remove(),
772  // GC can happen at any time and this ractor can be collected (destroying the GVL),
773  // so gvl_release() should come before it.
774  thread_sched_to_dead(TH_SCHED(th), th);
775  rb_ractor_living_threads_remove(th->ractor, th);
776  }
777  else {
778  rb_ractor_living_threads_remove(th->ractor, th);
779  thread_sched_to_dead(TH_SCHED(th), th);
780  }
781 
782  return 0;
783 }
784 
785 struct thread_create_params {
786  enum thread_invoke_type type;
787 
788  // for normal proc thread
789  VALUE args;
790  VALUE proc;
791 
792  // for ractor
793  rb_ractor_t *g;
794 
795  // for func
796  VALUE (*fn)(void *);
797 };
798 
799 static void thread_specific_storage_alloc(rb_thread_t *th);
800 
801 static VALUE
802 thread_create_core(VALUE thval, struct thread_create_params *params)
803 {
804  rb_execution_context_t *ec = GET_EC();
805  rb_thread_t *th = rb_thread_ptr(thval), *current_th = rb_ec_thread_ptr(ec);
806  int err;
807 
808  thread_specific_storage_alloc(th);
809 
810  if (OBJ_FROZEN(current_th->thgroup)) {
812  "can't start a new thread (frozen ThreadGroup)");
813  }
814 
815  rb_fiber_inherit_storage(ec, th->ec->fiber_ptr);
816 
817  switch (params->type) {
818  case thread_invoke_type_proc:
819  th->invoke_type = thread_invoke_type_proc;
820  th->invoke_arg.proc.args = params->args;
821  th->invoke_arg.proc.proc = params->proc;
822  th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
823  break;
824 
825  case thread_invoke_type_ractor_proc:
826 #if RACTOR_CHECK_MODE > 0
827  rb_ractor_setup_belonging_to(thval, rb_ractor_id(params->g));
828 #endif
829  th->invoke_type = thread_invoke_type_ractor_proc;
830  th->ractor = params->g;
831  th->ractor->threads.main = th;
832  th->invoke_arg.proc.proc = rb_proc_isolate_bang(params->proc);
833  th->invoke_arg.proc.args = INT2FIX(RARRAY_LENINT(params->args));
834  th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
835  rb_ractor_send_parameters(ec, params->g, params->args);
836  break;
837 
838  case thread_invoke_type_func:
839  th->invoke_type = thread_invoke_type_func;
840  th->invoke_arg.func.func = params->fn;
841  th->invoke_arg.func.arg = (void *)params->args;
842  break;
843 
844  default:
845  rb_bug("unreachable");
846  }
847 
848  th->priority = current_th->priority;
849  th->thgroup = current_th->thgroup;
850 
851  th->pending_interrupt_queue = rb_ary_hidden_new(0);
852  th->pending_interrupt_queue_checked = 0;
853  th->pending_interrupt_mask_stack = rb_ary_dup(current_th->pending_interrupt_mask_stack);
854  RBASIC_CLEAR_CLASS(th->pending_interrupt_mask_stack);
855 
856  rb_native_mutex_initialize(&th->interrupt_lock);
857 
858  RUBY_DEBUG_LOG("r:%u th:%u", rb_ractor_id(th->ractor), rb_th_serial(th));
859 
860  rb_ractor_living_threads_insert(th->ractor, th);
861 
862  /* kick thread */
863  err = native_thread_create(th);
864  if (err) {
865  th->status = THREAD_KILLED;
866  rb_ractor_living_threads_remove(th->ractor, th);
867  rb_raise(rb_eThreadError, "can't create Thread: %s", strerror(err));
868  }
869  return thval;
870 }
871 
872 #define threadptr_initialized(th) ((th)->invoke_type != thread_invoke_type_none)
873 
874 /*
875  * call-seq:
876  * Thread.new { ... } -> thread
877  * Thread.new(*args, &proc) -> thread
878  * Thread.new(*args) { |args| ... } -> thread
879  *
880  * Creates a new thread executing the given block.
881  *
882  * Any +args+ given to ::new will be passed to the block:
883  *
884  * arr = []
885  * a, b, c = 1, 2, 3
886  * Thread.new(a,b,c) { |d,e,f| arr << d << e << f }.join
887  * arr #=> [1, 2, 3]
888  *
889  * A ThreadError exception is raised if ::new is called without a block.
890  *
891  * If you're going to subclass Thread, be sure to call super in your
892  * +initialize+ method, otherwise a ThreadError will be raised.
893  */
894 static VALUE
895 thread_s_new(int argc, VALUE *argv, VALUE klass)
896 {
897  rb_thread_t *th;
898  VALUE thread = rb_thread_alloc(klass);
899 
900  if (GET_RACTOR()->threads.main->status == THREAD_KILLED) {
901  rb_raise(rb_eThreadError, "can't alloc thread");
902  }
903 
904  rb_obj_call_init_kw(thread, argc, argv, RB_PASS_CALLED_KEYWORDS);
905  th = rb_thread_ptr(thread);
906  if (!threadptr_initialized(th)) {
907  rb_raise(rb_eThreadError, "uninitialized thread - check '%"PRIsVALUE"#initialize'",
908  klass);
909  }
910  return thread;
911 }
912 
913 /*
914  * call-seq:
915  * Thread.start([args]*) {|args| block } -> thread
916  * Thread.fork([args]*) {|args| block } -> thread
917  *
918  * Basically the same as ::new. However, if class Thread is subclassed, then
919  * calling +start+ in that subclass will not invoke the subclass's
920  * +initialize+ method.
921  */
922 
923 static VALUE
924 thread_start(VALUE klass, VALUE args)
925 {
926  struct thread_create_params params = {
927  .type = thread_invoke_type_proc,
928  .args = args,
929  .proc = rb_block_proc(),
930  };
931  return thread_create_core(rb_thread_alloc(klass), &params);
932 }
933 
934 static VALUE
935 threadptr_invoke_proc_location(rb_thread_t *th)
936 {
937  if (th->invoke_type == thread_invoke_type_proc) {
938  return rb_proc_location(th->invoke_arg.proc.proc);
939  }
940  else {
941  return Qnil;
942  }
943 }
944 
945 /* :nodoc: */
946 static VALUE
947 thread_initialize(VALUE thread, VALUE args)
948 {
949  rb_thread_t *th = rb_thread_ptr(thread);
950 
951  if (!rb_block_given_p()) {
952  rb_raise(rb_eThreadError, "must be called with a block");
953  }
954  else if (th->invoke_type != thread_invoke_type_none) {
955  VALUE loc = threadptr_invoke_proc_location(th);
956  if (!NIL_P(loc)) {
958  "already initialized thread - %"PRIsVALUE":%"PRIsVALUE,
959  RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
960  }
961  else {
962  rb_raise(rb_eThreadError, "already initialized thread");
963  }
964  }
965  else {
966  struct thread_create_params params = {
967  .type = thread_invoke_type_proc,
968  .args = args,
969  .proc = rb_block_proc(),
970  };
971  return thread_create_core(thread, &params);
972  }
973 }
974 
975 VALUE
976 rb_thread_create(VALUE (*fn)(void *), void *arg)
977 {
978  struct thread_create_params params = {
979  .type = thread_invoke_type_func,
980  .fn = fn,
981  .args = (VALUE)arg,
982  };
983  return thread_create_core(rb_thread_alloc(rb_cThread), &params);
984 }
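
/*
 * A minimal usage sketch of rb_thread_create() from a C extension
 * (editor's illustration, not part of thread.c; `worker_body' and `job'
 * are hypothetical names):
 *
 *   static VALUE
 *   worker_body(void *arg)          // runs in the new Ruby thread, GVL held
 *   {
 *       int n = *(int *)arg;
 *       return INT2FIX(n * 2);      // becomes the thread's value
 *   }
 *
 *   static int job = 21;
 *   VALUE th = rb_thread_create(worker_body, &job);
 *   VALUE result = rb_funcall(th, rb_intern("value"), 0);  // => 42
 */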
985 
986 VALUE
987 rb_thread_create_ractor(rb_ractor_t *r, VALUE args, VALUE proc)
988 {
989  struct thread_create_params params = {
990  .type = thread_invoke_type_ractor_proc,
991  .g = r,
992  .args = args,
993  .proc = proc,
994  };
995  return thread_create_core(rb_thread_alloc(rb_cThread), &params);
996 }
997 
998 
999 struct join_arg {
1000  struct rb_waiting_list *waiter;
1001  rb_thread_t *target;
1002  VALUE timeout;
1003  rb_hrtime_t *limit;
1004 };
1005 
1006 static VALUE
1007 remove_from_join_list(VALUE arg)
1008 {
1009  struct join_arg *p = (struct join_arg *)arg;
1010  rb_thread_t *target_thread = p->target;
1011 
1012  if (target_thread->status != THREAD_KILLED) {
1013  struct rb_waiting_list **join_list = &target_thread->join_list;
1014 
1015  while (*join_list) {
1016  if (*join_list == p->waiter) {
1017  *join_list = (*join_list)->next;
1018  break;
1019  }
1020 
1021  join_list = &(*join_list)->next;
1022  }
1023  }
1024 
1025  return Qnil;
1026 }
1027 
1028 static int
1029 thread_finished(rb_thread_t *th)
1030 {
1031  return th->status == THREAD_KILLED || !UNDEF_P(th->value);
1032 }
1033 
1034 static VALUE
1035 thread_join_sleep(VALUE arg)
1036 {
1037  struct join_arg *p = (struct join_arg *)arg;
1038  rb_thread_t *target_th = p->target, *th = p->waiter->thread;
1039  rb_hrtime_t end = 0, *limit = p->limit;
1040 
1041  if (limit) {
1042  end = rb_hrtime_add(*limit, rb_hrtime_now());
1043  }
1044 
1045  while (!thread_finished(target_th)) {
1046  VALUE scheduler = rb_fiber_scheduler_current();
1047 
1048  if (scheduler != Qnil) {
1049  rb_fiber_scheduler_block(scheduler, target_th->self, p->timeout);
1050  // Check if the target thread is finished after blocking:
1051  if (thread_finished(target_th)) break;
1052  // Otherwise, a timeout occurred:
1053  else return Qfalse;
1054  }
1055  else if (!limit) {
1056  sleep_forever(th, SLEEP_DEADLOCKABLE | SLEEP_ALLOW_SPURIOUS | SLEEP_NO_CHECKINTS);
1057  }
1058  else {
1059  if (hrtime_update_expire(limit, end)) {
1060  RUBY_DEBUG_LOG("timeout target_th:%u", rb_th_serial(target_th));
1061  return Qfalse;
1062  }
1063  th->status = THREAD_STOPPED;
1064  native_sleep(th, limit);
1065  }
1066  RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1067  th->status = THREAD_RUNNABLE;
1068 
1069  RUBY_DEBUG_LOG("interrupted target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1070  }
1071 
1072  return Qtrue;
1073 }
1074 
1075 static VALUE
1076 thread_join(rb_thread_t *target_th, VALUE timeout, rb_hrtime_t *limit)
1077 {
1078  rb_execution_context_t *ec = GET_EC();
1079  rb_thread_t *th = ec->thread_ptr;
1080  rb_fiber_t *fiber = ec->fiber_ptr;
1081 
1082  if (th == target_th) {
1083  rb_raise(rb_eThreadError, "Target thread must not be current thread");
1084  }
1085 
1086  if (th->ractor->threads.main == target_th) {
1087  rb_raise(rb_eThreadError, "Target thread must not be main thread");
1088  }
1089 
1090  RUBY_DEBUG_LOG("target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1091 
1092  if (target_th->status != THREAD_KILLED) {
1093  struct rb_waiting_list waiter;
1094  waiter.next = target_th->join_list;
1095  waiter.thread = th;
1096  waiter.fiber = rb_fiberptr_blocking(fiber) ? NULL : fiber;
1097  target_th->join_list = &waiter;
1098 
1099  struct join_arg arg;
1100  arg.waiter = &waiter;
1101  arg.target = target_th;
1102  arg.timeout = timeout;
1103  arg.limit = limit;
1104 
1105  if (!rb_ensure(thread_join_sleep, (VALUE)&arg, remove_from_join_list, (VALUE)&arg)) {
1106  return Qnil;
1107  }
1108  }
1109 
1110  RUBY_DEBUG_LOG("success target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1111 
1112  if (target_th->ec->errinfo != Qnil) {
1113  VALUE err = target_th->ec->errinfo;
1114 
1115  if (FIXNUM_P(err)) {
1116  switch (err) {
1117  case INT2FIX(TAG_FATAL):
1118  RUBY_DEBUG_LOG("terminated target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1119 
1120  /* OK. killed. */
1121  break;
1122  default:
1123  rb_bug("thread_join: Fixnum (%d) should not reach here.", FIX2INT(err));
1124  }
1125  }
1126  else if (THROW_DATA_P(target_th->ec->errinfo)) {
1127  rb_bug("thread_join: THROW_DATA should not reach here.");
1128  }
1129  else {
1130  /* normal exception */
1131  rb_exc_raise(err);
1132  }
1133  }
1134  return target_th->self;
1135 }
1136 
1137 /*
1138  * call-seq:
1139  * thr.join -> thr
1140  * thr.join(limit) -> thr
1141  *
1142  * The calling thread will suspend execution and run this +thr+.
1143  *
1144  * Does not return until +thr+ exits or until the given +limit+ seconds have
1145  * passed.
1146  *
1147  * If the time limit expires, +nil+ will be returned, otherwise +thr+ is
1148  * returned.
1149  *
1150  * Any threads not joined will be killed when the main program exits.
1151  *
1152  * If +thr+ had previously raised an exception and the ::abort_on_exception or
1153  * $DEBUG flags are not set (so the exception has not yet been processed), it
1154  * will be processed at this time.
1155  *
1156  * a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
1157  * x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
1158  * x.join # Let thread x finish, thread a will be killed on exit.
1159  * #=> "axyz"
1160  *
1161  * The following example illustrates the +limit+ parameter.
1162  *
1163  * y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
1164  * puts "Waiting" until y.join(0.15)
1165  *
1166  * This will produce:
1167  *
1168  * tick...
1169  * Waiting
1170  * tick...
1171  * Waiting
1172  * tick...
1173  * tick...
1174  */
1175 
1176 static VALUE
1177 thread_join_m(int argc, VALUE *argv, VALUE self)
1178 {
1179  VALUE timeout = Qnil;
1180  rb_hrtime_t rel = 0, *limit = 0;
1181 
1182  if (rb_check_arity(argc, 0, 1)) {
1183  timeout = argv[0];
1184  }
1185 
1186  // Convert the timeout eagerly, so it's always converted and deterministic
1187  /*
1188  * This supports INFINITY and negative values, so we can't use
1189  * rb_time_interval right now...
1190  */
1191  if (NIL_P(timeout)) {
1192  /* unlimited */
1193  }
1194  else if (FIXNUM_P(timeout)) {
1195  rel = rb_sec2hrtime(NUM2TIMET(timeout));
1196  limit = &rel;
1197  }
1198  else {
1199  limit = double2hrtime(&rel, rb_num2dbl(timeout));
1200  }
1201 
1202  return thread_join(rb_thread_ptr(self), timeout, limit);
1203 }
1204 
1205 /*
1206  * call-seq:
1207  * thr.value -> obj
1208  *
1209  * Waits for +thr+ to complete, using #join, and returns its value or raises
1210  * the exception which terminated the thread.
1211  *
1212  * a = Thread.new { 2 + 2 }
1213  * a.value #=> 4
1214  *
1215  * b = Thread.new { raise 'something went wrong' }
1216  * b.value #=> RuntimeError: something went wrong
1217  */
1218 
1219 static VALUE
1220 thread_value(VALUE self)
1221 {
1222  rb_thread_t *th = rb_thread_ptr(self);
1223  thread_join(th, Qnil, 0);
1224  if (UNDEF_P(th->value)) {
1225  // If the thread is dead because we forked, th->value is still Qundef.
1226  return Qnil;
1227  }
1228  return th->value;
1229 }
1230 
1231 /*
1232  * Thread Scheduling
1233  */
1234 
1235 static void
1236 getclockofday(struct timespec *ts)
1237 {
1238 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
1239  if (clock_gettime(CLOCK_MONOTONIC, ts) == 0)
1240  return;
1241 #endif
1242  rb_timespec_now(ts);
1243 }
1244 
1245 /*
1246  * Don't inline this, since the library call is already time-consuming
1247  * and we don't want "struct timespec" on the stack too long for GC.
1248  */
1249 NOINLINE(rb_hrtime_t rb_hrtime_now(void));
1250 rb_hrtime_t
1251 rb_hrtime_now(void)
1252 {
1253  struct timespec ts;
1254 
1255  getclockofday(&ts);
1256  return rb_timespec2hrtime(&ts);
1257 }
1258 
1259 /*
1260  * at least gcc 7.2 and 7.3 complains about "rb_hrtime_t end"
1261  * being uninitialized, maybe other versions, too.
1262  */
1263 COMPILER_WARNING_PUSH
1264 #if defined(__GNUC__) && __GNUC__ == 7 && __GNUC_MINOR__ <= 3
1265 COMPILER_WARNING_IGNORED(-Wmaybe-uninitialized)
1266 #endif
1267 #ifndef PRIu64
1268 #define PRIu64 PRI_64_PREFIX "u"
1269 #endif
1270 /*
1271  * @end is the absolute time when @timeout is set to expire.
1272  * Returns true if @end has passed;
1273  * updates @timeout and returns false otherwise.
1274  */
1275 static int
1276 hrtime_update_expire(rb_hrtime_t *timeout, const rb_hrtime_t end)
1277 {
1278  rb_hrtime_t now = rb_hrtime_now();
1279 
1280  if (now > end) return 1;
1281 
1282  RUBY_DEBUG_LOG("%"PRIu64" > %"PRIu64"", (uint64_t)end, (uint64_t)now);
1283 
1284  *timeout = end - now;
1285  return 0;
1286 }
1287 COMPILER_WARNING_POP
1288 
1289 static int
1290 sleep_hrtime(rb_thread_t *th, rb_hrtime_t rel, unsigned int fl)
1291 {
1292  enum rb_thread_status prev_status = th->status;
1293  int woke;
1294  rb_hrtime_t end = rb_hrtime_add(rb_hrtime_now(), rel);
1295 
1296  th->status = THREAD_STOPPED;
1297  RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1298  while (th->status == THREAD_STOPPED) {
1299  native_sleep(th, &rel);
1300  woke = vm_check_ints_blocking(th->ec);
1301  if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
1302  break;
1303  if (hrtime_update_expire(&rel, end))
1304  break;
1305  woke = 1;
1306  }
1307  th->status = prev_status;
1308  return woke;
1309 }
1310 
1311 static int
1312 sleep_hrtime_until(rb_thread_t *th, rb_hrtime_t end, unsigned int fl)
1313 {
1314  enum rb_thread_status prev_status = th->status;
1315  int woke;
1316  rb_hrtime_t rel = rb_hrtime_sub(end, rb_hrtime_now());
1317 
1318  th->status = THREAD_STOPPED;
1319  RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1320  while (th->status == THREAD_STOPPED) {
1321  native_sleep(th, &rel);
1322  woke = vm_check_ints_blocking(th->ec);
1323  if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
1324  break;
1325  if (hrtime_update_expire(&rel, end))
1326  break;
1327  woke = 1;
1328  }
1329  th->status = prev_status;
1330  return woke;
1331 }
1332 
1333 static void
1334 sleep_forever(rb_thread_t *th, unsigned int fl)
1335 {
1336  enum rb_thread_status prev_status = th->status;
1337  enum rb_thread_status status;
1338  int woke;
1339 
1340  status = fl & SLEEP_DEADLOCKABLE ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
1341  th->status = status;
1342 
1343  if (!(fl & SLEEP_NO_CHECKINTS)) RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1344 
1345  while (th->status == status) {
1346  if (fl & SLEEP_DEADLOCKABLE) {
1347  rb_ractor_sleeper_threads_inc(th->ractor);
1348  rb_check_deadlock(th->ractor);
1349  }
1350  {
1351  native_sleep(th, 0);
1352  }
1353  if (fl & SLEEP_DEADLOCKABLE) {
1354  rb_ractor_sleeper_threads_dec(th->ractor);
1355  }
1356  if (fl & SLEEP_ALLOW_SPURIOUS) {
1357  break;
1358  }
1359 
1360  woke = vm_check_ints_blocking(th->ec);
1361 
1362  if (woke && !(fl & SLEEP_SPURIOUS_CHECK)) {
1363  break;
1364  }
1365  }
1366  th->status = prev_status;
1367 }
1368 
1369 void
1370 rb_thread_sleep_forever(void)
1371 {
1372  RUBY_DEBUG_LOG("forever");
1373  sleep_forever(GET_THREAD(), SLEEP_SPURIOUS_CHECK);
1374 }
1375 
1376 void
1377 rb_thread_sleep_deadly(void)
1378 {
1379  RUBY_DEBUG_LOG("deadly");
1380  sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE|SLEEP_SPURIOUS_CHECK);
1381 }
1382 
1383 static void
1384 rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end)
1385 {
1386  VALUE scheduler = rb_fiber_scheduler_current();
1387  if (scheduler != Qnil) {
1388  rb_fiber_scheduler_block(scheduler, blocker, timeout);
1389  }
1390  else {
1391  RUBY_DEBUG_LOG("...");
1392  if (end) {
1393  sleep_hrtime_until(GET_THREAD(), end, SLEEP_SPURIOUS_CHECK);
1394  }
1395  else {
1396  sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE);
1397  }
1398  }
1399 }
1400 
1401 void
1402 rb_thread_wait_for(struct timeval time)
1403 {
1404  rb_thread_t *th = GET_THREAD();
1405 
1406  sleep_hrtime(th, rb_timeval2hrtime(&time), SLEEP_SPURIOUS_CHECK);
1407 }
1408 
1409 /*
1410  * CAUTION: This function causes thread switching.
1411  * rb_thread_check_ints() checks ruby's interrupts.
1412  * Some interrupts need thread switching, invoking handlers,
1413  * and so on.
1414  */
1415 
1416 void
1417 rb_thread_check_ints(void)
1418 {
1419  RUBY_VM_CHECK_INTS_BLOCKING(GET_EC());
1420 }
1421 
1422 /*
1423  * Hidden API for tcl/tk wrapper.
1424  * There is no guarantee that it will be kept.
1425  */
1426 int
1427 rb_thread_check_trap_pending(void)
1428 {
1429  return rb_signal_buff_size() != 0;
1430 }
1431 
1432 /* This function can be called in blocking region. */
1433 int
1434 rb_thread_interrupted(VALUE thval)
1435 {
1436  return (int)RUBY_VM_INTERRUPTED(rb_thread_ptr(thval)->ec);
1437 }
1438 
1439 void
1440 rb_thread_sleep(int sec)
1441 {
1442  rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
1443 }
1444 
1445 static void
1446 rb_thread_schedule_limits(uint32_t limits_us)
1447 {
1448  if (!rb_thread_alone()) {
1449  rb_thread_t *th = GET_THREAD();
1450  RUBY_DEBUG_LOG("us:%u", (unsigned int)limits_us);
1451 
1452  if (th->running_time_us >= limits_us) {
1453  RUBY_DEBUG_LOG("switch %s", "start");
1454 
1455  RB_VM_SAVE_MACHINE_CONTEXT(th);
1456  thread_sched_yield(TH_SCHED(th), th);
1457  rb_ractor_thread_switch(th->ractor, th);
1458 
1459  RUBY_DEBUG_LOG("switch %s", "done");
1460  }
1461  }
1462 }
1463 
1464 void
1465 rb_thread_schedule(void)
1466 {
1467  rb_thread_schedule_limits(0);
1468  RUBY_VM_CHECK_INTS(GET_EC());
1469 }
1470 
1471 /* blocking region */
1472 
1473 static inline int
1474 blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
1475  rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
1476 {
1477 #ifdef RUBY_ASSERT_CRITICAL_SECTION
1478  VM_ASSERT(ruby_assert_critical_section_entered == 0);
1479 #endif
1480  VM_ASSERT(th == GET_THREAD());
1481 
1482  region->prev_status = th->status;
1483  if (unblock_function_set(th, ubf, arg, fail_if_interrupted)) {
1484  th->blocking_region_buffer = region;
1485  th->status = THREAD_STOPPED;
1486  rb_ractor_blocking_threads_inc(th->ractor, __FILE__, __LINE__);
1487 
1488  RUBY_DEBUG_LOG("thread_id:%p", (void *)th->nt->thread_id);
1489  return TRUE;
1490  }
1491  else {
1492  return FALSE;
1493  }
1494 }
1495 
1496 static inline void
1497 blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
1498 {
1499  /* entry to ubf_list still permitted at this point, make it impossible: */
1500  unblock_function_clear(th);
1501  /* entry to ubf_list impossible at this point, so unregister is safe: */
1502  unregister_ubf_list(th);
1503 
1504  thread_sched_to_running(TH_SCHED(th), th);
1505  rb_ractor_thread_switch(th->ractor, th);
1506 
1507  th->blocking_region_buffer = 0;
1508  rb_ractor_blocking_threads_dec(th->ractor, __FILE__, __LINE__);
1509  if (th->status == THREAD_STOPPED) {
1510  th->status = region->prev_status;
1511  }
1512 
1513  RUBY_DEBUG_LOG("end");
1514 
1515 #ifndef _WIN32
1516  // GET_THREAD() clears WSAGetLastError()
1517  VM_ASSERT(th == GET_THREAD());
1518 #endif
1519 }
1520 
1521 void *
1522 rb_nogvl(void *(*func)(void *), void *data1,
1523  rb_unblock_function_t *ubf, void *data2,
1524  int flags)
1525 {
1526  VALUE scheduler = rb_fiber_scheduler_current();
1527  if (scheduler != Qnil) {
1528  struct rb_fiber_scheduler_blocking_region_state state;
1529 
1530  VALUE result = rb_fiber_scheduler_blocking_region(scheduler, func, data1, ubf, data2, flags, &state);
1531 
1532  if (!UNDEF_P(result)) {
1533  rb_errno_set(state.saved_errno);
1534  return state.result;
1535  }
1536  }
1537 
1538  void *val = 0;
1539  rb_execution_context_t *ec = GET_EC();
1540  rb_thread_t *th = rb_ec_thread_ptr(ec);
1541  rb_vm_t *vm = rb_ec_vm_ptr(ec);
1542  bool is_main_thread = vm->ractor.main_thread == th;
1543  int saved_errno = 0;
1544  VALUE ubf_th = Qfalse;
1545 
1546  if ((ubf == RUBY_UBF_IO) || (ubf == RUBY_UBF_PROCESS)) {
1547  ubf = ubf_select;
1548  data2 = th;
1549  }
1550  else if (ubf && rb_ractor_living_thread_num(th->ractor) == 1 && is_main_thread) {
1551  if (flags & RB_NOGVL_UBF_ASYNC_SAFE) {
1552  vm->ubf_async_safe = 1;
1553  }
1554  }
1555 
1556  rb_vm_t *volatile saved_vm = vm;
1557  BLOCKING_REGION(th, {
1558  val = func(data1);
1559  saved_errno = rb_errno();
1560  }, ubf, data2, flags & RB_NOGVL_INTR_FAIL);
1561  vm = saved_vm;
1562 
1563  if (is_main_thread) vm->ubf_async_safe = 0;
1564 
1565  if ((flags & RB_NOGVL_INTR_FAIL) == 0) {
1566  RUBY_VM_CHECK_INTS_BLOCKING(ec);
1567  }
1568 
1569  if (ubf_th != Qfalse) {
1570  thread_value(rb_thread_kill(ubf_th));
1571  }
1572 
1573  rb_errno_set(saved_errno);
1574 
1575  return val;
1576 }
1577 
1578 /*
1579  * rb_thread_call_without_gvl - permit concurrent/parallel execution.
1580  * rb_thread_call_without_gvl2 - permit concurrent/parallel execution
1581  * without interrupt process.
1582  *
1583  * rb_thread_call_without_gvl() does:
1584  * (1) Check interrupts.
1585  * (2) release GVL.
1586  * Other Ruby threads may run in parallel.
1587  * (3) call func with data1
1588  * (4) acquire GVL.
1589  * Other Ruby threads can not run in parallel any more.
1590  * (5) Check interrupts.
1591  *
1592  * rb_thread_call_without_gvl2() does:
1593  * (1) Check interrupt and return if interrupted.
1594  * (2) release GVL.
1595  * (3) call func with data1 and a pointer to the flags.
1596  * (4) acquire GVL.
1597  *
1598  * If another thread interrupts this thread (Thread#kill, signal delivery,
1599  * VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
1600  * "un-blocking function"). `ubf()' should interrupt `func()' execution by
1601  * toggling a cancellation flag, canceling the invocation of a call inside
1602  * `func()' or similar. Note that `ubf()' may not be called with the GVL.
1603  *
1604  * There are built-in ubfs and you can specify these ubfs:
1605  *
1606  * * RUBY_UBF_IO: ubf for IO operation
1607  * * RUBY_UBF_PROCESS: ubf for process operation
1608  *
1609  * However, we cannot guarantee that our built-in ubfs interrupt your `func()'
1610  * correctly. Be careful when using rb_thread_call_without_gvl(). If you don't
1611  * provide a proper ubf(), your program will not stop for Control+C or other
1612  * shutdown events.
1613  *
1614  * "Check interrupts" on above list means checking asynchronous
1615  * interrupt events (such as Thread#kill, signal delivery, VM-shutdown
1616  * request, and so on) and calling corresponding procedures
1617  * (such as `trap' for signals, raise an exception for Thread#raise).
1618  * If `func()' finished and received interrupts, you may skip interrupt
1619  * checking. For example, assume the following func() it reads data from file.
1620  *
1621  * read_func(...) {
1622  * // (a) before read
1623  * read(buffer); // (b) reading
1624  * // (c) after read
1625  * }
1626  *
1627  * If an interrupt occurs at (a) or (b), then `ubf()' cancels this
1628  * `read_func()' and interrupts are checked. However, if an interrupt occurs
1629  * at (c), after the *read* operation has completed, checking interrupts is harmful
1630  * because it causes an irrevocable side effect: the read data will vanish. To
1631  * avoid such a problem, `read_func()' should be used with
1632  * `rb_thread_call_without_gvl2()'.
1633  *
1634  * If `rb_thread_call_without_gvl2()' detects an interrupt, it returns
1635  * immediately. This function does not indicate when the execution was interrupted:
1636  * there are 4 possible timings, (a), (b), (c) and before calling
1637  * read_func(). You need to record the progress of read_func() and check
1638  * it after `rb_thread_call_without_gvl2()'. You may also need to call
1639  * `rb_thread_check_ints()' correctly, or your program cannot handle
1640  * events such as `trap' and so on.
1641  *
1642  * NOTE: You cannot use most of the Ruby C API or touch Ruby
1643  * objects in `func()' and `ubf()', including raising an
1644  * exception, because the current thread doesn't hold the GVL
1645  * (it causes synchronization problems). If you need to
1646  * call ruby functions, either use rb_thread_call_with_gvl()
1647  * or read the source code of the C APIs and confirm their safety
1648  * yourself.
1649  *
1650  * NOTE: In short, this API is difficult to use safely. I recommend
1651  * other approaches if you have them. We lack experience using this API.
1652  * Please report any problems related to it.
1653  *
1654  * NOTE: Releasing and re-acquiring the GVL may be expensive operations
1655  * for a short-running `func()'. Be sure to benchmark, and use this
1656  * mechanism only when `func()' consumes enough time.
1657  *
1658  * Safe C API:
1659  * * rb_thread_interrupted() - check interrupt flag
1660  * * ruby_xmalloc(), ruby_xrealloc(), ruby_xfree() -
1661  * they will work without GVL, and may acquire GVL when GC is needed.
1662  */
1663 void *
1664 rb_thread_call_without_gvl2(void *(*func)(void *), void *data1,
1665  rb_unblock_function_t *ubf, void *data2)
1666 {
1667  return rb_nogvl(func, data1, ubf, data2, RB_NOGVL_INTR_FAIL);
1668 }
1669 
1670 void *
1671 rb_thread_call_without_gvl(void *(*func)(void *data), void *data1,
1672  rb_unblock_function_t *ubf, void *data2)
1673 {
1674  return rb_nogvl(func, data1, ubf, data2, 0);
1675 }
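
/*
 * A minimal usage sketch mirroring the read_func() example above
 * (editor's illustration, not part of thread.c; `blocking_read',
 * `cancel_read' and `struct read_args' are hypothetical names):
 *
 *   struct read_args { int fd; void *buf; size_t len; ssize_t result; };
 *
 *   static void *
 *   blocking_read(void *p)           // runs without the GVL
 *   {
 *       struct read_args *args = p;
 *       args->result = read(args->fd, args->buf, args->len);
 *       return NULL;
 *   }
 *
 *   static void
 *   cancel_read(void *p)             // ubf: may be called without the GVL
 *   {
 *       // wake up read(2), e.g. by writing to a self-pipe that fd selects on
 *   }
 *
 *   struct read_args args = { fd, buf, len, 0 };
 *   rb_thread_call_without_gvl(blocking_read, &args, cancel_read, &args);
 */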
1676 
1677 static int
1678 waitfd_to_waiting_flag(int wfd_event)
1679 {
1680  return wfd_event << 1;
1681 }
1682 
1683 static void
1684 thread_io_setup_wfd(rb_thread_t *th, int fd, struct waiting_fd *wfd)
1685 {
1686  wfd->fd = fd;
1687  wfd->th = th;
1688  wfd->busy = NULL;
1689 
1690  RB_VM_LOCK_ENTER();
1691  {
1692  ccan_list_add(&th->vm->waiting_fds, &wfd->wfd_node);
1693  }
1694  RB_VM_LOCK_LEAVE();
1695 }
1696 
1697 static void
1698 thread_io_wake_pending_closer(struct waiting_fd *wfd)
1699 {
1700  bool has_waiter = wfd->busy && RB_TEST(wfd->busy->wakeup_mutex);
1701  if (has_waiter) {
1702  rb_mutex_lock(wfd->busy->wakeup_mutex);
1703  }
1704 
1705  /* Needs to be protected with RB_VM_LOCK because we don't know if
1706  wfd is on the global list of pending FD ops or if it's on a
1707  struct rb_io_close_wait_list close-waiter. */
1708  RB_VM_LOCK_ENTER();
1709  ccan_list_del(&wfd->wfd_node);
1710  RB_VM_LOCK_LEAVE();
1711 
1712  if (has_waiter) {
1713  rb_thread_t *th = rb_thread_ptr(wfd->busy->closing_thread);
1714  if (th->scheduler != Qnil) {
1715  rb_fiber_scheduler_unblock(th->scheduler, wfd->busy->closing_thread, wfd->busy->closing_fiber);
1716  } else {
1717  rb_thread_wakeup(wfd->busy->closing_thread);
1718  }
1719  rb_mutex_unlock(wfd->busy->wakeup_mutex);
1720  }
1721 }
1722 
1723 static bool
1724 thread_io_mn_schedulable(rb_thread_t *th, int events, const struct timeval *timeout)
1725 {
1726 #if defined(USE_MN_THREADS) && USE_MN_THREADS
1727  return !th_has_dedicated_nt(th) && (events || timeout) && th->blocking;
1728 #else
1729  return false;
1730 #endif
1731 }
1732 
1733 // true if need retry
1734 static bool
1735 thread_io_wait_events(rb_thread_t *th, int fd, int events, const struct timeval *timeout)
1736 {
1737 #if defined(USE_MN_THREADS) && USE_MN_THREADS
1738  if (thread_io_mn_schedulable(th, events, timeout)) {
1739  rb_hrtime_t rel, *prel;
1740 
1741  if (timeout) {
1742  rel = rb_timeval2hrtime(timeout);
1743  prel = &rel;
1744  }
1745  else {
1746  prel = NULL;
1747  }
1748 
1749  VM_ASSERT(prel || (events & (RB_WAITFD_IN | RB_WAITFD_OUT)));
1750 
1751  if (thread_sched_wait_events(TH_SCHED(th), th, fd, waitfd_to_waiting_flag(events), prel)) {
1752  // timeout
1753  return false;
1754  }
1755  else {
1756  return true;
1757  }
1758  }
1759 #endif // defined(USE_MN_THREADS) && USE_MN_THREADS
1760  return false;
1761 }
1762 
1763 // assume read/write
1764 static bool
1765 blocking_call_retryable_p(int r, int eno)
1766 {
1767  if (r != -1) return false;
1768 
1769  switch (eno) {
1770  case EAGAIN:
1771 #if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
1772  case EWOULDBLOCK:
1773 #endif
1774  return true;
1775  default:
1776  return false;
1777  }
1778 }
1779 
1780 bool
1781 rb_thread_mn_schedulable(VALUE thval)
1782 {
1783  rb_thread_t *th = rb_thread_ptr(thval);
1784  return th->mn_schedulable;
1785 }
1786 
1787 VALUE
1788 rb_thread_io_blocking_call(rb_blocking_function_t *func, void *data1, int fd, int events)
1789 {
1790  rb_execution_context_t *volatile ec = GET_EC();
1791  rb_thread_t *volatile th = rb_ec_thread_ptr(ec);
1792 
1793  RUBY_DEBUG_LOG("th:%u fd:%d ev:%d", rb_th_serial(th), fd, events);
1794 
1795  struct waiting_fd waiting_fd;
1796  volatile VALUE val = Qundef; /* shouldn't be used */
1797  volatile int saved_errno = 0;
1798  enum ruby_tag_type state;
1799  bool prev_mn_schedulable = th->mn_schedulable;
1800  th->mn_schedulable = thread_io_mn_schedulable(th, events, NULL);
1801 
1802  // `errno` is only valid when there is an actual error - but we can't
1803  // extract that from the return value of `func` alone, so we clear any
1804  // prior `errno` value here so that we can later check if it was set by
1805  // `func` or not (as opposed to some previously set value).
1806  errno = 0;
1807 
1808  thread_io_setup_wfd(th, fd, &waiting_fd);
1809  {
1810  EC_PUSH_TAG(ec);
1811  if ((state = EC_EXEC_TAG()) == TAG_NONE) {
1812  volatile enum ruby_tag_type saved_state = state; /* for BLOCKING_REGION */
1813  retry:
1814  BLOCKING_REGION(waiting_fd.th, {
1815  val = func(data1);
1816  saved_errno = errno;
1817  }, ubf_select, waiting_fd.th, FALSE);
1818 
1819  th = rb_ec_thread_ptr(ec);
1820  if (events &&
1821  blocking_call_retryable_p((int)val, saved_errno) &&
1822  thread_io_wait_events(th, fd, events, NULL)) {
1823  RUBY_VM_CHECK_INTS_BLOCKING(ec);
1824  goto retry;
1825  }
1826  state = saved_state;
1827  }
1828  EC_POP_TAG();
1829 
1830  th = rb_ec_thread_ptr(ec);
1831  th->mn_schedulable = prev_mn_schedulable;
1832  }
1833  /*
1834  * must be deleted before jump
1835  * this will delete either from waiting_fds or on-stack struct rb_io_close_wait_list
1836  */
1837  thread_io_wake_pending_closer(&waiting_fd);
1838 
1839  if (state) {
1840  EC_JUMP_TAG(ec, state);
1841  }
1842  /* TODO: check func() */
1843  RUBY_VM_CHECK_INTS_BLOCKING(ec);
1844 
1845  // If the error was a timeout, we raise a specific exception for that:
1846  if (saved_errno == ETIMEDOUT) {
1847  rb_raise(rb_eIOTimeoutError, "Blocking operation timed out!");
1848  }
1849 
1850  errno = saved_errno;
1851 
1852  return val;
1853 }
1854 
1855 VALUE
1856 rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
1857 {
1858  return rb_thread_io_blocking_call(func, data1, fd, 0);
1859 }
1860 
1861 /*
1862  * rb_thread_call_with_gvl - re-enter the Ruby world after GVL release.
1863  *
1864  * After releasing the GVL using
1865  * rb_thread_call_without_gvl() you cannot access Ruby values or invoke
1866  * methods. If you need to access Ruby, you must use this function,
1867  * rb_thread_call_with_gvl().
1868  *
1869  * This function rb_thread_call_with_gvl() does:
1870  * (1) acquire GVL.
1871  * (2) call passed function `func'.
1872  * (3) release GVL.
1873  * (4) return a value which is returned at (2).
1874  *
1875  * NOTE: You should not return a Ruby object at (2), because such an object
1876  * will not be marked.
1877  *
1878  * NOTE: If an exception is raised in `func', this function DOES NOT
1879  * protect (catch) the exception. If you have any resources
1880  * which should be freed before throwing an exception, use
1881  * rb_protect() in `func' and return a value which indicates that an
1882  * exception was raised.
1883  *
1884  * NOTE: This function should not be called by a thread which was not
1885  * created as a Ruby thread (i.e. created by Thread.new or the like). In other
1886  * words, this function *DOES NOT* associate or convert a NON-Ruby
1887  * thread to a Ruby thread.
1888  */
1889 void *
1890 rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
1891 {
1892  rb_thread_t *th = ruby_thread_from_native();
1893  struct rb_blocking_region_buffer *brb;
1894  struct rb_unblock_callback prev_unblock;
1895  void *r;
1896 
1897  if (th == 0) {
1898  /* Error has occurred, but we can't use rb_bug()
1899  * because this thread is not Ruby's thread.
1900  * What should we do?
1901  */
1902  bp();
1903  fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
1904  exit(EXIT_FAILURE);
1905  }
1906 
1907  brb = (struct rb_blocking_region_buffer *)th->blocking_region_buffer;
1908  prev_unblock = th->unblock;
1909 
1910  if (brb == 0) {
1911  rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
1912  }
1913 
1914  blocking_region_end(th, brb);
1915  /* enter to Ruby world: You can access Ruby values, methods and so on. */
1916  r = (*func)(data1);
1917  /* leave from Ruby world: You can not access Ruby values, etc. */
1918  int released = blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg, FALSE);
1919  RUBY_ASSERT_ALWAYS(released);
1920  RB_VM_SAVE_MACHINE_CONTEXT(th);
1921  thread_sched_to_waiting(TH_SCHED(th), th);
1922  return r;
1923 }
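/*
 * A minimal sketch of re-entering Ruby from a GVL-released region;
 * `do_heavy_work', `nogvl_work' and `callable' are hypothetical names:
 *
 *   static void *
 *   with_gvl_callback(void *ptr)   // runs with the GVL re-acquired
 *   {
 *       rb_funcall((VALUE)ptr, rb_intern("call"), 0);
 *       return NULL;
 *   }
 *
 *   static void *
 *   nogvl_work(void *ptr)          // runs without the GVL; no Ruby API here
 *   {
 *       do_heavy_work();
 *       rb_thread_call_with_gvl(with_gvl_callback, ptr);
 *       return NULL;
 *   }
 *
 *   // from ordinary (GVL-holding) extension code:
 *   rb_thread_call_without_gvl(nogvl_work, (void *)callable, RUBY_UBF_IO, 0);
 */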
1924 
1925 /*
1926  * ruby_thread_has_gvl_p - check if current native thread has GVL.
1927  *
1928  ***
1929  *** This API is EXPERIMENTAL!
1930  *** We do not guarantee that this API remains in ruby 1.9.2 or later.
1931  ***
1932  */
1933 
1934 int
1935 ruby_thread_has_gvl_p(void)
1936 {
1937  rb_thread_t *th = ruby_thread_from_native();
1938 
1939  if (th && th->blocking_region_buffer == 0) {
1940  return 1;
1941  }
1942  else {
1943  return 0;
1944  }
1945 }
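/*
 * A minimal sketch: ruby_thread_has_gvl_p() can serve as a debugging guard
 * in extension code that may run on either side of a blocking region:
 *
 *   if (!ruby_thread_has_gvl_p()) {
 *       // No GVL here: touching VALUEs or calling Ruby APIs is unsafe;
 *       // do plain C work only, or re-enter with rb_thread_call_with_gvl().
 *   }
 */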
1946 
1947 /*
1948  * call-seq:
1949  * Thread.pass -> nil
1950  *
1951  * Give the thread scheduler a hint to pass execution to another thread.
1952  * Whether the running thread actually switches depends on the OS and processor.
1953  */
1954 
1955 static VALUE
1956 thread_s_pass(VALUE klass)
1957 {
1958  rb_thread_schedule();
1959  return Qnil;
1960 }
1961 
1962 /*****************************************************/
1963 
1964 /*
1965  * rb_threadptr_pending_interrupt_* - manage asynchronous error queue
1966  *
1967  * Async events such as an exception thrown by Thread#raise,
1968  * Thread#kill and thread termination (after main thread termination)
1969  * will be queued to th->pending_interrupt_queue.
1970  * - clear: clear the queue.
1971  * - enque: enqueue an error object into the queue.
1972  * - deque: dequeue an error object from the queue.
1973  * - active_p: return 1 if the queue should be checked.
1974  *
1975  * All rb_threadptr_pending_interrupt_* functions are, of course, called
1976  * by a thread that has acquired the GVL.
1977  * Note that all "rb_" prefixed APIs require the GVL.
1978  */
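/*
 * A minimal sketch of the typical delivery pattern (used by Thread#raise;
 * see rb_threadptr_raise below): enqueue the error object, then poke the
 * target thread's interrupt flag so it notices the queue:
 *
 *   rb_threadptr_pending_interrupt_enque(target_th, exc);
 *   rb_threadptr_interrupt(target_th);
 */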
1979 
1980 void
1981 rb_threadptr_pending_interrupt_clear(rb_thread_t *th)
1982 {
1983  rb_ary_clear(th->pending_interrupt_queue);
1984 }
1985 
1986 void
1987 rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
1988 {
1989  rb_ary_push(th->pending_interrupt_queue, v);
1990  th->pending_interrupt_queue_checked = 0;
1991 }
1992 
1993 static void
1994 threadptr_check_pending_interrupt_queue(rb_thread_t *th)
1995 {
1996  if (!th->pending_interrupt_queue) {
1997  rb_raise(rb_eThreadError, "uninitialized thread");
1998  }
1999 }
2000 
2001 enum handle_interrupt_timing {
2002  INTERRUPT_NONE,
2003  INTERRUPT_IMMEDIATE,
2004  INTERRUPT_ON_BLOCKING,
2005  INTERRUPT_NEVER
2006 };
2007 
2008 static enum handle_interrupt_timing
2009 rb_threadptr_pending_interrupt_from_symbol(rb_thread_t *th, VALUE sym)
2010 {
2011  if (sym == sym_immediate) {
2012  return INTERRUPT_IMMEDIATE;
2013  }
2014  else if (sym == sym_on_blocking) {
2015  return INTERRUPT_ON_BLOCKING;
2016  }
2017  else if (sym == sym_never) {
2018  return INTERRUPT_NEVER;
2019  }
2020  else {
2021  rb_raise(rb_eThreadError, "unknown mask signature");
2022  }
2023 }
2024 
2025 static enum handle_interrupt_timing
2026 rb_threadptr_pending_interrupt_check_mask(rb_thread_t *th, VALUE err)
2027 {
2028  VALUE mask;
2029  long mask_stack_len = RARRAY_LEN(th->pending_interrupt_mask_stack);
2030  const VALUE *mask_stack = RARRAY_CONST_PTR(th->pending_interrupt_mask_stack);
2031  VALUE mod;
2032  long i;
2033 
2034  for (i=0; i<mask_stack_len; i++) {
2035  mask = mask_stack[mask_stack_len-(i+1)];
2036 
2037  if (SYMBOL_P(mask)) {
2038  /* do not match RUBY_FATAL_THREAD_KILLED etc */
2039  if (err != rb_cInteger) {
2040  return rb_threadptr_pending_interrupt_from_symbol(th, mask);
2041  }
2042  else {
2043  continue;
2044  }
2045  }
2046 
2047  for (mod = err; mod; mod = RCLASS_SUPER(mod)) {
2048  VALUE klass = mod;
2049  VALUE sym;
2050 
2051  if (BUILTIN_TYPE(mod) == T_ICLASS) {
2052  klass = RBASIC(mod)->klass;
2053  }
2054  else if (mod != RCLASS_ORIGIN(mod)) {
2055  continue;
2056  }
2057 
2058  if ((sym = rb_hash_aref(mask, klass)) != Qnil) {
2059  return rb_threadptr_pending_interrupt_from_symbol(th, sym);
2060  }
2061  }
2062  /* try the next mask */
2063  }
2064  return INTERRUPT_NONE;
2065 }
2066 
2067 static int
2068 rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th)
2069 {
2070  return RARRAY_LEN(th->pending_interrupt_queue) == 0;
2071 }
2072 
2073 static int
2074 rb_threadptr_pending_interrupt_include_p(rb_thread_t *th, VALUE err)
2075 {
2076  int i;
2077  for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
2078  VALUE e = RARRAY_AREF(th->pending_interrupt_queue, i);
2079  if (rb_obj_is_kind_of(e, err)) {
2080  return TRUE;
2081  }
2082  }
2083  return FALSE;
2084 }
2085 
2086 static VALUE
2087 rb_threadptr_pending_interrupt_deque(rb_thread_t *th, enum handle_interrupt_timing timing)
2088 {
2089 #if 1 /* 1 to enable Thread#handle_interrupt, 0 to ignore it */
2090  int i;
2091 
2092  for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
2093  VALUE err = RARRAY_AREF(th->pending_interrupt_queue, i);
2094 
2095  enum handle_interrupt_timing mask_timing = rb_threadptr_pending_interrupt_check_mask(th, CLASS_OF(err));
2096 
2097  switch (mask_timing) {
2098  case INTERRUPT_ON_BLOCKING:
2099  if (timing != INTERRUPT_ON_BLOCKING) {
2100  break;
2101  }
2102  /* fall through */
2103  case INTERRUPT_NONE: /* default: IMMEDIATE */
2104  case INTERRUPT_IMMEDIATE:
2105  rb_ary_delete_at(th->pending_interrupt_queue, i);
2106  return err;
2107  case INTERRUPT_NEVER:
2108  break;
2109  }
2110  }
2111 
2112  th->pending_interrupt_queue_checked = 1;
2113  return Qundef;
2114 #else
2115  VALUE err = rb_ary_shift(th->pending_interrupt_queue);
2116  if (rb_threadptr_pending_interrupt_empty_p(th)) {
2117  th->pending_interrupt_queue_checked = 1;
2118  }
2119  return err;
2120 #endif
2121 }
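/*
 * Illustrative note: with a mask such as { RuntimeError => :on_blocking },
 * a queued RuntimeError is returned by the deque above only when the caller
 * passes INTERRUPT_ON_BLOCKING (i.e. from a blocking region); in ordinary
 * code the entry simply stays queued until the mask allows delivery.
 */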
2122 
2123 static int
2124 threadptr_pending_interrupt_active_p(rb_thread_t *th)
2125 {
2126  /*
2127  * For optimization, we don't check async errinfo queue
2128  * if the queue and the thread interrupt mask were not changed
2129  * since the last check.
2130  */
2131  if (th->pending_interrupt_queue_checked) {
2132  return 0;
2133  }
2134 
2135  if (rb_threadptr_pending_interrupt_empty_p(th)) {
2136  return 0;
2137  }
2138 
2139  return 1;
2140 }
2141 
2142 static int
2143 handle_interrupt_arg_check_i(VALUE key, VALUE val, VALUE args)
2144 {
2145  VALUE *maskp = (VALUE *)args;
2146 
2147  if (val != sym_immediate && val != sym_on_blocking && val != sym_never) {
2148  rb_raise(rb_eArgError, "unknown mask signature");
2149  }
2150 
2151  if (key == rb_eException && (UNDEF_P(*maskp) || NIL_P(*maskp))) {
2152  *maskp = val;
2153  return ST_CONTINUE;
2154  }
2155 
2156  if (RTEST(*maskp)) {
2157  if (!RB_TYPE_P(*maskp, T_HASH)) {
2158  VALUE prev = *maskp;
2159  *maskp = rb_ident_hash_new();
2160  if (SYMBOL_P(prev)) {
2161  rb_hash_aset(*maskp, rb_eException, prev);
2162  }
2163  }
2164  rb_hash_aset(*maskp, key, val);
2165  }
2166  else {
2167  *maskp = Qfalse;
2168  }
2169 
2170  return ST_CONTINUE;
2171 }
2172 
2173 /*
2174  * call-seq:
2175  * Thread.handle_interrupt(hash) { ... } -> result of the block
2176  *
2177  * Changes asynchronous interrupt timing.
2178  *
2179  * _interrupt_ means an asynchronous event and its corresponding procedure,
2180  * triggered by Thread#raise, Thread#kill, signal trap (not supported yet)
2181  * and main thread termination (if the main thread terminates, then all
2182  * other threads will be killed).
2183  *
2184  * The given +hash+ has pairs like <code>ExceptionClass =>
2185  * :TimingSymbol</code>, where ExceptionClass is the interrupt handled by
2186  * the given block. The TimingSymbol can be one of the following symbols:
2187  *
2188  * [+:immediate+] Invoke interrupts immediately.
2189  * [+:on_blocking+] Invoke interrupts while _BlockingOperation_.
2190  * [+:never+] Never invoke interrupts.
2191  *
2192  * _BlockingOperation_ means that the operation will block the calling thread,
2193  * such as read and write. In the CRuby implementation, _BlockingOperation_ is any
2194  * operation executed without the GVL.
2195  *
2196  * Masked asynchronous interrupts are delayed until they are enabled.
2197  * This method is similar to sigprocmask(3).
2198  *
2199  * === NOTE
2200  *
2201  * Asynchronous interrupts are difficult to use.
2202  *
2203  * If you need to communicate between threads, please consider using another mechanism such as Queue.
2204  *
2205  * Or use them only with a deep understanding of this method.
2206  *
2207  * === Usage
2208  *
2209  * In this example, we can guard from Thread#raise exceptions.
2210  *
2211  * Using the +:never+ TimingSymbol, the RuntimeError exception will always be
2212  * ignored in the first block of the new thread. In the second
2213  * ::handle_interrupt block we can purposefully handle RuntimeError exceptions.
2214  *
2215  * th = Thread.new do
2216  * Thread.handle_interrupt(RuntimeError => :never) {
2217  * begin
2218  * # You can write resource allocation code safely.
2219  * Thread.handle_interrupt(RuntimeError => :immediate) {
2220  * # ...
2221  * }
2222  * ensure
2223  * # You can write resource deallocation code safely.
2224  * end
2225  * }
2226  * end
2227  * Thread.pass
2228  * # ...
2229  * th.raise "stop"
2230  *
2231  * While we are ignoring the RuntimeError exception, it's safe to write our
2232  * resource allocation code. Then, the ensure block is where we can safely
2233  * deallocate our resources.
2234  *
2235  * ==== Stack control settings
2236  *
2237  * It's possible to stack multiple levels of ::handle_interrupt blocks in order
2238  * to control more than one ExceptionClass and TimingSymbol at a time.
2239  *
2240  * Thread.handle_interrupt(FooError => :never) {
2241  * Thread.handle_interrupt(BarError => :never) {
2242  * # FooError and BarError are prohibited.
2243  * }
2244  * }
2245  *
2246  * ==== Inheritance with ExceptionClass
2247  *
2248  * All exceptions inherited from the ExceptionClass parameter will be considered.
2249  *
2250  * Thread.handle_interrupt(Exception => :never) {
2251  * # all exceptions inherited from Exception are prohibited.
2252  * }
2253  *
2254  * For handling all interrupts, use +Object+ and not +Exception+
2255  * as the ExceptionClass, as kill/terminate interrupts are not handled by +Exception+.
2256  */
2257 static VALUE
2258 rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg)
2259 {
2260  VALUE mask = Qundef;
2261  rb_execution_context_t * volatile ec = GET_EC();
2262  rb_thread_t * volatile th = rb_ec_thread_ptr(ec);
2263  volatile VALUE r = Qnil;
2264  enum ruby_tag_type state;
2265 
2266  if (!rb_block_given_p()) {
2267  rb_raise(rb_eArgError, "block is needed.");
2268  }
2269 
2270  mask_arg = rb_to_hash_type(mask_arg);
2271 
2272  if (OBJ_FROZEN(mask_arg) && rb_hash_compare_by_id_p(mask_arg)) {
2273  mask = Qnil;
2274  }
2275 
2276  rb_hash_foreach(mask_arg, handle_interrupt_arg_check_i, (VALUE)&mask);
2277 
2278  if (UNDEF_P(mask)) {
2279  return rb_yield(Qnil);
2280  }
2281 
2282  if (!RTEST(mask)) {
2283  mask = mask_arg;
2284  }
2285  else if (RB_TYPE_P(mask, T_HASH)) {
2286  OBJ_FREEZE(mask);
2287  }
2288 
2289  rb_ary_push(th->pending_interrupt_mask_stack, mask);
2290  if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2291  th->pending_interrupt_queue_checked = 0;
2292  RUBY_VM_SET_INTERRUPT(th->ec);
2293  }
2294 
2295  EC_PUSH_TAG(th->ec);
2296  if ((state = EC_EXEC_TAG()) == TAG_NONE) {
2297  r = rb_yield(Qnil);
2298  }
2299  EC_POP_TAG();
2300 
2301  rb_ary_pop(th->pending_interrupt_mask_stack);
2302  if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2303  th->pending_interrupt_queue_checked = 0;
2304  RUBY_VM_SET_INTERRUPT(th->ec);
2305  }
2306 
2307  RUBY_VM_CHECK_INTS(th->ec);
2308 
2309  if (state) {
2310  EC_JUMP_TAG(th->ec, state);
2311  }
2312 
2313  return r;
2314 }
2315 
2316 /*
2317  * call-seq:
2318  * target_thread.pending_interrupt?(error = nil) -> true/false
2319  *
2320  * Returns whether or not the asynchronous queue is empty for the target thread.
2321  *
2322  * If +error+ is given, then check only for +error+ type deferred events.
2323  *
2324  * See ::pending_interrupt? for more information.
2325  */
2326 static VALUE
2327 rb_thread_pending_interrupt_p(int argc, VALUE *argv, VALUE target_thread)
2328 {
2329  rb_thread_t *target_th = rb_thread_ptr(target_thread);
2330 
2331  if (!target_th->pending_interrupt_queue) {
2332  return Qfalse;
2333  }
2334  if (rb_threadptr_pending_interrupt_empty_p(target_th)) {
2335  return Qfalse;
2336  }
2337  if (rb_check_arity(argc, 0, 1)) {
2338  VALUE err = argv[0];
2339  if (!rb_obj_is_kind_of(err, rb_cModule)) {
2340  rb_raise(rb_eTypeError, "class or module required for rescue clause");
2341  }
2342  return RBOOL(rb_threadptr_pending_interrupt_include_p(target_th, err));
2343  }
2344  else {
2345  return Qtrue;
2346  }
2347 }
2348 
2349 /*
2350  * call-seq:
2351  * Thread.pending_interrupt?(error = nil) -> true/false
2352  *
2353  * Returns whether or not the asynchronous queue is empty.
2354  *
2355  * Since Thread::handle_interrupt can be used to defer asynchronous events,
2356  * this method can be used to determine if there are any deferred events.
2357  *
2358  * If this method returns true, then you may finish the +:never+ block.
2359  *
2360  * For example, the following method processes deferred asynchronous events
2361  * immediately.
2362  *
2363  * def Thread.kick_interrupt_immediately
2364  * Thread.handle_interrupt(Object => :immediate) {
2365  * Thread.pass
2366  * }
2367  * end
2368  *
2369  * If +error+ is given, then check only for +error+ type deferred events.
2370  *
2371  * === Usage
2372  *
2373  * th = Thread.new{
2374  * Thread.handle_interrupt(RuntimeError => :on_blocking){
2375  * while true
2376  * ...
2377  * # reach safe point to invoke interrupt
2378  * if Thread.pending_interrupt?
2379  * Thread.handle_interrupt(Object => :immediate){}
2380  * end
2381  * ...
2382  * end
2383  * }
2384  * }
2385  * ...
2386  * th.raise # stop thread
2387  *
2388  * This example can also be written as the following, which you should use to
2389  * avoid asynchronous interrupts.
2390  *
2391  * flag = true
2392  * th = Thread.new{
2393  * Thread.handle_interrupt(RuntimeError => :on_blocking){
2394  * while true
2395  * ...
2396  * # reach safe point to invoke interrupt
2397  * break if flag == false
2398  * ...
2399  * end
2400  * }
2401  * }
2402  * ...
2403  * flag = false # stop thread
2404  */
2405 
2406 static VALUE
2407 rb_thread_s_pending_interrupt_p(int argc, VALUE *argv, VALUE self)
2408 {
2409  return rb_thread_pending_interrupt_p(argc, argv, GET_THREAD()->self);
2410 }
2411 
2412 NORETURN(static void rb_threadptr_to_kill(rb_thread_t *th));
2413 
2414 static void
2415 rb_threadptr_to_kill(rb_thread_t *th)
2416 {
2417  rb_threadptr_pending_interrupt_clear(th);
2418  th->status = THREAD_RUNNABLE;
2419  th->to_kill = 1;
2420  th->ec->errinfo = INT2FIX(TAG_FATAL);
2421  EC_JUMP_TAG(th->ec, TAG_FATAL);
2422 }
2423 
2424 static inline rb_atomic_t
2425 threadptr_get_interrupts(rb_thread_t *th)
2426 {
2427  rb_execution_context_t *ec = th->ec;
2428  rb_atomic_t interrupt;
2429  rb_atomic_t old;
2430 
2431  do {
2432  interrupt = ec->interrupt_flag;
2433  old = ATOMIC_CAS(ec->interrupt_flag, interrupt, interrupt & ec->interrupt_mask);
2434  } while (old != interrupt);
2435  return interrupt & (rb_atomic_t)~ec->interrupt_mask;
2436 }
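/*
 * Illustrative equivalent of the CAS loop above, ignoring atomicity:
 * masked bits stay pending while unmasked bits are cleared and returned
 * for delivery:
 *
 *   rb_atomic_t pending = ec->interrupt_flag;
 *   ec->interrupt_flag  = pending & ec->interrupt_mask;  // keep masked bits
 *   return pending & ~ec->interrupt_mask;                // deliver the rest
 */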
2437 
2438 int
2439 rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
2440 {
2441  rb_atomic_t interrupt;
2442  int postponed_job_interrupt = 0;
2443  int ret = FALSE;
2444 
2445  if (th->ec->raised_flag) return ret;
2446 
2447  while ((interrupt = threadptr_get_interrupts(th)) != 0) {
2448  int sig;
2449  int timer_interrupt;
2450  int pending_interrupt;
2451  int trap_interrupt;
2452  int terminate_interrupt;
2453 
2454  timer_interrupt = interrupt & TIMER_INTERRUPT_MASK;
2455  pending_interrupt = interrupt & PENDING_INTERRUPT_MASK;
2456  postponed_job_interrupt = interrupt & POSTPONED_JOB_INTERRUPT_MASK;
2457  trap_interrupt = interrupt & TRAP_INTERRUPT_MASK;
2458  terminate_interrupt = interrupt & TERMINATE_INTERRUPT_MASK; // request from other ractors
2459 
2460  if (interrupt & VM_BARRIER_INTERRUPT_MASK) {
2461  RB_VM_LOCK_ENTER();
2462  RB_VM_LOCK_LEAVE();
2463  }
2464 
2465  if (postponed_job_interrupt) {
2466  rb_postponed_job_flush(th->vm);
2467  }
2468 
2469  /* signal handling */
2470  if (trap_interrupt && (th == th->vm->ractor.main_thread)) {
2471  enum rb_thread_status prev_status = th->status;
2472 
2473  th->status = THREAD_RUNNABLE;
2474  {
2475  while ((sig = rb_get_next_signal()) != 0) {
2476  ret |= rb_signal_exec(th, sig);
2477  }
2478  }
2479  th->status = prev_status;
2480  }
2481 
2482  /* exception from another thread */
2483  if (pending_interrupt && threadptr_pending_interrupt_active_p(th)) {
2484  VALUE err = rb_threadptr_pending_interrupt_deque(th, blocking_timing ? INTERRUPT_ON_BLOCKING : INTERRUPT_NONE);
2485  RUBY_DEBUG_LOG("err:%"PRIdVALUE, err);
2486  ret = TRUE;
2487 
2488  if (UNDEF_P(err)) {
2489  /* no error */
2490  }
2491  else if (err == RUBY_FATAL_THREAD_KILLED /* Thread#kill received */ ||
2492  err == RUBY_FATAL_THREAD_TERMINATED /* Terminate thread */ ||
2493  err == INT2FIX(TAG_FATAL) /* Thread.exit etc. */ ) {
2494  terminate_interrupt = 1;
2495  }
2496  else {
2497  if (err == th->vm->special_exceptions[ruby_error_stream_closed]) {
2498  /* the only special exception to be queued across thread */
2499  err = ruby_vm_special_exception_copy(err);
2500  }
2501  /* set the thread runnable if it was sleeping. */
2502  if (th->status == THREAD_STOPPED ||
2503  th->status == THREAD_STOPPED_FOREVER)
2504  th->status = THREAD_RUNNABLE;
2505  rb_exc_raise(err);
2506  }
2507  }
2508 
2509  if (terminate_interrupt) {
2510  rb_threadptr_to_kill(th);
2511  }
2512 
2513  if (timer_interrupt) {
2514  uint32_t limits_us = TIME_QUANTUM_USEC;
2515 
2516  if (th->priority > 0)
2517  limits_us <<= th->priority;
2518  else
2519  limits_us >>= -th->priority;
2520 
2521  if (th->status == THREAD_RUNNABLE)
2522  th->running_time_us += 10 * 1000; // 10ms = 10_000us // TODO: use macro
2523 
2524  VM_ASSERT(th->ec->cfp);
2525  EXEC_EVENT_HOOK(th->ec, RUBY_INTERNAL_EVENT_SWITCH, th->ec->cfp->self,
2526  0, 0, 0, Qundef);
2527 
2528  rb_thread_schedule_limits(limits_us);
2529  }
2530  }
2531  return ret;
2532 }
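/*
 * Worked example of the time-quantum scaling above, assuming
 * TIME_QUANTUM_USEC is 10000us (10ms), its usual value:
 *
 *   priority  0  ->  10000us       = 10ms
 *   priority  2  ->  10000us << 2  = 40ms
 *   priority -3  ->  10000us >> 3  = 1250us (1.25ms)
 */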
2533 
2534 void
2535 rb_thread_execute_interrupts(VALUE thval)
2536 {
2537  rb_threadptr_execute_interrupts(rb_thread_ptr(thval), 1);
2538 }
2539 
2540 static void
2541 rb_threadptr_ready(rb_thread_t *th)
2542 {
2543  rb_threadptr_interrupt(th);
2544 }
2545 
2546 static VALUE
2547 rb_threadptr_raise(rb_thread_t *target_th, int argc, VALUE *argv)
2548 {
2549  VALUE exc;
2550 
2551  if (rb_threadptr_dead(target_th)) {
2552  return Qnil;
2553  }
2554 
2555  if (argc == 0) {
2556  exc = rb_exc_new(rb_eRuntimeError, 0, 0);
2557  }
2558  else {
2559  exc = rb_make_exception(argc, argv);
2560  }
2561 
2562  /* making an exception object can switch threads,
2563  so we need to check thread deadness again */
2564  if (rb_threadptr_dead(target_th)) {
2565  return Qnil;
2566  }
2567 
2568  rb_ec_setup_exception(GET_EC(), exc, Qundef);
2569  rb_threadptr_pending_interrupt_enque(target_th, exc);
2570  rb_threadptr_interrupt(target_th);
2571  return Qnil;
2572 }
2573 
2574 void
2575 rb_threadptr_signal_raise(rb_thread_t *th, int sig)
2576 {
2577  VALUE argv[2];
2578 
2579  argv[0] = rb_eSignal;
2580  argv[1] = INT2FIX(sig);
2581  rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2582 }
2583 
2584 void
2585 rb_threadptr_signal_exit(rb_thread_t *th)
2586 {
2587  VALUE argv[2];
2588 
2589  argv[0] = rb_eSystemExit;
2590  argv[1] = rb_str_new2("exit");
2591 
2592  // TODO: check signal raise delivery
2593  rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2594 }
2595 
2596 int
2597 rb_ec_set_raised(rb_execution_context_t *ec)
2598 {
2599  if (ec->raised_flag & RAISED_EXCEPTION) {
2600  return 1;
2601  }
2602  ec->raised_flag |= RAISED_EXCEPTION;
2603  return 0;
2604 }
2605 
2606 int
2607 rb_ec_reset_raised(rb_execution_context_t *ec)
2608 {
2609  if (!(ec->raised_flag & RAISED_EXCEPTION)) {
2610  return 0;
2611  }
2612  ec->raised_flag &= ~RAISED_EXCEPTION;
2613  return 1;
2614 }
2615 
2616 int
2617 rb_notify_fd_close(int fd, struct rb_io_close_wait_list *busy)
2618 {
2619  rb_vm_t *vm = GET_THREAD()->vm;
2620  struct waiting_fd *wfd = 0, *next;
2621  ccan_list_head_init(&busy->pending_fd_users);
2622  int has_any;
2623  VALUE wakeup_mutex;
2624 
2625  RB_VM_LOCK_ENTER();
2626  {
2627  ccan_list_for_each_safe(&vm->waiting_fds, wfd, next, wfd_node) {
2628  if (wfd->fd == fd) {
2629  rb_thread_t *th = wfd->th;
2630  VALUE err;
2631 
2632  ccan_list_del(&wfd->wfd_node);
2633  ccan_list_add(&busy->pending_fd_users, &wfd->wfd_node);
2634 
2635  wfd->busy = busy;
2636  err = th->vm->special_exceptions[ruby_error_stream_closed];
2637  rb_threadptr_pending_interrupt_enque(th, err);
2638  rb_threadptr_interrupt(th);
2639  }
2640  }
2641  }
2642 
2643  has_any = !ccan_list_empty(&busy->pending_fd_users);
2644  busy->closing_thread = rb_thread_current();
2645  busy->closing_fiber = rb_fiber_current();
2646  wakeup_mutex = Qnil;
2647  if (has_any) {
2648  wakeup_mutex = rb_mutex_new();
2649  RBASIC_CLEAR_CLASS(wakeup_mutex); /* hide from ObjectSpace */
2650  }
2651  busy->wakeup_mutex = wakeup_mutex;
2652 
2653  RB_VM_LOCK_LEAVE();
2654 
2655  /* If the caller didn't pass *busy as a pointer to something on the stack,
2656  we need to guard this mutex object on _our_ C stack for the duration
2657  of this function. */
2658  RB_GC_GUARD(wakeup_mutex);
2659  return has_any;
2660 }
2661 
2662 void
2663 rb_notify_fd_close_wait(struct rb_io_close_wait_list *busy)
2664 {
2665  if (!RB_TEST(busy->wakeup_mutex)) {
2666  /* There was nobody else using this file when we closed it, so we
2667  never bothered to allocate a mutex */
2668  return;
2669  }
2670 
2671  rb_mutex_lock(busy->wakeup_mutex);
2672  while (!ccan_list_empty(&busy->pending_fd_users)) {
2673  rb_mutex_sleep(busy->wakeup_mutex, Qnil);
2674  }
2675  rb_mutex_unlock(busy->wakeup_mutex);
2676 }
2677 
2678 void
2679 rb_thread_fd_close(int fd)
2680 {
2681  struct rb_io_close_wait_list busy;
2682 
2683  if (rb_notify_fd_close(fd, &busy)) {
2684  rb_notify_fd_close_wait(&busy);
2685  }
2686 }
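/*
 * A minimal sketch: an extension closing a descriptor that Ruby threads may
 * be blocked on should notify the waiters first, so they are woken with the
 * stream-closed error instead of blocking on a dead fd:
 *
 *   rb_thread_fd_close(fd);  // interrupt and wait out pending users
 *   close(fd);
 */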
2687 
2688 /*
2689  * call-seq:
2690  * thr.raise
2691  * thr.raise(string)
2692  * thr.raise(exception [, string [, array]])
2693  *
2694  * Raises an exception from the given thread. The caller does not have to be
2695  * +thr+. See Kernel#raise for more information.
2696  *
2697  * Thread.abort_on_exception = true
2698  * a = Thread.new { sleep(200) }
2699  * a.raise("Gotcha")
2700  *
2701  * This will produce:
2702  *
2703  * prog.rb:3: Gotcha (RuntimeError)
2704  * from prog.rb:2:in `initialize'
2705  * from prog.rb:2:in `new'
2706  * from prog.rb:2
2707  */
2708 
2709 static VALUE
2710 thread_raise_m(int argc, VALUE *argv, VALUE self)
2711 {
2712  rb_thread_t *target_th = rb_thread_ptr(self);
2713  const rb_thread_t *current_th = GET_THREAD();
2714 
2715  threadptr_check_pending_interrupt_queue(target_th);
2716  rb_threadptr_raise(target_th, argc, argv);
2717 
2718  /* To perform Thread.current.raise as Kernel.raise */
2719  if (current_th == target_th) {
2720  RUBY_VM_CHECK_INTS(target_th->ec);
2721  }
2722  return Qnil;
2723 }
2724 
2725 
2726 /*
2727  * call-seq:
2728  * thr.exit -> thr
2729  * thr.kill -> thr
2730  * thr.terminate -> thr
2731  *
2732  * Terminates +thr+ and schedules another thread to be run, returning
2733  * the terminated Thread. If this is the main thread, or the last
2734  * thread, exits the process.
2735  */
2736 
2737 VALUE
2738 rb_thread_kill(VALUE thread)
2739 {
2740  rb_thread_t *target_th = rb_thread_ptr(thread);
2741 
2742  if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2743  return thread;
2744  }
2745  if (target_th == target_th->vm->ractor.main_thread) {
2746  rb_exit(EXIT_SUCCESS);
2747  }
2748 
2749  RUBY_DEBUG_LOG("target_th:%u", rb_th_serial(target_th));
2750 
2751  if (target_th == GET_THREAD()) {
2752  /* kill myself immediately */
2753  rb_threadptr_to_kill(target_th);
2754  }
2755  else {
2756  threadptr_check_pending_interrupt_queue(target_th);
2757  rb_threadptr_pending_interrupt_enque(target_th, RUBY_FATAL_THREAD_KILLED);
2758  rb_threadptr_interrupt(target_th);
2759  }
2760 
2761  return thread;
2762 }
2763 
2764 int
2765 rb_thread_to_be_killed(VALUE thread)
2766 {
2767  rb_thread_t *target_th = rb_thread_ptr(thread);
2768 
2769  if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2770  return TRUE;
2771  }
2772  return FALSE;
2773 }
2774 
2775 /*
2776  * call-seq:
2777  * Thread.kill(thread) -> thread
2778  *
2779  * Causes the given +thread+ to exit; see also Thread::exit.
2780  *
2781  * count = 0
2782  * a = Thread.new { loop { count += 1 } }
2783  * sleep(0.1) #=> 0
2784  * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
2785  * count #=> 93947
2786  * a.alive? #=> false
2787  */
2788 
2789 static VALUE
2790 rb_thread_s_kill(VALUE obj, VALUE th)
2791 {
2792  return rb_thread_kill(th);
2793 }
2794 
2795 
2796 /*
2797  * call-seq:
2798  * Thread.exit -> thread
2799  *
2800  * Terminates the currently running thread and schedules another thread to be
2801  * run.
2802  *
2803  * If this thread is already marked to be killed, ::exit returns the Thread.
2804  *
2805  * If this is the main thread, or the last thread, exits the process.
2806  */
2807 
2808 static VALUE
2809 rb_thread_exit(VALUE _)
2810 {
2811  rb_thread_t *th = GET_THREAD();
2812  return rb_thread_kill(th->self);
2813 }
2814 
2815 
2816 /*
2817  * call-seq:
2818  * thr.wakeup -> thr
2819  *
2820  * Marks a given thread as eligible for scheduling; however, it may still
2821  * remain blocked on I/O.
2822  *
2823  * *Note:* This does not invoke the scheduler, see #run for more information.
2824  *
2825  * c = Thread.new { Thread.stop; puts "hey!" }
2826  * sleep 0.1 while c.status!='sleep'
2827  * c.wakeup
2828  * c.join
2829  * #=> "hey!"
2830  */
2831 
2832 VALUE
2833 rb_thread_wakeup(VALUE thread)
2834 {
2835  if (!RTEST(rb_thread_wakeup_alive(thread))) {
2836  rb_raise(rb_eThreadError, "killed thread");
2837  }
2838  return thread;
2839 }
2840 
2841 VALUE
2842 rb_thread_wakeup_alive(VALUE thread)
2843 {
2844  rb_thread_t *target_th = rb_thread_ptr(thread);
2845  if (target_th->status == THREAD_KILLED) return Qnil;
2846 
2847  rb_threadptr_ready(target_th);
2848 
2849  if (target_th->status == THREAD_STOPPED ||
2850  target_th->status == THREAD_STOPPED_FOREVER) {
2851  target_th->status = THREAD_RUNNABLE;
2852  }
2853 
2854  return thread;
2855 }
2856 
2857 
2858 /*
2859  * call-seq:
2860  * thr.run -> thr
2861  *
2862  * Wakes up +thr+, making it eligible for scheduling.
2863  *
2864  * a = Thread.new { puts "a"; Thread.stop; puts "c" }
2865  * sleep 0.1 while a.status!='sleep'
2866  * puts "Got here"
2867  * a.run
2868  * a.join
2869  *
2870  * This will produce:
2871  *
2872  * a
2873  * Got here
2874  * c
2875  *
2876  * See also the instance method #wakeup.
2877  */
2878 
2879 VALUE
2880 rb_thread_run(VALUE thread)
2881 {
2882  rb_thread_wakeup(thread);
2883  rb_thread_schedule();
2884  return thread;
2885 }
2886 
2887 
2888 VALUE
2889 rb_thread_stop(void)
2890 {
2891  if (rb_thread_alone()) {
2892  rb_raise(rb_eThreadError,
2893  "stopping only thread\n\tnote: use sleep to stop forever");
2894  }
2895  rb_thread_sleep_deadly();
2896  return Qnil;
2897 }
2898 
2899 /*
2900  * call-seq:
2901  * Thread.stop -> nil
2902  *
2903  * Stops execution of the current thread, putting it into a ``sleep'' state,
2904  * and schedules execution of another thread.
2905  *
2906  * a = Thread.new { print "a"; Thread.stop; print "c" }
2907  * sleep 0.1 while a.status!='sleep'
2908  * print "b"
2909  * a.run
2910  * a.join
2911  * #=> "abc"
2912  */
2913 
2914 static VALUE
2915 thread_stop(VALUE _)
2916 {
2917  return rb_thread_stop();
2918 }
2919 
2920 /********************************************************************/
2921 
2922 VALUE
2923 rb_thread_list(void)
2924 {
2925  // TODO
2926  return rb_ractor_thread_list();
2927 }
2928 
2929 /*
2930  * call-seq:
2931  * Thread.list -> array
2932  *
2933  * Returns an array of Thread objects for all threads that are either runnable
2934  * or stopped.
2935  *
2936  * Thread.new { sleep(200) }
2937  * Thread.new { 1000000.times {|i| i*i } }
2938  * Thread.new { Thread.stop }
2939  * Thread.list.each {|t| p t}
2940  *
2941  * This will produce:
2942  *
2943  * #<Thread:0x401b3e84 sleep>
2944  * #<Thread:0x401b3f38 run>
2945  * #<Thread:0x401b3fb0 sleep>
2946  * #<Thread:0x401bdf4c run>
2947  */
2948 
2949 static VALUE
2950 thread_list(VALUE _)
2951 {
2952  return rb_thread_list();
2953 }
2954 
2955 VALUE
2956 rb_thread_current(void)
2957 {
2958  return GET_THREAD()->self;
2959 }
2960 
2961 /*
2962  * call-seq:
2963  * Thread.current -> thread
2964  *
2965  * Returns the currently executing thread.
2966  *
2967  * Thread.current #=> #<Thread:0x401bdf4c run>
2968  */
2969 
2970 static VALUE
2971 thread_s_current(VALUE klass)
2972 {
2973  return rb_thread_current();
2974 }
2975 
2976 VALUE
2977 rb_thread_main(void)
2978 {
2979  return GET_RACTOR()->threads.main->self;
2980 }
2981 
2982 /*
2983  * call-seq:
2984  * Thread.main -> thread
2985  *
2986  * Returns the main thread.
2987  */
2988 
2989 static VALUE
2990 rb_thread_s_main(VALUE klass)
2991 {
2992  return rb_thread_main();
2993 }
2994 
2995 
2996 /*
2997  * call-seq:
2998  * Thread.abort_on_exception -> true or false
2999  *
3000  * Returns the status of the global ``abort on exception'' condition.
3001  *
3002  * The default is +false+.
3003  *
3004  * When set to +true+, if any thread is aborted by an exception, the
3005  * raised exception will be re-raised in the main thread.
3006  *
3007  * Can also be specified by the global $DEBUG flag or command line option
3008  * +-d+.
3009  *
3010  * See also ::abort_on_exception=.
3011  *
3012  * There is also an instance level method to set this for a specific thread,
3013  * see #abort_on_exception.
3014  */
3015 
3016 static VALUE
3017 rb_thread_s_abort_exc(VALUE _)
3018 {
3019  return RBOOL(GET_THREAD()->vm->thread_abort_on_exception);
3020 }
3021 
3022 
3023 /*
3024  * call-seq:
3025  * Thread.abort_on_exception= boolean -> true or false
3026  *
3027  * When set to +true+, if any thread is aborted by an exception, the
3028  * raised exception will be re-raised in the main thread.
3029  * Returns the new state.
3030  *
3031  * Thread.abort_on_exception = true
3032  * t1 = Thread.new do
3033  * puts "In new thread"
3034  * raise "Exception from thread"
3035  * end
3036  * sleep(1)
3037  * puts "not reached"
3038  *
3039  * This will produce:
3040  *
3041  * In new thread
3042  * prog.rb:4: Exception from thread (RuntimeError)
3043  * from prog.rb:2:in `initialize'
3044  * from prog.rb:2:in `new'
3045  * from prog.rb:2
3046  *
3047  * See also ::abort_on_exception.
3048  *
3049  * There is also an instance level method to set this for a specific thread,
3050  * see #abort_on_exception=.
3051  */
3052 
3053 static VALUE
3054 rb_thread_s_abort_exc_set(VALUE self, VALUE val)
3055 {
3056  GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
3057  return val;
3058 }
3059 
3060 
3061 /*
3062  * call-seq:
3063  * thr.abort_on_exception -> true or false
3064  *
3065  * Returns the status of the thread-local ``abort on exception'' condition for
3066  * this +thr+.
3067  *
3068  * The default is +false+.
3069  *
3070  * See also #abort_on_exception=.
3071  *
3072  * There is also a class level method to set this for all threads, see
3073  * ::abort_on_exception.
3074  */
3075 
3076 static VALUE
3077 rb_thread_abort_exc(VALUE thread)
3078 {
3079  return RBOOL(rb_thread_ptr(thread)->abort_on_exception);
3080 }
3081 
3082 
3083 /*
3084  * call-seq:
3085  * thr.abort_on_exception= boolean -> true or false
3086  *
3087  * When set to +true+, if this +thr+ is aborted by an exception, the
3088  * raised exception will be re-raised in the main thread.
3089  *
3090  * See also #abort_on_exception.
3091  *
3092  * There is also a class level method to set this for all threads, see
3093  * ::abort_on_exception=.
3094  */
3095 
3096 static VALUE
3097 rb_thread_abort_exc_set(VALUE thread, VALUE val)
3098 {
3099  rb_thread_ptr(thread)->abort_on_exception = RTEST(val);
3100  return val;
3101 }
3102 
3103 
3104 /*
3105  * call-seq:
3106  * Thread.report_on_exception -> true or false
3107  *
3108  * Returns the status of the global ``report on exception'' condition.
3109  *
3110  * The default is +true+ since Ruby 2.5.
3111  *
3112  * All threads created when this flag is true will report
3113  * a message on $stderr if an exception kills the thread.
3114  *
3115  * Thread.new { 1.times { raise } }
3116  *
3117  * will produce this output on $stderr:
3118  *
3119  * #<Thread:...> terminated with exception (report_on_exception is true):
3120  * Traceback (most recent call last):
3121  * 2: from -e:1:in `block in <main>'
3122  * 1: from -e:1:in `times'
3123  *
3124  * This is done to catch errors in threads early.
3125  * In some cases, you might not want this output.
3126  * There are multiple ways to avoid the extra output:
3127  *
3128  * * If the exception is not intended, the best is to fix the cause of
3129  * the exception so it does not happen anymore.
3130  * * If the exception is intended, it might be better to rescue it closer to
3131  * where it is raised rather than let it kill the Thread.
3132  * * If it is guaranteed the Thread will be joined with Thread#join or
3133  * Thread#value, then it is safe to disable this report with
3134  * <code>Thread.current.report_on_exception = false</code>
3135  * when starting the Thread.
3136  * However, this might handle the exception much later, or not at all
3137  * if the Thread is never joined due to the parent thread being blocked, etc.
3138  *
3139  * See also ::report_on_exception=.
3140  *
3141  * There is also an instance level method to set this for a specific thread,
3142  * see #report_on_exception=.
3143  *
3144  */
3145 
3146 static VALUE
3147 rb_thread_s_report_exc(VALUE _)
3148 {
3149  return RBOOL(GET_THREAD()->vm->thread_report_on_exception);
3150 }
3151 
3152 
3153 /*
3154  * call-seq:
3155  * Thread.report_on_exception= boolean -> true or false
3156  *
3157  * Returns the new state.
3158  * When set to +true+, all threads created afterwards will inherit the
3159  * condition and report a message on $stderr if an exception kills a thread:
3160  *
3161  * Thread.report_on_exception = true
3162  * t1 = Thread.new do
3163  * puts "In new thread"
3164  * raise "Exception from thread"
3165  * end
3166  * sleep(1)
3167  * puts "In the main thread"
3168  *
3169  * This will produce:
3170  *
3171  * In new thread
3172  * #<Thread:...prog.rb:2> terminated with exception (report_on_exception is true):
3173  * Traceback (most recent call last):
3174  * prog.rb:4:in `block in <main>': Exception from thread (RuntimeError)
3175  * In the main thread
3176  *
3177  * See also ::report_on_exception.
3178  *
3179  * There is also an instance level method to set this for a specific thread,
3180  * see #report_on_exception=.
3181  */
3182 
3183 static VALUE
3184 rb_thread_s_report_exc_set(VALUE self, VALUE val)
3185 {
3186  GET_THREAD()->vm->thread_report_on_exception = RTEST(val);
3187  return val;
3188 }
3189 
3190 
3191 /*
3192  * call-seq:
3193  * Thread.ignore_deadlock -> true or false
3194  *
3195  * Returns the status of the global ``ignore deadlock'' condition.
3196  * The default is +false+, so that deadlock conditions are not ignored.
3197  *
3198  * See also ::ignore_deadlock=.
3199  *
3200  */
3201 
3202 static VALUE
3203 rb_thread_s_ignore_deadlock(VALUE _)
3204 {
3205  return RBOOL(GET_THREAD()->vm->thread_ignore_deadlock);
3206 }
3207 
3208 
3209 /*
3210  * call-seq:
3211  * Thread.ignore_deadlock = boolean -> true or false
3212  *
3213  * Returns the new state.
3214  * When set to +true+, the VM will not check for deadlock conditions.
3215  * It is only useful to set this if your application can break a
3216  * deadlock condition via some other means, such as a signal.
3217  *
3218  * Thread.ignore_deadlock = true
3219  * queue = Thread::Queue.new
3220  *
3221  * trap(:SIGUSR1){queue.push "Received signal"}
3222  *
3223  * # raises fatal error unless ignoring deadlock
3224  * puts queue.pop
3225  *
3226  * See also ::ignore_deadlock.
3227  */
3228 
3229 static VALUE
3230 rb_thread_s_ignore_deadlock_set(VALUE self, VALUE val)
3231 {
3232  GET_THREAD()->vm->thread_ignore_deadlock = RTEST(val);
3233  return val;
3234 }
3235 
3236 
3237 /*
3238  * call-seq:
3239  * thr.report_on_exception -> true or false
3240  *
3241  * Returns the status of the thread-local ``report on exception'' condition for
3242  * this +thr+.
3243  *
3244  * The default value when creating a Thread is the value of
3245  * the global flag Thread.report_on_exception.
3246  *
3247  * See also #report_on_exception=.
3248  *
3249  * There is also a class level method to set this for all new threads, see
3250  * ::report_on_exception=.
3251  */
3252 
3253 static VALUE
3254 rb_thread_report_exc(VALUE thread)
3255 {
3256  return RBOOL(rb_thread_ptr(thread)->report_on_exception);
3257 }
3258 
3259 
3260 /*
3261  * call-seq:
3262  * thr.report_on_exception= boolean -> true or false
3263  *
3264  * When set to +true+, a message is printed on $stderr if an exception
3265  * kills this +thr+. See ::report_on_exception for details.
3266  *
3267  * See also #report_on_exception.
3268  *
3269  * There is also a class level method to set this for all new threads, see
3270  * ::report_on_exception=.
3271  */
3272 
3273 static VALUE
3274 rb_thread_report_exc_set(VALUE thread, VALUE val)
3275 {
3276  rb_thread_ptr(thread)->report_on_exception = RTEST(val);
3277  return val;
3278 }
3279 
3280 
3281 /*
3282  * call-seq:
3283  * thr.group -> thgrp or nil
3284  *
3285  * Returns the ThreadGroup which contains the given thread.
3286  *
3287  * Thread.main.group #=> #<ThreadGroup:0x4029d914>
3288  */
3289 
3290 VALUE
3291 rb_thread_group(VALUE thread)
3292 {
3293  return rb_thread_ptr(thread)->thgroup;
3294 }
3295 
3296 static const char *
3297 thread_status_name(rb_thread_t *th, int detail)
3298 {
3299  switch (th->status) {
3300  case THREAD_RUNNABLE:
3301  return th->to_kill ? "aborting" : "run";
3302  case THREAD_STOPPED_FOREVER:
3303  if (detail) return "sleep_forever";
3304  case THREAD_STOPPED:
3305  return "sleep";
3306  case THREAD_KILLED:
3307  return "dead";
3308  default:
3309  return "unknown";
3310  }
3311 }
3312 
3313 static int
3314 rb_threadptr_dead(rb_thread_t *th)
3315 {
3316  return th->status == THREAD_KILLED;
3317 }
3318 
3319 
3320 /*
3321  * call-seq:
3322  * thr.status -> string, false or nil
3323  *
3324  * Returns the status of +thr+.
3325  *
3326  * [<tt>"sleep"</tt>]
3327  * Returned if this thread is sleeping or waiting on I/O
3328  * [<tt>"run"</tt>]
3329  * When this thread is executing
3330  * [<tt>"aborting"</tt>]
3331  * If this thread is aborting
3332  * [+false+]
3333  * When this thread is terminated normally
3334  * [+nil+]
3335  * If terminated with an exception.
3336  *
3337  * a = Thread.new { raise("die now") }
3338  * b = Thread.new { Thread.stop }
3339  * c = Thread.new { Thread.exit }
3340  * d = Thread.new { sleep }
3341  * d.kill #=> #<Thread:0x401b3678 aborting>
3342  * a.status #=> nil
3343  * b.status #=> "sleep"
3344  * c.status #=> false
3345  * d.status #=> "aborting"
3346  * Thread.current.status #=> "run"
3347  *
3348  * See also the instance methods #alive? and #stop?
3349  */
3350 
3351 static VALUE
3352 rb_thread_status(VALUE thread)
3353 {
3354  rb_thread_t *target_th = rb_thread_ptr(thread);
3355 
3356  if (rb_threadptr_dead(target_th)) {
3357  if (!NIL_P(target_th->ec->errinfo) &&
3358  !FIXNUM_P(target_th->ec->errinfo)) {
3359  return Qnil;
3360  }
3361  else {
3362  return Qfalse;
3363  }
3364  }
3365  else {
3366  return rb_str_new2(thread_status_name(target_th, FALSE));
3367  }
3368 }
3369 
3370 
3371 /*
3372  * call-seq:
3373  * thr.alive? -> true or false
3374  *
3375  * Returns +true+ if +thr+ is running or sleeping.
3376  *
3377  * thr = Thread.new { }
3378  * thr.join #=> #<Thread:0x401b3fb0 dead>
3379  * Thread.current.alive? #=> true
3380  * thr.alive? #=> false
3381  *
3382  * See also #stop? and #status.
3383  */
3384 
3385 static VALUE
3386 rb_thread_alive_p(VALUE thread)
3387 {
3388  return RBOOL(!thread_finished(rb_thread_ptr(thread)));
3389 }
3390 
3391 /*
3392  * call-seq:
3393  * thr.stop? -> true or false
3394  *
3395  * Returns +true+ if +thr+ is dead or sleeping.
3396  *
3397  * a = Thread.new { Thread.stop }
3398  * b = Thread.current
3399  * a.stop? #=> true
3400  * b.stop? #=> false
3401  *
3402  * See also #alive? and #status.
3403  */
3404 
3405 static VALUE
3406 rb_thread_stop_p(VALUE thread)
3407 {
3408  rb_thread_t *th = rb_thread_ptr(thread);
3409 
3410  if (rb_threadptr_dead(th)) {
3411  return Qtrue;
3412  }
3413  return RBOOL(th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER);
3414 }
3415 
3416 /*
3417  * call-seq:
3418  * thr.name -> string
3419  *
3420  * Returns the name of the thread.
3421  */
3422 
3423 static VALUE
3424 rb_thread_getname(VALUE thread)
3425 {
3426  return rb_thread_ptr(thread)->name;
3427 }
3428 
3429 /*
3430  * call-seq:
3431  * thr.name=(name) -> string
3432  *
3433  * Sets the given name to the Ruby thread.
3434  * On some platforms, it may also set the name of the underlying pthread and/or kernel thread.
3435  */
3436 
3437 static VALUE
3438 rb_thread_setname(VALUE thread, VALUE name)
3439 {
3440  rb_thread_t *target_th = rb_thread_ptr(thread);
3441 
3442  if (!NIL_P(name)) {
3443  rb_encoding *enc;
3444  StringValueCStr(name);
3445  enc = rb_enc_get(name);
3446  if (!rb_enc_asciicompat(enc)) {
3447  rb_raise(rb_eArgError, "ASCII incompatible encoding (%s)",
3448  rb_enc_name(enc));
3449  }
3450  name = rb_str_new_frozen(name);
3451  }
3452  target_th->name = name;
3453  if (threadptr_initialized(target_th) && target_th->has_dedicated_nt) {
3454  native_set_another_thread_name(target_th->nt->thread_id, name);
3455  }
3456  return name;
3457 }
3458 
3459 #if USE_NATIVE_THREAD_NATIVE_THREAD_ID
3460 /*
3461  * call-seq:
3462  * thr.native_thread_id -> integer
3463  *
3464  * Return the native thread ID which is used by the Ruby thread.
3465  *
3466  * The ID depends on the OS. (It is not the POSIX thread ID returned by pthread_self(3).)
3467  * * On Linux it is TID returned by gettid(2).
3468  * * On macOS it is the system-wide unique integral ID of thread returned
3469  * by pthread_threadid_np(3).
3470  * * On FreeBSD it is the unique integral ID of the thread returned by
3471  * pthread_getthreadid_np(3).
3472  * * On Windows it is the thread identifier returned by GetThreadId().
3473  * * On other platforms, it raises NotImplementedError.
3474  *
3475  * NOTE:
3476  * If the thread is not yet associated with, or has already been dissociated
3477  * from, a native thread, it returns _nil_.
3478  * If the Ruby implementation uses an M:N thread model, the ID may change
3479  * depending on the timing.
3480  */
3481 
3482 static VALUE
3483 rb_thread_native_thread_id(VALUE thread)
3484 {
3485  rb_thread_t *target_th = rb_thread_ptr(thread);
3486  if (rb_threadptr_dead(target_th)) return Qnil;
3487  return native_thread_native_thread_id(target_th);
3488 }
3489 #else
3490 # define rb_thread_native_thread_id rb_f_notimplement
3491 #endif
3492 
3493 /*
3494  * call-seq:
3495  * thr.to_s -> string
3496  *
3497  * Dump the name, id, and status of _thr_ to a string.
3498  */
3499 
3500 static VALUE
3501 rb_thread_to_s(VALUE thread)
3502 {
3503  VALUE cname = rb_class_path(rb_obj_class(thread));
3504  rb_thread_t *target_th = rb_thread_ptr(thread);
3505  const char *status;
3506  VALUE str, loc;
3507 
3508  status = thread_status_name(target_th, TRUE);
3509  str = rb_sprintf("#<%"PRIsVALUE":%p", cname, (void *)thread);
3510  if (!NIL_P(target_th->name)) {
3511  rb_str_catf(str, "@%"PRIsVALUE, target_th->name);
3512  }
3513  if ((loc = threadptr_invoke_proc_location(target_th)) != Qnil) {
3514  rb_str_catf(str, " %"PRIsVALUE":%"PRIsVALUE,
3515  RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
3516  }
3517  rb_str_catf(str, " %s>", status);
3518 
3519  return str;
3520 }
3521 
3522 /* variables for recursive traversals */
3523 #define recursive_key id__recursive_key__
3524 
3525 static VALUE
3526 threadptr_local_aref(rb_thread_t *th, ID id)
3527 {
3528  if (id == recursive_key) {
3529  return th->ec->local_storage_recursive_hash;
3530  }
3531  else {
3532  VALUE val;
3533  struct rb_id_table *local_storage = th->ec->local_storage;
3534 
3535  if (local_storage != NULL && rb_id_table_lookup(local_storage, id, &val)) {
3536  return val;
3537  }
3538  else {
3539  return Qnil;
3540  }
3541  }
3542 }
3543 
3544 VALUE
3545 rb_thread_local_aref(VALUE thread, ID id)
3546 {
3547  return threadptr_local_aref(rb_thread_ptr(thread), id);
3548 }
3549 
3550 /*
3551  * call-seq:
3552  * thr[sym] -> obj or nil
3553  *
3554  * Attribute Reference---Returns the value of a fiber-local variable (current thread's root fiber
3555  * if not explicitly inside a Fiber), using either a symbol or a string name.
3556  * If the specified variable does not exist, returns +nil+.
3557  *
3558  * [
3559  * Thread.new { Thread.current["name"] = "A" },
3560  * Thread.new { Thread.current[:name] = "B" },
3561  * Thread.new { Thread.current["name"] = "C" }
3562  * ].each do |th|
3563  * th.join
3564  * puts "#{th.inspect}: #{th[:name]}"
3565  * end
3566  *
3567  * This will produce:
3568  *
3569  * #<Thread:0x00000002a54220 dead>: A
3570  * #<Thread:0x00000002a541a8 dead>: B
3571  * #<Thread:0x00000002a54130 dead>: C
3572  *
3573  * Thread#[] and Thread#[]= are not thread-local but fiber-local.
3574  * This confusion did not exist in Ruby 1.8 because
3575  * fibers have only been available since Ruby 1.9.
3576  * Ruby 1.9 made these methods fiber-local in order to preserve
3577  * the following idiom for dynamic scope.
3578  *
3579  * def meth(newvalue)
3580  * begin
3581  * oldvalue = Thread.current[:name]
3582  * Thread.current[:name] = newvalue
3583  * yield
3584  * ensure
3585  * Thread.current[:name] = oldvalue
3586  * end
3587  * end
3588  *
3589  * The idiom may not work as dynamic scope if the methods are thread-local
3590  * and a given block switches fibers.
3591  *
3592  * f = Fiber.new {
3593  * meth(1) {
3594  * Fiber.yield
3595  * }
3596  * }
3597  * meth(2) {
3598  * f.resume
3599  * }
3600  * f.resume
3601  * p Thread.current[:name]
3602  * #=> nil if fiber-local
3603  * #=> 2 if thread-local (The value 2 is leaked to outside of meth method.)
3604  *
3605  * For thread-local variables, please see #thread_variable_get and
3606  * #thread_variable_set.
3607  *
3608  */
3609 
3610 static VALUE
3611 rb_thread_aref(VALUE thread, VALUE key)
3612 {
3613  ID id = rb_check_id(&key);
3614  if (!id) return Qnil;
3615  return rb_thread_local_aref(thread, id);
3616 }
3617 
3618 /*
3619  * call-seq:
3620  * thr.fetch(sym) -> obj
3621  * thr.fetch(sym) { } -> obj
3622  * thr.fetch(sym, default) -> obj
3623  *
3624  * Returns a fiber-local for the given key. If the key can't be
3625  * found, there are several options: With no other arguments, it will
3626  * raise a KeyError exception; if <i>default</i> is given, then that
3627  * will be returned; if the optional code block is specified, then
3628  * that will be run and its result returned. See Thread#[] and
3629  * Hash#fetch.
3630  */
3631 static VALUE
3632 rb_thread_fetch(int argc, VALUE *argv, VALUE self)
3633 {
3634  VALUE key, val;
3635  ID id;
3636  rb_thread_t *target_th = rb_thread_ptr(self);
3637  int block_given;
3638 
3639  rb_check_arity(argc, 1, 2);
3640  key = argv[0];
3641 
3642  block_given = rb_block_given_p();
3643  if (block_given && argc == 2) {
3644  rb_warn("block supersedes default value argument");
3645  }
3646 
3647  id = rb_check_id(&key);
3648 
3649  if (id == recursive_key) {
3650  return target_th->ec->local_storage_recursive_hash;
3651  }
3652  else if (id && target_th->ec->local_storage &&
3653  rb_id_table_lookup(target_th->ec->local_storage, id, &val)) {
3654  return val;
3655  }
3656  else if (block_given) {
3657  return rb_yield(key);
3658  }
3659  else if (argc == 1) {
3660  rb_key_err_raise(rb_sprintf("key not found: %+"PRIsVALUE, key), self, key);
3661  }
3662  else {
3663  return argv[1];
3664  }
3665 }
3666 
3667 static VALUE
3668 threadptr_local_aset(rb_thread_t *th, ID id, VALUE val)
3669 {
3670  if (id == recursive_key) {
3671  th->ec->local_storage_recursive_hash = val;
3672  return val;
3673  }
3674  else {
3675  struct rb_id_table *local_storage = th->ec->local_storage;
3676 
3677  if (NIL_P(val)) {
3678  if (!local_storage) return Qnil;
3679  rb_id_table_delete(local_storage, id);
3680  return Qnil;
3681  }
3682  else {
3683  if (local_storage == NULL) {
3684  th->ec->local_storage = local_storage = rb_id_table_create(0);
3685  }
3686  rb_id_table_insert(local_storage, id, val);
3687  return val;
3688  }
3689  }
3690 }
3691 
3692 VALUE
3693 rb_thread_local_aset(VALUE thread, ID id, VALUE val)
3694 {
3695  if (OBJ_FROZEN(thread)) {
3696  rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3697  }
3698 
3699  return threadptr_local_aset(rb_thread_ptr(thread), id, val);
3700 }
3701 
3702 /*
3703  * call-seq:
3704  * thr[sym] = obj -> obj
3705  *
3706  * Attribute Assignment---Sets or creates the value of a fiber-local variable,
3707  * using either a symbol or a string.
3708  *
3709  * See also Thread#[].
3710  *
3711  * For thread-local variables, please see #thread_variable_set and
3712  * #thread_variable_get.
3713  */
3714 
3715 static VALUE
3716 rb_thread_aset(VALUE self, VALUE id, VALUE val)
3717 {
3718  return rb_thread_local_aset(self, rb_to_id(id), val);
3719 }
3720 
3721 /*
3722  * call-seq:
3723  * thr.thread_variable_get(key) -> obj or nil
3724  *
3725  * Returns the value of a thread local variable that has been set. Note that
3726  * these are different than fiber local values. For fiber local values,
3727  * please see Thread#[] and Thread#[]=.
3728  *
3729  * Thread local values are carried along with threads, and do not respect
3730  * fibers. For example:
3731  *
3732  * Thread.new {
3733  * Thread.current.thread_variable_set("foo", "bar") # set a thread local
3734  * Thread.current["foo"] = "bar" # set a fiber local
3735  *
3736  * Fiber.new {
3737  * Fiber.yield [
3738  * Thread.current.thread_variable_get("foo"), # get the thread local
3739  * Thread.current["foo"], # get the fiber local
3740  * ]
3741  * }.resume
3742  * }.join.value # => ['bar', nil]
3743  *
3744  * The value "bar" is returned for the thread local, whereas nil is returned
3745  * for the fiber local. The fiber is executed in the same thread, so the
3746  * thread local values are available.
3747  */
3748 
3749 static VALUE
3750 rb_thread_variable_get(VALUE thread, VALUE key)
3751 {
3752  VALUE locals;
3753  VALUE symbol = rb_to_symbol(key);
3754 
3755  if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3756  return Qnil;
3757  }
3758  locals = rb_thread_local_storage(thread);
3759  return rb_hash_aref(locals, symbol);
3760 }
3761 
3762 /*
3763  * call-seq:
3764  * thr.thread_variable_set(key, value)
3765  *
3766  * Sets a thread local with +key+ to +value+. Note that these are local to
3767  * threads, and not to fibers. Please see Thread#thread_variable_get and
3768  * Thread#[] for more information.
3769  */
3770 
3771 static VALUE
3772 rb_thread_variable_set(VALUE thread, VALUE key, VALUE val)
3773 {
3774  VALUE locals;
3775 
3776  if (OBJ_FROZEN(thread)) {
3777  rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3778  }
3779 
3780  locals = rb_thread_local_storage(thread);
3781  return rb_hash_aset(locals, rb_to_symbol(key), val);
3782 }
3783 
3784 /*
3785  * call-seq:
3786  * thr.key?(sym) -> true or false
3787  *
3788  * Returns +true+ if the given string (or symbol) exists as a fiber-local
3789  * variable.
3790  *
3791  * me = Thread.current
3792  * me[:oliver] = "a"
3793  * me.key?(:oliver) #=> true
3794  * me.key?(:stanley) #=> false
3795  */
3796 
3797 static VALUE
3798 rb_thread_key_p(VALUE self, VALUE key)
3799 {
3800  VALUE val;
3801  ID id = rb_check_id(&key);
3802  struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3803 
3804  if (!id || local_storage == NULL) {
3805  return Qfalse;
3806  }
3807  return RBOOL(rb_id_table_lookup(local_storage, id, &val));
3808 }
3809 
3810 static enum rb_id_table_iterator_result
3811 thread_keys_i(ID key, VALUE value, void *ary)
3812 {
3813  rb_ary_push((VALUE)ary, ID2SYM(key));
3814  return ID_TABLE_CONTINUE;
3815 }
3816 
3817 int
3818 rb_thread_alone(void)
3819 {
3820  // TODO
3821  return rb_ractor_living_thread_num(GET_RACTOR()) == 1;
3822 }
3823 
3824 /*
3825  * call-seq:
3826  * thr.keys -> array
3827  *
3828  * Returns an array of the names of the fiber-local variables (as Symbols).
3829  *
3830  * thr = Thread.new do
3831  * Thread.current[:cat] = 'meow'
3832  * Thread.current["dog"] = 'woof'
3833  * end
3834  * thr.join #=> #<Thread:0x401b3f10 dead>
3835  * thr.keys #=> [:dog, :cat]
3836  */
3837 
3838 static VALUE
3839 rb_thread_keys(VALUE self)
3840 {
3841  struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3842  VALUE ary = rb_ary_new();
3843 
3844  if (local_storage) {
3845  rb_id_table_foreach(local_storage, thread_keys_i, (void *)ary);
3846  }
3847  return ary;
3848 }
3849 
3850 static int
3851 keys_i(VALUE key, VALUE value, VALUE ary)
3852 {
3853  rb_ary_push(ary, key);
3854  return ST_CONTINUE;
3855 }
3856 
3857 /*
3858  * call-seq:
3859  * thr.thread_variables -> array
3860  *
3861  * Returns an array of the names of the thread-local variables (as Symbols).
3862  *
3863  * thr = Thread.new do
3864  * Thread.current.thread_variable_set(:cat, 'meow')
3865  * Thread.current.thread_variable_set("dog", 'woof')
3866  * end
3867  * thr.join #=> #<Thread:0x401b3f10 dead>
3868  * thr.thread_variables #=> [:dog, :cat]
3869  *
3870  * Note that these are not fiber local variables. Please see Thread#[] and
3871  * Thread#thread_variable_get for more details.
3872  */
3873 
3874 static VALUE
3875 rb_thread_variables(VALUE thread)
3876 {
3877  VALUE locals;
3878  VALUE ary;
3879 
3880  ary = rb_ary_new();
3881  if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3882  return ary;
3883  }
3884  locals = rb_thread_local_storage(thread);
3885  rb_hash_foreach(locals, keys_i, ary);
3886 
3887  return ary;
3888 }
3889 
3890 /*
3891  * call-seq:
3892  * thr.thread_variable?(key) -> true or false
3893  *
3894  * Returns +true+ if the given string (or symbol) exists as a thread-local
3895  * variable.
3896  *
3897  * me = Thread.current
3898  * me.thread_variable_set(:oliver, "a")
3899  * me.thread_variable?(:oliver) #=> true
3900  * me.thread_variable?(:stanley) #=> false
3901  *
3902  * Note that these are not fiber local variables. Please see Thread#[] and
3903  * Thread#thread_variable_get for more details.
3904  */
3905 
3906 static VALUE
3907 rb_thread_variable_p(VALUE thread, VALUE key)
3908 {
3909  VALUE locals;
3910  VALUE symbol = rb_to_symbol(key);
3911 
3912  if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3913  return Qfalse;
3914  }
3915  locals = rb_thread_local_storage(thread);
3916 
3917  return RBOOL(rb_hash_lookup(locals, symbol) != Qnil);
3918 }
3919 
3920 /*
3921  * call-seq:
3922  * thr.priority -> integer
3923  *
3924  * Returns the priority of <i>thr</i>. The default is inherited from the
3925  * current thread that creates the new thread, or zero for the
3926  * initial main thread; higher-priority threads will run more frequently
3927  * than lower-priority threads (but lower-priority threads can also run).
3928  *
3929  * This is just a hint for the Ruby thread scheduler. It may be ignored on
3930  * some platforms.
3931  *
3932  * Thread.current.priority #=> 0
3933  */
3934 
3935 static VALUE
3936 rb_thread_priority(VALUE thread)
3937 {
3938  return INT2NUM(rb_thread_ptr(thread)->priority);
3939 }
3940 
3941 
3942 /*
3943  * call-seq:
3944  * thr.priority= integer -> thr
3945  *
3946  * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
3947  * will run more frequently than lower-priority threads (but lower-priority
3948  * threads can also run).
3949  *
 3950  * This is just a hint for the Ruby thread scheduler. It may be ignored on
 3951  * some platforms.
3952  *
3953  * count1 = count2 = 0
3954  * a = Thread.new do
3955  * loop { count1 += 1 }
3956  * end
3957  * a.priority = -1
3958  *
3959  * b = Thread.new do
3960  * loop { count2 += 1 }
3961  * end
3962  * b.priority = -2
3963  * sleep 1 #=> 1
3964  * count1 #=> 622504
3965  * count2 #=> 5832
3966  */
3967 
3968 static VALUE
3969 rb_thread_priority_set(VALUE thread, VALUE prio)
3970 {
3971  rb_thread_t *target_th = rb_thread_ptr(thread);
3972  int priority;
3973 
3974 #if USE_NATIVE_THREAD_PRIORITY
3975  target_th->priority = NUM2INT(prio);
 3976  native_thread_apply_priority(target_th);
3977 #else
3978  priority = NUM2INT(prio);
3979  if (priority > RUBY_THREAD_PRIORITY_MAX) {
3980  priority = RUBY_THREAD_PRIORITY_MAX;
3981  }
3982  else if (priority < RUBY_THREAD_PRIORITY_MIN) {
3983  priority = RUBY_THREAD_PRIORITY_MIN;
3984  }
3985  target_th->priority = (int8_t)priority;
3986 #endif
3987  return INT2NUM(target_th->priority);
3988 }
3989 
3990 /* for IO */
3991 
3992 #if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
3993 
3994 /*
3995  * several Unix platforms support file descriptors bigger than FD_SETSIZE
3996  * in select(2) system call.
3997  *
3998  * - Linux 2.2.12 (?)
3999  * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
4000  * select(2) documents how to allocate fd_set dynamically.
4001  * http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
4002  * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
4003  * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
4004  * select(2) documents how to allocate fd_set dynamically.
4005  * http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
4006  * - Solaris 8 has select_large_fdset
4007  * - Mac OS X 10.7 (Lion)
 4008  * select(2) returns EINVAL if nfds is greater than FD_SETSIZE and
4009  * _DARWIN_UNLIMITED_SELECT (or _DARWIN_C_SOURCE) isn't defined.
4010  * https://developer.apple.com/library/archive/releasenotes/Darwin/SymbolVariantsRelNotes/index.html
4011  *
 4012  * When fd_set is not big enough to hold big file descriptors,
 4013  * it should be allocated dynamically.
 4014  * Note that this assumes fd_set is structured as a bitmap.
 4015  *
 4016  * rb_fd_init allocates the memory.
 4017  * rb_fd_term frees the memory.
 4018  * rb_fd_set may re-allocate the bitmap.
 4019  *
 4020  * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
 4021  */
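/*
 * A minimal lifecycle sketch for the rb_fdset_t API defined below
 * (illustrative only; `sock_fd` is a hypothetical descriptor):
 *
 *     rb_fdset_t fds;
 *     struct timeval tv = { 1, 0 };       // 1 second timeout
 *
 *     rb_fd_init(&fds);                   // allocate and zero the bitmap
 *     rb_fd_set(sock_fd, &fds);           // may grow the bitmap past FD_SETSIZE
 *     if (rb_fd_select(sock_fd + 1, &fds, NULL, NULL, &tv) > 0 &&
 *         rb_fd_isset(sock_fd, &fds)) {
 *         // sock_fd became readable within the timeout
 *     }
 *     rb_fd_term(&fds);                   // release the bitmap
 */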
4022 
4023 void
4024 rb_fd_init(rb_fdset_t *fds)
4025 {
4026  fds->maxfd = 0;
4027  fds->fdset = ALLOC(fd_set);
4028  FD_ZERO(fds->fdset);
4029 }
4030 
4031 void
4032 rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
4033 {
4034  size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
4035 
4036  if (size < sizeof(fd_set))
4037  size = sizeof(fd_set);
4038  dst->maxfd = src->maxfd;
4039  dst->fdset = xmalloc(size);
4040  memcpy(dst->fdset, src->fdset, size);
4041 }
4042 
4043 void
4044 rb_fd_term(rb_fdset_t *fds)
4045 {
4046  xfree(fds->fdset);
4047  fds->maxfd = 0;
4048  fds->fdset = 0;
4049 }
4050 
4051 void
4052 rb_fd_zero(rb_fdset_t *fds)
4053 {
4054  if (fds->fdset)
4055  MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
4056 }
4057 
4058 static void
4059 rb_fd_resize(int n, rb_fdset_t *fds)
4060 {
4061  size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
4062  size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
4063 
4064  if (m < sizeof(fd_set)) m = sizeof(fd_set);
4065  if (o < sizeof(fd_set)) o = sizeof(fd_set);
4066 
4067  if (m > o) {
4068  fds->fdset = xrealloc(fds->fdset, m);
4069  memset((char *)fds->fdset + o, 0, m - o);
4070  }
4071  if (n >= fds->maxfd) fds->maxfd = n + 1;
4072 }
4073 
4074 void
4075 rb_fd_set(int n, rb_fdset_t *fds)
4076 {
4077  rb_fd_resize(n, fds);
4078  FD_SET(n, fds->fdset);
4079 }
4080 
4081 void
4082 rb_fd_clr(int n, rb_fdset_t *fds)
4083 {
4084  if (n >= fds->maxfd) return;
4085  FD_CLR(n, fds->fdset);
4086 }
4087 
4088 int
4089 rb_fd_isset(int n, const rb_fdset_t *fds)
4090 {
4091  if (n >= fds->maxfd) return 0;
4092  return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
4093 }
4094 
4095 void
4096 rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
4097 {
4098  size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);
4099 
4100  if (size < sizeof(fd_set)) size = sizeof(fd_set);
4101  dst->maxfd = max;
4102  dst->fdset = xrealloc(dst->fdset, size);
4103  memcpy(dst->fdset, src, size);
4104 }
4105 
4106 void
4107 rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
4108 {
4109  size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
4110 
4111  if (size < sizeof(fd_set))
4112  size = sizeof(fd_set);
4113  dst->maxfd = src->maxfd;
4114  dst->fdset = xrealloc(dst->fdset, size);
4115  memcpy(dst->fdset, src->fdset, size);
4116 }
4117 
4118 int
4119 rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
4120 {
4121  fd_set *r = NULL, *w = NULL, *e = NULL;
4122  if (readfds) {
4123  rb_fd_resize(n - 1, readfds);
4124  r = rb_fd_ptr(readfds);
4125  }
4126  if (writefds) {
4127  rb_fd_resize(n - 1, writefds);
4128  w = rb_fd_ptr(writefds);
4129  }
4130  if (exceptfds) {
4131  rb_fd_resize(n - 1, exceptfds);
4132  e = rb_fd_ptr(exceptfds);
4133  }
4134  return select(n, r, w, e, timeout);
4135 }
4136 
4137 #define rb_fd_no_init(fds) ((void)((fds)->fdset = 0), (void)((fds)->maxfd = 0))
4138 
4139 #undef FD_ZERO
4140 #undef FD_SET
4141 #undef FD_CLR
4142 #undef FD_ISSET
4143 
4144 #define FD_ZERO(f) rb_fd_zero(f)
4145 #define FD_SET(i, f) rb_fd_set((i), (f))
4146 #define FD_CLR(i, f) rb_fd_clr((i), (f))
4147 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
4148 
4149 #elif defined(_WIN32)
4150 
4151 void
4152 rb_fd_init(rb_fdset_t *set)
4153 {
4154  set->capa = FD_SETSIZE;
4155  set->fdset = ALLOC(fd_set);
4156  FD_ZERO(set->fdset);
4157 }
4158 
4159 void
4160 rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
4161 {
4162  rb_fd_init(dst);
4163  rb_fd_dup(dst, src);
4164 }
4165 
4166 void
4167 rb_fd_term(rb_fdset_t *set)
4168 {
4169  xfree(set->fdset);
4170  set->fdset = NULL;
4171  set->capa = 0;
4172 }
4173 
4174 void
4175 rb_fd_set(int fd, rb_fdset_t *set)
4176 {
4177  unsigned int i;
4178  SOCKET s = rb_w32_get_osfhandle(fd);
4179 
4180  for (i = 0; i < set->fdset->fd_count; i++) {
4181  if (set->fdset->fd_array[i] == s) {
4182  return;
4183  }
4184  }
4185  if (set->fdset->fd_count >= (unsigned)set->capa) {
4186  set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
4187  set->fdset =
4188  rb_xrealloc_mul_add(
4189  set->fdset, set->capa, sizeof(SOCKET), sizeof(unsigned int));
4190  }
4191  set->fdset->fd_array[set->fdset->fd_count++] = s;
4192 }
4193 
4194 #undef FD_ZERO
4195 #undef FD_SET
4196 #undef FD_CLR
4197 #undef FD_ISSET
4198 
4199 #define FD_ZERO(f) rb_fd_zero(f)
4200 #define FD_SET(i, f) rb_fd_set((i), (f))
4201 #define FD_CLR(i, f) rb_fd_clr((i), (f))
4202 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
4203 
4204 #define rb_fd_no_init(fds) (void)((fds)->fdset = 0)
4205 
4206 #endif
4207 
4208 #ifndef rb_fd_no_init
4209 #define rb_fd_no_init(fds) (void)(fds)
4210 #endif
4211 
4212 static int
4213 wait_retryable(volatile int *result, int errnum, rb_hrtime_t *rel, rb_hrtime_t end)
4214 {
4215  int r = *result;
4216  if (r < 0) {
4217  switch (errnum) {
4218  case EINTR:
4219 #ifdef ERESTART
4220  case ERESTART:
4221 #endif
4222  *result = 0;
4223  if (rel && hrtime_update_expire(rel, end)) {
4224  *rel = 0;
4225  }
4226  return TRUE;
4227  }
4228  return FALSE;
4229  }
4230  else if (r == 0) {
4231  /* check for spurious wakeup */
4232  if (rel) {
4233  return !hrtime_update_expire(rel, end);
4234  }
4235  return TRUE;
4236  }
4237  return FALSE;
4238 }
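/*
 * Callers are expected to drive wait_retryable() in a retry loop so that
 * EINTR/ERESTART and spurious wakeups transparently restart the wait with
 * the remaining timeout, e.g. (this is the pattern used by do_select() below):
 *
 *     do {
 *         lerrno = 0;
 *         result = ...blocking syscall...;   // sets lerrno on failure
 *     } while (wait_retryable(&result, lerrno, to, end));
 */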
4240 struct select_set {
4241  int max;
4242  rb_thread_t *th;
4243  rb_fdset_t *rset;
4244  rb_fdset_t *wset;
4245  rb_fdset_t *eset;
4246  rb_fdset_t orig_rset;
4247  rb_fdset_t orig_wset;
4248  rb_fdset_t orig_eset;
4249  struct timeval *timeout;
4250 };
4251 
4252 static VALUE
4253 select_set_free(VALUE p)
4254 {
4255  struct select_set *set = (struct select_set *)p;
4256 
4257  rb_fd_term(&set->orig_rset);
4258  rb_fd_term(&set->orig_wset);
4259  rb_fd_term(&set->orig_eset);
4260 
4261  return Qfalse;
4262 }
4263 
4264 static VALUE
4265 do_select(VALUE p)
4266 {
4267  struct select_set *set = (struct select_set *)p;
4268  volatile int result = 0;
4269  int lerrno;
4270  rb_hrtime_t *to, rel, end = 0;
4271 
4272  timeout_prepare(&to, &rel, &end, set->timeout);
4273  volatile rb_hrtime_t endtime = end;
4274 #define restore_fdset(dst, src) \
4275  ((dst) ? rb_fd_dup(dst, src) : (void)0)
4276 #define do_select_update() \
4277  (restore_fdset(set->rset, &set->orig_rset), \
4278  restore_fdset(set->wset, &set->orig_wset), \
4279  restore_fdset(set->eset, &set->orig_eset), \
4280  TRUE)
4281 
4282  do {
4283  lerrno = 0;
4284 
4285  BLOCKING_REGION(set->th, {
4286  struct timeval tv;
4287 
4288  if (!RUBY_VM_INTERRUPTED(set->th->ec)) {
4289  result = native_fd_select(set->max,
4290  set->rset, set->wset, set->eset,
4291  rb_hrtime2timeval(&tv, to), set->th);
4292  if (result < 0) lerrno = errno;
4293  }
4294  }, ubf_select, set->th, TRUE);
4295 
4296  RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec); /* may raise */
4297  } while (wait_retryable(&result, lerrno, to, endtime) && do_select_update());
4298 
4299  if (result < 0) {
4300  errno = lerrno;
4301  }
4302 
4303  return (VALUE)result;
4304 }
4305 
4306 int
4307 rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
4308  struct timeval *timeout)
4309 {
4310  struct select_set set;
4311 
4312  set.th = GET_THREAD();
4313  RUBY_VM_CHECK_INTS_BLOCKING(set.th->ec);
4314  set.max = max;
4315  set.rset = read;
4316  set.wset = write;
4317  set.eset = except;
4318  set.timeout = timeout;
4319 
4320  if (!set.rset && !set.wset && !set.eset) {
4321  if (!timeout) {
 4322  rb_thread_sleep_forever();
 4323  return 0;
4324  }
4325  rb_thread_wait_for(*timeout);
4326  return 0;
4327  }
4328 
4329 #define fd_init_copy(f) do { \
4330  if (set.f) { \
4331  rb_fd_resize(set.max - 1, set.f); \
4332  if (&set.orig_##f != set.f) { /* sigwait_fd */ \
4333  rb_fd_init_copy(&set.orig_##f, set.f); \
4334  } \
4335  } \
4336  else { \
4337  rb_fd_no_init(&set.orig_##f); \
4338  } \
4339  } while (0)
4340  fd_init_copy(rset);
4341  fd_init_copy(wset);
4342  fd_init_copy(eset);
4343 #undef fd_init_copy
4344 
4345  return (int)rb_ensure(do_select, (VALUE)&set, select_set_free, (VALUE)&set);
4346 }
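/*
 * Sketch of calling rb_thread_fd_select() from an extension (illustrative;
 * `fd` is a hypothetical descriptor). Unlike raw select(2), this releases
 * the GVL while blocking and re-checks Ruby interrupts afterwards:
 *
 *     rb_fdset_t rfds;
 *     rb_fd_init(&rfds);
 *     rb_fd_set(fd, &rfds);
 *     int n = rb_thread_fd_select(fd + 1, &rfds, NULL, NULL, NULL);
 *     if (n > 0 && rb_fd_isset(fd, &rfds)) {
 *         // fd is readable
 *     }
 *     rb_fd_term(&rfds);
 */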
4347 
4348 #ifdef USE_POLL
4349 
 4350 /* The same as the Linux kernel. TODO: make a platform-independent definition. */
4351 #define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
4352 #define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
4353 #define POLLEX_SET (POLLPRI)
4354 
4355 #ifndef POLLERR_SET /* defined for FreeBSD for now */
4356 # define POLLERR_SET (0)
4357 #endif
4358 
4359 static int
4360 wait_for_single_fd_blocking_region(rb_thread_t *th, struct pollfd *fds, nfds_t nfds,
4361  rb_hrtime_t *const to, volatile int *lerrno)
4362 {
4363  struct timespec ts;
4364  volatile int result = 0;
4365 
4366  *lerrno = 0;
4367  BLOCKING_REGION(th, {
4368  if (!RUBY_VM_INTERRUPTED(th->ec)) {
4369  result = ppoll(fds, nfds, rb_hrtime2timespec(&ts, to), 0);
4370  if (result < 0) *lerrno = errno;
4371  }
4372  }, ubf_select, th, TRUE);
4373  return result;
4374 }
4375 
4376 /*
4377  * returns a mask of events
4378  */
4379 int
4380 rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
4381 {
4382  struct pollfd fds[1] = {{
4383  .fd = fd,
4384  .events = (short)events,
4385  .revents = 0,
4386  }};
4387  volatile int result = 0;
4388  nfds_t nfds;
4389  struct waiting_fd wfd;
4390  enum ruby_tag_type state;
4391  volatile int lerrno;
4392 
4393  rb_execution_context_t *ec = GET_EC();
4394  rb_thread_t *th = rb_ec_thread_ptr(ec);
4395 
4396  thread_io_setup_wfd(th, fd, &wfd);
4397 
4398  if (timeout == NULL && thread_io_wait_events(th, fd, events, NULL)) {
4399  // fd is readable
4400  state = 0;
4401  fds[0].revents = events;
4402  errno = 0;
4403  }
4404  else {
4405  EC_PUSH_TAG(wfd.th->ec);
4406  if ((state = EC_EXEC_TAG()) == TAG_NONE) {
4407  rb_hrtime_t *to, rel, end = 0;
4408  RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
4409  timeout_prepare(&to, &rel, &end, timeout);
4410  do {
4411  nfds = numberof(fds);
4412  result = wait_for_single_fd_blocking_region(wfd.th, fds, nfds, to, &lerrno);
4413 
4414  RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
4415  } while (wait_retryable(&result, lerrno, to, end));
4416  }
4417  EC_POP_TAG();
4418  }
4419 
4420  thread_io_wake_pending_closer(&wfd);
4421 
4422  if (state) {
4423  EC_JUMP_TAG(wfd.th->ec, state);
4424  }
4425 
4426  if (result < 0) {
4427  errno = lerrno;
4428  return -1;
4429  }
4430 
4431  if (fds[0].revents & POLLNVAL) {
4432  errno = EBADF;
4433  return -1;
4434  }
4435 
4436  /*
 4437  * POLLIN and POLLOUT have different meanings from select(2)'s read/write bits.
 4438  * Therefore we need to fix them up.
4439  */
4440  result = 0;
4441  if (fds[0].revents & POLLIN_SET)
4442  result |= RB_WAITFD_IN;
4443  if (fds[0].revents & POLLOUT_SET)
4444  result |= RB_WAITFD_OUT;
4445  if (fds[0].revents & POLLEX_SET)
4446  result |= RB_WAITFD_PRI;
4447 
4448  /* all requested events are ready if there is an error */
4449  if (fds[0].revents & POLLERR_SET)
4450  result |= events;
4451 
4452  return result;
4453 }
4454 #else /* ! USE_POLL - implement rb_io_poll_fd() using select() */
4455 struct select_args {
4456  union {
4457  int fd;
4458  int error;
4459  } as;
4460  rb_fdset_t *read;
4461  rb_fdset_t *write;
4462  rb_fdset_t *except;
4463  struct waiting_fd wfd;
4464  struct timeval *tv;
4465 };
4466 
4467 static VALUE
4468 select_single(VALUE ptr)
4469 {
4470  struct select_args *args = (struct select_args *)ptr;
4471  int r;
4472 
4473  r = rb_thread_fd_select(args->as.fd + 1,
4474  args->read, args->write, args->except, args->tv);
4475  if (r == -1)
4476  args->as.error = errno;
4477  if (r > 0) {
4478  r = 0;
4479  if (args->read && rb_fd_isset(args->as.fd, args->read))
4480  r |= RB_WAITFD_IN;
4481  if (args->write && rb_fd_isset(args->as.fd, args->write))
4482  r |= RB_WAITFD_OUT;
4483  if (args->except && rb_fd_isset(args->as.fd, args->except))
4484  r |= RB_WAITFD_PRI;
4485  }
4486  return (VALUE)r;
4487 }
4488 
4489 static VALUE
4490 select_single_cleanup(VALUE ptr)
4491 {
4492  struct select_args *args = (struct select_args *)ptr;
4493 
4494  thread_io_wake_pending_closer(&args->wfd);
4495  if (args->read) rb_fd_term(args->read);
4496  if (args->write) rb_fd_term(args->write);
4497  if (args->except) rb_fd_term(args->except);
4498 
4499  return (VALUE)-1;
4500 }
4501 
4502 static rb_fdset_t *
4503 init_set_fd(int fd, rb_fdset_t *fds)
4504 {
4505  if (fd < 0) {
4506  return 0;
4507  }
4508  rb_fd_init(fds);
4509  rb_fd_set(fd, fds);
4510 
4511  return fds;
4512 }
4513 
4514 int
4515 rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
4516 {
4517  rb_fdset_t rfds, wfds, efds;
4518  struct select_args args;
4519  int r;
4520  VALUE ptr = (VALUE)&args;
4521  rb_execution_context_t *ec = GET_EC();
4522  rb_thread_t *th = rb_ec_thread_ptr(ec);
4523 
4524  args.as.fd = fd;
4525  args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
4526  args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
4527  args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
4528  args.tv = timeout;
4529  thread_io_setup_wfd(th, fd, &args.wfd);
4530 
4531  r = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
4532  if (r == -1)
4533  errno = args.as.error;
4534 
4535  return r;
4536 }
4537 #endif /* ! USE_POLL */
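/*
 * Sketch for the single-fd wait above (illustrative; `fd` is hypothetical):
 * wait up to 500ms for readability without blocking other Ruby threads.
 * The return value is a mask of ready RB_WAITFD_* events, 0 on timeout,
 * or -1 with errno set:
 *
 *     struct timeval tv = { 0, 500000 };
 *     int revents = rb_thread_wait_for_single_fd(fd, RB_WAITFD_IN, &tv);
 *     if (revents < 0) {
 *         // error: inspect errno
 *     }
 *     else if (revents & RB_WAITFD_IN) {
 *         // fd is readable
 *     }
 *     else {
 *         // timed out
 *     }
 */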
4538 
4539 /*
4540  * for GC
4541  */
4542 
4543 #ifdef USE_CONSERVATIVE_STACK_END
4544 void
4545 rb_gc_set_stack_end(VALUE **stack_end_p)
4546 {
4547  VALUE stack_end;
4548 COMPILER_WARNING_PUSH
4549 #if __has_warning("-Wdangling-pointer")
4550 COMPILER_WARNING_IGNORED(-Wdangling-pointer);
4551 #endif
4552  *stack_end_p = &stack_end;
4553 COMPILER_WARNING_POP
4554 }
4555 #endif
4556 
4557 /*
4558  *
4559  */
4560 
4561 void
4562 rb_threadptr_check_signal(rb_thread_t *mth)
4563 {
4564  /* mth must be main_thread */
4565  if (rb_signal_buff_size() > 0) {
4566  /* wakeup main thread */
4567  threadptr_trap_interrupt(mth);
4568  }
4569 }
4570 
4571 static void
4572 async_bug_fd(const char *mesg, int errno_arg, int fd)
4573 {
4574  char buff[64];
4575  size_t n = strlcpy(buff, mesg, sizeof(buff));
4576  if (n < sizeof(buff)-3) {
4577  ruby_snprintf(buff+n, sizeof(buff)-n, "(%d)", fd);
4578  }
4579  rb_async_bug_errno(buff, errno_arg);
4580 }
4581 
4582 /* VM-dependent API is not available for this function */
4583 static int
4584 consume_communication_pipe(int fd)
4585 {
4586 #if USE_EVENTFD
4587  uint64_t buff[1];
4588 #else
 4589  /* the buffer can be shared because no one else refers to it. */
4590  static char buff[1024];
4591 #endif
4592  ssize_t result;
4593  int ret = FALSE; /* for rb_sigwait_sleep */
4594 
4595  while (1) {
4596  result = read(fd, buff, sizeof(buff));
4597 #if USE_EVENTFD
 4598  RUBY_DEBUG_LOG("result:%d buff:%lu", (int)result, (unsigned long)buff[0]);
4599 #else
4600  RUBY_DEBUG_LOG("result:%d", (int)result);
4601 #endif
4602  if (result > 0) {
4603  ret = TRUE;
4604  if (USE_EVENTFD || result < (ssize_t)sizeof(buff)) {
4605  return ret;
4606  }
4607  }
4608  else if (result == 0) {
4609  return ret;
4610  }
4611  else if (result < 0) {
4612  int e = errno;
4613  switch (e) {
4614  case EINTR:
4615  continue; /* retry */
4616  case EAGAIN:
4617 #if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
4618  case EWOULDBLOCK:
4619 #endif
4620  return ret;
4621  default:
4622  async_bug_fd("consume_communication_pipe: read", e, fd);
4623  }
4624  }
4625  }
4626 }
4627 
4628 void
4629 rb_thread_stop_timer_thread(void)
4630 {
4631  if (TIMER_THREAD_CREATED_P() && native_stop_timer_thread()) {
4632  native_reset_timer_thread();
4633  }
4634 }
4635 
4636 void
4637 rb_thread_reset_timer_thread(void)
4638 {
4639  native_reset_timer_thread();
4640 }
4641 
4642 void
4643 rb_thread_start_timer_thread(void)
4644 {
4645  system_working = 1;
4646  rb_thread_create_timer_thread();
4647 }
4648 
4649 static int
4650 clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
4651 {
4652  int i;
4653  VALUE coverage = (VALUE)val;
4654  VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
4655  VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
4656 
4657  if (lines) {
4658  if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
4659  rb_ary_clear(lines);
4660  }
4661  else {
4662  int i;
4663  for (i = 0; i < RARRAY_LEN(lines); i++) {
4664  if (RARRAY_AREF(lines, i) != Qnil)
4665  RARRAY_ASET(lines, i, INT2FIX(0));
4666  }
4667  }
4668  }
4669  if (branches) {
4670  VALUE counters = RARRAY_AREF(branches, 1);
4671  for (i = 0; i < RARRAY_LEN(counters); i++) {
4672  RARRAY_ASET(counters, i, INT2FIX(0));
4673  }
4674  }
4675 
4676  return ST_CONTINUE;
4677 }
4678 
4679 void
4680 rb_clear_coverages(void)
4681 {
4682  VALUE coverages = rb_get_coverages();
4683  if (RTEST(coverages)) {
4684  rb_hash_foreach(coverages, clear_coverage_i, 0);
4685  }
4686 }
4687 
4688 #if defined(HAVE_WORKING_FORK)
4689 
4690 static void
4691 rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const rb_thread_t *))
4692 {
4693  rb_thread_t *i = 0;
4694  rb_vm_t *vm = th->vm;
4695  rb_ractor_t *r = th->ractor;
4696  vm->ractor.main_ractor = r;
4697  vm->ractor.main_thread = th;
4698  r->threads.main = th;
4699  r->status_ = ractor_created;
4700 
4701  thread_sched_atfork(TH_SCHED(th));
4702  ubf_list_atfork();
4703 
 4704  // OK. Only this thread accesses these lists now:
4705  ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
4706  ccan_list_for_each(&r->threads.set, i, lt_node) {
4707  atfork(i, th);
4708  }
4709  }
4710  rb_vm_living_threads_init(vm);
4711 
4712  rb_ractor_atfork(vm, th);
4713  rb_vm_postponed_job_atfork();
4714 
4715  /* may be held by RJIT threads in parent */
4716  rb_native_mutex_initialize(&vm->workqueue_lock);
4717 
4718  /* may be held by any thread in parent */
4719  rb_native_mutex_initialize(&th->interrupt_lock);
4720 
4721  vm->fork_gen++;
4722  rb_ractor_sleeper_threads_clear(th->ractor);
4723  rb_clear_coverages();
4724 
 4725  // restart the timer thread (timer threads access `vm->waitpid_lock` and so on).
4726  rb_thread_reset_timer_thread();
4727  rb_thread_start_timer_thread();
4728 
4729  VM_ASSERT(vm->ractor.blocking_cnt == 0);
4730  VM_ASSERT(vm->ractor.cnt == 1);
4731 }
4732 
4733 static void
4734 terminate_atfork_i(rb_thread_t *th, const rb_thread_t *current_th)
4735 {
4736  if (th != current_th) {
4737  rb_mutex_abandon_keeping_mutexes(th);
4738  rb_mutex_abandon_locking_mutex(th);
4739  thread_cleanup_func(th, TRUE);
4740  }
4741 }
4742 
4743 void rb_fiber_atfork(rb_thread_t *);
4744 void
4745 rb_thread_atfork(void)
4746 {
4747  rb_thread_t *th = GET_THREAD();
4748  rb_threadptr_pending_interrupt_clear(th);
4749  rb_thread_atfork_internal(th, terminate_atfork_i);
4750  th->join_list = NULL;
4751  rb_fiber_atfork(th);
4752 
 4753  /* We don't want to reproduce CVE-2003-0900. */
 4754  rb_reset_random_seed();
4755 }
4756 
4757 static void
4758 terminate_atfork_before_exec_i(rb_thread_t *th, const rb_thread_t *current_th)
4759 {
4760  if (th != current_th) {
4761  thread_cleanup_func_before_exec(th);
4762  }
4763 }
4764 
4765 void
 4766 rb_thread_atfork_before_exec(void)
 4767 {
4768  rb_thread_t *th = GET_THREAD();
4769  rb_thread_atfork_internal(th, terminate_atfork_before_exec_i);
4770 }
4771 #else
4772 void
4773 rb_thread_atfork(void)
4774 {
4775 }
4776 
4777 void
 4778 rb_thread_atfork_before_exec(void)
 4779 {
4780 }
4781 #endif
4783 struct thgroup {
4784  int enclosed;
4785 };
4786 
4787 static const rb_data_type_t thgroup_data_type = {
4788  "thgroup",
4789  {
4790  0,
 4791  0,
 4792  NULL, // No external memory to report
4793  },
4794  0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
4795 };
4796 
4797 /*
4798  * Document-class: ThreadGroup
4799  *
4800  * ThreadGroup provides a means of keeping track of a number of threads as a
4801  * group.
4802  *
4803  * A given Thread object can only belong to one ThreadGroup at a time; adding
4804  * a thread to a new group will remove it from any previous group.
4805  *
4806  * Newly created threads belong to the same group as the thread from which they
4807  * were created.
4808  */
4809 
4810 /*
4811  * Document-const: Default
4812  *
4813  * The default ThreadGroup created when Ruby starts; all Threads belong to it
4814  * by default.
4815  */
4816 static VALUE
4817 thgroup_s_alloc(VALUE klass)
4818 {
4819  VALUE group;
4820  struct thgroup *data;
4821 
4822  group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
4823  data->enclosed = 0;
4824 
4825  return group;
4826 }
4827 
4828 /*
4829  * call-seq:
4830  * thgrp.list -> array
4831  *
4832  * Returns an array of all existing Thread objects that belong to this group.
4833  *
4834  * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
4835  */
4836 
4837 static VALUE
4838 thgroup_list(VALUE group)
4839 {
4840  VALUE ary = rb_ary_new();
4841  rb_thread_t *th = 0;
4842  rb_ractor_t *r = GET_RACTOR();
4843 
4844  ccan_list_for_each(&r->threads.set, th, lt_node) {
4845  if (th->thgroup == group) {
4846  rb_ary_push(ary, th->self);
4847  }
4848  }
4849  return ary;
4850 }
4851 
4852 
4853 /*
4854  * call-seq:
4855  * thgrp.enclose -> thgrp
4856  *
4857  * Prevents threads from being added to or removed from the receiving
4858  * ThreadGroup.
4859  *
4860  * New threads can still be started in an enclosed ThreadGroup.
4861  *
4862  * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
4863  * thr = Thread.new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
4864  * tg = ThreadGroup.new #=> #<ThreadGroup:0x402752d4>
4865  * tg.add thr
4866  * #=> ThreadError: can't move from the enclosed thread group
4867  */
4868 
4869 static VALUE
4870 thgroup_enclose(VALUE group)
4871 {
4872  struct thgroup *data;
4873 
4874  TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4875  data->enclosed = 1;
4876 
4877  return group;
4878 }
4879 
4880 
4881 /*
4882  * call-seq:
4883  * thgrp.enclosed? -> true or false
4884  *
4885  * Returns +true+ if the +thgrp+ is enclosed. See also ThreadGroup#enclose.
4886  */
4887 
4888 static VALUE
4889 thgroup_enclosed_p(VALUE group)
4890 {
4891  struct thgroup *data;
4892 
4893  TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4894  return RBOOL(data->enclosed);
4895 }
4896 
4897 
4898 /*
4899  * call-seq:
4900  * thgrp.add(thread) -> thgrp
4901  *
4902  * Adds the given +thread+ to this group, removing it from any other
4903  * group to which it may have previously been a member.
4904  *
4905  * puts "Initial group is #{ThreadGroup::Default.list}"
4906  * tg = ThreadGroup.new
4907  * t1 = Thread.new { sleep }
4908  * t2 = Thread.new { sleep }
4909  * puts "t1 is #{t1}"
4910  * puts "t2 is #{t2}"
4911  * tg.add(t1)
4912  * puts "Initial group now #{ThreadGroup::Default.list}"
4913  * puts "tg group now #{tg.list}"
4914  *
4915  * This will produce:
4916  *
4917  * Initial group is #<Thread:0x401bdf4c>
4918  * t1 is #<Thread:0x401b3c90>
4919  * t2 is #<Thread:0x401b3c18>
4920  * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
4921  * tg group now #<Thread:0x401b3c90>
4922  */
4923 
4924 static VALUE
4925 thgroup_add(VALUE group, VALUE thread)
4926 {
4927  rb_thread_t *target_th = rb_thread_ptr(thread);
4928  struct thgroup *data;
4929 
4930  if (OBJ_FROZEN(group)) {
4931  rb_raise(rb_eThreadError, "can't move to the frozen thread group");
4932  }
4933  TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4934  if (data->enclosed) {
4935  rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
4936  }
4937 
4938  if (OBJ_FROZEN(target_th->thgroup)) {
4939  rb_raise(rb_eThreadError, "can't move from the frozen thread group");
4940  }
4941  TypedData_Get_Struct(target_th->thgroup, struct thgroup, &thgroup_data_type, data);
4942  if (data->enclosed) {
 4943  rb_raise(rb_eThreadError,
 4944  "can't move from the enclosed thread group");
4945  }
4946 
4947  target_th->thgroup = group;
4948  return group;
4949 }
4950 
4951 /*
4952  * Document-class: ThreadShield
4953  */
4954 static void
4955 thread_shield_mark(void *ptr)
4956 {
4957  rb_gc_mark((VALUE)ptr);
4958 }
4959 
4960 static const rb_data_type_t thread_shield_data_type = {
4961  "thread_shield",
4962  {thread_shield_mark, 0, 0,},
4963  0, 0, RUBY_TYPED_FREE_IMMEDIATELY
4964 };
4965 
4966 static VALUE
4967 thread_shield_alloc(VALUE klass)
4968 {
4969  return TypedData_Wrap_Struct(klass, &thread_shield_data_type, (void *)mutex_alloc(0));
4970 }
4971 
4972 #define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
4973 #define THREAD_SHIELD_WAITING_MASK (((FL_USER19-1)&~(FL_USER0-1))|FL_USER19)
4974 #define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
4975 #define THREAD_SHIELD_WAITING_MAX (THREAD_SHIELD_WAITING_MASK>>THREAD_SHIELD_WAITING_SHIFT)
4976 STATIC_ASSERT(THREAD_SHIELD_WAITING_MAX, THREAD_SHIELD_WAITING_MAX <= UINT_MAX);
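/*
 * The waiting count is packed into the shield object's FL_USER0..FL_USER19
 * flag bits rather than a separate field: a count of w is stored as
 * (w << THREAD_SHIELD_WAITING_SHIFT) within THREAD_SHIELD_WAITING_MASK,
 * as the inc/dec helpers below show.
 */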
4977 static inline unsigned int
4978 rb_thread_shield_waiting(VALUE b)
4979 {
4980  return ((RBASIC(b)->flags&THREAD_SHIELD_WAITING_MASK)>>THREAD_SHIELD_WAITING_SHIFT);
4981 }
4982 
4983 static inline void
4984 rb_thread_shield_waiting_inc(VALUE b)
4985 {
4986  unsigned int w = rb_thread_shield_waiting(b);
4987  w++;
4988  if (w > THREAD_SHIELD_WAITING_MAX)
4989  rb_raise(rb_eRuntimeError, "waiting count overflow");
4990  RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
4991  RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
4992 }
4993 
4994 static inline void
4995 rb_thread_shield_waiting_dec(VALUE b)
4996 {
4997  unsigned int w = rb_thread_shield_waiting(b);
4998  if (!w) rb_raise(rb_eRuntimeError, "waiting count underflow");
4999  w--;
5000  RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
5001  RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
5002 }
5003 
5004 VALUE
5005 rb_thread_shield_new(void)
5006 {
5007  VALUE thread_shield = thread_shield_alloc(rb_cThreadShield);
5008  rb_mutex_lock((VALUE)DATA_PTR(thread_shield));
5009  return thread_shield;
5010 }
5011 
5012 bool
5013 rb_thread_shield_owned(VALUE self)
5014 {
5015  VALUE mutex = GetThreadShieldPtr(self);
5016  if (!mutex) return false;
5017 
5018  rb_mutex_t *m = mutex_ptr(mutex);
5019 
5020  return m->fiber == GET_EC()->fiber_ptr;
5021 }
5022 
5023 /*
 5024  * Wait on a thread shield.
 5025  *
 5026  * Returns
 5027  * true:  acquired the thread shield
 5028  * false: the thread shield was destroyed and no other threads are waiting
 5029  * nil:   the thread shield was destroyed but is still in use
5030  */
5031 VALUE
5032 rb_thread_shield_wait(VALUE self)
5033 {
5034  VALUE mutex = GetThreadShieldPtr(self);
5035  rb_mutex_t *m;
5036 
5037  if (!mutex) return Qfalse;
5038  m = mutex_ptr(mutex);
5039  if (m->fiber == GET_EC()->fiber_ptr) return Qnil;
5040  rb_thread_shield_waiting_inc(self);
5041  rb_mutex_lock(mutex);
5042  rb_thread_shield_waiting_dec(self);
5043  if (DATA_PTR(self)) return Qtrue;
5044  rb_mutex_unlock(mutex);
5045  return rb_thread_shield_waiting(self) > 0 ? Qnil : Qfalse;
5046 }
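/*
 * Typical shield protocol, as a sketch (this mirrors how the VM serializes
 * concurrent loading of the same feature): the owner creates the shield,
 * other threads wait on it, and the owner destroys it when done.
 *
 *     VALUE shield = rb_thread_shield_new();   // creator holds the lock
 *     // ... other threads: rb_thread_shield_wait(shield) blocks here ...
 *     rb_thread_shield_destroy(shield);        // waiters wake with Qfalse/Qnil
 */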
5047 
5048 static VALUE
5049 thread_shield_get_mutex(VALUE self)
5050 {
5051  VALUE mutex = GetThreadShieldPtr(self);
5052  if (!mutex)
5053  rb_raise(rb_eThreadError, "destroyed thread shield - %p", (void *)self);
5054  return mutex;
5055 }
5056 
5057 /*
5058  * Release a thread shield, and return true if it has waiting threads.
5059  */
5060 VALUE
5061 rb_thread_shield_release(VALUE self)
5062 {
5063  VALUE mutex = thread_shield_get_mutex(self);
5064  rb_mutex_unlock(mutex);
5065  return RBOOL(rb_thread_shield_waiting(self) > 0);
5066 }
5067 
5068 /*
5069  * Release and destroy a thread shield, and return true if it has waiting threads.
5070  */
5071 VALUE
5072 rb_thread_shield_destroy(VALUE self)
5073 {
5074  VALUE mutex = thread_shield_get_mutex(self);
5075  DATA_PTR(self) = 0;
5076  rb_mutex_unlock(mutex);
5077  return RBOOL(rb_thread_shield_waiting(self) > 0);
5078 }
5079 
5080 static VALUE
5081 threadptr_recursive_hash(rb_thread_t *th)
5082 {
5083  return th->ec->local_storage_recursive_hash;
5084 }
5085 
5086 static void
5087 threadptr_recursive_hash_set(rb_thread_t *th, VALUE hash)
5088 {
5089  th->ec->local_storage_recursive_hash = hash;
5090 }
5091 
5092 ID rb_frame_last_func(void);
5093 
5094 /*
5095  * Returns the current "recursive list" used to detect recursion.
5096  * This list is a hash table, unique for the current thread and for
5097  * the current __callee__.
5098  */
5099 
5100 static VALUE
5101 recursive_list_access(VALUE sym)
5102 {
5103  rb_thread_t *th = GET_THREAD();
5104  VALUE hash = threadptr_recursive_hash(th);
5105  VALUE list;
5106  if (NIL_P(hash) || !RB_TYPE_P(hash, T_HASH)) {
5107  hash = rb_ident_hash_new();
5108  threadptr_recursive_hash_set(th, hash);
5109  list = Qnil;
5110  }
5111  else {
5112  list = rb_hash_aref(hash, sym);
5113  }
5114  if (NIL_P(list) || !RB_TYPE_P(list, T_HASH)) {
5115  list = rb_ident_hash_new();
5116  rb_hash_aset(hash, sym, list);
5117  }
5118  return list;
5119 }
5120 
5121 /*
5122  * Returns true if and only if obj (or the pair <obj, paired_obj>) is already
5123  * in the recursion list.
5124  * Assumes the recursion list is valid.
5125  */
5126 
5127 static bool
5128 recursive_check(VALUE list, VALUE obj, VALUE paired_obj_id)
5129 {
5130 #if SIZEOF_LONG == SIZEOF_VOIDP
5131  #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
5132 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
5133  #define OBJ_ID_EQL(obj_id, other) (RB_BIGNUM_TYPE_P((obj_id)) ? \
5134  rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
5135 #endif
5136 
5137  VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5138  if (UNDEF_P(pair_list))
5139  return false;
5140  if (paired_obj_id) {
5141  if (!RB_TYPE_P(pair_list, T_HASH)) {
5142  if (!OBJ_ID_EQL(paired_obj_id, pair_list))
5143  return false;
5144  }
5145  else {
5146  if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
5147  return false;
5148  }
5149  }
5150  return true;
5151 }
5152 
5153 /*
5154  * Pushes obj (or the pair <obj, paired_obj>) in the recursion list.
5155  * For a single obj, it sets list[obj] to Qtrue.
5156  * For a pair, it sets list[obj] to paired_obj_id if possible,
5157  * otherwise list[obj] becomes a hash like:
5158  * {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
5159  * Assumes the recursion list is valid.
5160  */
5161 
5162 static void
5163 recursive_push(VALUE list, VALUE obj, VALUE paired_obj)
5164 {
5165  VALUE pair_list;
5166 
5167  if (!paired_obj) {
5168  rb_hash_aset(list, obj, Qtrue);
5169  }
5170  else if (UNDEF_P(pair_list = rb_hash_lookup2(list, obj, Qundef))) {
5171  rb_hash_aset(list, obj, paired_obj);
5172  }
5173  else {
5174  if (!RB_TYPE_P(pair_list, T_HASH)){
5175  VALUE other_paired_obj = pair_list;
5176  pair_list = rb_hash_new();
5177  rb_hash_aset(pair_list, other_paired_obj, Qtrue);
5178  rb_hash_aset(list, obj, pair_list);
5179  }
5180  rb_hash_aset(pair_list, paired_obj, Qtrue);
5181  }
5182 }
5183 
5184 /*
5185  * Pops obj (or the pair <obj, paired_obj>) from the recursion list.
5186  * For a pair, if list[obj] is a hash, then paired_obj_id is
5187  * removed from the hash and no attempt is made to simplify
 5188  * list[obj] from {only_one_paired_id => true} to only_one_paired_id.
5189  * Assumes the recursion list is valid.
5190  */
5191 
5192 static int
5193 recursive_pop(VALUE list, VALUE obj, VALUE paired_obj)
5194 {
5195  if (paired_obj) {
5196  VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5197  if (UNDEF_P(pair_list)) {
5198  return 0;
5199  }
5200  if (RB_TYPE_P(pair_list, T_HASH)) {
5201  rb_hash_delete_entry(pair_list, paired_obj);
5202  if (!RHASH_EMPTY_P(pair_list)) {
 5203  return 1; /* keep the hash until it is empty */
5204  }
5205  }
5206  }
5207  rb_hash_delete_entry(list, obj);
5208  return 1;
5209 }
5211 struct exec_recursive_params {
5212  VALUE (*func) (VALUE, VALUE, int);
5213  VALUE list;
5214  VALUE obj;
5215  VALUE pairid;
5216  VALUE arg;
5217 };
5218 
5219 static VALUE
5220 exec_recursive_i(RB_BLOCK_CALL_FUNC_ARGLIST(tag, data))
5221 {
5222  struct exec_recursive_params *p = (void *)data;
5223  return (*p->func)(p->obj, p->arg, FALSE);
5224 }
5225 
5226 /*
5227  * Calls func(obj, arg, recursive), where recursive is non-zero if the
 5228  * current method is called recursively on obj, or on the pair <obj, pairid>.
5229  * If outer is 0, then the innermost func will be called with recursive set
5230  * to true, otherwise the outermost func will be called. In the latter case,
5231  * all inner func are short-circuited by throw.
5232  * Implementation details: the value thrown is the recursive list which is
5233  * proper to the current method and unlikely to be caught anywhere else.
5234  * list[recursive_key] is used as a flag for the outermost call.
5235  */
5236 
5237 static VALUE
5238 exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer, ID mid)
5239 {
5240  VALUE result = Qundef;
5241  const VALUE sym = mid ? ID2SYM(mid) : ID2SYM(idNULL);
5242  struct exec_recursive_params p;
5243  int outermost;
5244  p.list = recursive_list_access(sym);
5245  p.obj = obj;
5246  p.pairid = pairid;
5247  p.arg = arg;
5248  outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);
5249 
5250  if (recursive_check(p.list, p.obj, pairid)) {
5251  if (outer && !outermost) {
5252  rb_throw_obj(p.list, p.list);
5253  }
5254  return (*func)(obj, arg, TRUE);
5255  }
5256  else {
5257  enum ruby_tag_type state;
5258 
5259  p.func = func;
5260 
5261  if (outermost) {
5262  recursive_push(p.list, ID2SYM(recursive_key), 0);
5263  recursive_push(p.list, p.obj, p.pairid);
5264  result = rb_catch_protect(p.list, exec_recursive_i, (VALUE)&p, &state);
5265  if (!recursive_pop(p.list, p.obj, p.pairid)) goto invalid;
5266  if (!recursive_pop(p.list, ID2SYM(recursive_key), 0)) goto invalid;
5267  if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5268  if (result == p.list) {
5269  result = (*func)(obj, arg, TRUE);
5270  }
5271  }
5272  else {
5273  volatile VALUE ret = Qundef;
5274  recursive_push(p.list, p.obj, p.pairid);
5275  EC_PUSH_TAG(GET_EC());
5276  if ((state = EC_EXEC_TAG()) == TAG_NONE) {
5277  ret = (*func)(obj, arg, FALSE);
5278  }
5279  EC_POP_TAG();
5280  if (!recursive_pop(p.list, p.obj, p.pairid)) {
5281  goto invalid;
5282  }
5283  if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5284  result = ret;
5285  }
5286  }
 5287  *(volatile struct exec_recursive_params *)&p; /* prevent p from being clobbered by longjmp */
5288  return result;
5289 
5290  invalid:
5291  rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list "
5292  "for %+"PRIsVALUE" in %+"PRIsVALUE,
5293  sym, rb_thread_current());
 5294  UNREACHABLE_RETURN(Qundef);
 5295 }
5296 
5297 /*
5298  * Calls func(obj, arg, recursive), where recursive is non-zero if the
5299  * current method is called recursively on obj
5300  */
5301 
5302 VALUE
5303 rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5304 {
5305  return exec_recursive(func, obj, 0, arg, 0, rb_frame_last_func());
5306 }
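/*
 * An illustrative sketch: this guard is what makes methods like #inspect
 * cycle-safe. At the Ruby level:
 *
 *     a = []; a << a
 *     a.inspect   #=> "[[...]]"
 *
 * A C extension can get the same behavior with a hypothetical callback
 * my_inspect_i(obj, arg, recursive) that returns a placeholder string
 * when recursive is non-zero:
 *
 *     return rb_exec_recursive(my_inspect_i, obj, 0);
 */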
5307 
5308 /*
5309  * Calls func(obj, arg, recursive), where recursive is non-zero if the
5310  * current method is called recursively on the ordered pair <obj, paired_obj>
5311  */
5312 
5313 VALUE
5314 rb_exec_recursive_paired(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5315 {
5316  return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 0, rb_frame_last_func());
5317 }
5318 
5319 /*
5320  * If recursion is detected on the current method and obj, the outermost
5321  * func will be called with (obj, arg, true). All inner func will be
5322  * short-circuited using throw.
5323  */
5324 
5325 VALUE
5326 rb_exec_recursive_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5327 {
5328  return exec_recursive(func, obj, 0, arg, 1, rb_frame_last_func());
5329 }
5330 
5331 VALUE
5332 rb_exec_recursive_outer_mid(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg, ID mid)
5333 {
5334  return exec_recursive(func, obj, 0, arg, 1, mid);
5335 }
5336 
5337 /*
5338  * If recursion is detected on the current method, obj and paired_obj,
5339  * the outermost func will be called with (obj, arg, true). All inner
5340  * func will be short-circuited using throw.
5341  */
5342 
5343 VALUE
5344 rb_exec_recursive_paired_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5345 {
5346  return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 1, rb_frame_last_func());
5347 }
5348 
5349 /*
5350  * call-seq:
5351  * thread.backtrace -> array or nil
5352  *
5353  * Returns the current backtrace of the target thread.
5354  *
5355  */
5356 
5357 static VALUE
5358 rb_thread_backtrace_m(int argc, VALUE *argv, VALUE thval)
5359 {
5360  return rb_vm_thread_backtrace(argc, argv, thval);
5361 }
5362 
5363 /* call-seq:
5364  * thread.backtrace_locations(*args) -> array or nil
5365  *
5366  * Returns the execution stack for the target thread---an array containing
5367  * backtrace location objects.
5368  *
5369  * See Thread::Backtrace::Location for more information.
5370  *
5371  * This method behaves similarly to Kernel#caller_locations except it applies
5372  * to a specific thread.
5373  */
5374 static VALUE
5375 rb_thread_backtrace_locations_m(int argc, VALUE *argv, VALUE thval)
5376 {
5377  return rb_vm_thread_backtrace_locations(argc, argv, thval);
5378 }
5379 
5380 void
5381 Init_Thread_Mutex(void)
5382 {
5383  rb_thread_t *th = GET_THREAD();
5384 
5385  rb_native_mutex_initialize(&th->vm->workqueue_lock);
5386  rb_native_mutex_initialize(&th->interrupt_lock);
5387 }
5388 
5389 /*
5390  * Document-class: ThreadError
5391  *
5392  * Raised when an invalid operation is attempted on a thread.
5393  *
5394  * For example, when no other thread has been started:
5395  *
5396  * Thread.stop
5397  *
 5398  * This will raise the following exception:
5399  *
5400  * ThreadError: stopping only thread
5401  * note: use sleep to stop forever
5402  */
5403 
5404 void
5405 Init_Thread(void)
5406 {
5407  VALUE cThGroup;
5408  rb_thread_t *th = GET_THREAD();
5409 
5410  sym_never = ID2SYM(rb_intern_const("never"));
5411  sym_immediate = ID2SYM(rb_intern_const("immediate"));
5412  sym_on_blocking = ID2SYM(rb_intern_const("on_blocking"));
5413 
5414  rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
5415  rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
5416  rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
5417  rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
5418  rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
5419  rb_define_singleton_method(rb_cThread, "stop", thread_stop, 0);
5420  rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
5421  rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
5422  rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
5423  rb_define_singleton_method(rb_cThread, "list", thread_list, 0);
5424  rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
5425  rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
5426  rb_define_singleton_method(rb_cThread, "report_on_exception", rb_thread_s_report_exc, 0);
5427  rb_define_singleton_method(rb_cThread, "report_on_exception=", rb_thread_s_report_exc_set, 1);
5428  rb_define_singleton_method(rb_cThread, "ignore_deadlock", rb_thread_s_ignore_deadlock, 0);
5429  rb_define_singleton_method(rb_cThread, "ignore_deadlock=", rb_thread_s_ignore_deadlock_set, 1);
5430  rb_define_singleton_method(rb_cThread, "handle_interrupt", rb_thread_s_handle_interrupt, 1);
5431  rb_define_singleton_method(rb_cThread, "pending_interrupt?", rb_thread_s_pending_interrupt_p, -1);
5432  rb_define_method(rb_cThread, "pending_interrupt?", rb_thread_pending_interrupt_p, -1);
5433 
5434  rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
5435  rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
5436  rb_define_method(rb_cThread, "join", thread_join_m, -1);
5437  rb_define_method(rb_cThread, "value", thread_value, 0);
 5438  rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
 5439  rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
 5440  rb_define_method(rb_cThread, "exit", rb_thread_kill, 0);
 5441  rb_define_method(rb_cThread, "run", rb_thread_run, 0);
 5442  rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
5443  rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
5444  rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
5445  rb_define_method(rb_cThread, "fetch", rb_thread_fetch, -1);
5446  rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
5447  rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
5448  rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
5449  rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
5450  rb_define_method(rb_cThread, "status", rb_thread_status, 0);
5451  rb_define_method(rb_cThread, "thread_variable_get", rb_thread_variable_get, 1);
5452  rb_define_method(rb_cThread, "thread_variable_set", rb_thread_variable_set, 2);
5453  rb_define_method(rb_cThread, "thread_variables", rb_thread_variables, 0);
5454  rb_define_method(rb_cThread, "thread_variable?", rb_thread_variable_p, 1);
5455  rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
5456  rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
5457  rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
5458  rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
5459  rb_define_method(rb_cThread, "report_on_exception", rb_thread_report_exc, 0);
5460  rb_define_method(rb_cThread, "report_on_exception=", rb_thread_report_exc_set, 1);
5461  rb_define_method(rb_cThread, "group", rb_thread_group, 0);
5462  rb_define_method(rb_cThread, "backtrace", rb_thread_backtrace_m, -1);
5463  rb_define_method(rb_cThread, "backtrace_locations", rb_thread_backtrace_locations_m, -1);
5464 
5465  rb_define_method(rb_cThread, "name", rb_thread_getname, 0);
5466  rb_define_method(rb_cThread, "name=", rb_thread_setname, 1);
5467  rb_define_method(rb_cThread, "native_thread_id", rb_thread_native_thread_id, 0);
5468  rb_define_method(rb_cThread, "to_s", rb_thread_to_s, 0);
5469  rb_define_alias(rb_cThread, "inspect", "to_s");
5470 
5471  rb_vm_register_special_exception(ruby_error_stream_closed, rb_eIOError,
5472  "stream closed in another thread");
5473 
5474  cThGroup = rb_define_class("ThreadGroup", rb_cObject);
5475  rb_define_alloc_func(cThGroup, thgroup_s_alloc);
5476  rb_define_method(cThGroup, "list", thgroup_list, 0);
5477  rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
5478  rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
5479  rb_define_method(cThGroup, "add", thgroup_add, 1);
5480 
5481  {
5482  th->thgroup = th->ractor->thgroup_default = rb_obj_alloc(cThGroup);
5483  rb_define_const(cThGroup, "Default", th->thgroup);
5484  }
5485 
5487 
5488  /* init thread core */
5489  {
5490  /* main thread setting */
5491  {
5492  /* acquire global vm lock */
5493 #ifdef HAVE_PTHREAD_NP_H
5494  VM_ASSERT(TH_SCHED(th)->running == th);
5495 #endif
5496  // thread_sched_to_running() should not be called because
5497  // it assumes blocked by thread_sched_to_waiting().
5498  // thread_sched_to_running(sched, th);
5499 
5500  th->pending_interrupt_queue = rb_ary_hidden_new(0);
5501  th->pending_interrupt_queue_checked = 0;
5502  th->pending_interrupt_mask_stack = rb_ary_hidden_new(0);
5503  }
5504  }
5505 
5506  rb_thread_create_timer_thread();
5507 
5508  Init_thread_sync();
5509 
5510  // TODO: Suppress unused function warning for now
5511  // if (0) rb_thread_sched_destroy(NULL);
5512 }
5513 
5514 int
 5515 ruby_native_thread_p(void)
 5516 {
5517  rb_thread_t *th = ruby_thread_from_native();
5518 
5519  return th != 0;
5520 }
5521 
5522 #ifdef NON_SCALAR_THREAD_ID
5523  #define thread_id_str(th) (NULL)
5524 #else
5525  #define thread_id_str(th) ((void *)(uintptr_t)(th)->nt->thread_id)
5526 #endif
5527 
5528 static void
5529 debug_deadlock_check(rb_ractor_t *r, VALUE msg)
5530 {
5531  rb_thread_t *th = 0;
5532  VALUE sep = rb_str_new_cstr("\n ");
5533 
5534  rb_str_catf(msg, "\n%d threads, %d sleeps current:%p main thread:%p\n",
5535  rb_ractor_living_thread_num(r), rb_ractor_sleeper_thread_num(r),
5536  (void *)GET_THREAD(), (void *)r->threads.main);
5537 
5538  ccan_list_for_each(&r->threads.set, th, lt_node) {
5539  rb_str_catf(msg, "* %+"PRIsVALUE"\n rb_thread_t:%p "
5540  "native:%p int:%u",
5541  th->self, (void *)th, th->nt ? thread_id_str(th) : "N/A", th->ec->interrupt_flag);
5542 
5543  if (th->locking_mutex) {
5544  rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5545  rb_str_catf(msg, " mutex:%p cond:%"PRIuSIZE,
5546  (void *)mutex->fiber, rb_mutex_num_waiting(mutex));
5547  }
5548 
5549  {
5550  struct rb_waiting_list *list = th->join_list;
5551  while (list) {
 5552  rb_str_catf(msg, "\n depended by: rb_thread_t:%p", (void *)list->thread);
5553  list = list->next;
5554  }
5555  }
5556  rb_str_catf(msg, "\n ");
5557  rb_str_concat(msg, rb_ary_join(rb_ec_backtrace_str_ary(th->ec, 0, 0), sep));
5558  rb_str_catf(msg, "\n");
5559  }
5560 }
5561 
5562 static void
5563 rb_check_deadlock(rb_ractor_t *r)
5564 {
5565  if (GET_THREAD()->vm->thread_ignore_deadlock) return;
5566 
5567 #ifdef RUBY_THREAD_PTHREAD_H
5568  if (r->threads.sched.readyq_cnt > 0) return;
5569 #endif
5570 
5571  int sleeper_num = rb_ractor_sleeper_thread_num(r);
5572  int ltnum = rb_ractor_living_thread_num(r);
5573 
5574  if (ltnum > sleeper_num) return;
5575  if (ltnum < sleeper_num) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
5576 
5577  int found = 0;
5578  rb_thread_t *th = NULL;
5579 
5580  ccan_list_for_each(&r->threads.set, th, lt_node) {
5581  if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th->ec)) {
5582  found = 1;
5583  }
5584  else if (th->locking_mutex) {
5585  rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5586  if (mutex->fiber == th->ec->fiber_ptr || (!mutex->fiber && !ccan_list_empty(&mutex->waitq))) {
5587  found = 1;
5588  }
5589  }
5590  if (found)
5591  break;
5592  }
5593 
5594  if (!found) {
5595  VALUE argv[2];
5596  argv[0] = rb_eFatal;
5597  argv[1] = rb_str_new2("No live threads left. Deadlock?");
5598  debug_deadlock_check(r, argv[1]);
5599  rb_ractor_sleeper_threads_dec(GET_RACTOR());
5600  rb_threadptr_raise(r->threads.main, 2, argv);
5601  }
5602 }
5603 
5604 // Used for VM memsize reporting. Returns the size of a list of waiting_fd
5605 // structs. Defined here because the struct definition lives here as well.
5606 size_t
5607 rb_vm_memsize_waiting_fds(struct ccan_list_head *waiting_fds)
5608 {
5609  struct waiting_fd *waitfd = 0;
5610  size_t size = 0;
5611 
5612  ccan_list_for_each(waiting_fds, waitfd, wfd_node) {
5613  size += sizeof(struct waiting_fd);
5614  }
5615 
5616  return size;
5617 }
5618 
5619 static void
5620 update_line_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5621 {
5622  const rb_control_frame_t *cfp = GET_EC()->cfp;
5623  VALUE coverage = rb_iseq_coverage(cfp->iseq);
5624  if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5625  VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
5626  if (lines) {
5627  long line = rb_sourceline() - 1;
5628  long count;
5629  VALUE num;
5630  void rb_iseq_clear_event_flags(const rb_iseq_t *iseq, size_t pos, rb_event_flag_t reset);
5631  if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
5632  rb_iseq_clear_event_flags(cfp->iseq, cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1, RUBY_EVENT_COVERAGE_LINE);
5633  rb_ary_push(lines, LONG2FIX(line + 1));
5634  return;
5635  }
5636  if (line >= RARRAY_LEN(lines)) { /* no longer tracked */
5637  return;
5638  }
5639  num = RARRAY_AREF(lines, line);
5640  if (!FIXNUM_P(num)) return;
5641  count = FIX2LONG(num) + 1;
5642  if (POSFIXABLE(count)) {
5643  RARRAY_ASET(lines, line, LONG2FIX(count));
5644  }
5645  }
5646  }
5647 }
5648 
5649 static void
5650 update_branch_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5651 {
5652  const rb_control_frame_t *cfp = GET_EC()->cfp;
5653  VALUE coverage = rb_iseq_coverage(cfp->iseq);
5654  if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5655  VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
5656  if (branches) {
5657  long pc = cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1;
5658  long idx = FIX2INT(RARRAY_AREF(ISEQ_PC2BRANCHINDEX(cfp->iseq), pc)), count;
5659  VALUE counters = RARRAY_AREF(branches, 1);
5660  VALUE num = RARRAY_AREF(counters, idx);
5661  count = FIX2LONG(num) + 1;
5662  if (POSFIXABLE(count)) {
5663  RARRAY_ASET(counters, idx, LONG2FIX(count));
5664  }
5665  }
5666  }
5667 }
5668 
5669 const rb_method_entry_t *
5670 rb_resolve_me_location(const rb_method_entry_t *me, VALUE resolved_location[5])
5671 {
5672  VALUE path, beg_pos_lineno, beg_pos_column, end_pos_lineno, end_pos_column;
5673 
5674  if (!me->def) return NULL; // negative cme
5675 
5676  retry:
5677  switch (me->def->type) {
5678  case VM_METHOD_TYPE_ISEQ: {
5679  const rb_iseq_t *iseq = me->def->body.iseq.iseqptr;
5680  rb_iseq_location_t *loc = &ISEQ_BODY(iseq)->location;
5681  path = rb_iseq_path(iseq);
5682  beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5683  beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5684  end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5685  end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5686  break;
5687  }
5688  case VM_METHOD_TYPE_BMETHOD: {
5689  const rb_iseq_t *iseq = rb_proc_get_iseq(me->def->body.bmethod.proc, 0);
5690  if (iseq) {
5691  rb_iseq_location_t *loc;
5692  rb_iseq_check(iseq);
5693  path = rb_iseq_path(iseq);
5694  loc = &ISEQ_BODY(iseq)->location;
5695  beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5696  beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5697  end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5698  end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5699  break;
5700  }
5701  return NULL;
5702  }
5703  case VM_METHOD_TYPE_ALIAS:
5704  me = me->def->body.alias.original_me;
5705  goto retry;
5706  case VM_METHOD_TYPE_REFINED:
5707  me = me->def->body.refined.orig_me;
5708  if (!me) return NULL;
5709  goto retry;
5710  default:
5711  return NULL;
5712  }
5713 
5714  /* found */
5715  if (RB_TYPE_P(path, T_ARRAY)) {
5716  path = rb_ary_entry(path, 1);
5717  if (!RB_TYPE_P(path, T_STRING)) return NULL; /* just for the case... */
5718  }
5719  if (resolved_location) {
5720  resolved_location[0] = path;
5721  resolved_location[1] = beg_pos_lineno;
5722  resolved_location[2] = beg_pos_column;
5723  resolved_location[3] = end_pos_lineno;
5724  resolved_location[4] = end_pos_column;
5725  }
5726  return me;
5727 }
5728 
5729 static void
5730 update_method_coverage(VALUE me2counter, rb_trace_arg_t *trace_arg)
5731 {
5732  const rb_control_frame_t *cfp = GET_EC()->cfp;
5733  const rb_callable_method_entry_t *cme = rb_vm_frame_method_entry(cfp);
5734  const rb_method_entry_t *me = (const rb_method_entry_t *)cme;
5735  VALUE rcount;
5736  long count;
5737 
5738  me = rb_resolve_me_location(me, 0);
5739  if (!me) return;
5740 
5741  rcount = rb_hash_aref(me2counter, (VALUE) me);
5742  count = FIXNUM_P(rcount) ? FIX2LONG(rcount) + 1 : 1;
5743  if (POSFIXABLE(count)) {
5744  rb_hash_aset(me2counter, (VALUE) me, LONG2FIX(count));
5745  }
5746 }
5747 
5748 VALUE
5749 rb_get_coverages(void)
5750 {
5751  return GET_VM()->coverages;
5752 }
5753 
5754 int
5755 rb_get_coverage_mode(void)
5756 {
5757  return GET_VM()->coverage_mode;
5758 }
5759 
5760 void
5761 rb_set_coverages(VALUE coverages, int mode, VALUE me2counter)
5762 {
5763  GET_VM()->coverages = coverages;
5764  GET_VM()->me2counter = me2counter;
5765  GET_VM()->coverage_mode = mode;
5766 }
5767 
5768 void
5769 rb_resume_coverages(void)
5770 {
5771  int mode = GET_VM()->coverage_mode;
5772  VALUE me2counter = GET_VM()->me2counter;
5773  rb_add_event_hook2((rb_event_hook_func_t) update_line_coverage, RUBY_EVENT_COVERAGE_LINE, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5774  if (mode & COVERAGE_TARGET_BRANCHES) {
5775  rb_add_event_hook2((rb_event_hook_func_t) update_branch_coverage, RUBY_EVENT_COVERAGE_BRANCH, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5776  }
5777  if (mode & COVERAGE_TARGET_METHODS) {
5778  rb_add_event_hook2((rb_event_hook_func_t) update_method_coverage, RUBY_EVENT_CALL, me2counter, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5779  }
5780 }
5781 
5782 void
5783 rb_suspend_coverages(void)
5784 {
5785  rb_remove_event_hook((rb_event_hook_func_t) update_line_coverage);
5786  if (GET_VM()->coverage_mode & COVERAGE_TARGET_BRANCHES) {
5787  rb_remove_event_hook((rb_event_hook_func_t) update_branch_coverage);
5788  }
5789  if (GET_VM()->coverage_mode & COVERAGE_TARGET_METHODS) {
5790  rb_remove_event_hook((rb_event_hook_func_t) update_method_coverage);
5791  }
5792 }
5793 
5794 /* Make coverage arrays empty so old covered files are no longer tracked. */
5795 void
5796 rb_reset_coverages(void)
5797 {
5798  rb_clear_coverages();
5799  rb_iseq_remove_coverage_all();
5800  GET_VM()->coverages = Qfalse;
5801 }
5802 
5803 VALUE
5804 rb_default_coverage(int n)
5805 {
5806  VALUE coverage = rb_ary_hidden_new_fill(3);
5807  VALUE lines = Qfalse, branches = Qfalse;
5808  int mode = GET_VM()->coverage_mode;
5809 
5810  if (mode & COVERAGE_TARGET_LINES) {
5811  lines = n > 0 ? rb_ary_hidden_new_fill(n) : rb_ary_hidden_new(0);
5812  }
5813  RARRAY_ASET(coverage, COVERAGE_INDEX_LINES, lines);
5814 
5815  if (mode & COVERAGE_TARGET_BRANCHES) {
5816  branches = rb_ary_hidden_new_fill(2);
5817  /* internal data structures for branch coverage:
5818  *
5819  * { branch base node =>
5820  * [base_type, base_first_lineno, base_first_column, base_last_lineno, base_last_column, {
5821  * branch target id =>
5822  * [target_type, target_first_lineno, target_first_column, target_last_lineno, target_last_column, target_counter_index],
5823  * ...
5824  * }],
5825  * ...
5826  * }
5827  *
5828  * Example:
5829  * { NODE_CASE =>
5830  * [1, 0, 4, 3, {
5831  * NODE_WHEN => [2, 8, 2, 9, 0],
5832  * NODE_WHEN => [3, 8, 3, 9, 1],
5833  * ...
5834  * }],
5835  * ...
5836  * }
5837  */
5838  VALUE structure = rb_hash_new();
5839  rb_obj_hide(structure);
5840  RARRAY_ASET(branches, 0, structure);
5841  /* branch execution counters */
5842  RARRAY_ASET(branches, 1, rb_ary_hidden_new(0));
5843  }
5844  RARRAY_ASET(coverage, COVERAGE_INDEX_BRANCHES, branches);
5845 
5846  return coverage;
5847 }
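
/* Illustrative sketch (editorial, not part of thread.c): given the
 * two-element branches array built above -- [structure, counters] --
 * the hit count for one branch target is reached through the
 * target_counter_index stored in the structure hash:
 *
 *     VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
 *     VALUE counters = RARRAY_AREF(branches, 1);
 *     long hits = FIX2LONG(RARRAY_AREF(counters, target_counter_index));
 */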
5848 
5849 static VALUE
5850 uninterruptible_exit(VALUE v)
5851 {
5852  rb_thread_t *cur_th = GET_THREAD();
5853  rb_ary_pop(cur_th->pending_interrupt_mask_stack);
5854 
5855  cur_th->pending_interrupt_queue_checked = 0;
5856  if (!rb_threadptr_pending_interrupt_empty_p(cur_th)) {
5857  RUBY_VM_SET_INTERRUPT(cur_th->ec);
5858  }
5859  return Qnil;
5860 }
5861 
5862 VALUE
5863 rb_uninterruptible(VALUE (*b_proc)(VALUE), VALUE data)
5864 {
5865  VALUE interrupt_mask = rb_ident_hash_new();
5866  rb_thread_t *cur_th = GET_THREAD();
5867 
5868  rb_hash_aset(interrupt_mask, rb_cObject, sym_never);
5869  OBJ_FREEZE(interrupt_mask);
5870  rb_ary_push(cur_th->pending_interrupt_mask_stack, interrupt_mask);
5871 
5872  VALUE ret = rb_ensure(b_proc, data, uninterruptible_exit, Qnil);
5873 
5874  RUBY_VM_CHECK_INTS(cur_th->ec);
5875  return ret;
5876 }
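
/* Usage sketch (editorial; `critical_body` is a hypothetical callback):
 * rb_uninterruptible() runs a C function with asynchronous interrupts
 * such as Thread#raise and Thread#kill deferred until it returns; the
 * RUBY_VM_CHECK_INTS above then delivers anything that was deferred,
 * so interrupts are delayed, never dropped:
 *
 *     static VALUE
 *     critical_body(VALUE arg)
 *     {
 *         // work that must not be cut short by a Ruby-level interrupt
 *         return Qnil;
 *     }
 *
 *     rb_uninterruptible(critical_body, Qnil);
 */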
5877 
5878 static void
5879 thread_specific_storage_alloc(rb_thread_t *th)
5880 {
5881  VM_ASSERT(th->specific_storage == NULL);
5882 
5883  if (UNLIKELY(specific_key_count > 0)) {
5884  th->specific_storage = ZALLOC_N(void *, RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5885  }
5886 }
5887 
5888 rb_internal_thread_specific_key_t
5889 rb_internal_thread_specific_key_create(void)
5890 {
5891  rb_vm_t *vm = GET_VM();
5892 
5893  if (specific_key_count == 0 && vm->ractor.cnt > 1) {
5894  rb_raise(rb_eThreadError, "the first rb_internal_thread_specific_key_create() call must happen before any additional ractor is created");
5895  }
5896  else if (specific_key_count > RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX) {
5897  rb_raise(rb_eThreadError, "rb_internal_thread_specific_key_create() is called more than %d times", RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5898  }
5899  else {
5900  rb_internal_thread_specific_key_t key = specific_key_count++;
5901 
5902  if (key == 0) {
5903  // first key created: allocate per-thread storage for every existing thread in this ractor
5904  rb_ractor_t *cr = GET_RACTOR();
5905  rb_thread_t *th;
5906 
5907  ccan_list_for_each(&cr->threads.set, th, lt_node) {
5908  thread_specific_storage_alloc(th);
5909  }
5910  }
5911  return key;
5912  }
5913 }
5914 
5915 // Async-signal-safe and safe to call from native (non-Ruby) threads.
5916 void *
5917 rb_internal_thread_specific_get(VALUE thread_val, rb_internal_thread_specific_key_t key)
5918 {
5919  rb_thread_t *th = DATA_PTR(thread_val);
5920 
5921  VM_ASSERT(rb_thread_ptr(thread_val) == th);
5922  VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5923  VM_ASSERT(th->specific_storage);
5924 
5925  return th->specific_storage[key];
5926 }
5927 
5928 // Async-signal-safe and safe to call from native (non-Ruby) threads.
5929 void
5930 rb_internal_thread_specific_set(VALUE thread_val, rb_internal_thread_specific_key_t key, void *data)
5931 {
5932  rb_thread_t *th = DATA_PTR(thread_val);
5933 
5934  VM_ASSERT(rb_thread_ptr(thread_val) == th);
5935  VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5936  VM_ASSERT(th->specific_storage);
5937 
5938  th->specific_storage[key] = data;
5939 }