Ruby  3.4.0dev (2024-11-22 revision 37a72b0150ec36b4ea27175039afc28c62207b0c)
thread.c (37a72b0150ec36b4ea27175039afc28c62207b0c)
1 /**********************************************************************
2 
3  thread.c -
4 
5  $Author$
6 
7  Copyright (C) 2004-2007 Koichi Sasada
8 
9 **********************************************************************/
10 
11 /*
12  YARV Thread Design
13 
14  model 1: Userlevel Thread
 15  Same as the traditional ruby thread.
16 
17  model 2: Native Thread with Global VM lock
 18  Uses pthreads (or Windows threads); Ruby threads run concurrently.
19 
20  model 3: Native Thread with fine grain lock
 21  Uses pthreads; Ruby threads run concurrently or in parallel.
22 
23  model 4: M:N User:Native threads with Global VM lock
 24  A combination of models 1 and 2.
25 
26  model 5: M:N User:Native thread with fine grain lock
 27  A combination of models 1 and 3.
28 
29 ------------------------------------------------------------------------
30 
31  model 2:
 32  Only the thread that holds the mutex (GVL: Global VM Lock, or Giant
 33  VM Lock) can run. At a scheduling point, the running thread releases
 34  the GVL. If the running thread starts a blocking operation, it must
 35  release the GVL so another thread can continue. After the blocking
 36  operation, the thread must check for interrupts (RUBY_VM_CHECK_INTS).
 37 
 38  Separate VMs can run in parallel.
 39 
 40  Ruby threads are scheduled by the OS thread scheduler.
41 
42 ------------------------------------------------------------------------
43 
44  model 3:
 45  All threads run concurrently or in parallel, so exclusive access control
 46  is needed for shared objects. For example, to access a String or Array
 47  object, a fine-grained lock must be taken on every access.
48  */
49 
50 
51 /*
52  * FD_SET, FD_CLR and FD_ISSET have a small sanity check when using glibc
 53  * 2.15 or later with _FORTIFY_SOURCE > 0.
 54  * However, the implementation is wrong. Even though Linux's select(2)
 55  * supports large fd sizes (>FD_SETSIZE), it wrongly assumes fd is always
 56  * less than FD_SETSIZE (i.e. 1024). Then, when HAVE_RB_FD_INIT is enabled,
 57  * it doesn't work correctly and makes the program abort. Therefore we need
 58  * to disable _FORTIFY_SOURCE until glibc fixes it.
59  */
60 #undef _FORTIFY_SOURCE
61 #undef __USE_FORTIFY_LEVEL
62 #define __USE_FORTIFY_LEVEL 0
63 
64 /* for model 2 */
65 
66 #include "ruby/internal/config.h"
67 
68 #ifdef __linux__
69 // Normally, gcc(1) translates calls to alloca() with inlined code. This is not done when either the -ansi, -std=c89, -std=c99, or the -std=c11 option is given and the header <alloca.h> is not included.
70 # include <alloca.h>
71 #endif
72 
73 #define TH_SCHED(th) (&(th)->ractor->threads.sched)
74 
75 #include "eval_intern.h"
76 #include "hrtime.h"
77 #include "internal.h"
78 #include "internal/class.h"
79 #include "internal/cont.h"
80 #include "internal/error.h"
81 #include "internal/gc.h"
82 #include "internal/hash.h"
83 #include "internal/io.h"
84 #include "internal/object.h"
85 #include "internal/proc.h"
86 #include "ruby/fiber/scheduler.h"
87 #include "internal/signal.h"
88 #include "internal/thread.h"
89 #include "internal/time.h"
90 #include "internal/warnings.h"
91 #include "iseq.h"
92 #include "rjit.h"
93 #include "ruby/debug.h"
94 #include "ruby/io.h"
95 #include "ruby/thread.h"
96 #include "ruby/thread_native.h"
97 #include "timev.h"
98 #include "vm_core.h"
99 #include "ractor_core.h"
100 #include "vm_debug.h"
101 #include "vm_sync.h"
102 
103 #if USE_RJIT && defined(HAVE_SYS_WAIT_H)
104 #include <sys/wait.h>
105 #endif
106 
107 #ifndef USE_NATIVE_THREAD_PRIORITY
108 #define USE_NATIVE_THREAD_PRIORITY 0
109 #define RUBY_THREAD_PRIORITY_MAX 3
110 #define RUBY_THREAD_PRIORITY_MIN -3
111 #endif
112 
113 static VALUE rb_cThreadShield;
114 
115 static VALUE sym_immediate;
116 static VALUE sym_on_blocking;
117 static VALUE sym_never;
118 
119 #define THREAD_LOCAL_STORAGE_INITIALISED FL_USER13
120 #define THREAD_LOCAL_STORAGE_INITIALISED_P(th) RB_FL_TEST_RAW((th), THREAD_LOCAL_STORAGE_INITIALISED)
121 
122 static inline VALUE
123 rb_thread_local_storage(VALUE thread)
124 {
125  if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
126  rb_ivar_set(thread, idLocals, rb_hash_new());
127  RB_FL_SET_RAW(thread, THREAD_LOCAL_STORAGE_INITIALISED);
128  }
129  return rb_ivar_get(thread, idLocals);
130 }
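/* Editorial sketch (not part of the original source): callers fetch the
 * lazily created locals Hash and use ordinary Hash APIs on it; the key
 * below is a hypothetical example.
 *
 *   VALUE locals = rb_thread_local_storage(thread);
 *   rb_hash_aset(locals, ID2SYM(rb_intern("name")), rb_str_new_cstr("worker"));
 *   VALUE name = rb_hash_aref(locals, ID2SYM(rb_intern("name")));
 */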
131 
132 enum SLEEP_FLAGS {
133  SLEEP_DEADLOCKABLE = 0x01,
134  SLEEP_SPURIOUS_CHECK = 0x02,
135  SLEEP_ALLOW_SPURIOUS = 0x04,
136  SLEEP_NO_CHECKINTS = 0x08,
137 };
138 
139 static void sleep_forever(rb_thread_t *th, unsigned int fl);
140 static int sleep_hrtime(rb_thread_t *, rb_hrtime_t, unsigned int fl);
141 
142 static void rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end);
143 static int rb_threadptr_dead(rb_thread_t *th);
144 static void rb_check_deadlock(rb_ractor_t *r);
145 static int rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th);
146 static const char *thread_status_name(rb_thread_t *th, int detail);
147 static int hrtime_update_expire(rb_hrtime_t *, const rb_hrtime_t);
148 NORETURN(static void async_bug_fd(const char *mesg, int errno_arg, int fd));
149 MAYBE_UNUSED(static int consume_communication_pipe(int fd));
150 
151 static volatile int system_working = 1;
152 static rb_internal_thread_specific_key_t specific_key_count;
153 
154 struct waiting_fd {
155  struct ccan_list_node wfd_node; /* <=> vm.waiting_fds */
156  rb_thread_t *th;
157  int fd;
158  struct rb_io_close_wait_list *busy;
159 };
160 
161 /********************************************************************************/
162 
163 #define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
164 
166  enum rb_thread_status prev_status;
167 };
168 
169 static int unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted);
170 static void unblock_function_clear(rb_thread_t *th);
171 
172 static inline int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
173  rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted);
174 static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);
175 
176 #define THREAD_BLOCKING_BEGIN(th) do { \
177  struct rb_thread_sched * const sched = TH_SCHED(th); \
178  RB_VM_SAVE_MACHINE_CONTEXT(th); \
179  thread_sched_to_waiting((sched), (th));
180 
181 #define THREAD_BLOCKING_END(th) \
182  thread_sched_to_running((sched), (th)); \
183  rb_ractor_thread_switch(th->ractor, th); \
184 } while(0)
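/* Editorial sketch: these paired macros bracket a native wait so the
 * scheduler treats the thread as blocked; note they open and close a
 * shared `do { ... } while(0)` block, so they must be used as a pair.
 * The body below is illustrative only:
 *
 *   THREAD_BLOCKING_BEGIN(th);
 *   {
 *       native_sleep(th, &rel); // any native wait
 *   }
 *   THREAD_BLOCKING_END(th);
 */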
185 
186 #ifdef __GNUC__
187 #ifdef HAVE_BUILTIN___BUILTIN_CHOOSE_EXPR_CONSTANT_P
188 #define only_if_constant(expr, notconst) __builtin_choose_expr(__builtin_constant_p(expr), (expr), (notconst))
189 #else
190 #define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
191 #endif
192 #else
193 #define only_if_constant(expr, notconst) notconst
194 #endif
195 #define BLOCKING_REGION(th, exec, ubf, ubfarg, fail_if_interrupted) do { \
196  struct rb_blocking_region_buffer __region; \
197  if (blocking_region_begin(th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
198  /* always return true unless fail_if_interrupted */ \
199  !only_if_constant(fail_if_interrupted, TRUE)) { \
200  /* Important that this is inlined into the macro, and not part of \
201  * blocking_region_begin - see bug #20493 */ \
202  RB_VM_SAVE_MACHINE_CONTEXT(th); \
203  thread_sched_to_waiting(TH_SCHED(th), th); \
204  exec; \
205  blocking_region_end(th, &__region); \
206  }; \
207 } while(0)
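/* Editorial note: BLOCKING_REGION is used later in this file (see rb_nogvl)
 * with a compound-statement body, e.g.:
 *
 *   BLOCKING_REGION(th, {
 *       val = func(data1);
 *       saved_errno = rb_errno();
 *   }, ubf, data2, flags & RB_NOGVL_INTR_FAIL);
 */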
208 
209 /*
210  * returns true if this thread was spuriously interrupted, false otherwise
211  * (e.g. hit by Thread#run or ran a Ruby-level Signal.trap handler)
212  */
213 #define RUBY_VM_CHECK_INTS_BLOCKING(ec) vm_check_ints_blocking(ec)
214 static inline int
215 vm_check_ints_blocking(rb_execution_context_t *ec)
216 {
217  rb_thread_t *th = rb_ec_thread_ptr(ec);
218 
219  if (LIKELY(rb_threadptr_pending_interrupt_empty_p(th))) {
220  if (LIKELY(!RUBY_VM_INTERRUPTED_ANY(ec))) return FALSE;
221  }
222  else {
223  th->pending_interrupt_queue_checked = 0;
224  RUBY_VM_SET_INTERRUPT(ec);
225  }
226  return rb_threadptr_execute_interrupts(th, 1);
227 }
228 
229 int
230 rb_vm_check_ints_blocking(rb_execution_context_t *ec)
231 {
232  return vm_check_ints_blocking(ec);
233 }
234 
235 /*
236  * poll() is supported by many OSes, but so far Linux is the only
237  * one we know of that supports using poll() in all places select()
238  * would work.
239  */
240 #if defined(HAVE_POLL)
241 # if defined(__linux__)
242 # define USE_POLL
243 # endif
244 # if defined(__FreeBSD_version) && __FreeBSD_version >= 1100000
245 # define USE_POLL
246  /* FreeBSD does not set POLLOUT when POLLHUP happens */
247 # define POLLERR_SET (POLLHUP | POLLERR)
248 # endif
249 #endif
250 
251 static void
252 timeout_prepare(rb_hrtime_t **to, rb_hrtime_t *rel, rb_hrtime_t *end,
253  const struct timeval *timeout)
254 {
255  if (timeout) {
256  *rel = rb_timeval2hrtime(timeout);
257  *end = rb_hrtime_add(rb_hrtime_now(), *rel);
258  *to = rel;
259  }
260  else {
261  *to = 0;
262  }
263 }
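/* Editorial sketch: a caller prepares the relative/absolute pair once and
 * then loops, re-arming the relative timeout after spurious wakeups. The
 * loop body is hypothetical; a real caller breaks out when the wait
 * succeeds:
 *
 *   rb_hrtime_t rel, end, *to;
 *   timeout_prepare(&to, &rel, &end, timeout); // timeout may be NULL
 *   do {
 *       // ... wait (up to *to when a timeout was given) ...
 *   } while (to == NULL || !hrtime_update_expire(to, end));
 */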
264 
265 MAYBE_UNUSED(NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start)));
266 MAYBE_UNUSED(static bool th_has_dedicated_nt(const rb_thread_t *th));
267 MAYBE_UNUSED(static int waitfd_to_waiting_flag(int wfd_event));
268 
269 #include THREAD_IMPL_SRC
270 
271 /*
272  * TODO: somebody with win32 knowledge should be able to get rid of
273  * timer-thread by busy-waiting on signals. And it should be possible
274  * to make the GVL in thread_pthread.c be platform-independent.
275  */
276 #ifndef BUSY_WAIT_SIGNALS
277 # define BUSY_WAIT_SIGNALS (0)
278 #endif
279 
280 #ifndef USE_EVENTFD
281 # define USE_EVENTFD (0)
282 #endif
283 
284 #include "thread_sync.c"
285 
286 void
287 rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
288 {
289  rb_native_mutex_initialize(lock);
290 }
291 
292 void
293 rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
294 {
295  rb_native_mutex_destroy(lock);
296 }
297 
298 void
299 rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
300 {
301  rb_native_mutex_lock(lock);
302 }
303 
304 void
305 rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
306 {
307  rb_native_mutex_unlock(lock);
308 }
309 
310 static int
311 unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted)
312 {
313  do {
314  if (fail_if_interrupted) {
315  if (RUBY_VM_INTERRUPTED_ANY(th->ec)) {
316  return FALSE;
317  }
318  }
319  else {
320  RUBY_VM_CHECK_INTS(th->ec);
321  }
322 
323  rb_native_mutex_lock(&th->interrupt_lock);
324  } while (!th->ec->raised_flag && RUBY_VM_INTERRUPTED_ANY(th->ec) &&
325  (rb_native_mutex_unlock(&th->interrupt_lock), TRUE));
326 
327  VM_ASSERT(th->unblock.func == NULL);
328 
329  th->unblock.func = func;
330  th->unblock.arg = arg;
331  rb_native_mutex_unlock(&th->interrupt_lock);
332 
333  return TRUE;
334 }
335 
336 static void
337 unblock_function_clear(rb_thread_t *th)
338 {
339  rb_native_mutex_lock(&th->interrupt_lock);
340  th->unblock.func = 0;
341  rb_native_mutex_unlock(&th->interrupt_lock);
342 }
343 
344 static void
345 threadptr_interrupt_locked(rb_thread_t *th, bool trap)
346 {
347  // th->interrupt_lock should be acquired here
348 
349  RUBY_DEBUG_LOG("th:%u trap:%d", rb_th_serial(th), trap);
350 
351  if (trap) {
352  RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
353  }
354  else {
355  RUBY_VM_SET_INTERRUPT(th->ec);
356  }
357 
358  if (th->unblock.func != NULL) {
359  (th->unblock.func)(th->unblock.arg);
360  }
361  else {
362  /* none */
363  }
364 }
365 
366 static void
367 threadptr_interrupt(rb_thread_t *th, int trap)
368 {
369  rb_native_mutex_lock(&th->interrupt_lock);
370  {
371  threadptr_interrupt_locked(th, trap);
372  }
373  rb_native_mutex_unlock(&th->interrupt_lock);
374 }
375 
376 void
377 rb_threadptr_interrupt(rb_thread_t *th)
378 {
379  RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
380  threadptr_interrupt(th, false);
381 }
382 
383 static void
384 threadptr_trap_interrupt(rb_thread_t *th)
385 {
386  threadptr_interrupt(th, true);
387 }
388 
389 static void
390 terminate_all(rb_ractor_t *r, const rb_thread_t *main_thread)
391 {
392  rb_thread_t *th = 0;
393 
394  ccan_list_for_each(&r->threads.set, th, lt_node) {
395  if (th != main_thread) {
396  RUBY_DEBUG_LOG("terminate start th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));
397 
398  rb_threadptr_pending_interrupt_enque(th, RUBY_FATAL_THREAD_TERMINATED);
399  rb_threadptr_interrupt(th);
400 
401  RUBY_DEBUG_LOG("terminate done th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));
402  }
403  else {
404  RUBY_DEBUG_LOG("main thread th:%u", rb_th_serial(th));
405  }
406  }
407 }
408 
409 static void
410 rb_threadptr_join_list_wakeup(rb_thread_t *thread)
411 {
412  while (thread->join_list) {
413  struct rb_waiting_list *join_list = thread->join_list;
414 
415  // Consume the entry from the join list:
416  thread->join_list = join_list->next;
417 
418  rb_thread_t *target_thread = join_list->thread;
419 
420  if (target_thread->scheduler != Qnil && join_list->fiber) {
421  rb_fiber_scheduler_unblock(target_thread->scheduler, target_thread->self, rb_fiberptr_self(join_list->fiber));
422  }
423  else {
424  rb_threadptr_interrupt(target_thread);
425 
426  switch (target_thread->status) {
427  case THREAD_STOPPED:
428  case THREAD_STOPPED_FOREVER:
429  target_thread->status = THREAD_RUNNABLE;
430  break;
431  default:
432  break;
433  }
434  }
435  }
436 }
437 
438 void
439 rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
440 {
441  while (th->keeping_mutexes) {
442  rb_mutex_t *mutex = th->keeping_mutexes;
443  th->keeping_mutexes = mutex->next_mutex;
444 
445  // rb_warn("mutex #<%p> was not unlocked by thread #<%p>", (void *)mutex, (void*)th);
446 
447  const char *error_message = rb_mutex_unlock_th(mutex, th, mutex->fiber);
448  if (error_message) rb_bug("invalid keeping_mutexes: %s", error_message);
449  }
450 }
451 
452 void
453 rb_thread_terminate_all(rb_thread_t *th)
454 {
455  rb_ractor_t *cr = th->ractor;
456  rb_execution_context_t * volatile ec = th->ec;
457  volatile int sleeping = 0;
458 
459  if (cr->threads.main != th) {
460  rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
461  (void *)cr->threads.main, (void *)th);
462  }
463 
464  /* unlock all locking mutexes */
465  rb_threadptr_unlock_all_locking_mutexes(th);
466 
467  EC_PUSH_TAG(ec);
468  if (EC_EXEC_TAG() == TAG_NONE) {
469  retry:
470  RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
471 
472  terminate_all(cr, th);
473 
474  while (rb_ractor_living_thread_num(cr) > 1) {
475  rb_hrtime_t rel = RB_HRTIME_PER_SEC;
476  /*
477  * The thread-exit routine in thread_start_func_2 will notify
478  * us when the last sub-thread exits.
479  */
480  sleeping = 1;
481  native_sleep(th, &rel);
482  RUBY_VM_CHECK_INTS_BLOCKING(ec);
483  sleeping = 0;
484  }
485  }
486  else {
487  /*
488  * If an exception is caught (e.g. Ctrl+C), broadcast the kill
489  * request again to ensure that all threads are killed, even if
490  * they are blocked on sleep, a mutex, etc.
491  */
492  if (sleeping) {
493  sleeping = 0;
494  goto retry;
495  }
496  }
497  EC_POP_TAG();
498 }
499 
500 void rb_threadptr_root_fiber_terminate(rb_thread_t *th);
501 static void threadptr_interrupt_exec_cleanup(rb_thread_t *th);
502 
503 static void
504 thread_cleanup_func_before_exec(void *th_ptr)
505 {
506  rb_thread_t *th = th_ptr;
507  th->status = THREAD_KILLED;
508 
509  // The thread stack doesn't exist in the forked process:
510  th->ec->machine.stack_start = th->ec->machine.stack_end = NULL;
511 
512  threadptr_interrupt_exec_cleanup(th);
513  rb_threadptr_root_fiber_terminate(th);
514 }
515 
516 static void
517 thread_cleanup_func(void *th_ptr, int atfork)
518 {
519  rb_thread_t *th = th_ptr;
520 
521  th->locking_mutex = Qfalse;
522  thread_cleanup_func_before_exec(th_ptr);
523 
524  /*
525  * Unfortunately, we can't release native threading resources at fork,
526  * because libc may be in an unstable locking state; touching a
527  * threading resource may therefore cause a deadlock.
528  */
529  if (atfork) {
530  th->nt = NULL;
531  return;
532  }
533 
534  rb_native_mutex_destroy(&th->interrupt_lock);
535 }
536 
537 static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
538 static VALUE rb_thread_to_s(VALUE thread);
539 
540 void
541 ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame)
542 {
543  native_thread_init_stack(th, local_in_parent_frame);
544 }
545 
546 const VALUE *
547 rb_vm_proc_local_ep(VALUE proc)
548 {
549  const VALUE *ep = vm_proc_ep(proc);
550 
551  if (ep) {
552  return rb_vm_ep_local_ep(ep);
553  }
554  else {
555  return NULL;
556  }
557 }
558 
559 // for ractor, defined in vm.c
560 VALUE rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
561  int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler);
562 
563 static VALUE
564 thread_do_start_proc(rb_thread_t *th)
565 {
566  VALUE args = th->invoke_arg.proc.args;
567  const VALUE *args_ptr;
568  int args_len;
569  VALUE procval = th->invoke_arg.proc.proc;
570  rb_proc_t *proc;
571  GetProcPtr(procval, proc);
572 
573  th->ec->errinfo = Qnil;
574  th->ec->root_lep = rb_vm_proc_local_ep(procval);
575  th->ec->root_svar = Qfalse;
576 
577  vm_check_ints_blocking(th->ec);
578 
579  if (th->invoke_type == thread_invoke_type_ractor_proc) {
580  VALUE self = rb_ractor_self(th->ractor);
581  VM_ASSERT(FIXNUM_P(args));
582  args_len = FIX2INT(args);
583  args_ptr = ALLOCA_N(VALUE, args_len);
584  rb_ractor_receive_parameters(th->ec, th->ractor, args_len, (VALUE *)args_ptr);
585  vm_check_ints_blocking(th->ec);
586 
587  return rb_vm_invoke_proc_with_self(
588  th->ec, proc, self,
589  args_len, args_ptr,
590  th->invoke_arg.proc.kw_splat,
591  VM_BLOCK_HANDLER_NONE
592  );
593  }
594  else {
595  args_len = RARRAY_LENINT(args);
596  if (args_len < 8) {
597  /* free proc.args if the length is small enough */
598  args_ptr = ALLOCA_N(VALUE, args_len);
599  MEMCPY((VALUE *)args_ptr, RARRAY_CONST_PTR(args), VALUE, args_len);
600  th->invoke_arg.proc.args = Qnil;
601  }
602  else {
603  args_ptr = RARRAY_CONST_PTR(args);
604  }
605 
606  vm_check_ints_blocking(th->ec);
607 
608  return rb_vm_invoke_proc(
609  th->ec, proc,
610  args_len, args_ptr,
611  th->invoke_arg.proc.kw_splat,
612  VM_BLOCK_HANDLER_NONE
613  );
614  }
615 }
616 
617 static VALUE
618 thread_do_start(rb_thread_t *th)
619 {
620  native_set_thread_name(th);
621  VALUE result = Qundef;
622 
623  switch (th->invoke_type) {
624  case thread_invoke_type_proc:
625  result = thread_do_start_proc(th);
626  break;
627 
628  case thread_invoke_type_ractor_proc:
629  result = thread_do_start_proc(th);
630  rb_ractor_atexit(th->ec, result);
631  break;
632 
633  case thread_invoke_type_func:
634  result = (*th->invoke_arg.func.func)(th->invoke_arg.func.arg);
635  break;
636 
637  case thread_invoke_type_none:
638  rb_bug("unreachable");
639  }
640 
641  return result;
642 }
643 
644 void rb_ec_clear_current_thread_trace_func(const rb_execution_context_t *ec);
645 
646 static int
647 thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
648 {
649  RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
650  VM_ASSERT(th != th->vm->ractor.main_thread);
651 
652  enum ruby_tag_type state;
653  VALUE errinfo = Qnil;
654  rb_thread_t *ractor_main_th = th->ractor->threads.main;
655 
656  // setup ractor
657  if (rb_ractor_status_p(th->ractor, ractor_blocking)) {
658  RB_VM_LOCK();
659  {
660  rb_vm_ractor_blocking_cnt_dec(th->vm, th->ractor, __FILE__, __LINE__);
661  rb_ractor_t *r = th->ractor;
662  r->r_stdin = rb_io_prep_stdin();
663  r->r_stdout = rb_io_prep_stdout();
664  r->r_stderr = rb_io_prep_stderr();
665  }
666  RB_VM_UNLOCK();
667  }
668 
669  // Ensure that we are not joinable.
670  VM_ASSERT(UNDEF_P(th->value));
671 
672  int fiber_scheduler_closed = 0, event_thread_end_hooked = 0;
673  VALUE result = Qundef;
674 
675  EC_PUSH_TAG(th->ec);
676 
677  if ((state = EC_EXEC_TAG()) == TAG_NONE) {
678  EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_BEGIN, th->self, 0, 0, 0, Qundef);
679 
680  result = thread_do_start(th);
681  }
682 
683  if (!fiber_scheduler_closed) {
684  fiber_scheduler_closed = 1;
685  rb_fiber_scheduler_set(Qnil);
686  }
687 
688  if (!event_thread_end_hooked) {
689  event_thread_end_hooked = 1;
690  EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_END, th->self, 0, 0, 0, Qundef);
691  }
692 
693  if (state == TAG_NONE) {
694  // This must be set AFTER doing all user-level code. At this point, the thread is effectively finished and calls to `Thread#join` will succeed.
695  th->value = result;
696  } else {
697  errinfo = th->ec->errinfo;
698 
699  VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef);
700  if (!NIL_P(exc)) errinfo = exc;
701 
702  if (state == TAG_FATAL) {
703  if (th->invoke_type == thread_invoke_type_ractor_proc) {
704  rb_ractor_atexit(th->ec, Qnil);
705  }
706  /* fatal error within this thread, need to stop whole script */
707  }
708  else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
709  /* exit on main_thread. */
710  }
711  else {
712  if (th->report_on_exception) {
713  VALUE mesg = rb_thread_to_s(th->self);
714  rb_str_cat_cstr(mesg, " terminated with exception (report_on_exception is true):\n");
715  rb_write_error_str(mesg);
716  rb_ec_error_print(th->ec, errinfo);
717  }
718 
719  if (th->invoke_type == thread_invoke_type_ractor_proc) {
720  rb_ractor_atexit_exception(th->ec);
721  }
722 
723  if (th->vm->thread_abort_on_exception ||
724  th->abort_on_exception || RTEST(ruby_debug)) {
725  /* exit on main_thread */
726  }
727  else {
728  errinfo = Qnil;
729  }
730  }
731  th->value = Qnil;
732  }
733 
734  // The thread is effectively finished and can be joined.
735  VM_ASSERT(!UNDEF_P(th->value));
736 
737  rb_threadptr_join_list_wakeup(th);
738  rb_threadptr_unlock_all_locking_mutexes(th);
739 
740  if (th->invoke_type == thread_invoke_type_ractor_proc) {
741  rb_thread_terminate_all(th);
742  rb_ractor_teardown(th->ec);
743  }
744 
745  th->status = THREAD_KILLED;
746  RUBY_DEBUG_LOG("killed th:%u", rb_th_serial(th));
747 
748  if (th->vm->ractor.main_thread == th) {
749  ruby_stop(0);
750  }
751 
752  if (RB_TYPE_P(errinfo, T_OBJECT)) {
753  /* treat as a normal error object */
754  rb_threadptr_raise(ractor_main_th, 1, &errinfo);
755  }
756 
757  EC_POP_TAG();
758 
759  rb_ec_clear_current_thread_trace_func(th->ec);
760 
761  /* locking_mutex must be Qfalse */
762  if (th->locking_mutex != Qfalse) {
763  rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
764  (void *)th, th->locking_mutex);
765  }
766 
767  if (ractor_main_th->status == THREAD_KILLED &&
768  th->ractor->threads.cnt <= 2 /* main thread and this thread */) {
769  /* I'm last thread. wake up main thread from rb_thread_terminate_all */
770  rb_threadptr_interrupt(ractor_main_th);
771  }
772 
773  rb_check_deadlock(th->ractor);
774 
775  rb_fiber_close(th->ec->fiber_ptr);
776 
777  thread_cleanup_func(th, FALSE);
778  VM_ASSERT(th->ec->vm_stack == NULL);
779 
780  if (th->invoke_type == thread_invoke_type_ractor_proc) {
781  // after rb_ractor_living_threads_remove()
782  // GC will happen anytime and this ractor can be collected (and destroy GVL).
783  // So gvl_release() should be before it.
784  thread_sched_to_dead(TH_SCHED(th), th);
785  rb_ractor_living_threads_remove(th->ractor, th);
786  }
787  else {
788  rb_ractor_living_threads_remove(th->ractor, th);
789  thread_sched_to_dead(TH_SCHED(th), th);
790  }
791 
792  return 0;
793 }
794 
795 struct thread_create_params {
796  enum thread_invoke_type type;
797 
798  // for normal proc thread
799  VALUE args;
800  VALUE proc;
801 
802  // for ractor
803  rb_ractor_t *g;
804 
805  // for func
806  VALUE (*fn)(void *);
807 };
808 
809 static void thread_specific_storage_alloc(rb_thread_t *th);
810 
811 static VALUE
812 thread_create_core(VALUE thval, struct thread_create_params *params)
813 {
814  rb_execution_context_t *ec = GET_EC();
815  rb_thread_t *th = rb_thread_ptr(thval), *current_th = rb_ec_thread_ptr(ec);
816  int err;
817 
818  thread_specific_storage_alloc(th);
819 
820  if (OBJ_FROZEN(current_th->thgroup)) {
821  rb_raise(rb_eThreadError,
822  "can't start a new thread (frozen ThreadGroup)");
823  }
824 
825  rb_fiber_inherit_storage(ec, th->ec->fiber_ptr);
826 
827  switch (params->type) {
828  case thread_invoke_type_proc:
829  th->invoke_type = thread_invoke_type_proc;
830  th->invoke_arg.proc.args = params->args;
831  th->invoke_arg.proc.proc = params->proc;
832  th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
833  break;
834 
835  case thread_invoke_type_ractor_proc:
836 #if RACTOR_CHECK_MODE > 0
837  rb_ractor_setup_belonging_to(thval, rb_ractor_id(params->g));
838 #endif
839  th->invoke_type = thread_invoke_type_ractor_proc;
840  th->ractor = params->g;
841  th->ractor->threads.main = th;
842  th->invoke_arg.proc.proc = rb_proc_isolate_bang(params->proc);
843  th->invoke_arg.proc.args = INT2FIX(RARRAY_LENINT(params->args));
844  th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
845  rb_ractor_send_parameters(ec, params->g, params->args);
846  break;
847 
848  case thread_invoke_type_func:
849  th->invoke_type = thread_invoke_type_func;
850  th->invoke_arg.func.func = params->fn;
851  th->invoke_arg.func.arg = (void *)params->args;
852  break;
853 
854  default:
855  rb_bug("unreachable");
856  }
857 
858  th->priority = current_th->priority;
859  th->thgroup = current_th->thgroup;
860 
861  th->pending_interrupt_queue = rb_ary_hidden_new(0);
862  th->pending_interrupt_queue_checked = 0;
863  th->pending_interrupt_mask_stack = rb_ary_dup(current_th->pending_interrupt_mask_stack);
864  RBASIC_CLEAR_CLASS(th->pending_interrupt_mask_stack);
865 
866  rb_native_mutex_initialize(&th->interrupt_lock);
867 
868  RUBY_DEBUG_LOG("r:%u th:%u", rb_ractor_id(th->ractor), rb_th_serial(th));
869 
870  rb_ractor_living_threads_insert(th->ractor, th);
871 
872  /* kick thread */
873  err = native_thread_create(th);
874  if (err) {
875  th->status = THREAD_KILLED;
876  rb_ractor_living_threads_remove(th->ractor, th);
877  rb_raise(rb_eThreadError, "can't create Thread: %s", strerror(err));
878  }
879  return thval;
880 }
881 
882 #define threadptr_initialized(th) ((th)->invoke_type != thread_invoke_type_none)
883 
884 /*
885  * call-seq:
886  * Thread.new { ... } -> thread
887  * Thread.new(*args, &proc) -> thread
888  * Thread.new(*args) { |args| ... } -> thread
889  *
890  * Creates a new thread executing the given block.
891  *
892  * Any +args+ given to ::new will be passed to the block:
893  *
894  * arr = []
895  * a, b, c = 1, 2, 3
896  * Thread.new(a,b,c) { |d,e,f| arr << d << e << f }.join
897  * arr #=> [1, 2, 3]
898  *
899  * A ThreadError exception is raised if ::new is called without a block.
900  *
901  * If you're going to subclass Thread, be sure to call super in your
902  * +initialize+ method, otherwise a ThreadError will be raised.
903  */
904 static VALUE
905 thread_s_new(int argc, VALUE *argv, VALUE klass)
906 {
907  rb_thread_t *th;
908  VALUE thread = rb_thread_alloc(klass);
909 
910  if (GET_RACTOR()->threads.main->status == THREAD_KILLED) {
911  rb_raise(rb_eThreadError, "can't alloc thread");
912  }
913 
914  rb_obj_call_init_kw(thread, argc, argv, RB_PASS_CALLED_KEYWORDS);
915  th = rb_thread_ptr(thread);
916  if (!threadptr_initialized(th)) {
917  rb_raise(rb_eThreadError, "uninitialized thread - check '%"PRIsVALUE"#initialize'",
918  klass);
919  }
920  return thread;
921 }
922 
923 /*
924  * call-seq:
925  * Thread.start([args]*) {|args| block } -> thread
926  * Thread.fork([args]*) {|args| block } -> thread
927  *
928  * Basically the same as ::new. However, if class Thread is subclassed, then
929  * calling +start+ in that subclass will not invoke the subclass's
930  * +initialize+ method.
931  */
932 
933 static VALUE
934 thread_start(VALUE klass, VALUE args)
935 {
936  struct thread_create_params params = {
937  .type = thread_invoke_type_proc,
938  .args = args,
939  .proc = rb_block_proc(),
940  };
941  return thread_create_core(rb_thread_alloc(klass), &params);
942 }
943 
944 static VALUE
945 threadptr_invoke_proc_location(rb_thread_t *th)
946 {
947  if (th->invoke_type == thread_invoke_type_proc) {
948  return rb_proc_location(th->invoke_arg.proc.proc);
949  }
950  else {
951  return Qnil;
952  }
953 }
954 
955 /* :nodoc: */
956 static VALUE
957 thread_initialize(VALUE thread, VALUE args)
958 {
959  rb_thread_t *th = rb_thread_ptr(thread);
960 
961  if (!rb_block_given_p()) {
962  rb_raise(rb_eThreadError, "must be called with a block");
963  }
964  else if (th->invoke_type != thread_invoke_type_none) {
965  VALUE loc = threadptr_invoke_proc_location(th);
966  if (!NIL_P(loc)) {
967  rb_raise(rb_eThreadError,
968  "already initialized thread - %"PRIsVALUE":%"PRIsVALUE,
969  RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
970  }
971  else {
972  rb_raise(rb_eThreadError, "already initialized thread");
973  }
974  }
975  else {
976  struct thread_create_params params = {
977  .type = thread_invoke_type_proc,
978  .args = args,
979  .proc = rb_block_proc(),
980  };
981  return thread_create_core(thread, &params);
982  }
983 }
984 
985 VALUE
986 rb_thread_create(VALUE (*fn)(void *), void *arg)
987 {
988  struct thread_create_params params = {
989  .type = thread_invoke_type_func,
990  .fn = fn,
991  .args = (VALUE)arg,
992  };
993  return thread_create_core(rb_thread_alloc(rb_cThread), &params);
994 }
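/* Editorial sketch: starting a Ruby thread that runs a C function;
 * thread_main and the Thread#value call are illustrative:
 *
 *   static VALUE
 *   thread_main(void *arg)
 *   {
 *       return INT2FIX(42); // becomes the thread's value
 *   }
 *
 *   VALUE th = rb_thread_create(thread_main, NULL);
 *   VALUE v = rb_funcall(th, rb_intern("value"), 0); // joins; v == INT2FIX(42)
 */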
995 
996 VALUE
997 rb_thread_create_ractor(rb_ractor_t *r, VALUE args, VALUE proc)
998 {
999  struct thread_create_params params = {
1000  .type = thread_invoke_type_ractor_proc,
1001  .g = r,
1002  .args = args,
1003  .proc = proc,
1004  };
1005  return thread_create_core(rb_thread_alloc(rb_cThread), &params);
1006 }
1007 
1009 struct join_arg {
1010  struct rb_waiting_list *waiter;
1011  rb_thread_t *target;
1012  VALUE timeout;
1013  rb_hrtime_t *limit;
1014 };
1015 
1016 static VALUE
1017 remove_from_join_list(VALUE arg)
1018 {
1019  struct join_arg *p = (struct join_arg *)arg;
1020  rb_thread_t *target_thread = p->target;
1021 
1022  if (target_thread->status != THREAD_KILLED) {
1023  struct rb_waiting_list **join_list = &target_thread->join_list;
1024 
1025  while (*join_list) {
1026  if (*join_list == p->waiter) {
1027  *join_list = (*join_list)->next;
1028  break;
1029  }
1030 
1031  join_list = &(*join_list)->next;
1032  }
1033  }
1034 
1035  return Qnil;
1036 }
1037 
1038 static int
1039 thread_finished(rb_thread_t *th)
1040 {
1041  return th->status == THREAD_KILLED || !UNDEF_P(th->value);
1042 }
1043 
1044 static VALUE
1045 thread_join_sleep(VALUE arg)
1046 {
1047  struct join_arg *p = (struct join_arg *)arg;
1048  rb_thread_t *target_th = p->target, *th = p->waiter->thread;
1049  rb_hrtime_t end = 0, *limit = p->limit;
1050 
1051  if (limit) {
1052  end = rb_hrtime_add(*limit, rb_hrtime_now());
1053  }
1054 
1055  while (!thread_finished(target_th)) {
1056  VALUE scheduler = rb_fiber_scheduler_current();
1057 
1058  if (scheduler != Qnil) {
1059  rb_fiber_scheduler_block(scheduler, target_th->self, p->timeout);
1060  // Check if the target thread is finished after blocking:
1061  if (thread_finished(target_th)) break;
1062  // Otherwise, a timeout occurred:
1063  else return Qfalse;
1064  }
1065  else if (!limit) {
1066  sleep_forever(th, SLEEP_DEADLOCKABLE | SLEEP_ALLOW_SPURIOUS | SLEEP_NO_CHECKINTS);
1067  }
1068  else {
1069  if (hrtime_update_expire(limit, end)) {
1070  RUBY_DEBUG_LOG("timeout target_th:%u", rb_th_serial(target_th));
1071  return Qfalse;
1072  }
1073  th->status = THREAD_STOPPED;
1074  native_sleep(th, limit);
1075  }
1076  RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1077  th->status = THREAD_RUNNABLE;
1078 
1079  RUBY_DEBUG_LOG("interrupted target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1080  }
1081 
1082  return Qtrue;
1083 }
1084 
1085 static VALUE
1086 thread_join(rb_thread_t *target_th, VALUE timeout, rb_hrtime_t *limit)
1087 {
1088  rb_execution_context_t *ec = GET_EC();
1089  rb_thread_t *th = ec->thread_ptr;
1090  rb_fiber_t *fiber = ec->fiber_ptr;
1091 
1092  if (th == target_th) {
1093  rb_raise(rb_eThreadError, "Target thread must not be current thread");
1094  }
1095 
1096  if (th->ractor->threads.main == target_th) {
1097  rb_raise(rb_eThreadError, "Target thread must not be main thread");
1098  }
1099 
1100  RUBY_DEBUG_LOG("target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1101 
1102  if (target_th->status != THREAD_KILLED) {
1103  struct rb_waiting_list waiter;
1104  waiter.next = target_th->join_list;
1105  waiter.thread = th;
1106  waiter.fiber = rb_fiberptr_blocking(fiber) ? NULL : fiber;
1107  target_th->join_list = &waiter;
1108 
1109  struct join_arg arg;
1110  arg.waiter = &waiter;
1111  arg.target = target_th;
1112  arg.timeout = timeout;
1113  arg.limit = limit;
1114 
1115  if (!rb_ensure(thread_join_sleep, (VALUE)&arg, remove_from_join_list, (VALUE)&arg)) {
1116  return Qnil;
1117  }
1118  }
1119 
1120  RUBY_DEBUG_LOG("success target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1121 
1122  if (target_th->ec->errinfo != Qnil) {
1123  VALUE err = target_th->ec->errinfo;
1124 
1125  if (FIXNUM_P(err)) {
1126  switch (err) {
1127  case INT2FIX(TAG_FATAL):
1128  RUBY_DEBUG_LOG("terminated target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1129 
1130  /* OK. killed. */
1131  break;
1132  default:
1133  rb_bug("thread_join: Fixnum (%d) should not reach here.", FIX2INT(err));
1134  }
1135  }
1136  else if (THROW_DATA_P(target_th->ec->errinfo)) {
1137  rb_bug("thread_join: THROW_DATA should not reach here.");
1138  }
1139  else {
1140  /* normal exception */
1141  rb_exc_raise(err);
1142  }
1143  }
1144  return target_th->self;
1145 }
1146 
1147 /*
1148  * call-seq:
1149  * thr.join -> thr
1150  * thr.join(limit) -> thr
1151  *
1152  * The calling thread will suspend execution and run this +thr+.
1153  *
1154  * Does not return until +thr+ exits or until the given +limit+ seconds have
1155  * passed.
1156  *
1157  * If the time limit expires, +nil+ will be returned, otherwise +thr+ is
1158  * returned.
1159  *
1160  * Any threads not joined will be killed when the main program exits.
1161  *
1162  * If +thr+ had previously raised an exception and the ::abort_on_exception or
1163  * $DEBUG flags are not set, (so the exception has not yet been processed), it
1164  * will be processed at this time.
1165  *
1166  * a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
1167  * x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
1168  * x.join # Let thread x finish, thread a will be killed on exit.
1169  * #=> "axyz"
1170  *
1171  * The following example illustrates the +limit+ parameter.
1172  *
1173  * y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
1174  * puts "Waiting" until y.join(0.15)
1175  *
1176  * This will produce:
1177  *
1178  * tick...
1179  * Waiting
1180  * tick...
1181  * Waiting
1182  * tick...
1183  * tick...
1184  */
1185 
1186 static VALUE
1187 thread_join_m(int argc, VALUE *argv, VALUE self)
1188 {
1189  VALUE timeout = Qnil;
1190  rb_hrtime_t rel = 0, *limit = 0;
1191 
1192  if (rb_check_arity(argc, 0, 1)) {
1193  timeout = argv[0];
1194  }
1195 
1196  // Convert the timeout eagerly, so it's always converted and deterministic
1197  /*
1198  * This supports INFINITY and negative values, so we can't use
1199  * rb_time_interval right now...
1200  */
1201  if (NIL_P(timeout)) {
1202  /* unlimited */
1203  }
1204  else if (FIXNUM_P(timeout)) {
1205  rel = rb_sec2hrtime(NUM2TIMET(timeout));
1206  limit = &rel;
1207  }
1208  else {
1209  limit = double2hrtime(&rel, rb_num2dbl(timeout));
1210  }
1211 
1212  return thread_join(rb_thread_ptr(self), timeout, limit);
1213 }
1214 
1215 /*
1216  * call-seq:
1217  * thr.value -> obj
1218  *
1219  * Waits for +thr+ to complete, using #join, and returns its value or raises
1220  * the exception which terminated the thread.
1221  *
1222  * a = Thread.new { 2 + 2 }
1223  * a.value #=> 4
1224  *
1225  * b = Thread.new { raise 'something went wrong' }
1226  * b.value #=> RuntimeError: something went wrong
1227  */
1228 
1229 static VALUE
1230 thread_value(VALUE self)
1231 {
1232  rb_thread_t *th = rb_thread_ptr(self);
1233  thread_join(th, Qnil, 0);
1234  if (UNDEF_P(th->value)) {
1235  // If the thread is dead because we forked, th->value is still Qundef.
1236  return Qnil;
1237  }
1238  return th->value;
1239 }
1240 
1241 /*
1242  * Thread Scheduling
1243  */
1244 
1245 static void
1246 getclockofday(struct timespec *ts)
1247 {
1248 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
1249  if (clock_gettime(CLOCK_MONOTONIC, ts) == 0)
1250  return;
1251 #endif
1252  rb_timespec_now(ts);
1253 }
1254 
1255 /*
1256  * Don't inline this, since the library call is already time-consuming
1257  * and we don't want "struct timespec" on the stack too long, for GC's sake.
1258  */
1259 NOINLINE(rb_hrtime_t rb_hrtime_now(void));
1260 rb_hrtime_t
1261 rb_hrtime_now(void)
1262 {
1263  struct timespec ts;
1264 
1265  getclockofday(&ts);
1266  return rb_timespec2hrtime(&ts);
1267 }
1268 
1269 /*
1270  * At least gcc 7.2 and 7.3 complain about "rb_hrtime_t end"
1271  * being uninitialized; maybe other versions do, too.
1272  */
1273 COMPILER_WARNING_PUSH
1274 #if defined(__GNUC__) && __GNUC__ == 7 && __GNUC_MINOR__ <= 3
1275 COMPILER_WARNING_IGNORED(-Wmaybe-uninitialized)
1276 #endif
1277 #ifndef PRIu64
1278 #define PRIu64 PRI_64_PREFIX "u"
1279 #endif
1280 /*
1281  * @end is the absolute time when @timeout is set to expire.
1282  * Returns true if @end has passed;
1283  * updates @timeout and returns false otherwise.
1284  */
1285 static int
1286 hrtime_update_expire(rb_hrtime_t *timeout, const rb_hrtime_t end)
1287 {
1288  rb_hrtime_t now = rb_hrtime_now();
1289 
1290  if (now > end) return 1;
1291 
1292  RUBY_DEBUG_LOG("%"PRIu64" > %"PRIu64"", (uint64_t)end, (uint64_t)now);
1293 
1294  *timeout = end - now;
1295  return 0;
1296 }
1297 COMPILER_WARNING_POP
1298 
1299 static int
1300 sleep_hrtime(rb_thread_t *th, rb_hrtime_t rel, unsigned int fl)
1301 {
1302  enum rb_thread_status prev_status = th->status;
1303  int woke;
1304  rb_hrtime_t end = rb_hrtime_add(rb_hrtime_now(), rel);
1305 
1306  th->status = THREAD_STOPPED;
1307  RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1308  while (th->status == THREAD_STOPPED) {
1309  native_sleep(th, &rel);
1310  woke = vm_check_ints_blocking(th->ec);
1311  if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
1312  break;
1313  if (hrtime_update_expire(&rel, end))
1314  break;
1315  woke = 1;
1316  }
1317  th->status = prev_status;
1318  return woke;
1319 }
1320 
1321 static int
1322 sleep_hrtime_until(rb_thread_t *th, rb_hrtime_t end, unsigned int fl)
1323 {
1324  enum rb_thread_status prev_status = th->status;
1325  int woke;
1326  rb_hrtime_t rel = rb_hrtime_sub(end, rb_hrtime_now());
1327 
1328  th->status = THREAD_STOPPED;
1329  RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1330  while (th->status == THREAD_STOPPED) {
1331  native_sleep(th, &rel);
1332  woke = vm_check_ints_blocking(th->ec);
1333  if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
1334  break;
1335  if (hrtime_update_expire(&rel, end))
1336  break;
1337  woke = 1;
1338  }
1339  th->status = prev_status;
1340  return woke;
1341 }
1342 
1343 static void
1344 sleep_forever(rb_thread_t *th, unsigned int fl)
1345 {
1346  enum rb_thread_status prev_status = th->status;
1347  enum rb_thread_status status;
1348  int woke;
1349 
1350  status = fl & SLEEP_DEADLOCKABLE ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
1351  th->status = status;
1352 
1353  if (!(fl & SLEEP_NO_CHECKINTS)) RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1354 
1355  while (th->status == status) {
1356  if (fl & SLEEP_DEADLOCKABLE) {
1357  rb_ractor_sleeper_threads_inc(th->ractor);
1358  rb_check_deadlock(th->ractor);
1359  }
1360  {
1361  native_sleep(th, 0);
1362  }
1363  if (fl & SLEEP_DEADLOCKABLE) {
1364  rb_ractor_sleeper_threads_dec(th->ractor);
1365  }
1366  if (fl & SLEEP_ALLOW_SPURIOUS) {
1367  break;
1368  }
1369 
1370  woke = vm_check_ints_blocking(th->ec);
1371 
1372  if (woke && !(fl & SLEEP_SPURIOUS_CHECK)) {
1373  break;
1374  }
1375  }
1376  th->status = prev_status;
1377 }
1378 
1379 void
1380 rb_thread_sleep_forever(void)
1381 {
1382  RUBY_DEBUG_LOG("forever");
1383  sleep_forever(GET_THREAD(), SLEEP_SPURIOUS_CHECK);
1384 }
1385 
1386 void
1387 rb_thread_sleep_deadly(void)
1388 {
1389  RUBY_DEBUG_LOG("deadly");
1390  sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE|SLEEP_SPURIOUS_CHECK);
1391 }
1392 
1393 static void
1394 rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end)
1395 {
1396  VALUE scheduler = rb_fiber_scheduler_current();
1397  if (scheduler != Qnil) {
1398  rb_fiber_scheduler_block(scheduler, blocker, timeout);
1399  }
1400  else {
1401  RUBY_DEBUG_LOG("...");
1402  if (end) {
1403  sleep_hrtime_until(GET_THREAD(), end, SLEEP_SPURIOUS_CHECK);
1404  }
1405  else {
1406  sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE);
1407  }
1408  }
1409 }
1410 
1411 void
1412 rb_thread_wait_for(struct timeval time)
1413 {
1414  rb_thread_t *th = GET_THREAD();
1415 
1416  sleep_hrtime(th, rb_timeval2hrtime(&time), SLEEP_SPURIOUS_CHECK);
1417 }
1418 
1419 void
1420 rb_ec_check_ints(rb_execution_context_t *ec)
1421 {
1422  RUBY_VM_CHECK_INTS_BLOCKING(ec);
1423 }
1424 
1425 /*
1426  * CAUTION: This function causes thread switching.
1427  * rb_thread_check_ints() checks Ruby's pending interrupts.
1428  * Some interrupts require thread switching, invoking handlers,
1429  * and so on.
1430  */
1431 
1432 void
1433 rb_thread_check_ints(void)
1434 {
1435  rb_ec_check_ints(GET_EC());
1436 }
1437 
1438 /*
1439  * Hidden API for tcl/tk wrapper.
1440  * There is no guarantee that it will be kept.
1441  */
1442 int
1443 rb_thread_check_trap_pending(void)
1444 {
1445  return rb_signal_buff_size() != 0;
1446 }
1447 
1448 /* This function can be called in blocking region. */
1449 int
1450 rb_thread_interrupted(VALUE thval)
1451 {
1452  return (int)RUBY_VM_INTERRUPTED(rb_thread_ptr(thval)->ec);
1453 }
1454 
1455 void
1456 rb_thread_sleep(int sec)
1457 {
1458  rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
1459 }
1460 
1461 static void
1462 rb_thread_schedule_limits(uint32_t limits_us)
1463 {
1464  if (!rb_thread_alone()) {
1465  rb_thread_t *th = GET_THREAD();
1466  RUBY_DEBUG_LOG("us:%u", (unsigned int)limits_us);
1467 
1468  if (th->running_time_us >= limits_us) {
1469  RUBY_DEBUG_LOG("switch %s", "start");
1470 
1471  RB_VM_SAVE_MACHINE_CONTEXT(th);
1472  thread_sched_yield(TH_SCHED(th), th);
1473  rb_ractor_thread_switch(th->ractor, th);
1474 
1475  RUBY_DEBUG_LOG("switch %s", "done");
1476  }
1477  }
1478 }
1479 
1480 void
1481 rb_thread_schedule(void)
1482 {
1483  rb_thread_schedule_limits(0);
1484  RUBY_VM_CHECK_INTS(GET_EC());
1485 }
1486 
1487 /* blocking region */
1488 
1489 static inline int
1490 blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
1491  rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
1492 {
1493 #ifdef RUBY_ASSERT_CRITICAL_SECTION
1494  VM_ASSERT(ruby_assert_critical_section_entered == 0);
1495 #endif
1496  VM_ASSERT(th == GET_THREAD());
1497 
1498  region->prev_status = th->status;
1499  if (unblock_function_set(th, ubf, arg, fail_if_interrupted)) {
1500  th->blocking_region_buffer = region;
1501  th->status = THREAD_STOPPED;
1502  rb_ractor_blocking_threads_inc(th->ractor, __FILE__, __LINE__);
1503 
1504  RUBY_DEBUG_LOG("thread_id:%p", (void *)th->nt->thread_id);
1505  return TRUE;
1506  }
1507  else {
1508  return FALSE;
1509  }
1510 }
1511 
1512 static inline void
1513 blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
1514 {
1515  /* entry to ubf_list still permitted at this point, make it impossible: */
1516  unblock_function_clear(th);
1517  /* entry to ubf_list impossible at this point, so unregister is safe: */
1518  unregister_ubf_list(th);
1519 
1520  thread_sched_to_running(TH_SCHED(th), th);
1521  rb_ractor_thread_switch(th->ractor, th);
1522 
1523  th->blocking_region_buffer = 0;
1524  rb_ractor_blocking_threads_dec(th->ractor, __FILE__, __LINE__);
1525  if (th->status == THREAD_STOPPED) {
1526  th->status = region->prev_status;
1527  }
1528 
1529  RUBY_DEBUG_LOG("end");
1530 
1531 #ifndef _WIN32
1532  // GET_THREAD() clears WSAGetLastError()
1533  VM_ASSERT(th == GET_THREAD());
1534 #endif
1535 }
1536 
1537 void *
1538 rb_nogvl(void *(*func)(void *), void *data1,
1539  rb_unblock_function_t *ubf, void *data2,
1540  int flags)
1541 {
1542  if (flags & RB_NOGVL_OFFLOAD_SAFE) {
1543  VALUE scheduler = rb_fiber_scheduler_current();
1544  if (scheduler != Qnil) {
1545  struct rb_fiber_scheduler_blocking_operation_state state = {0};
1546 
1547  VALUE result = rb_fiber_scheduler_blocking_operation_wait(scheduler, func, data1, ubf, data2, flags, &state);
1548 
1549  if (!UNDEF_P(result)) {
1550  rb_errno_set(state.saved_errno);
1551  return state.result;
1552  }
1553  }
1554  }
1555 
1556  void *val = 0;
1557  rb_execution_context_t *ec = GET_EC();
1558  rb_thread_t *th = rb_ec_thread_ptr(ec);
1559  rb_vm_t *vm = rb_ec_vm_ptr(ec);
1560  bool is_main_thread = vm->ractor.main_thread == th;
1561  int saved_errno = 0;
1562 
1563  if ((ubf == RUBY_UBF_IO) || (ubf == RUBY_UBF_PROCESS)) {
1564  ubf = ubf_select;
1565  data2 = th;
1566  }
1567  else if (ubf && rb_ractor_living_thread_num(th->ractor) == 1 && is_main_thread) {
1568  if (flags & RB_NOGVL_UBF_ASYNC_SAFE) {
1569  vm->ubf_async_safe = 1;
1570  }
1571  }
1572 
1573  rb_vm_t *volatile saved_vm = vm;
1574  BLOCKING_REGION(th, {
1575  val = func(data1);
1576  saved_errno = rb_errno();
1577  }, ubf, data2, flags & RB_NOGVL_INTR_FAIL);
1578  vm = saved_vm;
1579 
1580  if (is_main_thread) vm->ubf_async_safe = 0;
1581 
1582  if ((flags & RB_NOGVL_INTR_FAIL) == 0) {
1583  RUBY_VM_CHECK_INTS_BLOCKING(ec);
1584  }
1585 
1586  rb_errno_set(saved_errno);
1587 
1588  return val;
1589 }
1590 
1591 /*
1592  * rb_thread_call_without_gvl - permit concurrent/parallel execution.
1593  * rb_thread_call_without_gvl2 - permit concurrent/parallel execution
1594  * without interrupt process.
1595  *
1596  * rb_thread_call_without_gvl() does:
1597  * (1) Check interrupts.
1598  * (2) release GVL.
1599  * Other Ruby threads may run in parallel.
1600  * (3) call func with data1
1601  * (4) acquire GVL.
1602  * Other Ruby threads can not run in parallel any more.
1603  * (5) Check interrupts.
1604  *
1605  * rb_thread_call_without_gvl2() does:
1606  * (1) Check interrupt and return if interrupted.
1607  * (2) release GVL.
1608  * (3) call func with data1 and a pointer to the flags.
1609  * (4) acquire GVL.
1610  *
1611  * If another thread interrupts this thread (Thread#kill, signal delivery,
1612  * VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
1613  * "un-blocking function"). `ubf()' should interrupt `func()' execution by
1614  * toggling a cancellation flag, canceling the invocation of a call inside
1615  * `func()' or similar. Note that `ubf()' may not be called with the GVL.
1616  *
1617  * There are built-in ubfs and you can specify these ubfs:
1618  *
1619  * * RUBY_UBF_IO: ubf for IO operation
1620  * * RUBY_UBF_PROCESS: ubf for process operation
1621  *
1622  * However, we can not guarantee that our built-in ubfs interrupt your
1623  * `func()' correctly. Be careful when using rb_thread_call_without_gvl().
1624  * If you don't provide a proper ubf(), your program will not stop for
1625  * Control+C or other shutdown events.
1626  *
1627  * "Check interrupts" in the above list means checking for asynchronous
1628  * interrupt events (such as Thread#kill, signal delivery, VM-shutdown
1629  * request, and so on) and calling the corresponding procedures
1630  * (such as `trap' for signals, raising an exception for Thread#raise).
1631  * If `func()' finished and interrupts were received, you may want to skip
1632  * interrupt checking. For example, assume the following func() reads data from a file.
1633  *
1634  * read_func(...) {
1635  * // (a) before read
1636  * read(buffer); // (b) reading
1637  * // (c) after read
1638  * }
1639  *
1640  * If an interrupt occurs at (a) or (b), then `ubf()' cancels this
1641  * `read_func()' and interrupts are checked. However, if an interrupt occurs
1642  * at (c), after the *read* operation has completed, checking interrupts is
1643  * harmful because it causes an irrevocable side effect: the read data will
1644  * vanish. To avoid this problem, `read_func()' should be used with
1645  * `rb_thread_call_without_gvl2()'.
1646  *
1647  * If `rb_thread_call_without_gvl2()' detects an interrupt, it returns
1648  * immediately. This function does not report when the execution was
1649  * interrupted; for example, there are four possible timings: (a), (b), (c),
1650  * and before read_func() is called. You need to record the progress of
1651  * read_func() and check it after `rb_thread_call_without_gvl2()' returns.
1652  * You may also need to call `rb_thread_check_ints()' at the right points, or
1653  * your program will not handle events such as `trap' properly.
1654  *
1655  * NOTE: You can not use most of the Ruby C API or touch Ruby
1656  * objects in `func()' and `ubf()', including raising an
1657  * exception, because the current thread does not hold the GVL
1658  * (doing so causes synchronization problems). If you need to
1659  * call Ruby functions, either use rb_thread_call_with_gvl()
1660  * or read the source code of the C APIs and confirm their safety
1661  * yourself.
1662  *
1663  * NOTE: In short, this API is difficult to use safely. We recommend
1664  * other approaches if you have them; we lack experience using this
1665  * API. Please report any problems related to it.
1666  *
1667  * NOTE: Releasing and re-acquiring the GVL may be expensive for a
1668  * short-running `func()'. Be sure to benchmark, and use this
1669  * mechanism only when `func()' consumes enough time to pay for it.
1670  *
1671  * Safe C API:
1672  * * rb_thread_interrupted() - check interrupt flag
1673  * * ruby_xmalloc(), ruby_xrealloc(), ruby_xfree() -
1674  * they will work without GVL, and may acquire GVL when GC is needed.
1675  */
1676 void *
1677 rb_thread_call_without_gvl2(void *(*func)(void *), void *data1,
1678  rb_unblock_function_t *ubf, void *data2)
1679 {
1680  return rb_nogvl(func, data1, ubf, data2, RB_NOGVL_INTR_FAIL);
1681 }
1682 
1683 void *
1684 rb_thread_call_without_gvl(void *(*func)(void *data), void *data1,
1685  rb_unblock_function_t *ubf, void *data2)
1686 {
1687  return rb_nogvl(func, data1, ubf, data2, 0);
1688 }
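/* Editorial sketch: typical extension usage with an unblocking function.
 * The names below are hypothetical; only the rb_thread_call_without_gvl()
 * signature is as declared above. The ubf must not touch Ruby objects and
 * may run without the GVL.
 *
 *   struct read_args { int fd; char *buf; size_t len; ssize_t result; };
 *
 *   static void *
 *   do_read(void *p)
 *   {
 *       struct read_args *a = p;
 *       a->result = read(a->fd, a->buf, a->len);
 *       return NULL;
 *   }
 *
 *   static void
 *   undo_read(void *p)
 *   {
 *       struct read_args *a = p;
 *       shutdown(a->fd, SHUT_RDWR); // force read() to return (socket fds)
 *   }
 *
 *   rb_thread_call_without_gvl(do_read, &args, undo_read, &args);
 */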
1689 
1690 static int
1691 waitfd_to_waiting_flag(int wfd_event)
1692 {
1693  return wfd_event << 1;
1694 }
1695 
1696 static void
1697 thread_io_setup_wfd(rb_thread_t *th, int fd, struct waiting_fd *wfd)
1698 {
1699  wfd->fd = fd;
1700  wfd->th = th;
1701  wfd->busy = NULL;
1702 
1703  RB_VM_LOCK_ENTER();
1704  {
1705  ccan_list_add(&th->vm->waiting_fds, &wfd->wfd_node);
1706  }
1707  RB_VM_LOCK_LEAVE();
1708 }
1709 
1710 static void
1711 thread_io_wake_pending_closer(struct waiting_fd *wfd)
1712 {
1713  bool has_waiter = wfd->busy && RB_TEST(wfd->busy->wakeup_mutex);
1714  if (has_waiter) {
1715  rb_mutex_lock(wfd->busy->wakeup_mutex);
1716  }
1717 
1718  /* Needs to be protected with RB_VM_LOCK because we don't know if
1719  wfd is on the global list of pending FD ops or if it's on a
1720  struct rb_io_close_wait_list close-waiter. */
1721  RB_VM_LOCK_ENTER();
1722  ccan_list_del(&wfd->wfd_node);
1723  RB_VM_LOCK_LEAVE();
1724 
1725  if (has_waiter) {
1726  rb_thread_t *th = rb_thread_ptr(wfd->busy->closing_thread);
1727  if (th->scheduler != Qnil) {
1728  rb_fiber_scheduler_unblock(th->scheduler, wfd->busy->closing_thread, wfd->busy->closing_fiber);
1729  } else {
1730  rb_thread_wakeup(wfd->busy->closing_thread);
1731  }
1732  rb_mutex_unlock(wfd->busy->wakeup_mutex);
1733  }
1734 }
1735 
1736 static bool
1737 thread_io_mn_schedulable(rb_thread_t *th, int events, const struct timeval *timeout)
1738 {
1739 #if defined(USE_MN_THREADS) && USE_MN_THREADS
1740  return !th_has_dedicated_nt(th) && (events || timeout) && th->blocking;
1741 #else
1742  return false;
1743 #endif
1744 }
1745 
1746 // true if need retry
1747 static bool
1748 thread_io_wait_events(rb_thread_t *th, int fd, int events, const struct timeval *timeout)
1749 {
1750 #if defined(USE_MN_THREADS) && USE_MN_THREADS
1751  if (thread_io_mn_schedulable(th, events, timeout)) {
1752  rb_hrtime_t rel, *prel;
1753 
1754  if (timeout) {
1755  rel = rb_timeval2hrtime(timeout);
1756  prel = &rel;
1757  }
1758  else {
1759  prel = NULL;
1760  }
1761 
1762  VM_ASSERT(prel || (events & (RB_WAITFD_IN | RB_WAITFD_OUT)));
1763 
1764  if (thread_sched_wait_events(TH_SCHED(th), th, fd, waitfd_to_waiting_flag(events), prel)) {
1765  // timeout
1766  return false;
1767  }
1768  else {
1769  return true;
1770  }
1771  }
1772 #endif // defined(USE_MN_THREADS) && USE_MN_THREADS
1773  return false;
1774 }
1775 
1776 // assume read/write
1777 static bool
1778 blocking_call_retryable_p(int r, int eno)
1779 {
1780  if (r != -1) return false;
1781 
1782  switch (eno) {
1783  case EAGAIN:
1784 #if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
1785  case EWOULDBLOCK:
1786 #endif
1787  return true;
1788  default:
1789  return false;
1790  }
1791 }
1792 
1793 bool
1794 rb_thread_mn_schedulable(VALUE thval)
1795 {
1796  rb_thread_t *th = rb_thread_ptr(thval);
1797  return th->mn_schedulable;
1798 }
1799 
1800 VALUE
1801 rb_thread_io_blocking_call(rb_blocking_function_t *func, void *data1, int fd, int events)
1802 {
1803  rb_execution_context_t *volatile ec = GET_EC();
1804  rb_thread_t *volatile th = rb_ec_thread_ptr(ec);
1805 
1806  RUBY_DEBUG_LOG("th:%u fd:%d ev:%d", rb_th_serial(th), fd, events);
1807 
1808  struct waiting_fd waiting_fd;
1809  volatile VALUE val = Qundef; /* shouldn't be used */
1810  volatile int saved_errno = 0;
1811  enum ruby_tag_type state;
1812  bool prev_mn_schedulable = th->mn_schedulable;
1813  th->mn_schedulable = thread_io_mn_schedulable(th, events, NULL);
1814 
1815  // `errno` is only valid when there is an actual error - but we can't
1816  // extract that from the return value of `func` alone, so we clear any
1817  // prior `errno` value here so that we can later check if it was set by
1818  // `func` or not (as opposed to some previously set value).
1819  errno = 0;
1820 
1821  thread_io_setup_wfd(th, fd, &waiting_fd);
1822  {
1823  EC_PUSH_TAG(ec);
1824  if ((state = EC_EXEC_TAG()) == TAG_NONE) {
1825  volatile enum ruby_tag_type saved_state = state; /* for BLOCKING_REGION */
1826  retry:
1827  BLOCKING_REGION(waiting_fd.th, {
1828  val = func(data1);
1829  saved_errno = errno;
1830  }, ubf_select, waiting_fd.th, FALSE);
1831 
1832  th = rb_ec_thread_ptr(ec);
1833  if (events &&
1834  blocking_call_retryable_p((int)val, saved_errno) &&
1835  thread_io_wait_events(th, fd, events, NULL)) {
1836  RUBY_VM_CHECK_INTS_BLOCKING(ec);
1837  goto retry;
1838  }
1839  state = saved_state;
1840  }
1841  EC_POP_TAG();
1842 
1843  th = rb_ec_thread_ptr(ec);
1844  th->mn_schedulable = prev_mn_schedulable;
1845  }
1846  /*
1847  * must be deleted before jump
1848  * this will delete either from waiting_fds or on-stack struct rb_io_close_wait_list
1849  */
1850  thread_io_wake_pending_closer(&waiting_fd);
1851 
1852  if (state) {
1853  EC_JUMP_TAG(ec, state);
1854  }
1855  /* TODO: check func() */
1856  RUBY_VM_CHECK_INTS_BLOCKING(ec);
1857 
1858  // If the error was a timeout, we raise a specific exception for that:
1859  if (saved_errno == ETIMEDOUT) {
1860  rb_raise(rb_eIOTimeoutError, "Blocking operation timed out!");
1861  }
1862 
1863  errno = saved_errno;
1864 
1865  return val;
1866 }
1867 
1868 VALUE
1869 rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
1870 {
1871  return rb_thread_io_blocking_call(func, data1, fd, 0);
1872 }
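/* Editorial sketch: fd-based blocking calls go through this wrapper so that
 * Thread#kill and IO#close can interrupt them, and so a retryable EAGAIN /
 * EWOULDBLOCK result can wait for fd readiness on M:N threads. The names
 * below are hypothetical:
 *
 *   static VALUE
 *   read_internal(void *p)
 *   {
 *       struct read_args *a = p;
 *       return (VALUE)read(a->fd, a->buf, a->len);
 *   }
 *
 *   VALUE n = rb_thread_io_blocking_call(read_internal, &args, args.fd, RB_WAITFD_IN);
 */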
1873 
1874 /*
1875  * rb_thread_call_with_gvl - re-enter the Ruby world after GVL release.
1876  *
1877  * After releasing the GVL using
1878  * rb_thread_call_without_gvl() you can not access Ruby values or invoke
1879  * methods. If you need to access Ruby, you must use this function,
1880  * rb_thread_call_with_gvl().
1881  *
1882  * This function rb_thread_call_with_gvl() does:
1883  * (1) acquire GVL.
1884  * (2) call passed function `func'.
1885  * (3) release GVL.
1886  * (4) return a value which is returned at (2).
1887  *
1888  * NOTE: You should not return a Ruby object at (2), because such an
1889  * object will not be marked.
1890  *
1891  * NOTE: If an exception is raised in `func', this function DOES NOT
1892  * protect (catch) the exception. If you have any resources
1893  * that should be freed before the exception propagates, use
1894  * rb_protect() in `func' and return a value indicating that an
1895  * exception was raised.
1896  *
1897  * NOTE: This function should not be called by a thread which was not
1898  * created as a Ruby thread (by Thread.new or similar). In other
1899  * words, this function *DOES NOT* associate or convert a NON-Ruby
1900  * thread to a Ruby thread.
1901  */
1902 void *
1903 rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
1904 {
1905  rb_thread_t *th = ruby_thread_from_native();
1906  struct rb_blocking_region_buffer *brb;
1907  struct rb_unblock_callback prev_unblock;
1908  void *r;
1909 
1910  if (th == 0) {
1911  /* An error has occurred, but we can't use rb_bug()
1912  * because this thread is not a Ruby thread.
1913  * What should we do?
1914  */
1915  bp();
1916  fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
1917  exit(EXIT_FAILURE);
1918  }
1919 
1920  brb = (struct rb_blocking_region_buffer *)th->blocking_region_buffer;
1921  prev_unblock = th->unblock;
1922 
1923  if (brb == 0) {
1924  rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
1925  }
1926 
1927  blocking_region_end(th, brb);
1928  /* enter to Ruby world: You can access Ruby values, methods and so on. */
1929  r = (*func)(data1);
1930  /* leave from Ruby world: You can not access Ruby values, etc. */
1931  int released = blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg, FALSE);
1932  RUBY_ASSERT_ALWAYS(released);
1933  RB_VM_SAVE_MACHINE_CONTEXT(th);
1934  thread_sched_to_waiting(TH_SCHED(th), th);
1935  return r;
1936 }
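/* Example (a minimal sketch, not part of the original source): hopping back
 * into the Ruby world from a GVL-released region. `heavy_work',
 * `log_progress', `compute_*' and `struct job' are hypothetical; only
 * rb_thread_call_without_gvl(), rb_thread_call_with_gvl() and RUBY_UBF_IO
 * are real API.
 *
 *     static void *
 *     log_progress(void *msg)       // runs with the GVL: Ruby calls are OK
 *     {
 *         rb_funcall(rb_stderr, rb_intern("puts"), 1,
 *                    rb_str_new_cstr((const char *)msg));
 *         return NULL;
 *     }
 *
 *     static void *
 *     heavy_work(void *arg)         // runs without the GVL: no Ruby calls
 *     {
 *         compute_first_half((struct job *)arg);
 *         rb_thread_call_with_gvl(log_progress, (void *)"half done");
 *         compute_second_half((struct job *)arg);
 *         return NULL;
 *     }
 *
 *     // called with the GVL held:
 *     //   rb_thread_call_without_gvl(heavy_work, &job, RUBY_UBF_IO, 0);
 */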
1937 
1938 /*
1939  * ruby_thread_has_gvl_p - check if current native thread has GVL.
1940  *
1941  ***
1942  *** This API is EXPERIMENTAL!
1943  *** We do not guarantee that this API remains in ruby 1.9.2 or later.
1944  ***
1945  */
1946 
1947 int
1948 ruby_thread_has_gvl_p(void)
1949 {
1950  rb_thread_t *th = ruby_thread_from_native();
1951 
1952  if (th && th->blocking_region_buffer == 0) {
1953  return 1;
1954  }
1955  else {
1956  return 0;
1957  }
1958 }
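/* Example (a minimal sketch): callback code that may be invoked either
 * inside or outside a blocking region can use this predicate to decide
 * whether Ruby APIs are currently safe to call:
 *
 *     if (ruby_thread_has_gvl_p()) {
 *         rb_warn("called with the GVL held");   // Ruby API is safe here
 *     }
 */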
1959 
1960 /*
1961  * call-seq:
1962  * Thread.pass -> nil
1963  *
1964  * Give the thread scheduler a hint to pass execution to another thread.
1965  * Whether the running thread actually switches depends on the OS and processor.
1966  */
1967 
1968 static VALUE
1969 thread_s_pass(VALUE klass)
1970 {
1971  rb_thread_schedule();
1972  return Qnil;
1973 }
1974 
1975 /*****************************************************/
1976 
1977 /*
1978  * rb_threadptr_pending_interrupt_* - manage asynchronous error queue
1979  *
1980  * Async events such as an exception thrown by Thread#raise,
1981  * Thread#kill and thread termination (after main thread termination)
1982  * will be queued to th->pending_interrupt_queue.
1983  * - clear: clear the queue.
1984  * - enque: enqueue an err object into the queue.
1985  * - deque: dequeue an err object from the queue.
1986  * - active_p: return 1 if the queue should be checked.
1987  *
1988  * All rb_threadptr_pending_interrupt_* functions are, of course, called by
1989  * a thread that has acquired the GVL.
1990  * Note that all "rb_"-prefixed APIs need the GVL to be held when called.
1991  */
1992 
1993 void
1994 rb_threadptr_pending_interrupt_clear(rb_thread_t *th)
1995 {
1996  rb_ary_clear(th->pending_interrupt_queue);
1997 }
1998 
1999 void
2000 rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
2001 {
2002  rb_ary_push(th->pending_interrupt_queue, v);
2003  th->pending_interrupt_queue_checked = 0;
2004 }
2005 
2006 static void
2007 threadptr_check_pending_interrupt_queue(rb_thread_t *th)
2008 {
2009  if (!th->pending_interrupt_queue) {
2010  rb_raise(rb_eThreadError, "uninitialized thread");
2011  }
2012 }
2013 
2014 enum handle_interrupt_timing {
2015  INTERRUPT_NONE,
2016  INTERRUPT_IMMEDIATE,
2017  INTERRUPT_ON_BLOCKING,
2018  INTERRUPT_NEVER
2019 };
2020 
2021 static enum handle_interrupt_timing
2022 rb_threadptr_pending_interrupt_from_symbol(rb_thread_t *th, VALUE sym)
2023 {
2024  if (sym == sym_immediate) {
2025  return INTERRUPT_IMMEDIATE;
2026  }
2027  else if (sym == sym_on_blocking) {
2028  return INTERRUPT_ON_BLOCKING;
2029  }
2030  else if (sym == sym_never) {
2031  return INTERRUPT_NEVER;
2032  }
2033  else {
2034  rb_raise(rb_eThreadError, "unknown mask signature");
2035  }
2036 }
2037 
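/* Scan the per-thread mask stack from the innermost (most recently pushed)
 * entry outwards and return the first timing configured for err's class or
 * any of its ancestors; INTERRUPT_NONE means no mask matched. */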
2038 static enum handle_interrupt_timing
2039 rb_threadptr_pending_interrupt_check_mask(rb_thread_t *th, VALUE err)
2040 {
2041  VALUE mask;
2042  long mask_stack_len = RARRAY_LEN(th->pending_interrupt_mask_stack);
2043  const VALUE *mask_stack = RARRAY_CONST_PTR(th->pending_interrupt_mask_stack);
2044  VALUE mod;
2045  long i;
2046 
2047  for (i=0; i<mask_stack_len; i++) {
2048  mask = mask_stack[mask_stack_len-(i+1)];
2049 
2050  if (SYMBOL_P(mask)) {
2051  /* do not match RUBY_FATAL_THREAD_KILLED etc */
2052  if (err != rb_cInteger) {
2053  return rb_threadptr_pending_interrupt_from_symbol(th, mask);
2054  }
2055  else {
2056  continue;
2057  }
2058  }
2059 
2060  for (mod = err; mod; mod = RCLASS_SUPER(mod)) {
2061  VALUE klass = mod;
2062  VALUE sym;
2063 
2064  if (BUILTIN_TYPE(mod) == T_ICLASS) {
2065  klass = RBASIC(mod)->klass;
2066  }
2067  else if (mod != RCLASS_ORIGIN(mod)) {
2068  continue;
2069  }
2070 
2071  if ((sym = rb_hash_aref(mask, klass)) != Qnil) {
2072  return rb_threadptr_pending_interrupt_from_symbol(th, sym);
2073  }
2074  }
2075  /* try the next mask */
2076  }
2077  return INTERRUPT_NONE;
2078 }
2079 
2080 static int
2081 rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th)
2082 {
2083  return RARRAY_LEN(th->pending_interrupt_queue) == 0;
2084 }
2085 
2086 static int
2087 rb_threadptr_pending_interrupt_include_p(rb_thread_t *th, VALUE err)
2088 {
2089  int i;
2090  for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
2091  VALUE e = RARRAY_AREF(th->pending_interrupt_queue, i);
2092  if (rb_obj_is_kind_of(e, err)) {
2093  return TRUE;
2094  }
2095  }
2096  return FALSE;
2097 }
2098 
2099 static VALUE
2100 rb_threadptr_pending_interrupt_deque(rb_thread_t *th, enum handle_interrupt_timing timing)
2101 {
2102 #if 1 /* 1 to enable Thread#handle_interrupt, 0 to ignore it */
2103  int i;
2104 
2105  for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
2106  VALUE err = RARRAY_AREF(th->pending_interrupt_queue, i);
2107 
2108  enum handle_interrupt_timing mask_timing = rb_threadptr_pending_interrupt_check_mask(th, CLASS_OF(err));
2109 
2110  switch (mask_timing) {
2111  case INTERRUPT_ON_BLOCKING:
2112  if (timing != INTERRUPT_ON_BLOCKING) {
2113  break;
2114  }
2115  /* fall through */
2116  case INTERRUPT_NONE: /* default: IMMEDIATE */
2117  case INTERRUPT_IMMEDIATE:
2118  rb_ary_delete_at(th->pending_interrupt_queue, i);
2119  return err;
2120  case INTERRUPT_NEVER:
2121  break;
2122  }
2123  }
2124 
2125  th->pending_interrupt_queue_checked = 1;
2126  return Qundef;
2127 #else
2128  VALUE err = rb_ary_shift(th->pending_interrupt_queue);
2129  if (rb_threadptr_pending_interrupt_empty_p(th)) {
2130  th->pending_interrupt_queue_checked = 1;
2131  }
2132  return err;
2133 #endif
2134 }
2135 
2136 static int
2137 threadptr_pending_interrupt_active_p(rb_thread_t *th)
2138 {
2139  /*
2140  * As an optimization, we don't check the async errinfo queue
2141  * if neither the queue nor the thread interrupt mask has changed
2142  * since the last check.
2143  */
2144  if (th->pending_interrupt_queue_checked) {
2145  return 0;
2146  }
2147 
2148  if (rb_threadptr_pending_interrupt_empty_p(th)) {
2149  return 0;
2150  }
2151 
2152  return 1;
2153 }
2154 
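/* Iterator for Thread.handle_interrupt's argument hash: validates each
 * timing symbol and coalesces the pairs into *maskp, which ends up as a
 * bare Symbol (when the only key is Exception itself), an identity hash
 * mapping classes to timings, or false when the frozen argument hash can
 * be used as-is. */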
2155 static int
2156 handle_interrupt_arg_check_i(VALUE key, VALUE val, VALUE args)
2157 {
2158  VALUE *maskp = (VALUE *)args;
2159 
2160  if (val != sym_immediate && val != sym_on_blocking && val != sym_never) {
2161  rb_raise(rb_eArgError, "unknown mask signature");
2162  }
2163 
2164  if (key == rb_eException && (UNDEF_P(*maskp) || NIL_P(*maskp))) {
2165  *maskp = val;
2166  return ST_CONTINUE;
2167  }
2168 
2169  if (RTEST(*maskp)) {
2170  if (!RB_TYPE_P(*maskp, T_HASH)) {
2171  VALUE prev = *maskp;
2172  *maskp = rb_ident_hash_new();
2173  if (SYMBOL_P(prev)) {
2174  rb_hash_aset(*maskp, rb_eException, prev);
2175  }
2176  }
2177  rb_hash_aset(*maskp, key, val);
2178  }
2179  else {
2180  *maskp = Qfalse;
2181  }
2182 
2183  return ST_CONTINUE;
2184 }
2185 
2186 /*
2187  * call-seq:
2188  * Thread.handle_interrupt(hash) { ... } -> result of the block
2189  *
2190  * Changes asynchronous interrupt timing.
2191  *
2192  * _interrupt_ means an asynchronous event and its corresponding procedure,
2193  * triggered by Thread#raise, Thread#kill, signal trap (not supported yet)
2194  * and main thread termination (if the main thread terminates, then all
2195  * other threads will be killed).
2196  *
2197  * The given +hash+ has pairs like <code>ExceptionClass =>
2198  * :TimingSymbol</code>, where ExceptionClass is the interrupt handled by
2199  * the given block and TimingSymbol is one of the following symbols:
2200  *
2201  * [+:immediate+] Invoke interrupts immediately.
2202  * [+:on_blocking+] Invoke interrupts while _BlockingOperation_.
2203  * [+:never+] Never invoke interrupts.
2204  *
2205  * _BlockingOperation_ means an operation that will block the calling thread,
2206  * such as read and write. In the CRuby implementation, _BlockingOperation_ is
2207  * any operation executed without the GVL.
2208  *
2209  * Masked asynchronous interrupts are delayed until they are enabled.
2210  * This method is similar to sigprocmask(3).
2211  *
2212  * === NOTE
2213  *
2214  * Asynchronous interrupts are difficult to use.
2215  *
2216  * If you need to communicate between threads, please consider using another mechanism, such as Queue.
2217  *
2218  * Otherwise, use asynchronous interrupts only with a deep understanding of this method.
2219  *
2220  * === Usage
2221  *
2222  * In this example, we can guard against Thread#raise exceptions.
2223  *
2224  * Using the +:never+ TimingSymbol, the RuntimeError exception will always be
2225  * ignored in the first block of the main thread. In the second
2226  * ::handle_interrupt block we can purposefully handle RuntimeError exceptions.
2227  *
2228  * th = Thread.new do
2229  * Thread.handle_interrupt(RuntimeError => :never) {
2230  * begin
2231  * # You can write resource allocation code safely.
2232  * Thread.handle_interrupt(RuntimeError => :immediate) {
2233  * # ...
2234  * }
2235  * ensure
2236  * # You can write resource deallocation code safely.
2237  * end
2238  * }
2239  * end
2240  * Thread.pass
2241  * # ...
2242  * th.raise "stop"
2243  *
2244  * While we are ignoring the RuntimeError exception, it's safe to write our
2245  * resource allocation code. Then, the ensure block is where we can safely
2246  * deallocate those resources.
2247  *
2248  * ==== Stack control settings
2249  *
2250  * It's possible to stack multiple levels of ::handle_interrupt blocks in order
2251  * to control more than one ExceptionClass and TimingSymbol at a time.
2252  *
2253  * Thread.handle_interrupt(FooError => :never) {
2254  * Thread.handle_interrupt(BarError => :never) {
2255  * # FooError and BarError are prohibited.
2256  * }
2257  * }
2258  *
2259  * ==== Inheritance with ExceptionClass
2260  *
2261  * All exceptions inherited from the ExceptionClass parameter will be considered.
2262  *
2263  * Thread.handle_interrupt(Exception => :never) {
2264  * # all exceptions inherited from Exception are prohibited.
2265  * }
2266  *
2267  * For handling all interrupts, use +Object+ and not +Exception+
2268  * as the ExceptionClass, as kill/terminate interrupts are not handled by +Exception+.
2269  */
2270 static VALUE
2271 rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg)
2272 {
2273  VALUE mask = Qundef;
2274  rb_execution_context_t * volatile ec = GET_EC();
2275  rb_thread_t * volatile th = rb_ec_thread_ptr(ec);
2276  volatile VALUE r = Qnil;
2277  enum ruby_tag_type state;
2278 
2279  if (!rb_block_given_p()) {
2280  rb_raise(rb_eArgError, "block is needed.");
2281  }
2282 
2283  mask_arg = rb_to_hash_type(mask_arg);
2284 
2285  if (OBJ_FROZEN(mask_arg) && rb_hash_compare_by_id_p(mask_arg)) {
2286  mask = Qnil;
2287  }
2288 
2289  rb_hash_foreach(mask_arg, handle_interrupt_arg_check_i, (VALUE)&mask);
2290 
2291  if (UNDEF_P(mask)) {
2292  return rb_yield(Qnil);
2293  }
2294 
2295  if (!RTEST(mask)) {
2296  mask = mask_arg;
2297  }
2298  else if (RB_TYPE_P(mask, T_HASH)) {
2299  OBJ_FREEZE(mask);
2300  }
2301 
2302  rb_ary_push(th->pending_interrupt_mask_stack, mask);
2303  if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2304  th->pending_interrupt_queue_checked = 0;
2305  RUBY_VM_SET_INTERRUPT(th->ec);
2306  }
2307 
2308  EC_PUSH_TAG(th->ec);
2309  if ((state = EC_EXEC_TAG()) == TAG_NONE) {
2310  r = rb_yield(Qnil);
2311  }
2312  EC_POP_TAG();
2313 
2314  rb_ary_pop(th->pending_interrupt_mask_stack);
2315  if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2316  th->pending_interrupt_queue_checked = 0;
2317  RUBY_VM_SET_INTERRUPT(th->ec);
2318  }
2319 
2320  RUBY_VM_CHECK_INTS(th->ec);
2321 
2322  if (state) {
2323  EC_JUMP_TAG(th->ec, state);
2324  }
2325 
2326  return r;
2327 }
2328 
2329 /*
2330  * call-seq:
2331  * target_thread.pending_interrupt?(error = nil) -> true/false
2332  *
2333  * Returns +true+ if the target thread has any deferred asynchronous events queued.
2334  *
2335  * If +error+ is given, then only deferred events of type +error+ are considered.
2336  *
2337  * See ::pending_interrupt? for more information.
2338  */
2339 static VALUE
2340 rb_thread_pending_interrupt_p(int argc, VALUE *argv, VALUE target_thread)
2341 {
2342  rb_thread_t *target_th = rb_thread_ptr(target_thread);
2343 
2344  if (!target_th->pending_interrupt_queue) {
2345  return Qfalse;
2346  }
2347  if (rb_threadptr_pending_interrupt_empty_p(target_th)) {
2348  return Qfalse;
2349  }
2350  if (rb_check_arity(argc, 0, 1)) {
2351  VALUE err = argv[0];
2352  if (!rb_obj_is_kind_of(err, rb_cModule)) {
2353  rb_raise(rb_eTypeError, "class or module required for rescue clause");
2354  }
2355  return RBOOL(rb_threadptr_pending_interrupt_include_p(target_th, err));
2356  }
2357  else {
2358  return Qtrue;
2359  }
2360 }
2361 
2362 /*
2363  * call-seq:
2364  * Thread.pending_interrupt?(error = nil) -> true/false
2365  *
2366  * Returns +true+ if the current thread has any deferred asynchronous events queued.
2367  *
2368  * Since Thread::handle_interrupt can be used to defer asynchronous events,
2369  * this method can be used to determine if there are any deferred events.
2370  *
2371  * If this method returns true, you may want to finish the enclosing +:never+ block.
2372  *
2373  * For example, the following method processes deferred asynchronous events
2374  * immediately.
2375  *
2376  * def Thread.kick_interrupt_immediately
2377  * Thread.handle_interrupt(Object => :immediate) {
2378  * Thread.pass
2379  * }
2380  * end
2381  *
2382  * If +error+ is given, then only deferred events of type +error+ are considered.
2383  *
2384  * === Usage
2385  *
2386  * th = Thread.new{
2387  * Thread.handle_interrupt(RuntimeError => :on_blocking){
2388  * while true
2389  * ...
2390  * # reach safe point to invoke interrupt
2391  * if Thread.pending_interrupt?
2392  * Thread.handle_interrupt(Object => :immediate){}
2393  * end
2394  * ...
2395  * end
2396  * }
2397  * }
2398  * ...
2399  * th.raise # stop thread
2400  *
2401  * This example can also be written as follows; prefer this style if you
2402  * want to avoid asynchronous interrupts altogether.
2403  *
2404  * flag = true
2405  * th = Thread.new{
2406  * Thread.handle_interrupt(RuntimeError => :on_blocking){
2407  * while true
2408  * ...
2409  * # reach safe point to invoke interrupt
2410  * break if flag == false
2411  * ...
2412  * end
2413  * }
2414  * }
2415  * ...
2416  * flag = false # stop thread
2417  */
2418 
2419 static VALUE
2420 rb_thread_s_pending_interrupt_p(int argc, VALUE *argv, VALUE self)
2421 {
2422  return rb_thread_pending_interrupt_p(argc, argv, GET_THREAD()->self);
2423 }
2424 
2425 NORETURN(static void rb_threadptr_to_kill(rb_thread_t *th));
2426 
2427 static void
2428 rb_threadptr_to_kill(rb_thread_t *th)
2429 {
2430  rb_threadptr_pending_interrupt_clear(th);
2431  th->status = THREAD_RUNNABLE;
2432  th->to_kill = 1;
2433  th->ec->errinfo = INT2FIX(TAG_FATAL);
2434  EC_JUMP_TAG(th->ec, TAG_FATAL);
2435 }
2436 
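/* Atomically fetch the pending interrupt bits of th, clearing the unmasked
 * bits (which are returned to the caller) while leaving masked bits set for
 * later delivery; the CAS loop retries if another thread posts an interrupt
 * concurrently. */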
2437 static inline rb_atomic_t
2438 threadptr_get_interrupts(rb_thread_t *th)
2439 {
2440  rb_execution_context_t *ec = th->ec;
2441  rb_atomic_t interrupt;
2442  rb_atomic_t old;
2443 
2444  do {
2445  interrupt = ec->interrupt_flag;
2446  old = ATOMIC_CAS(ec->interrupt_flag, interrupt, interrupt & ec->interrupt_mask);
2447  } while (old != interrupt);
2448  return interrupt & (rb_atomic_t)~ec->interrupt_mask;
2449 }
2450 
2451 static void threadptr_interrupt_exec_exec(rb_thread_t *th);
2452 
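/* Drain and act on all pending interrupt bits of th: flush postponed jobs,
 * run signal handlers (on the main thread) and queued interrupt_exec tasks,
 * raise deferred asynchronous exceptions, honor kill/terminate requests,
 * and yield the time slice on a timer tick. Returns TRUE if a signal
 * handler or an asynchronous exception was actually processed. */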
2453 int
2454 rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
2455 {
2456  rb_atomic_t interrupt;
2457  int postponed_job_interrupt = 0;
2458  int ret = FALSE;
2459 
2460  if (th->ec->raised_flag) return ret;
2461 
2462  while ((interrupt = threadptr_get_interrupts(th)) != 0) {
2463  int sig;
2464  int timer_interrupt;
2465  int pending_interrupt;
2466  int trap_interrupt;
2467  int terminate_interrupt;
2468 
2469  timer_interrupt = interrupt & TIMER_INTERRUPT_MASK;
2470  pending_interrupt = interrupt & PENDING_INTERRUPT_MASK;
2471  postponed_job_interrupt = interrupt & POSTPONED_JOB_INTERRUPT_MASK;
2472  trap_interrupt = interrupt & TRAP_INTERRUPT_MASK;
2473  terminate_interrupt = interrupt & TERMINATE_INTERRUPT_MASK; // request from other ractors
2474 
2475  if (interrupt & VM_BARRIER_INTERRUPT_MASK) {
2476  RB_VM_LOCK_ENTER();
2477  RB_VM_LOCK_LEAVE();
2478  }
2479 
2480  if (postponed_job_interrupt) {
2481  rb_postponed_job_flush(th->vm);
2482  }
2483 
2484  if (trap_interrupt) {
2485  /* signal handling */
2486  if (th == th->vm->ractor.main_thread) {
2487  enum rb_thread_status prev_status = th->status;
2488 
2489  th->status = THREAD_RUNNABLE;
2490  {
2491  while ((sig = rb_get_next_signal()) != 0) {
2492  ret |= rb_signal_exec(th, sig);
2493  }
2494  }
2495  th->status = prev_status;
2496  }
2497 
2498  if (!ccan_list_empty(&th->interrupt_exec_tasks)) {
2499  enum rb_thread_status prev_status = th->status;
2500 
2501  th->status = THREAD_RUNNABLE;
2502  {
2503  threadptr_interrupt_exec_exec(th);
2504  }
2505  th->status = prev_status;
2506  }
2507  }
2508 
2509  /* exception from another thread */
2510  if (pending_interrupt && threadptr_pending_interrupt_active_p(th)) {
2511  VALUE err = rb_threadptr_pending_interrupt_deque(th, blocking_timing ? INTERRUPT_ON_BLOCKING : INTERRUPT_NONE);
2512  RUBY_DEBUG_LOG("err:%"PRIdVALUE, err);
2513  ret = TRUE;
2514 
2515  if (UNDEF_P(err)) {
2516  /* no error */
2517  }
2518  else if (err == RUBY_FATAL_THREAD_KILLED /* Thread#kill received */ ||
2519  err == RUBY_FATAL_THREAD_TERMINATED /* Terminate thread */ ||
2520  err == INT2FIX(TAG_FATAL) /* Thread.exit etc. */ ) {
2521  terminate_interrupt = 1;
2522  }
2523  else {
2524  if (err == th->vm->special_exceptions[ruby_error_stream_closed]) {
2525  /* the only special exception to be queued across threads */
2526  err = ruby_vm_special_exception_copy(err);
2527  }
2528  /* make th runnable if it was sleeping */
2529  if (th->status == THREAD_STOPPED ||
2530  th->status == THREAD_STOPPED_FOREVER)
2531  th->status = THREAD_RUNNABLE;
2532  rb_exc_raise(err);
2533  }
2534  }
2535 
2536  if (terminate_interrupt) {
2537  rb_threadptr_to_kill(th);
2538  }
2539 
2540  if (timer_interrupt) {
2541  uint32_t limits_us = TIME_QUANTUM_USEC;
2542 
2543  if (th->priority > 0)
2544  limits_us <<= th->priority;
2545  else
2546  limits_us >>= -th->priority;
2547 
2548  if (th->status == THREAD_RUNNABLE)
2549  th->running_time_us += 10 * 1000; // 10ms = 10_000us // TODO: use macro
2550 
2551  VM_ASSERT(th->ec->cfp);
2552  EXEC_EVENT_HOOK(th->ec, RUBY_INTERNAL_EVENT_SWITCH, th->ec->cfp->self,
2553  0, 0, 0, Qundef);
2554 
2555  rb_thread_schedule_limits(limits_us);
2556  }
2557  }
2558  return ret;
2559 }
2560 
2561 void
2562 rb_thread_execute_interrupts(VALUE thval)
2563 {
2564  rb_threadptr_execute_interrupts(rb_thread_ptr(thval), 1);
2565 }
2566 
2567 static void
2568 rb_threadptr_ready(rb_thread_t *th)
2569 {
2570  rb_threadptr_interrupt(th);
2571 }
2572 
2573 static VALUE
2574 rb_threadptr_raise(rb_thread_t *target_th, int argc, VALUE *argv)
2575 {
2576  VALUE exc;
2577 
2578  if (rb_threadptr_dead(target_th)) {
2579  return Qnil;
2580  }
2581 
2582  if (argc == 0) {
2583  exc = rb_exc_new(rb_eRuntimeError, 0, 0);
2584  }
2585  else {
2586  exc = rb_make_exception(argc, argv);
2587  }
2588 
2589  /* making an exception object can switch threads,
2590  so we need to check thread deadness again */
2591  if (rb_threadptr_dead(target_th)) {
2592  return Qnil;
2593  }
2594 
2595  rb_ec_setup_exception(GET_EC(), exc, Qundef);
2596  rb_threadptr_pending_interrupt_enque(target_th, exc);
2597  rb_threadptr_interrupt(target_th);
2598  return Qnil;
2599 }
2600 
2601 void
2602 rb_threadptr_signal_raise(rb_thread_t *th, int sig)
2603 {
2604  VALUE argv[2];
2605 
2606  argv[0] = rb_eSignal;
2607  argv[1] = INT2FIX(sig);
2608  rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2609 }
2610 
2611 void
2612 rb_threadptr_signal_exit(rb_thread_t *th)
2613 {
2614  VALUE argv[2];
2615 
2616  argv[0] = rb_eSystemExit;
2617  argv[1] = rb_str_new2("exit");
2618 
2619  // TODO: check signal raise delivery
2620  rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2621 }
2622 
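/* RAISED_EXCEPTION bookkeeping: rb_ec_set_raised() returns 1 if the flag
 * was already set, otherwise sets it and returns 0; rb_ec_reset_raised()
 * is the symmetric clear. */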
2623 int
2624 rb_ec_set_raised(rb_execution_context_t *ec)
2625 {
2626  if (ec->raised_flag & RAISED_EXCEPTION) {
2627  return 1;
2628  }
2629  ec->raised_flag |= RAISED_EXCEPTION;
2630  return 0;
2631 }
2632 
2633 int
2634 rb_ec_reset_raised(rb_execution_context_t *ec)
2635 {
2636  if (!(ec->raised_flag & RAISED_EXCEPTION)) {
2637  return 0;
2638  }
2639  ec->raised_flag &= ~RAISED_EXCEPTION;
2640  return 1;
2641 }
2642 
2643 int
2644 rb_notify_fd_close(int fd, struct rb_io_close_wait_list *busy)
2645 {
2646  rb_vm_t *vm = GET_THREAD()->vm;
2647  struct waiting_fd *wfd = 0, *next;
2648  ccan_list_head_init(&busy->pending_fd_users);
2649  int has_any;
2650  VALUE wakeup_mutex;
2651 
2652  RB_VM_LOCK_ENTER();
2653  {
2654  ccan_list_for_each_safe(&vm->waiting_fds, wfd, next, wfd_node) {
2655  if (wfd->fd == fd) {
2656  rb_thread_t *th = wfd->th;
2657  VALUE err;
2658 
2659  ccan_list_del(&wfd->wfd_node);
2660  ccan_list_add(&busy->pending_fd_users, &wfd->wfd_node);
2661 
2662  wfd->busy = busy;
2663  err = th->vm->special_exceptions[ruby_error_stream_closed];
2664  rb_threadptr_pending_interrupt_enque(th, err);
2665  rb_threadptr_interrupt(th);
2666  }
2667  }
2668  }
2669 
2670  has_any = !ccan_list_empty(&busy->pending_fd_users);
2671  busy->closing_thread = rb_thread_current();
2672  busy->closing_fiber = rb_fiber_current();
2673  wakeup_mutex = Qnil;
2674  if (has_any) {
2675  wakeup_mutex = rb_mutex_new();
2676  RBASIC_CLEAR_CLASS(wakeup_mutex); /* hide from ObjectSpace */
2677  }
2678  busy->wakeup_mutex = wakeup_mutex;
2679 
2680  RB_VM_LOCK_LEAVE();
2681 
2682  /* If the caller didn't pass *busy as a pointer to something on the stack,
2683  we need to guard this mutex object on _our_ C stack for the duration
2684  of this function. */
2685  RB_GC_GUARD(wakeup_mutex);
2686  return has_any;
2687 }
2688 
2689 void
2690 rb_notify_fd_close_wait(struct rb_io_close_wait_list *busy)
2691 {
2692  if (!RB_TEST(busy->wakeup_mutex)) {
2693  /* There was nobody else using this file when we closed it, so we
2694  never bothered to allocate a mutex. */
2695  return;
2696  }
2697 
2698  rb_mutex_lock(busy->wakeup_mutex);
2699  while (!ccan_list_empty(&busy->pending_fd_users)) {
2700  rb_mutex_sleep(busy->wakeup_mutex, Qnil);
2701  }
2702  rb_mutex_unlock(busy->wakeup_mutex);
2703 }
2704 
2705 void
2706 rb_thread_fd_close(int fd)
2707 {
2708  struct rb_io_close_wait_list busy;
2709 
2710  if (rb_notify_fd_close(fd, &busy)) {
2711  rb_notify_fd_close_wait(&busy);
2712  }
2713 }
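/* Example (a minimal sketch): code closing a descriptor that Ruby threads
 * may still be blocked on can use rb_thread_fd_close() to wake those
 * threads with the stream-closed error and wait until they have left,
 * before actually closing the descriptor (the close(2) call below is the
 * caller's responsibility, not part of this API):
 *
 *     rb_thread_fd_close(fd);   // notify and wait out pending users
 *     close(fd);
 */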
2714 
2715 /*
2716  * call-seq:
2717  * thr.raise
2718  * thr.raise(string)
2719  * thr.raise(exception [, string [, array]])
2720  *
2721  * Raises an exception from the given thread. The caller does not have to be
2722  * +thr+. See Kernel#raise for more information.
2723  *
2724  * Thread.abort_on_exception = true
2725  * a = Thread.new { sleep(200) }
2726  * a.raise("Gotcha")
2727  *
2728  * This will produce:
2729  *
2730  * prog.rb:3: Gotcha (RuntimeError)
2731  * from prog.rb:2:in `initialize'
2732  * from prog.rb:2:in `new'
2733  * from prog.rb:2
2734  */
2735 
2736 static VALUE
2737 thread_raise_m(int argc, VALUE *argv, VALUE self)
2738 {
2739  rb_thread_t *target_th = rb_thread_ptr(self);
2740  const rb_thread_t *current_th = GET_THREAD();
2741 
2742  threadptr_check_pending_interrupt_queue(target_th);
2743  rb_threadptr_raise(target_th, argc, argv);
2744 
2745  /* To perform Thread.current.raise as Kernel.raise */
2746  if (current_th == target_th) {
2747  RUBY_VM_CHECK_INTS(target_th->ec);
2748  }
2749  return Qnil;
2750 }
2751 
2752 
2753 /*
2754  * call-seq:
2755  * thr.exit -> thr
2756  * thr.kill -> thr
2757  * thr.terminate -> thr
2758  *
2759  * Terminates +thr+ and schedules another thread to be run, returning
2760  * the terminated Thread. If this is the main thread, or the last
2761  * thread, exits the process.
2762  */
2763 
2764 VALUE
2765 rb_thread_kill(VALUE thread)
2766 {
2767  rb_thread_t *target_th = rb_thread_ptr(thread);
2768 
2769  if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2770  return thread;
2771  }
2772  if (target_th == target_th->vm->ractor.main_thread) {
2773  rb_exit(EXIT_SUCCESS);
2774  }
2775 
2776  RUBY_DEBUG_LOG("target_th:%u", rb_th_serial(target_th));
2777 
2778  if (target_th == GET_THREAD()) {
2779  /* kill myself immediately */
2780  rb_threadptr_to_kill(target_th);
2781  }
2782  else {
2783  threadptr_check_pending_interrupt_queue(target_th);
2784  rb_threadptr_pending_interrupt_enque(target_th, RUBY_FATAL_THREAD_KILLED);
2785  rb_threadptr_interrupt(target_th);
2786  }
2787 
2788  return thread;
2789 }
2790 
2791 int
2792 rb_thread_to_be_killed(VALUE thread)
2793 {
2794  rb_thread_t *target_th = rb_thread_ptr(thread);
2795 
2796  if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2797  return TRUE;
2798  }
2799  return FALSE;
2800 }
2801 
2802 /*
2803  * call-seq:
2804  * Thread.kill(thread) -> thread
2805  *
2806  * Causes the given +thread+ to exit; see also Thread::exit.
2807  *
2808  * count = 0
2809  * a = Thread.new { loop { count += 1 } }
2810  * sleep(0.1) #=> 0
2811  * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
2812  * count #=> 93947
2813  * a.alive? #=> false
2814  */
2815 
2816 static VALUE
2817 rb_thread_s_kill(VALUE obj, VALUE th)
2818 {
2819  return rb_thread_kill(th);
2820 }
2821 
2822 
2823 /*
2824  * call-seq:
2825  * Thread.exit -> thread
2826  *
2827  * Terminates the currently running thread and schedules another thread to be
2828  * run.
2829  *
2830  * If this thread is already marked to be killed, ::exit returns the Thread.
2831  *
2832  * If this is the main thread, or the last thread, exits the process.
2833  */
2834 
2835 static VALUE
2836 rb_thread_exit(VALUE _)
2837 {
2838  rb_thread_t *th = GET_THREAD();
2839  return rb_thread_kill(th->self);
2840 }
2841 
2842 
2843 /*
2844  * call-seq:
2845  * thr.wakeup -> thr
2846  *
2847  * Marks a given thread as eligible for scheduling; however, it may still
2848  * remain blocked on I/O.
2849  *
2850  * *Note:* This does not invoke the scheduler, see #run for more information.
2851  *
2852  * c = Thread.new { Thread.stop; puts "hey!" }
2853  * sleep 0.1 while c.status!='sleep'
2854  * c.wakeup
2855  * c.join
2856  * #=> "hey!"
2857  */
2858 
2859 VALUE
2860 rb_thread_wakeup(VALUE thread)
2861 {
2862  if (!RTEST(rb_thread_wakeup_alive(thread))) {
2863  rb_raise(rb_eThreadError, "killed thread");
2864  }
2865  return thread;
2866 }
2867 
2868 VALUE
2869 rb_thread_wakeup_alive(VALUE thread)
2870 {
2871  rb_thread_t *target_th = rb_thread_ptr(thread);
2872  if (target_th->status == THREAD_KILLED) return Qnil;
2873 
2874  rb_threadptr_ready(target_th);
2875 
2876  if (target_th->status == THREAD_STOPPED ||
2877  target_th->status == THREAD_STOPPED_FOREVER) {
2878  target_th->status = THREAD_RUNNABLE;
2879  }
2880 
2881  return thread;
2882 }
2883 
2884 
2885 /*
2886  * call-seq:
2887  * thr.run -> thr
2888  *
2889  * Wakes up +thr+, making it eligible for scheduling.
2890  *
2891  * a = Thread.new { puts "a"; Thread.stop; puts "c" }
2892  * sleep 0.1 while a.status!='sleep'
2893  * puts "Got here"
2894  * a.run
2895  * a.join
2896  *
2897  * This will produce:
2898  *
2899  * a
2900  * Got here
2901  * c
2902  *
2903  * See also the instance method #wakeup.
2904  */
2905 
2906 VALUE
2907 rb_thread_run(VALUE thread)
2908 {
2909  rb_thread_wakeup(thread);
2910  rb_thread_schedule();
2911  return thread;
2912 }
2913 
2914 
2916 rb_thread_stop(void)
2917 {
2918  if (rb_thread_alone()) {
2919  rb_raise(rb_eThreadError,
2920  "stopping only thread\n\tnote: use sleep to stop forever");
2921  }
2922  rb_thread_sleep_deadly();
2923  return Qnil;
2924 }
2925 
2926 /*
2927  * call-seq:
2928  * Thread.stop -> nil
2929  *
2930  * Stops execution of the current thread, putting it into a ``sleep'' state,
2931  * and schedules execution of another thread.
2932  *
2933  * a = Thread.new { print "a"; Thread.stop; print "c" }
2934  * sleep 0.1 while a.status!='sleep'
2935  * print "b"
2936  * a.run
2937  * a.join
2938  * #=> "abc"
2939  */
2940 
2941 static VALUE
2942 thread_stop(VALUE _)
2943 {
2944  return rb_thread_stop();
2945 }
2946 
2947 /********************************************************************/
2948 
2949 VALUE
2950 rb_thread_list(void)
2951 {
2952  // TODO
2953  return rb_ractor_thread_list();
2954 }
2955 
2956 /*
2957  * call-seq:
2958  * Thread.list -> array
2959  *
2960  * Returns an array of Thread objects for all threads that are either runnable
2961  * or stopped.
2962  *
2963  * Thread.new { sleep(200) }
2964  * Thread.new { 1000000.times {|i| i*i } }
2965  * Thread.new { Thread.stop }
2966  * Thread.list.each {|t| p t}
2967  *
2968  * This will produce:
2969  *
2970  * #<Thread:0x401b3e84 sleep>
2971  * #<Thread:0x401b3f38 run>
2972  * #<Thread:0x401b3fb0 sleep>
2973  * #<Thread:0x401bdf4c run>
2974  */
2975 
2976 static VALUE
2977 thread_list(VALUE _)
2978 {
2979  return rb_thread_list();
2980 }
2981 
2982 VALUE
2983 rb_thread_current(void)
2984 {
2985  return GET_THREAD()->self;
2986 }
2987 
2988 /*
2989  * call-seq:
2990  * Thread.current -> thread
2991  *
2992  * Returns the currently executing thread.
2993  *
2994  * Thread.current #=> #<Thread:0x401bdf4c run>
2995  */
2996 
2997 static VALUE
2998 thread_s_current(VALUE klass)
2999 {
3000  return rb_thread_current();
3001 }
3002 
3003 VALUE
3004 rb_thread_main(void)
3005 {
3006  return GET_RACTOR()->threads.main->self;
3007 }
3008 
3009 /*
3010  * call-seq:
3011  * Thread.main -> thread
3012  *
3013  * Returns the main thread.
3014  */
3015 
3016 static VALUE
3017 rb_thread_s_main(VALUE klass)
3018 {
3019  return rb_thread_main();
3020 }
3021 
3022 
3023 /*
3024  * call-seq:
3025  * Thread.abort_on_exception -> true or false
3026  *
3027  * Returns the status of the global ``abort on exception'' condition.
3028  *
3029  * The default is +false+.
3030  *
3031  * When set to +true+, if any thread is aborted by an exception, the
3032  * raised exception will be re-raised in the main thread.
3033  *
3034  * Can also be specified by the global $DEBUG flag or command line option
3035  * +-d+.
3036  *
3037  * See also ::abort_on_exception=.
3038  *
3039  * There is also an instance level method to set this for a specific thread,
3040  * see #abort_on_exception.
3041  */
3042 
3043 static VALUE
3044 rb_thread_s_abort_exc(VALUE _)
3045 {
3046  return RBOOL(GET_THREAD()->vm->thread_abort_on_exception);
3047 }
3048 
3049 
3050 /*
3051  * call-seq:
3052  * Thread.abort_on_exception= boolean -> true or false
3053  *
3054  * When set to +true+, if any thread is aborted by an exception, the
3055  * raised exception will be re-raised in the main thread.
3056  * Returns the new state.
3057  *
3058  * Thread.abort_on_exception = true
3059  * t1 = Thread.new do
3060  * puts "In new thread"
3061  * raise "Exception from thread"
3062  * end
3063  * sleep(1)
3064  * puts "not reached"
3065  *
3066  * This will produce:
3067  *
3068  * In new thread
3069  * prog.rb:4: Exception from thread (RuntimeError)
3070  * from prog.rb:2:in `initialize'
3071  * from prog.rb:2:in `new'
3072  * from prog.rb:2
3073  *
3074  * See also ::abort_on_exception.
3075  *
3076  * There is also an instance level method to set this for a specific thread,
3077  * see #abort_on_exception=.
3078  */
3079 
3080 static VALUE
3081 rb_thread_s_abort_exc_set(VALUE self, VALUE val)
3082 {
3083  GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
3084  return val;
3085 }
3086 
3087 
3088 /*
3089  * call-seq:
3090  * thr.abort_on_exception -> true or false
3091  *
3092  * Returns the status of the thread-local ``abort on exception'' condition for
3093  * this +thr+.
3094  *
3095  * The default is +false+.
3096  *
3097  * See also #abort_on_exception=.
3098  *
3099  * There is also a class level method to set this for all threads, see
3100  * ::abort_on_exception.
3101  */
3102 
3103 static VALUE
3104 rb_thread_abort_exc(VALUE thread)
3105 {
3106  return RBOOL(rb_thread_ptr(thread)->abort_on_exception);
3107 }
3108 
3109 
3110 /*
3111  * call-seq:
3112  * thr.abort_on_exception= boolean -> true or false
3113  *
3114  * When set to +true+, if this +thr+ is aborted by an exception, the
3115  * raised exception will be re-raised in the main thread.
3116  *
3117  * See also #abort_on_exception.
3118  *
3119  * There is also a class level method to set this for all threads, see
3120  * ::abort_on_exception=.
3121  */
3122 
3123 static VALUE
3124 rb_thread_abort_exc_set(VALUE thread, VALUE val)
3125 {
3126  rb_thread_ptr(thread)->abort_on_exception = RTEST(val);
3127  return val;
3128 }
3129 
3130 
3131 /*
3132  * call-seq:
3133  * Thread.report_on_exception -> true or false
3134  *
3135  * Returns the status of the global ``report on exception'' condition.
3136  *
3137  * The default is +true+ since Ruby 2.5.
3138  *
3139  * All threads created when this flag is true will report
3140  * a message on $stderr if an exception kills the thread.
3141  *
3142  * Thread.new { 1.times { raise } }
3143  *
3144  * will produce this output on $stderr:
3145  *
3146  * #<Thread:...> terminated with exception (report_on_exception is true):
3147  * Traceback (most recent call last):
3148  * 2: from -e:1:in `block in <main>'
3149  * 1: from -e:1:in `times'
3150  *
3151  * This is done to catch errors in threads early.
3152  * In some cases, you might not want this output.
3153  * There are multiple ways to avoid the extra output:
3154  *
3155  * * If the exception is not intended, the best is to fix the cause of
3156  * the exception so it does not happen anymore.
3157  * * If the exception is intended, it might be better to rescue it closer to
3158  * where it is raised rather than letting it kill the Thread.
3159  * * If it is guaranteed the Thread will be joined with Thread#join or
3160  * Thread#value, then it is safe to disable this report with
3161  * <code>Thread.current.report_on_exception = false</code>
3162  * when starting the Thread.
3163  * However, this might handle the exception much later, or not at all
3164  * if the Thread is never joined due to the parent thread being blocked, etc.
3165  *
3166  * See also ::report_on_exception=.
3167  *
3168  * There is also an instance level method to set this for a specific thread,
3169  * see #report_on_exception=.
3170  *
3171  */
3172 
3173 static VALUE
3174 rb_thread_s_report_exc(VALUE _)
3175 {
3176  return RBOOL(GET_THREAD()->vm->thread_report_on_exception);
3177 }
3178 
3179 
3180 /*
3181  * call-seq:
3182  * Thread.report_on_exception= boolean -> true or false
3183  *
3184  * Returns the new state.
3185  * When set to +true+, all threads created afterwards will inherit the
3186  * condition and report a message on $stderr if an exception kills a thread:
3187  *
3188  * Thread.report_on_exception = true
3189  * t1 = Thread.new do
3190  * puts "In new thread"
3191  * raise "Exception from thread"
3192  * end
3193  * sleep(1)
3194  * puts "In the main thread"
3195  *
3196  * This will produce:
3197  *
3198  * In new thread
3199  * #<Thread:...prog.rb:2> terminated with exception (report_on_exception is true):
3200  * Traceback (most recent call last):
3201  * prog.rb:4:in `block in <main>': Exception from thread (RuntimeError)
3202  * In the main thread
3203  *
3204  * See also ::report_on_exception.
3205  *
3206  * There is also an instance level method to set this for a specific thread,
3207  * see #report_on_exception=.
3208  */
3209 
3210 static VALUE
3211 rb_thread_s_report_exc_set(VALUE self, VALUE val)
3212 {
3213  GET_THREAD()->vm->thread_report_on_exception = RTEST(val);
3214  return val;
3215 }
3216 
3217 
3218 /*
3219  * call-seq:
3220  * Thread.ignore_deadlock -> true or false
3221  *
3222  * Returns the status of the global ``ignore deadlock'' condition.
3223  * The default is +false+, so that deadlock conditions are not ignored.
3224  *
3225  * See also ::ignore_deadlock=.
3226  *
3227  */
3228 
3229 static VALUE
3230 rb_thread_s_ignore_deadlock(VALUE _)
3231 {
3232  return RBOOL(GET_THREAD()->vm->thread_ignore_deadlock);
3233 }
3234 
3235 
3236 /*
3237  * call-seq:
3238  * Thread.ignore_deadlock = boolean -> true or false
3239  *
3240  * Returns the new state.
3241  * When set to +true+, the VM will not check for deadlock conditions.
3242  * It is only useful to set this if your application can break a
3243  * deadlock condition via some other means, such as a signal.
3244  *
3245  * Thread.ignore_deadlock = true
3246  * queue = Thread::Queue.new
3247  *
3248  * trap(:SIGUSR1){queue.push "Received signal"}
3249  *
3250  * # raises fatal error unless ignoring deadlock
3251  * puts queue.pop
3252  *
3253  * See also ::ignore_deadlock.
3254  */
3255 
3256 static VALUE
3257 rb_thread_s_ignore_deadlock_set(VALUE self, VALUE val)
3258 {
3259  GET_THREAD()->vm->thread_ignore_deadlock = RTEST(val);
3260  return val;
3261 }
3262 
3263 
3264 /*
3265  * call-seq:
3266  * thr.report_on_exception -> true or false
3267  *
3268  * Returns the status of the thread-local ``report on exception'' condition for
3269  * this +thr+.
3270  *
3271  * The default value when creating a Thread is the value of
3272  * the global flag Thread.report_on_exception.
3273  *
3274  * See also #report_on_exception=.
3275  *
3276  * There is also a class level method to set this for all new threads, see
3277  * ::report_on_exception=.
3278  */
3279 
3280 static VALUE
3281 rb_thread_report_exc(VALUE thread)
3282 {
3283  return RBOOL(rb_thread_ptr(thread)->report_on_exception);
3284 }
3285 
3286 
3287 /*
3288  * call-seq:
3289  * thr.report_on_exception= boolean -> true or false
3290  *
3291  * When set to +true+, a message is printed on $stderr if an exception
3292  * kills this +thr+. See ::report_on_exception for details.
3293  *
3294  * See also #report_on_exception.
3295  *
3296  * There is also a class level method to set this for all new threads, see
3297  * ::report_on_exception=.
3298  */
3299 
3300 static VALUE
3301 rb_thread_report_exc_set(VALUE thread, VALUE val)
3302 {
3303  rb_thread_ptr(thread)->report_on_exception = RTEST(val);
3304  return val;
3305 }
3306 
3307 
3308 /*
3309  * call-seq:
3310  * thr.group -> thgrp or nil
3311  *
3312  * Returns the ThreadGroup which contains the given thread.
3313  *
3314  * Thread.main.group #=> #<ThreadGroup:0x4029d914>
3315  */
3316 
3317 VALUE
3318 rb_thread_group(VALUE thread)
3319 {
3320  return rb_thread_ptr(thread)->thgroup;
3321 }
3322 
3323 static const char *
3324 thread_status_name(rb_thread_t *th, int detail)
3325 {
3326  switch (th->status) {
3327  case THREAD_RUNNABLE:
3328  return th->to_kill ? "aborting" : "run";
3329  case THREAD_STOPPED_FOREVER:
3330  if (detail) return "sleep_forever"; /* otherwise fall through */
3331  case THREAD_STOPPED:
3332  return "sleep";
3333  case THREAD_KILLED:
3334  return "dead";
3335  default:
3336  return "unknown";
3337  }
3338 }
3339 
3340 static int
3341 rb_threadptr_dead(rb_thread_t *th)
3342 {
3343  return th->status == THREAD_KILLED;
3344 }
3345 
3346 
3347 /*
3348  * call-seq:
3349  * thr.status -> string, false or nil
3350  *
3351  * Returns the status of +thr+.
3352  *
3353  * [<tt>"sleep"</tt>]
3354  * Returned if this thread is sleeping or waiting on I/O
3355  * [<tt>"run"</tt>]
3356  * When this thread is executing
3357  * [<tt>"aborting"</tt>]
3358  * If this thread is aborting
3359  * [+false+]
3360  * When this thread is terminated normally
3361  * [+nil+]
3362  * If terminated with an exception.
3363  *
3364  * a = Thread.new { raise("die now") }
3365  * b = Thread.new { Thread.stop }
3366  * c = Thread.new { Thread.exit }
3367  * d = Thread.new { sleep }
3368  * d.kill #=> #<Thread:0x401b3678 aborting>
3369  * a.status #=> nil
3370  * b.status #=> "sleep"
3371  * c.status #=> false
3372  * d.status #=> "aborting"
3373  * Thread.current.status #=> "run"
3374  *
3375  * See also the instance methods #alive? and #stop?
3376  */
3377 
3378 static VALUE
3379 rb_thread_status(VALUE thread)
3380 {
3381  rb_thread_t *target_th = rb_thread_ptr(thread);
3382 
3383  if (rb_threadptr_dead(target_th)) {
3384  if (!NIL_P(target_th->ec->errinfo) &&
3385  !FIXNUM_P(target_th->ec->errinfo)) {
3386  return Qnil;
3387  }
3388  else {
3389  return Qfalse;
3390  }
3391  }
3392  else {
3393  return rb_str_new2(thread_status_name(target_th, FALSE));
3394  }
3395 }
3396 
3397 
3398 /*
3399  * call-seq:
3400  * thr.alive? -> true or false
3401  *
3402  * Returns +true+ if +thr+ is running or sleeping.
3403  *
3404  * thr = Thread.new { }
3405  * thr.join #=> #<Thread:0x401b3fb0 dead>
3406  * Thread.current.alive? #=> true
3407  * thr.alive? #=> false
3408  *
3409  * See also #stop? and #status.
3410  */
3411 
3412 static VALUE
3413 rb_thread_alive_p(VALUE thread)
3414 {
3415  return RBOOL(!thread_finished(rb_thread_ptr(thread)));
3416 }
3417 
3418 /*
3419  * call-seq:
3420  * thr.stop? -> true or false
3421  *
3422  * Returns +true+ if +thr+ is dead or sleeping.
3423  *
3424  * a = Thread.new { Thread.stop }
3425  * b = Thread.current
3426  * a.stop? #=> true
3427  * b.stop? #=> false
3428  *
3429  * See also #alive? and #status.
3430  */
3431 
3432 static VALUE
3433 rb_thread_stop_p(VALUE thread)
3434 {
3435  rb_thread_t *th = rb_thread_ptr(thread);
3436 
3437  if (rb_threadptr_dead(th)) {
3438  return Qtrue;
3439  }
3440  return RBOOL(th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER);
3441 }
3442 
3443 /*
3444  * call-seq:
3445  * thr.name -> string
3446  *
3447  * Returns the name of the thread.
3448  */
3449 
3450 static VALUE
3451 rb_thread_getname(VALUE thread)
3452 {
3453  return rb_thread_ptr(thread)->name;
3454 }
3455 
3456 /*
3457  * call-seq:
3458  * thr.name=(name) -> string
3459  *
3460  * Sets the given name on the Ruby thread.
3461  * On some platforms, it may also set the name of the underlying pthread and/or make it visible to the kernel.
3462  */
3463 
3464 static VALUE
3465 rb_thread_setname(VALUE thread, VALUE name)
3466 {
3467  rb_thread_t *target_th = rb_thread_ptr(thread);
3468 
3469  if (!NIL_P(name)) {
3470  rb_encoding *enc;
3471  StringValueCStr(name);
3472  enc = rb_enc_get(name);
3473  if (!rb_enc_asciicompat(enc)) {
3474  rb_raise(rb_eArgError, "ASCII incompatible encoding (%s)",
3475  rb_enc_name(enc));
3476  }
3477  name = rb_str_new_frozen(name);
3478  }
3479  target_th->name = name;
3480  if (threadptr_initialized(target_th) && target_th->has_dedicated_nt) {
3481  native_set_another_thread_name(target_th->nt->thread_id, name);
3482  }
3483  return name;
3484 }
3485 
3486 #if USE_NATIVE_THREAD_NATIVE_THREAD_ID
3487 /*
3488  * call-seq:
3489  * thr.native_thread_id -> integer
3490  *
3491  * Return the native thread ID which is used by the Ruby thread.
3492  *
3493  * The ID depends on the OS (it is not the POSIX thread ID returned by pthread_self(3)).
3494  * * On Linux it is TID returned by gettid(2).
3495  * * On macOS it is the system-wide unique integral ID of thread returned
3496  * by pthread_threadid_np(3).
3497  * * On FreeBSD it is the unique integral ID of the thread returned by
3498  * pthread_getthreadid_np(3).
3499  * * On Windows it is the thread identifier returned by GetThreadId().
3500  * * On other platforms, it raises NotImplementedError.
3501  *
3502  * NOTE:
3503  * If the thread is not yet associated with, or has already been dissociated
3504  * from, a native thread, it returns _nil_.
3505  * If the Ruby implementation uses an M:N thread model, the ID may change
3506  * depending on the timing.
3507  */
3508 
3509 static VALUE
3510 rb_thread_native_thread_id(VALUE thread)
3511 {
3512  rb_thread_t *target_th = rb_thread_ptr(thread);
3513  if (rb_threadptr_dead(target_th)) return Qnil;
3514  return native_thread_native_thread_id(target_th);
3515 }
3516 #else
3517 # define rb_thread_native_thread_id rb_f_notimplement
3518 #endif
3519 
3520 /*
3521  * call-seq:
3522  * thr.to_s -> string
3523  *
3524  * Dump the name, id, and status of _thr_ to a string.
3525  */
3526 
3527 static VALUE
3528 rb_thread_to_s(VALUE thread)
3529 {
3530  VALUE cname = rb_class_path(rb_obj_class(thread));
3531  rb_thread_t *target_th = rb_thread_ptr(thread);
3532  const char *status;
3533  VALUE str, loc;
3534 
3535  status = thread_status_name(target_th, TRUE);
3536  str = rb_sprintf("#<%"PRIsVALUE":%p", cname, (void *)thread);
3537  if (!NIL_P(target_th->name)) {
3538  rb_str_catf(str, "@%"PRIsVALUE, target_th->name);
3539  }
3540  if ((loc = threadptr_invoke_proc_location(target_th)) != Qnil) {
3541  rb_str_catf(str, " %"PRIsVALUE":%"PRIsVALUE,
3542  RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
3543  }
3544  rb_str_catf(str, " %s>", status);
3545 
3546  return str;
3547 }
3548 
3549 /* variables for recursive traversals */
3550 #define recursive_key id__recursive_key__
3551 
3552 static VALUE
3553 threadptr_local_aref(rb_thread_t *th, ID id)
3554 {
3555  if (id == recursive_key) {
3556  return th->ec->local_storage_recursive_hash;
3557  }
3558  else {
3559  VALUE val;
3560  struct rb_id_table *local_storage = th->ec->local_storage;
3561 
3562  if (local_storage != NULL && rb_id_table_lookup(local_storage, id, &val)) {
3563  return val;
3564  }
3565  else {
3566  return Qnil;
3567  }
3568  }
3569 }
3570 
3571 VALUE
3572 rb_thread_local_aref(VALUE thread, ID id)
3573 {
3574  return threadptr_local_aref(rb_thread_ptr(thread), id);
3575 }
3576 
3577 /*
3578  * call-seq:
3579  * thr[sym] -> obj or nil
3580  *
3581  * Attribute Reference---Returns the value of a fiber-local variable (current thread's root fiber
3582  * if not explicitly inside a Fiber), using either a symbol or a string name.
3583  * If the specified variable does not exist, returns +nil+.
3584  *
3585  * [
3586  * Thread.new { Thread.current["name"] = "A" },
3587  * Thread.new { Thread.current[:name] = "B" },
3588  * Thread.new { Thread.current["name"] = "C" }
3589  * ].each do |th|
3590  * th.join
3591  * puts "#{th.inspect}: #{th[:name]}"
3592  * end
3593  *
3594  * This will produce:
3595  *
3596  * #<Thread:0x00000002a54220 dead>: A
3597  * #<Thread:0x00000002a541a8 dead>: B
3598  * #<Thread:0x00000002a54130 dead>: C
3599  *
3600  * Thread#[] and Thread#[]= are not thread-local but fiber-local.
3601  * This confusion did not exist in Ruby 1.8 because
3602  * fibers have only been available since Ruby 1.9.
3603  * Ruby 1.9 made these methods fiber-local in order to preserve
3604  * the following idiom for dynamic scope.
3605  *
3606  * def meth(newvalue)
3607  * begin
3608  * oldvalue = Thread.current[:name]
3609  * Thread.current[:name] = newvalue
3610  * yield
3611  * ensure
3612  * Thread.current[:name] = oldvalue
3613  * end
3614  * end
3615  *
3616  * The idiom may not work as dynamic scope if the methods are thread-local
3617  * and a given block switches fibers.
3618  *
3619  * f = Fiber.new {
3620  * meth(1) {
3621  * Fiber.yield
3622  * }
3623  * }
3624  * meth(2) {
3625  * f.resume
3626  * }
3627  * f.resume
3628  * p Thread.current[:name]
3629  * #=> nil if fiber-local
3630  * #=> 2 if thread-local (The value 2 is leaked to outside of meth method.)
3631  *
3632  * For thread-local variables, please see #thread_variable_get and
3633  * #thread_variable_set.
3634  *
3635  */
3636 
3637 static VALUE
3638 rb_thread_aref(VALUE thread, VALUE key)
3639 {
3640  ID id = rb_check_id(&key);
3641  if (!id) return Qnil;
3642  return rb_thread_local_aref(thread, id);
3643 }
3644 
3645 /*
3646  * call-seq:
3647  * thr.fetch(sym) -> obj
3648  * thr.fetch(sym) { } -> obj
3649  * thr.fetch(sym, default) -> obj
3650  *
3651  * Returns a fiber-local for the given key. If the key can't be
3652  * found, there are several options: With no other arguments, it will
3653  * raise a KeyError exception; if <i>default</i> is given, then that
3654  * will be returned; if the optional code block is specified, then
3655  * that will be run and its result returned. See Thread#[] and
3656  * Hash#fetch.
3657  */
3658 static VALUE
3659 rb_thread_fetch(int argc, VALUE *argv, VALUE self)
3660 {
3661  VALUE key, val;
3662  ID id;
3663  rb_thread_t *target_th = rb_thread_ptr(self);
3664  int block_given;
3665 
3666  rb_check_arity(argc, 1, 2);
3667  key = argv[0];
3668 
3669  block_given = rb_block_given_p();
3670  if (block_given && argc == 2) {
3671  rb_warn("block supersedes default value argument");
3672  }
3673 
3674  id = rb_check_id(&key);
3675 
3676  if (id == recursive_key) {
3677  return target_th->ec->local_storage_recursive_hash;
3678  }
3679  else if (id && target_th->ec->local_storage &&
3680  rb_id_table_lookup(target_th->ec->local_storage, id, &val)) {
3681  return val;
3682  }
3683  else if (block_given) {
3684  return rb_yield(key);
3685  }
3686  else if (argc == 1) {
3687  rb_key_err_raise(rb_sprintf("key not found: %+"PRIsVALUE, key), self, key);
3688  }
3689  else {
3690  return argv[1];
3691  }
3692 }
3693 
3694 static VALUE
3695 threadptr_local_aset(rb_thread_t *th, ID id, VALUE val)
3696 {
3697  if (id == recursive_key) {
3698  th->ec->local_storage_recursive_hash = val;
3699  return val;
3700  }
3701  else {
3702  struct rb_id_table *local_storage = th->ec->local_storage;
3703 
3704  if (NIL_P(val)) {
3705  if (!local_storage) return Qnil;
3706  rb_id_table_delete(local_storage, id);
3707  return Qnil;
3708  }
3709  else {
3710  if (local_storage == NULL) {
3711  th->ec->local_storage = local_storage = rb_id_table_create(0);
3712  }
3713  rb_id_table_insert(local_storage, id, val);
3714  return val;
3715  }
3716  }
3717 }
3718 
3719 VALUE
3720 rb_thread_local_aset(VALUE thread, ID id, VALUE val)
3721 {
3722  if (OBJ_FROZEN(thread)) {
3723  rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3724  }
3725 
3726  return threadptr_local_aset(rb_thread_ptr(thread), id, val);
3727 }
3728 
3729 /*
3730  * call-seq:
3731  * thr[sym] = obj -> obj
3732  *
3733  * Attribute Assignment---Sets or creates the value of a fiber-local variable,
3734  * using either a symbol or a string.
3735  *
3736  * See also Thread#[].
3737  *
3738  * For thread-local variables, please see #thread_variable_set and
3739  * #thread_variable_get.
3740  */
3741 
3742 static VALUE
3743 rb_thread_aset(VALUE self, VALUE id, VALUE val)
3744 {
3745  return rb_thread_local_aset(self, rb_to_id(id), val);
3746 }
3747 
3748 /*
3749  * call-seq:
3750  * thr.thread_variable_get(key) -> obj or nil
3751  *
3752  * Returns the value of a thread local variable that has been set. Note that
3753  * these are different than fiber local values. For fiber local values,
3754  * please see Thread#[] and Thread#[]=.
3755  *
3756  * Thread local values are carried along with threads, and do not respect
3757  * fibers. For example:
3758  *
3759  * Thread.new {
3760  * Thread.current.thread_variable_set("foo", "bar") # set a thread local
3761  * Thread.current["foo"] = "bar" # set a fiber local
3762  *
3763  * Fiber.new {
3764  * Fiber.yield [
3765  * Thread.current.thread_variable_get("foo"), # get the thread local
3766  * Thread.current["foo"], # get the fiber local
3767  * ]
3768  * }.resume
3769  * }.join.value # => ['bar', nil]
3770  *
3771  * The value "bar" is returned for the thread local, while nil is returned
3772  * for the fiber local. The fiber is executed in the same thread, so the
3773  * thread local values are available.
3774  */
3775 
3776 static VALUE
3777 rb_thread_variable_get(VALUE thread, VALUE key)
3778 {
3779  VALUE locals;
3780  VALUE symbol = rb_to_symbol(key);
3781 
3782  if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3783  return Qnil;
3784  }
3785  locals = rb_thread_local_storage(thread);
3786  return rb_hash_aref(locals, symbol);
3787 }
3788 
3789 /*
3790  * call-seq:
3791  * thr.thread_variable_set(key, value)
3792  *
3793  * Sets a thread local with +key+ to +value+. Note that these are local to
3794  * threads, and not to fibers. Please see Thread#thread_variable_get and
3795  * Thread#[] for more information.
3796  */
3797 
3798 static VALUE
3799 rb_thread_variable_set(VALUE thread, VALUE key, VALUE val)
3800 {
3801  VALUE locals;
3802 
3803  if (OBJ_FROZEN(thread)) {
3804  rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3805  }
3806 
3807  locals = rb_thread_local_storage(thread);
3808  return rb_hash_aset(locals, rb_to_symbol(key), val);
3809 }
3810 
3811 /*
3812  * call-seq:
3813  * thr.key?(sym) -> true or false
3814  *
3815  * Returns +true+ if the given string (or symbol) exists as a fiber-local
3816  * variable.
3817  *
3818  * me = Thread.current
3819  * me[:oliver] = "a"
3820  * me.key?(:oliver) #=> true
3821  * me.key?(:stanley) #=> false
3822  */
3823 
3824 static VALUE
3825 rb_thread_key_p(VALUE self, VALUE key)
3826 {
3827  VALUE val;
3828  ID id = rb_check_id(&key);
3829  struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3830 
3831  if (!id || local_storage == NULL) {
3832  return Qfalse;
3833  }
3834  return RBOOL(rb_id_table_lookup(local_storage, id, &val));
3835 }
3836 
3837 static enum rb_id_table_iterator_result
3838 thread_keys_i(ID key, VALUE value, void *ary)
3839 {
3840  rb_ary_push((VALUE)ary, ID2SYM(key));
3841  return ID_TABLE_CONTINUE;
3842 }
3843 
3844 int
3845 rb_thread_alone(void)
3846 {
3847  // TODO
3848  return rb_ractor_living_thread_num(GET_RACTOR()) == 1;
3849 }
3850 
3851 /*
3852  * call-seq:
3853  * thr.keys -> array
3854  *
3855  * Returns an array of the names of the fiber-local variables (as Symbols).
3856  *
3857  * thr = Thread.new do
3858  * Thread.current[:cat] = 'meow'
3859  * Thread.current["dog"] = 'woof'
3860  * end
3861  * thr.join #=> #<Thread:0x401b3f10 dead>
3862  * thr.keys #=> [:dog, :cat]
3863  */
3864 
3865 static VALUE
3866 rb_thread_keys(VALUE self)
3867 {
3868  struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3869  VALUE ary = rb_ary_new();
3870 
3871  if (local_storage) {
3872  rb_id_table_foreach(local_storage, thread_keys_i, (void *)ary);
3873  }
3874  return ary;
3875 }
3876 
3877 static int
3878 keys_i(VALUE key, VALUE value, VALUE ary)
3879 {
3880  rb_ary_push(ary, key);
3881  return ST_CONTINUE;
3882 }
3883 
3884 /*
3885  * call-seq:
3886  * thr.thread_variables -> array
3887  *
3888  * Returns an array of the names of the thread-local variables (as Symbols).
3889  *
3890  * thr = Thread.new do
3891  * Thread.current.thread_variable_set(:cat, 'meow')
3892  * Thread.current.thread_variable_set("dog", 'woof')
3893  * end
3894  * thr.join #=> #<Thread:0x401b3f10 dead>
3895  * thr.thread_variables #=> [:dog, :cat]
3896  *
3897  * Note that these are not fiber local variables. Please see Thread#[] and
3898  * Thread#thread_variable_get for more details.
3899  */
3900 
3901 static VALUE
3902 rb_thread_variables(VALUE thread)
3903 {
3904  VALUE locals;
3905  VALUE ary;
3906 
3907  ary = rb_ary_new();
3908  if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3909  return ary;
3910  }
3911  locals = rb_thread_local_storage(thread);
3912  rb_hash_foreach(locals, keys_i, ary);
3913 
3914  return ary;
3915 }
3916 
3917 /*
3918  * call-seq:
3919  * thr.thread_variable?(key) -> true or false
3920  *
3921  * Returns +true+ if the given string (or symbol) exists as a thread-local
3922  * variable.
3923  *
3924  * me = Thread.current
3925  * me.thread_variable_set(:oliver, "a")
3926  * me.thread_variable?(:oliver) #=> true
3927  * me.thread_variable?(:stanley) #=> false
3928  *
3929  * Note that these are not fiber local variables. Please see Thread#[] and
3930  * Thread#thread_variable_get for more details.
3931  */
3932 
3933 static VALUE
3934 rb_thread_variable_p(VALUE thread, VALUE key)
3935 {
3936  VALUE locals;
3937  VALUE symbol = rb_to_symbol(key);
3938 
3939  if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3940  return Qfalse;
3941  }
3942  locals = rb_thread_local_storage(thread);
3943 
3944  return RBOOL(rb_hash_lookup(locals, symbol) != Qnil);
3945 }
3946 
3947 /*
3948  * call-seq:
3949  * thr.priority -> integer
3950  *
3951  * Returns the priority of <i>thr</i>. The default is inherited from the
3952  * thread that created the new thread, or zero for the
3953  * initial main thread; higher-priority threads will run more frequently
3954  * than lower-priority threads (but lower-priority threads can also run).
3955  *
3956  * This is just a hint for the Ruby thread scheduler. It may be ignored on
3957  * some platforms.
3958  *
3959  * Thread.current.priority #=> 0
3960  */
3961 
3962 static VALUE
3963 rb_thread_priority(VALUE thread)
3964 {
3965  return INT2NUM(rb_thread_ptr(thread)->priority);
3966 }
3967 
3968 
3969 /*
3970  * call-seq:
3971  * thr.priority= integer -> thr
3972  *
3973  * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
3974  * will run more frequently than lower-priority threads (but lower-priority
3975  * threads can also run).
3976  *
3977  * This is just a hint for the Ruby thread scheduler. It may be ignored
3978  * on some platforms.
3979  *
3980  * count1 = count2 = 0
3981  * a = Thread.new do
3982  *   loop { count1 += 1 }
3983  * end
3984  * a.priority = -1
3985  *
3986  * b = Thread.new do
3987  *   loop { count2 += 1 }
3988  * end
3989  * b.priority = -2
3990  * sleep 1 #=> 1
3991  * count1 #=> 622504
3992  * count2 #=> 5832
3993  */
3994 
3995 static VALUE
3996 rb_thread_priority_set(VALUE thread, VALUE prio)
3997 {
3998  rb_thread_t *target_th = rb_thread_ptr(thread);
3999  int priority;
4000 
4001 #if USE_NATIVE_THREAD_PRIORITY
4002  target_th->priority = NUM2INT(prio);
4003  native_thread_apply_priority(target_th);
4004 #else
4005  priority = NUM2INT(prio);
4006  if (priority > RUBY_THREAD_PRIORITY_MAX) {
4007  priority = RUBY_THREAD_PRIORITY_MAX;
4008  }
4009  else if (priority < RUBY_THREAD_PRIORITY_MIN) {
4010  priority = RUBY_THREAD_PRIORITY_MIN;
4011  }
4012  target_th->priority = (int8_t)priority;
4013 #endif
4014  return INT2NUM(target_th->priority);
4015 }
4016 
4017 /* for IO */
4018 
4019 #if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
4020 
4021 /*
4022  * Several Unix platforms support file descriptors bigger than FD_SETSIZE
4023  * in the select(2) system call.
4024  *
4025  * - Linux 2.2.12 (?)
4026  * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
4027  *   select(2) documents how to allocate fd_set dynamically.
4028  *   http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
4029  * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
4030  * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
4031  *   select(2) documents how to allocate fd_set dynamically.
4032  *   http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
4033  * - Solaris 8 has select_large_fdset
4034  * - Mac OS X 10.7 (Lion)
4035  *   select(2) returns EINVAL if nfds is greater than FD_SETSIZE and
4036  *   _DARWIN_UNLIMITED_SELECT (or _DARWIN_C_SOURCE) isn't defined.
4037  *   https://developer.apple.com/library/archive/releasenotes/Darwin/SymbolVariantsRelNotes/index.html
4038  *
4039  * When fd_set is not big enough to hold big file descriptors,
4040  * it should be allocated dynamically.
4041  * Note that this assumes fd_set is structured as a bitmap.
4042  *
4043  * rb_fd_init allocates the memory.
4044  * rb_fd_term frees the memory.
4045  * rb_fd_set may re-allocate the bitmap.
4046  *
4047  * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
4048  */
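
/*
 * A minimal usage sketch of the rb_fdset_t API defined below (illustrative
 * only; `big_fd` stands in for any descriptor, possibly >= FD_SETSIZE):
 *
 *   rb_fdset_t fds;
 *   struct timeval tv = { 1, 0 };           // 1 second
 *
 *   rb_fd_init(&fds);
 *   rb_fd_set(big_fd, &fds);                // grows the bitmap as needed
 *   if (rb_fd_select(big_fd + 1, &fds, NULL, NULL, &tv) > 0 &&
 *       rb_fd_isset(big_fd, &fds)) {
 *       // big_fd became readable within the timeout
 *   }
 *   rb_fd_term(&fds);                       // releases the bitmap
 */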
4049 
4050 void
4051 rb_fd_init(rb_fdset_t *fds)
4052 {
4053  fds->maxfd = 0;
4054  fds->fdset = ALLOC(fd_set);
4055  FD_ZERO(fds->fdset);
4056 }
4057 
4058 void
4059 rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
4060 {
4061  size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
4062 
4063  if (size < sizeof(fd_set))
4064  size = sizeof(fd_set);
4065  dst->maxfd = src->maxfd;
4066  dst->fdset = xmalloc(size);
4067  memcpy(dst->fdset, src->fdset, size);
4068 }
4069 
4070 void
4071 rb_fd_term(rb_fdset_t *fds)
4072 {
4073  xfree(fds->fdset);
4074  fds->maxfd = 0;
4075  fds->fdset = 0;
4076 }
4077 
4078 void
4079 rb_fd_zero(rb_fdset_t *fds)
4080 {
4081  if (fds->fdset)
4082  MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
4083 }
4084 
4085 static void
4086 rb_fd_resize(int n, rb_fdset_t *fds)
4087 {
4088  size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
4089  size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
4090 
4091  if (m < sizeof(fd_set)) m = sizeof(fd_set);
4092  if (o < sizeof(fd_set)) o = sizeof(fd_set);
4093 
4094  if (m > o) {
4095  fds->fdset = xrealloc(fds->fdset, m);
4096  memset((char *)fds->fdset + o, 0, m - o);
4097  }
4098  if (n >= fds->maxfd) fds->maxfd = n + 1;
4099 }
4100 
4101 void
4102 rb_fd_set(int n, rb_fdset_t *fds)
4103 {
4104  rb_fd_resize(n, fds);
4105  FD_SET(n, fds->fdset);
4106 }
4107 
4108 void
4109 rb_fd_clr(int n, rb_fdset_t *fds)
4110 {
4111  if (n >= fds->maxfd) return;
4112  FD_CLR(n, fds->fdset);
4113 }
4114 
4115 int
4116 rb_fd_isset(int n, const rb_fdset_t *fds)
4117 {
4118  if (n >= fds->maxfd) return 0;
4119  return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
4120 }
4121 
4122 void
4123 rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
4124 {
4125  size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);
4126 
4127  if (size < sizeof(fd_set)) size = sizeof(fd_set);
4128  dst->maxfd = max;
4129  dst->fdset = xrealloc(dst->fdset, size);
4130  memcpy(dst->fdset, src, size);
4131 }
4132 
4133 void
4134 rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
4135 {
4136  size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
4137 
4138  if (size < sizeof(fd_set))
4139  size = sizeof(fd_set);
4140  dst->maxfd = src->maxfd;
4141  dst->fdset = xrealloc(dst->fdset, size);
4142  memcpy(dst->fdset, src->fdset, size);
4143 }
4144 
4145 int
4146 rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
4147 {
4148  fd_set *r = NULL, *w = NULL, *e = NULL;
4149  if (readfds) {
4150  rb_fd_resize(n - 1, readfds);
4151  r = rb_fd_ptr(readfds);
4152  }
4153  if (writefds) {
4154  rb_fd_resize(n - 1, writefds);
4155  w = rb_fd_ptr(writefds);
4156  }
4157  if (exceptfds) {
4158  rb_fd_resize(n - 1, exceptfds);
4159  e = rb_fd_ptr(exceptfds);
4160  }
4161  return select(n, r, w, e, timeout);
4162 }
4163 
4164 #define rb_fd_no_init(fds) ((void)((fds)->fdset = 0), (void)((fds)->maxfd = 0))
4165 
4166 #undef FD_ZERO
4167 #undef FD_SET
4168 #undef FD_CLR
4169 #undef FD_ISSET
4170 
4171 #define FD_ZERO(f) rb_fd_zero(f)
4172 #define FD_SET(i, f) rb_fd_set((i), (f))
4173 #define FD_CLR(i, f) rb_fd_clr((i), (f))
4174 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
4175 
4176 #elif defined(_WIN32)
4177 
4178 void
4179 rb_fd_init(rb_fdset_t *set)
4180 {
4181  set->capa = FD_SETSIZE;
4182  set->fdset = ALLOC(fd_set);
4183  FD_ZERO(set->fdset);
4184 }
4185 
4186 void
4187 rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
4188 {
4189  rb_fd_init(dst);
4190  rb_fd_dup(dst, src);
4191 }
4192 
4193 void
4194 rb_fd_term(rb_fdset_t *set)
4195 {
4196  xfree(set->fdset);
4197  set->fdset = NULL;
4198  set->capa = 0;
4199 }
4200 
4201 void
4202 rb_fd_set(int fd, rb_fdset_t *set)
4203 {
4204  unsigned int i;
4205  SOCKET s = rb_w32_get_osfhandle(fd);
4206 
4207  for (i = 0; i < set->fdset->fd_count; i++) {
4208  if (set->fdset->fd_array[i] == s) {
4209  return;
4210  }
4211  }
4212  if (set->fdset->fd_count >= (unsigned)set->capa) {
4213  set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
4214  set->fdset =
4215  rb_xrealloc_mul_add(
4216  set->fdset, set->capa, sizeof(SOCKET), sizeof(unsigned int));
4217  }
4218  set->fdset->fd_array[set->fdset->fd_count++] = s;
4219 }
4220 
4221 #undef FD_ZERO
4222 #undef FD_SET
4223 #undef FD_CLR
4224 #undef FD_ISSET
4225 
4226 #define FD_ZERO(f) rb_fd_zero(f)
4227 #define FD_SET(i, f) rb_fd_set((i), (f))
4228 #define FD_CLR(i, f) rb_fd_clr((i), (f))
4229 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
4230 
4231 #define rb_fd_no_init(fds) (void)((fds)->fdset = 0)
4232 
4233 #endif
4234 
4235 #ifndef rb_fd_no_init
4236 #define rb_fd_no_init(fds) (void)(fds)
4237 #endif
4238 
4239 static int
4240 wait_retryable(volatile int *result, int errnum, rb_hrtime_t *rel, rb_hrtime_t end)
4241 {
4242  int r = *result;
4243  if (r < 0) {
4244  switch (errnum) {
4245  case EINTR:
4246 #ifdef ERESTART
4247  case ERESTART:
4248 #endif
4249  *result = 0;
4250  if (rel && hrtime_update_expire(rel, end)) {
4251  *rel = 0;
4252  }
4253  return TRUE;
4254  }
4255  return FALSE;
4256  }
4257  else if (r == 0) {
4258  /* check for spurious wakeup */
4259  if (rel) {
4260  return !hrtime_update_expire(rel, end);
4261  }
4262  return TRUE;
4263  }
4264  return FALSE;
4265 }
4267 struct select_set {
4268  int max;
4269  rb_thread_t *th;
4270  rb_fdset_t *rset;
4271  rb_fdset_t *wset;
4272  rb_fdset_t *eset;
4273  rb_fdset_t orig_rset;
4274  rb_fdset_t orig_wset;
4275  rb_fdset_t orig_eset;
4276  struct timeval *timeout;
4277 };
4278 
4279 static VALUE
4280 select_set_free(VALUE p)
4281 {
4282  struct select_set *set = (struct select_set *)p;
4283 
4284  rb_fd_term(&set->orig_rset);
4285  rb_fd_term(&set->orig_wset);
4286  rb_fd_term(&set->orig_eset);
4287 
4288  return Qfalse;
4289 }
4290 
4291 static VALUE
4292 do_select(VALUE p)
4293 {
4294  struct select_set *set = (struct select_set *)p;
4295  volatile int result = 0;
4296  int lerrno;
4297  rb_hrtime_t *to, rel, end = 0;
4298 
4299  timeout_prepare(&to, &rel, &end, set->timeout);
4300  volatile rb_hrtime_t endtime = end;
4301 #define restore_fdset(dst, src) \
4302  ((dst) ? rb_fd_dup(dst, src) : (void)0)
4303 #define do_select_update() \
4304  (restore_fdset(set->rset, &set->orig_rset), \
4305  restore_fdset(set->wset, &set->orig_wset), \
4306  restore_fdset(set->eset, &set->orig_eset), \
4307  TRUE)
4308 
4309  do {
4310  lerrno = 0;
4311 
4312  BLOCKING_REGION(set->th, {
4313  struct timeval tv;
4314 
4315  if (!RUBY_VM_INTERRUPTED(set->th->ec)) {
4316  result = native_fd_select(set->max,
4317  set->rset, set->wset, set->eset,
4318  rb_hrtime2timeval(&tv, to), set->th);
4319  if (result < 0) lerrno = errno;
4320  }
4321  }, ubf_select, set->th, TRUE);
4322 
4323  RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec); /* may raise */
4324  } while (wait_retryable(&result, lerrno, to, endtime) && do_select_update());
4325 
4326  if (result < 0) {
4327  errno = lerrno;
4328  }
4329 
4330  return (VALUE)result;
4331 }
4332 
4333 int
4334 rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
4335  struct timeval *timeout)
4336 {
4337  struct select_set set;
4338 
4339  set.th = GET_THREAD();
4340  RUBY_VM_CHECK_INTS_BLOCKING(set.th->ec);
4341  set.max = max;
4342  set.rset = read;
4343  set.wset = write;
4344  set.eset = except;
4345  set.timeout = timeout;
4346 
4347  if (!set.rset && !set.wset && !set.eset) {
4348  if (!timeout) {
4349  rb_thread_sleep_forever();
4350  return 0;
4351  }
4352  rb_thread_wait_for(*timeout);
4353  return 0;
4354  }
4355 
4356 #define fd_init_copy(f) do { \
4357  if (set.f) { \
4358  rb_fd_resize(set.max - 1, set.f); \
4359  if (&set.orig_##f != set.f) { /* sigwait_fd */ \
4360  rb_fd_init_copy(&set.orig_##f, set.f); \
4361  } \
4362  } \
4363  else { \
4364  rb_fd_no_init(&set.orig_##f); \
4365  } \
4366  } while (0)
4367  fd_init_copy(rset);
4368  fd_init_copy(wset);
4369  fd_init_copy(eset);
4370 #undef fd_init_copy
4371 
4372  return (int)rb_ensure(do_select, (VALUE)&set, select_set_free, (VALUE)&set);
4373 }
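
/*
 * A minimal usage sketch for rb_thread_fd_select() (illustrative; `fd` and
 * the 500ms timeout are assumptions of this example). Unlike a raw
 * select(2), it releases the GVL while blocking and services pending
 * interrupts, so other Ruby threads can run:
 *
 *   rb_fdset_t rfds;
 *   struct timeval tv = { 0, 500000 };
 *
 *   rb_fd_init(&rfds);
 *   rb_fd_set(fd, &rfds);
 *   int n = rb_thread_fd_select(fd + 1, &rfds, NULL, NULL, &tv);
 *   // n > 0: fd is ready, n == 0: timed out, n < 0: error (see errno)
 *   rb_fd_term(&rfds);
 */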
4374 
4375 #ifdef USE_POLL
4376 
4377 /* Same as the Linux kernel. TODO: make a platform-independent definition. */
4378 #define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
4379 #define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
4380 #define POLLEX_SET (POLLPRI)
4381 
4382 #ifndef POLLERR_SET /* defined for FreeBSD for now */
4383 # define POLLERR_SET (0)
4384 #endif
4385 
4386 static int
4387 wait_for_single_fd_blocking_region(rb_thread_t *th, struct pollfd *fds, nfds_t nfds,
4388  rb_hrtime_t *const to, volatile int *lerrno)
4389 {
4390  struct timespec ts;
4391  volatile int result = 0;
4392 
4393  *lerrno = 0;
4394  BLOCKING_REGION(th, {
4395  if (!RUBY_VM_INTERRUPTED(th->ec)) {
4396  result = ppoll(fds, nfds, rb_hrtime2timespec(&ts, to), 0);
4397  if (result < 0) *lerrno = errno;
4398  }
4399  }, ubf_select, th, TRUE);
4400  return result;
4401 }
4402 
4403 /*
4404  * returns a mask of events
4405  */
4406 int
4407 rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
4408 {
4409  struct pollfd fds[1] = {{
4410  .fd = fd,
4411  .events = (short)events,
4412  .revents = 0,
4413  }};
4414  volatile int result = 0;
4415  nfds_t nfds;
4416  struct waiting_fd wfd;
4417  enum ruby_tag_type state;
4418  volatile int lerrno;
4419 
4420  rb_execution_context_t *ec = GET_EC();
4421  rb_thread_t *th = rb_ec_thread_ptr(ec);
4422 
4423  thread_io_setup_wfd(th, fd, &wfd);
4424 
4425  if (timeout == NULL && thread_io_wait_events(th, fd, events, NULL)) {
4426  // fd is readable
4427  state = 0;
4428  fds[0].revents = events;
4429  errno = 0;
4430  }
4431  else {
4432  EC_PUSH_TAG(wfd.th->ec);
4433  if ((state = EC_EXEC_TAG()) == TAG_NONE) {
4434  rb_hrtime_t *to, rel, end = 0;
4435  RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
4436  timeout_prepare(&to, &rel, &end, timeout);
4437  do {
4438  nfds = numberof(fds);
4439  result = wait_for_single_fd_blocking_region(wfd.th, fds, nfds, to, &lerrno);
4440 
4441  RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
4442  } while (wait_retryable(&result, lerrno, to, end));
4443  }
4444  EC_POP_TAG();
4445  }
4446 
4447  thread_io_wake_pending_closer(&wfd);
4448 
4449  if (state) {
4450  EC_JUMP_TAG(wfd.th->ec, state);
4451  }
4452 
4453  if (result < 0) {
4454  errno = lerrno;
4455  return -1;
4456  }
4457 
4458  if (fds[0].revents & POLLNVAL) {
4459  errno = EBADF;
4460  return -1;
4461  }
4462 
4463  /*
4464  * POLLIN and POLLOUT have different meanings from select(2)'s read/write
4465  * bits, so fix the result up here.
4466  */
4467  result = 0;
4468  if (fds[0].revents & POLLIN_SET)
4469  result |= RB_WAITFD_IN;
4470  if (fds[0].revents & POLLOUT_SET)
4471  result |= RB_WAITFD_OUT;
4472  if (fds[0].revents & POLLEX_SET)
4473  result |= RB_WAITFD_PRI;
4474 
4475  /* all requested events are ready if there is an error */
4476  if (fds[0].revents & POLLERR_SET)
4477  result |= events;
4478 
4479  return result;
4480 }
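
/*
 * A minimal usage sketch (illustrative; `fd` and the timeout value are
 * assumptions of this example):
 *
 *   struct timeval tv = { 1, 500000 };      // 1.5 seconds
 *   int rv = rb_thread_wait_for_single_fd(fd, RB_WAITFD_IN, &tv);
 *
 *   if (rv < 0) rb_sys_fail("rb_thread_wait_for_single_fd");
 *   else if (rv & RB_WAITFD_IN) { ... fd is readable ... }
 *   else { ... timed out ... }
 */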
4481 #else /* ! USE_POLL - implement rb_io_poll_fd() using select() */
4482 struct select_args {
4483  union {
4484  int fd;
4485  int error;
4486  } as;
4487  rb_fdset_t *read;
4488  rb_fdset_t *write;
4489  rb_fdset_t *except;
4490  struct waiting_fd wfd;
4491  struct timeval *tv;
4492 };
4493 
4494 static VALUE
4495 select_single(VALUE ptr)
4496 {
4497  struct select_args *args = (struct select_args *)ptr;
4498  int r;
4499 
4500  r = rb_thread_fd_select(args->as.fd + 1,
4501  args->read, args->write, args->except, args->tv);
4502  if (r == -1)
4503  args->as.error = errno;
4504  if (r > 0) {
4505  r = 0;
4506  if (args->read && rb_fd_isset(args->as.fd, args->read))
4507  r |= RB_WAITFD_IN;
4508  if (args->write && rb_fd_isset(args->as.fd, args->write))
4509  r |= RB_WAITFD_OUT;
4510  if (args->except && rb_fd_isset(args->as.fd, args->except))
4511  r |= RB_WAITFD_PRI;
4512  }
4513  return (VALUE)r;
4514 }
4515 
4516 static VALUE
4517 select_single_cleanup(VALUE ptr)
4518 {
4519  struct select_args *args = (struct select_args *)ptr;
4520 
4521  thread_io_wake_pending_closer(&args->wfd);
4522  if (args->read) rb_fd_term(args->read);
4523  if (args->write) rb_fd_term(args->write);
4524  if (args->except) rb_fd_term(args->except);
4525 
4526  return (VALUE)-1;
4527 }
4528 
4529 static rb_fdset_t *
4530 init_set_fd(int fd, rb_fdset_t *fds)
4531 {
4532  if (fd < 0) {
4533  return 0;
4534  }
4535  rb_fd_init(fds);
4536  rb_fd_set(fd, fds);
4537 
4538  return fds;
4539 }
4540 
4541 int
4542 rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
4543 {
4544  rb_fdset_t rfds, wfds, efds;
4545  struct select_args args;
4546  int r;
4547  VALUE ptr = (VALUE)&args;
4548  rb_execution_context_t *ec = GET_EC();
4549  rb_thread_t *th = rb_ec_thread_ptr(ec);
4550 
4551  args.as.fd = fd;
4552  args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
4553  args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
4554  args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
4555  args.tv = timeout;
4556  thread_io_setup_wfd(th, fd, &args.wfd);
4557 
4558  r = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
4559  if (r == -1)
4560  errno = args.as.error;
4561 
4562  return r;
4563 }
4564 #endif /* ! USE_POLL */
4565 
4566 /*
4567  * for GC
4568  */
4569 
4570 #ifdef USE_CONSERVATIVE_STACK_END
4571 void
4572 rb_gc_set_stack_end(VALUE **stack_end_p)
4573 {
4574  VALUE stack_end;
4575 COMPILER_WARNING_PUSH
4576 #if __has_warning("-Wdangling-pointer")
4577 COMPILER_WARNING_IGNORED(-Wdangling-pointer);
4578 #endif
4579  *stack_end_p = &stack_end;
4580 COMPILER_WARNING_POP
4581 }
4582 #endif
4583 
4584 /*
4585  *
4586  */
4587 
4588 void
4589 rb_threadptr_check_signal(rb_thread_t *mth)
4590 {
4591  /* mth must be main_thread */
4592  if (rb_signal_buff_size() > 0) {
4593  /* wakeup main thread */
4594  threadptr_trap_interrupt(mth);
4595  }
4596 }
4597 
4598 static void
4599 async_bug_fd(const char *mesg, int errno_arg, int fd)
4600 {
4601  char buff[64];
4602  size_t n = strlcpy(buff, mesg, sizeof(buff));
4603  if (n < sizeof(buff)-3) {
4604  ruby_snprintf(buff+n, sizeof(buff)-n, "(%d)", fd);
4605  }
4606  rb_async_bug_errno(buff, errno_arg);
4607 }
4608 
4609 /* VM-dependent API is not available for this function */
4610 static int
4611 consume_communication_pipe(int fd)
4612 {
4613 #if USE_EVENTFD
4614  uint64_t buff[1];
4615 #else
4616  /* the buffer can be shared because no one refers to its contents */
4617  static char buff[1024];
4618 #endif
4619  ssize_t result;
4620  int ret = FALSE; /* for rb_sigwait_sleep */
4621 
4622  while (1) {
4623  result = read(fd, buff, sizeof(buff));
4624 #if USE_EVENTFD
4625  RUBY_DEBUG_LOG("resultf:%d buff:%lu", (int)result, (unsigned long)buff[0]);
4626 #else
4627  RUBY_DEBUG_LOG("result:%d", (int)result);
4628 #endif
4629  if (result > 0) {
4630  ret = TRUE;
4631  if (USE_EVENTFD || result < (ssize_t)sizeof(buff)) {
4632  return ret;
4633  }
4634  }
4635  else if (result == 0) {
4636  return ret;
4637  }
4638  else if (result < 0) {
4639  int e = errno;
4640  switch (e) {
4641  case EINTR:
4642  continue; /* retry */
4643  case EAGAIN:
4644 #if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
4645  case EWOULDBLOCK:
4646 #endif
4647  return ret;
4648  default:
4649  async_bug_fd("consume_communication_pipe: read", e, fd);
4650  }
4651  }
4652  }
4653 }
4654 
4655 void
4656 rb_thread_stop_timer_thread(void)
4657 {
4658  if (TIMER_THREAD_CREATED_P() && native_stop_timer_thread()) {
4659  native_reset_timer_thread();
4660  }
4661 }
4662 
4663 void
4664 rb_thread_reset_timer_thread(void)
4665 {
4666  native_reset_timer_thread();
4667 }
4668 
4669 void
4670 rb_thread_start_timer_thread(void)
4671 {
4672  system_working = 1;
4673  rb_thread_create_timer_thread();
4674 }
4675 
4676 static int
4677 clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
4678 {
4679  int i;
4680  VALUE coverage = (VALUE)val;
4681  VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
4682  VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
4683 
4684  if (lines) {
4685  if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
4686  rb_ary_clear(lines);
4687  }
4688  else {
4689  int i;
4690  for (i = 0; i < RARRAY_LEN(lines); i++) {
4691  if (RARRAY_AREF(lines, i) != Qnil)
4692  RARRAY_ASET(lines, i, INT2FIX(0));
4693  }
4694  }
4695  }
4696  if (branches) {
4697  VALUE counters = RARRAY_AREF(branches, 1);
4698  for (i = 0; i < RARRAY_LEN(counters); i++) {
4699  RARRAY_ASET(counters, i, INT2FIX(0));
4700  }
4701  }
4702 
4703  return ST_CONTINUE;
4704 }
4705 
4706 void
4707 rb_clear_coverages(void)
4708 {
4709  VALUE coverages = rb_get_coverages();
4710  if (RTEST(coverages)) {
4711  rb_hash_foreach(coverages, clear_coverage_i, 0);
4712  }
4713 }
4714 
4715 #if defined(HAVE_WORKING_FORK)
4716 
4717 static void
4718 rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const rb_thread_t *))
4719 {
4720  rb_thread_t *i = 0;
4721  rb_vm_t *vm = th->vm;
4722  rb_ractor_t *r = th->ractor;
4723  vm->ractor.main_ractor = r;
4724  vm->ractor.main_thread = th;
4725  r->threads.main = th;
4726  r->status_ = ractor_created;
4727 
4728  thread_sched_atfork(TH_SCHED(th));
4729  ubf_list_atfork();
4730 
4731  // OK: only this thread is running after fork, so walking these lists is safe:
4732  ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
4733  ccan_list_for_each(&r->threads.set, i, lt_node) {
4734  atfork(i, th);
4735  }
4736  }
4737  rb_vm_living_threads_init(vm);
4738 
4739  rb_ractor_atfork(vm, th);
4740  rb_vm_postponed_job_atfork();
4741 
4742  /* may be held by RJIT threads in parent */
4743  rb_native_mutex_initialize(&vm->workqueue_lock);
4744 
4745  /* may be held by any thread in parent */
4746  rb_native_mutex_initialize(&th->interrupt_lock);
4747  ccan_list_head_init(&th->interrupt_exec_tasks);
4748 
4749  vm->fork_gen++;
4750  rb_ractor_sleeper_threads_clear(th->ractor);
4751  rb_clear_coverages();
4752 
4753  // restart timer thread (timer threads access `vm->waitpid_lock` and so on)
4754  rb_thread_reset_timer_thread();
4755  rb_thread_start_timer_thread();
4756 
4757  VM_ASSERT(vm->ractor.blocking_cnt == 0);
4758  VM_ASSERT(vm->ractor.cnt == 1);
4759 }
4760 
4761 static void
4762 terminate_atfork_i(rb_thread_t *th, const rb_thread_t *current_th)
4763 {
4764  if (th != current_th) {
4765  rb_mutex_abandon_keeping_mutexes(th);
4766  rb_mutex_abandon_locking_mutex(th);
4767  thread_cleanup_func(th, TRUE);
4768  }
4769 }
4770 
4771 void rb_fiber_atfork(rb_thread_t *);
4772 void
4773 rb_thread_atfork(void)
4774 {
4775  rb_thread_t *th = GET_THREAD();
4776  rb_threadptr_pending_interrupt_clear(th);
4777  rb_thread_atfork_internal(th, terminate_atfork_i);
4778  th->join_list = NULL;
4779  rb_fiber_atfork(th);
4780 
4781  /* We don't want to reproduce CVE-2003-0900. */
4782  rb_reset_random_seed();
4783 }
4784 
4785 static void
4786 terminate_atfork_before_exec_i(rb_thread_t *th, const rb_thread_t *current_th)
4787 {
4788  if (th != current_th) {
4789  thread_cleanup_func_before_exec(th);
4790  }
4791 }
4792 
4793 void
4794 rb_thread_atfork_before_exec(void)
4795 {
4796  rb_thread_t *th = GET_THREAD();
4797  rb_thread_atfork_internal(th, terminate_atfork_before_exec_i);
4798 }
4799 #else
4800 void
4801 rb_thread_atfork(void)
4802 {
4803 }
4804 
4805 void
4806 rb_thread_atfork_before_exec(void)
4807 {
4808 }
4809 #endif
4811 struct thgroup {
4812  int enclosed;
4813 };
4814 
4815 static const rb_data_type_t thgroup_data_type = {
4816  "thgroup",
4817  {
4818  0,
4819  RUBY_TYPED_DEFAULT_FREE,
4820  NULL, // No external memory to report
4821  },
4822  0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
4823 };
4824 
4825 /*
4826  * Document-class: ThreadGroup
4827  *
4828  * ThreadGroup provides a means of keeping track of a number of threads as a
4829  * group.
4830  *
4831  * A given Thread object can only belong to one ThreadGroup at a time; adding
4832  * a thread to a new group will remove it from any previous group.
4833  *
4834  * Newly created threads belong to the same group as the thread from which they
4835  * were created.
4836  */
4837 
4838 /*
4839  * Document-const: Default
4840  *
4841  * The default ThreadGroup created when Ruby starts; all Threads belong to it
4842  * by default.
4843  */
4844 static VALUE
4845 thgroup_s_alloc(VALUE klass)
4846 {
4847  VALUE group;
4848  struct thgroup *data;
4849 
4850  group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
4851  data->enclosed = 0;
4852 
4853  return group;
4854 }
4855 
4856 /*
4857  * call-seq:
4858  * thgrp.list -> array
4859  *
4860  * Returns an array of all existing Thread objects that belong to this group.
4861  *
4862  * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
4863  */
4864 
4865 static VALUE
4866 thgroup_list(VALUE group)
4867 {
4868  VALUE ary = rb_ary_new();
4869  rb_thread_t *th = 0;
4870  rb_ractor_t *r = GET_RACTOR();
4871 
4872  ccan_list_for_each(&r->threads.set, th, lt_node) {
4873  if (th->thgroup == group) {
4874  rb_ary_push(ary, th->self);
4875  }
4876  }
4877  return ary;
4878 }
4879 
4880 
4881 /*
4882  * call-seq:
4883  * thgrp.enclose -> thgrp
4884  *
4885  * Prevents threads from being added to or removed from the receiving
4886  * ThreadGroup.
4887  *
4888  * New threads can still be started in an enclosed ThreadGroup.
4889  *
4890  * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
4891  * thr = Thread.new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
4892  * tg = ThreadGroup.new #=> #<ThreadGroup:0x402752d4>
4893  * tg.add thr
4894  * #=> ThreadError: can't move from the enclosed thread group
4895  */
4896 
4897 static VALUE
4898 thgroup_enclose(VALUE group)
4899 {
4900  struct thgroup *data;
4901 
4902  TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4903  data->enclosed = 1;
4904 
4905  return group;
4906 }
4907 
4908 
4909 /*
4910  * call-seq:
4911  * thgrp.enclosed? -> true or false
4912  *
4913  * Returns +true+ if the +thgrp+ is enclosed. See also ThreadGroup#enclose.
4914  */
4915 
4916 static VALUE
4917 thgroup_enclosed_p(VALUE group)
4918 {
4919  struct thgroup *data;
4920 
4921  TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4922  return RBOOL(data->enclosed);
4923 }
4924 
4925 
4926 /*
4927  * call-seq:
4928  * thgrp.add(thread) -> thgrp
4929  *
4930  * Adds the given +thread+ to this group, removing it from any other
4931  * group to which it may have previously been a member.
4932  *
4933  * puts "Initial group is #{ThreadGroup::Default.list}"
4934  * tg = ThreadGroup.new
4935  * t1 = Thread.new { sleep }
4936  * t2 = Thread.new { sleep }
4937  * puts "t1 is #{t1}"
4938  * puts "t2 is #{t2}"
4939  * tg.add(t1)
4940  * puts "Initial group now #{ThreadGroup::Default.list}"
4941  * puts "tg group now #{tg.list}"
4942  *
4943  * This will produce:
4944  *
4945  * Initial group is #<Thread:0x401bdf4c>
4946  * t1 is #<Thread:0x401b3c90>
4947  * t2 is #<Thread:0x401b3c18>
4948  * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
4949  * tg group now #<Thread:0x401b3c90>
4950  */
4951 
4952 static VALUE
4953 thgroup_add(VALUE group, VALUE thread)
4954 {
4955  rb_thread_t *target_th = rb_thread_ptr(thread);
4956  struct thgroup *data;
4957 
4958  if (OBJ_FROZEN(group)) {
4959  rb_raise(rb_eThreadError, "can't move to the frozen thread group");
4960  }
4961  TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4962  if (data->enclosed) {
4963  rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
4964  }
4965 
4966  if (OBJ_FROZEN(target_th->thgroup)) {
4967  rb_raise(rb_eThreadError, "can't move from the frozen thread group");
4968  }
4969  TypedData_Get_Struct(target_th->thgroup, struct thgroup, &thgroup_data_type, data);
4970  if (data->enclosed) {
4971  rb_raise(rb_eThreadError,
4972  "can't move from the enclosed thread group");
4973  }
4974 
4975  target_th->thgroup = group;
4976  return group;
4977 }
4978 
4979 /*
4980  * Document-class: ThreadShield
4981  */
4982 static void
4983 thread_shield_mark(void *ptr)
4984 {
4985  rb_gc_mark((VALUE)ptr);
4986 }
4987 
4988 static const rb_data_type_t thread_shield_data_type = {
4989  "thread_shield",
4990  {thread_shield_mark, 0, 0,},
4991  0, 0, RUBY_TYPED_FREE_IMMEDIATELY
4992 };
4993 
4994 static VALUE
4995 thread_shield_alloc(VALUE klass)
4996 {
4997  return TypedData_Wrap_Struct(klass, &thread_shield_data_type, (void *)mutex_alloc(0));
4998 }
4999 
5000 #define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
5001 #define THREAD_SHIELD_WAITING_MASK (((FL_USER19-1)&~(FL_USER0-1))|FL_USER19)
5002 #define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
5003 #define THREAD_SHIELD_WAITING_MAX (THREAD_SHIELD_WAITING_MASK>>THREAD_SHIELD_WAITING_SHIFT)
5004 STATIC_ASSERT(THREAD_SHIELD_WAITING_MAX, THREAD_SHIELD_WAITING_MAX <= UINT_MAX);
5005 static inline unsigned int
5006 rb_thread_shield_waiting(VALUE b)
5007 {
5008  return ((RBASIC(b)->flags&THREAD_SHIELD_WAITING_MASK)>>THREAD_SHIELD_WAITING_SHIFT);
5009 }
5010 
5011 static inline void
5012 rb_thread_shield_waiting_inc(VALUE b)
5013 {
5014  unsigned int w = rb_thread_shield_waiting(b);
5015  w++;
5016  if (w > THREAD_SHIELD_WAITING_MAX)
5017  rb_raise(rb_eRuntimeError, "waiting count overflow");
5018  RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
5019  RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
5020 }
5021 
5022 static inline void
5023 rb_thread_shield_waiting_dec(VALUE b)
5024 {
5025  unsigned int w = rb_thread_shield_waiting(b);
5026  if (!w) rb_raise(rb_eRuntimeError, "waiting count underflow");
5027  w--;
5028  RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
5029  RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
5030 }
5031 
5032 VALUE
5033 rb_thread_shield_new(void)
5034 {
5035  VALUE thread_shield = thread_shield_alloc(rb_cThreadShield);
5036  rb_mutex_lock((VALUE)DATA_PTR(thread_shield));
5037  return thread_shield;
5038 }
5039 
5040 bool
5041 rb_thread_shield_owned(VALUE self)
5042 {
5043  VALUE mutex = GetThreadShieldPtr(self);
5044  if (!mutex) return false;
5045 
5046  rb_mutex_t *m = mutex_ptr(mutex);
5047 
5048  return m->fiber == GET_EC()->fiber_ptr;
5049 }
5050 
5051 /*
5052  * Wait on a thread shield.
5053  *
5054  * Returns
5055  *   true:  acquired the thread shield
5056  *   false: the thread shield was destroyed and no other threads are waiting
5057  *   nil:   the thread shield was destroyed but is still in use
5058  */
5059 VALUE
5060 rb_thread_shield_wait(VALUE self)
5061 {
5062  VALUE mutex = GetThreadShieldPtr(self);
5063  rb_mutex_t *m;
5064 
5065  if (!mutex) return Qfalse;
5066  m = mutex_ptr(mutex);
5067  if (m->fiber == GET_EC()->fiber_ptr) return Qnil;
5068  rb_thread_shield_waiting_inc(self);
5069  rb_mutex_lock(mutex);
5070  rb_thread_shield_waiting_dec(self);
5071  if (DATA_PTR(self)) return Qtrue;
5072  rb_mutex_unlock(mutex);
5073  return rb_thread_shield_waiting(self) > 0 ? Qnil : Qfalse;
5074 }
5075 
5076 static VALUE
5077 thread_shield_get_mutex(VALUE self)
5078 {
5079  VALUE mutex = GetThreadShieldPtr(self);
5080  if (!mutex)
5081  rb_raise(rb_eThreadError, "destroyed thread shield - %p", (void *)self);
5082  return mutex;
5083 }
5084 
5085 /*
5086  * Release a thread shield, and return true if it has waiting threads.
5087  */
5088 VALUE
5089 rb_thread_shield_release(VALUE self)
5090 {
5091  VALUE mutex = thread_shield_get_mutex(self);
5092  rb_mutex_unlock(mutex);
5093  return RBOOL(rb_thread_shield_waiting(self) > 0);
5094 }
5095 
5096 /*
5097  * Release and destroy a thread shield, and return true if it has waiting threads.
5098  */
5099 VALUE
5100 rb_thread_shield_destroy(VALUE self)
5101 {
5102  VALUE mutex = thread_shield_get_mutex(self);
5103  DATA_PTR(self) = 0;
5104  rb_mutex_unlock(mutex);
5105  return RBOOL(rb_thread_shield_waiting(self) > 0);
5106 }
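
/*
 * A minimal lifecycle sketch for the ThreadShield helpers above
 * (illustrative; the two-thread scenario is an assumption of this example):
 *
 *   VALUE shield = rb_thread_shield_new();  // owner thread holds the shield
 *
 *   // other threads: rb_thread_shield_wait(shield) blocks here and later
 *   // returns Qtrue/Qfalse/Qnil as documented above.
 *
 *   // owner thread, when done:
 *   rb_thread_shield_release(shield);       // wake waiters, shield reusable
 *   // ...or tear it down for good:
 *   rb_thread_shield_destroy(shield);       // wake waiters, mark destroyed
 */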
5107 
5108 static VALUE
5109 threadptr_recursive_hash(rb_thread_t *th)
5110 {
5111  return th->ec->local_storage_recursive_hash;
5112 }
5113 
5114 static void
5115 threadptr_recursive_hash_set(rb_thread_t *th, VALUE hash)
5116 {
5117  th->ec->local_storage_recursive_hash = hash;
5118 }
5119 
5120 ID rb_frame_last_func(void);
5121 
5122 /*
5123  * Returns the current "recursive list" used to detect recursion.
5124  * This list is a hash table, unique for the current thread and for
5125  * the current __callee__.
5126  */
5127 
5128 static VALUE
5129 recursive_list_access(VALUE sym)
5130 {
5131  rb_thread_t *th = GET_THREAD();
5132  VALUE hash = threadptr_recursive_hash(th);
5133  VALUE list;
5134  if (NIL_P(hash) || !RB_TYPE_P(hash, T_HASH)) {
5135  hash = rb_ident_hash_new();
5136  threadptr_recursive_hash_set(th, hash);
5137  list = Qnil;
5138  }
5139  else {
5140  list = rb_hash_aref(hash, sym);
5141  }
5142  if (NIL_P(list) || !RB_TYPE_P(list, T_HASH)) {
5143  list = rb_ident_hash_new();
5144  rb_hash_aset(hash, sym, list);
5145  }
5146  return list;
5147 }
5148 
5149 /*
5150  * Returns true if and only if obj (or the pair <obj, paired_obj>) is already
5151  * in the recursion list.
5152  * Assumes the recursion list is valid.
5153  */
5154 
5155 static bool
5156 recursive_check(VALUE list, VALUE obj, VALUE paired_obj_id)
5157 {
5158 #if SIZEOF_LONG == SIZEOF_VOIDP
5159  #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
5160 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
5161  #define OBJ_ID_EQL(obj_id, other) (RB_BIGNUM_TYPE_P((obj_id)) ? \
5162  rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
5163 #endif
5164 
5165  VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5166  if (UNDEF_P(pair_list))
5167  return false;
5168  if (paired_obj_id) {
5169  if (!RB_TYPE_P(pair_list, T_HASH)) {
5170  if (!OBJ_ID_EQL(paired_obj_id, pair_list))
5171  return false;
5172  }
5173  else {
5174  if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
5175  return false;
5176  }
5177  }
5178  return true;
5179 }
5180 
5181 /*
5182  * Pushes obj (or the pair <obj, paired_obj>) in the recursion list.
5183  * For a single obj, it sets list[obj] to Qtrue.
5184  * For a pair, it sets list[obj] to paired_obj_id if possible,
5185  * otherwise list[obj] becomes a hash like:
5186  * {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
5187  * Assumes the recursion list is valid.
5188  */
5189 
5190 static void
5191 recursive_push(VALUE list, VALUE obj, VALUE paired_obj)
5192 {
5193  VALUE pair_list;
5194 
5195  if (!paired_obj) {
5196  rb_hash_aset(list, obj, Qtrue);
5197  }
5198  else if (UNDEF_P(pair_list = rb_hash_lookup2(list, obj, Qundef))) {
5199  rb_hash_aset(list, obj, paired_obj);
5200  }
5201  else {
5202  if (!RB_TYPE_P(pair_list, T_HASH)){
5203  VALUE other_paired_obj = pair_list;
5204  pair_list = rb_hash_new();
5205  rb_hash_aset(pair_list, other_paired_obj, Qtrue);
5206  rb_hash_aset(list, obj, pair_list);
5207  }
5208  rb_hash_aset(pair_list, paired_obj, Qtrue);
5209  }
5210 }
5211 
5212 /*
5213  * Pops obj (or the pair <obj, paired_obj>) from the recursion list.
5214  * For a pair, if list[obj] is a hash, then paired_obj_id is
5215  * removed from the hash and no attempt is made to simplify
5216  * list[obj] from {only_one_paired_id => true} to only_one_paired_id.
5217  * Assumes the recursion list is valid.
5218  */
5219 
5220 static int
5221 recursive_pop(VALUE list, VALUE obj, VALUE paired_obj)
5222 {
5223  if (paired_obj) {
5224  VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5225  if (UNDEF_P(pair_list)) {
5226  return 0;
5227  }
5228  if (RB_TYPE_P(pair_list, T_HASH)) {
5229  rb_hash_delete_entry(pair_list, paired_obj);
5230  if (!RHASH_EMPTY_P(pair_list)) {
5231  return 1; /* keep the hash until it is empty */
5232  }
5233  }
5234  }
5235  rb_hash_delete_entry(list, obj);
5236  return 1;
5237 }
5239 struct exec_recursive_params {
5240  VALUE (*func) (VALUE, VALUE, int);
5241  VALUE list;
5242  VALUE obj;
5243  VALUE pairid;
5244  VALUE arg;
5245 };
5246 
5247 static VALUE
5248 exec_recursive_i(RB_BLOCK_CALL_FUNC_ARGLIST(tag, data))
5249 {
5250  struct exec_recursive_params *p = (void *)data;
5251  return (*p->func)(p->obj, p->arg, FALSE);
5252 }
5253 
5254 /*
5255  * Calls func(obj, arg, recursive), where recursive is non-zero if the
5256  * current method is called recursively on obj, or on the pair <obj, pairid>.
5257  * If outer is 0, then the innermost func will be called with recursive set
5258  * to true; otherwise the outermost func will be called. In the latter case,
5259  * all inner funcs are short-circuited by throw.
5260  * Implementation detail: the value thrown is the recursive list, which is
5261  * specific to the current method and unlikely to be caught anywhere else.
5262  * list[recursive_key] is used as a flag for the outermost call.
5263  */
5264 
5265 static VALUE
5266 exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer, ID mid)
5267 {
5268  VALUE result = Qundef;
5269  const VALUE sym = mid ? ID2SYM(mid) : ID2SYM(idNULL);
5270  struct exec_recursive_params p;
5271  int outermost;
5272  p.list = recursive_list_access(sym);
5273  p.obj = obj;
5274  p.pairid = pairid;
5275  p.arg = arg;
5276  outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);
5277 
5278  if (recursive_check(p.list, p.obj, pairid)) {
5279  if (outer && !outermost) {
5280  rb_throw_obj(p.list, p.list);
5281  }
5282  return (*func)(obj, arg, TRUE);
5283  }
5284  else {
5285  enum ruby_tag_type state;
5286 
5287  p.func = func;
5288 
5289  if (outermost) {
5290  recursive_push(p.list, ID2SYM(recursive_key), 0);
5291  recursive_push(p.list, p.obj, p.pairid);
5292  result = rb_catch_protect(p.list, exec_recursive_i, (VALUE)&p, &state);
5293  if (!recursive_pop(p.list, p.obj, p.pairid)) goto invalid;
5294  if (!recursive_pop(p.list, ID2SYM(recursive_key), 0)) goto invalid;
5295  if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5296  if (result == p.list) {
5297  result = (*func)(obj, arg, TRUE);
5298  }
5299  }
5300  else {
5301  volatile VALUE ret = Qundef;
5302  recursive_push(p.list, p.obj, p.pairid);
5303  EC_PUSH_TAG(GET_EC());
5304  if ((state = EC_EXEC_TAG()) == TAG_NONE) {
5305  ret = (*func)(obj, arg, FALSE);
5306  }
5307  EC_POP_TAG();
5308  if (!recursive_pop(p.list, p.obj, p.pairid)) {
5309  goto invalid;
5310  }
5311  if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5312  result = ret;
5313  }
5314  }
5315  *(volatile struct exec_recursive_params *)&p;
5316  return result;
5317 
5318  invalid:
5319  rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list "
5320  "for %+"PRIsVALUE" in %+"PRIsVALUE,
5321  sym, rb_thread_current());
5322  UNREACHABLE_RETURN(Qundef);
5323 }
5324 
5325 /*
5326  * Calls func(obj, arg, recursive), where recursive is non-zero if the
5327  * current method is called recursively on obj
5328  */
5329 
5330 VALUE
5331 rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5332 {
5333  return exec_recursive(func, obj, 0, arg, 0, rb_frame_last_func());
5334 }
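
/*
 * A minimal usage sketch for rb_exec_recursive() (illustrative; `inspect_i`
 * is a hypothetical callback). `recursive` becomes true when the callback
 * re-enters for the same obj, e.g. via a self-referencing container:
 *
 *   static VALUE
 *   inspect_i(VALUE obj, VALUE arg, int recursive)
 *   {
 *       if (recursive) return rb_str_new_cstr("[...]");
 *       return rb_funcall(obj, rb_intern("inspect"), 0);
 *   }
 *
 *   // elsewhere:
 *   VALUE str = rb_exec_recursive(inspect_i, obj, Qnil);
 */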
5335 
5336 /*
5337  * Calls func(obj, arg, recursive), where recursive is non-zero if the
5338  * current method is called recursively on the ordered pair <obj, paired_obj>
5339  */
5340 
5341 VALUE
5342 rb_exec_recursive_paired(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5343 {
5344  return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 0, rb_frame_last_func());
5345 }
5346 
5347 /*
5348  * If recursion is detected on the current method and obj, the outermost
5349  * func will be called with (obj, arg, true). All inner func will be
5350  * short-circuited using throw.
5351  */
5352 
5353 VALUE
5354 rb_exec_recursive_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5355 {
5356  return exec_recursive(func, obj, 0, arg, 1, rb_frame_last_func());
5357 }
5358 
5359 VALUE
5360 rb_exec_recursive_outer_mid(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg, ID mid)
5361 {
5362  return exec_recursive(func, obj, 0, arg, 1, mid);
5363 }
5364 
5365 /*
5366  * If recursion is detected on the current method, obj and paired_obj,
5367  * the outermost func will be called with (obj, arg, true). All inner
5368  * func will be short-circuited using throw.
5369  */
5370 
5371 VALUE
5372 rb_exec_recursive_paired_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5373 {
5374  return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 1, rb_frame_last_func());
5375 }
5376 
5377 /*
5378  * call-seq:
5379  * thread.backtrace -> array or nil
5380  *
5381  * Returns the current backtrace of the target thread.
5382  *
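 * For example (output shown is illustrative):
 *
 *   thr = Thread.new { sleep }
 *   thr.backtrace #=> ["prog.rb:1:in 'sleep'", "prog.rb:1:in 'block in <main>'"]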
5383  */
5384 
5385 static VALUE
5386 rb_thread_backtrace_m(int argc, VALUE *argv, VALUE thval)
5387 {
5388  return rb_vm_thread_backtrace(argc, argv, thval);
5389 }
5390 
5391 /* call-seq:
5392  * thread.backtrace_locations(*args) -> array or nil
5393  *
5394  * Returns the execution stack for the target thread---an array containing
5395  * backtrace location objects.
5396  *
5397  * See Thread::Backtrace::Location for more information.
5398  *
5399  * This method behaves similarly to Kernel#caller_locations except it applies
5400  * to a specific thread.
5401  */
5402 static VALUE
5403 rb_thread_backtrace_locations_m(int argc, VALUE *argv, VALUE thval)
5404 {
5405  return rb_vm_thread_backtrace_locations(argc, argv, thval);
5406 }
5407 
5408 void
5409 Init_Thread_Mutex(void)
5410 {
5411  rb_thread_t *th = GET_THREAD();
5412 
5413  rb_native_mutex_initialize(&th->vm->workqueue_lock);
5414  rb_native_mutex_initialize(&th->interrupt_lock);
5415 }
5416 
5417 /*
5418  * Document-class: ThreadError
5419  *
5420  * Raised when an invalid operation is attempted on a thread.
5421  *
5422  * For example, when no other thread has been started:
5423  *
5424  * Thread.stop
5425  *
5426  * This will raise the following exception:
5427  *
5428  * ThreadError: stopping only thread
5429  * note: use sleep to stop forever
5430  */
5431 
5432 void
5433 Init_Thread(void)
5434 {
5435  VALUE cThGroup;
5436  rb_thread_t *th = GET_THREAD();
5437 
5438  sym_never = ID2SYM(rb_intern_const("never"));
5439  sym_immediate = ID2SYM(rb_intern_const("immediate"));
5440  sym_on_blocking = ID2SYM(rb_intern_const("on_blocking"));
5441 
5442  rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
5443  rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
5444  rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
5445  rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
5446  rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
5447  rb_define_singleton_method(rb_cThread, "stop", thread_stop, 0);
5448  rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
5449  rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
5450  rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
5451  rb_define_singleton_method(rb_cThread, "list", thread_list, 0);
5452  rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
5453  rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
5454  rb_define_singleton_method(rb_cThread, "report_on_exception", rb_thread_s_report_exc, 0);
5455  rb_define_singleton_method(rb_cThread, "report_on_exception=", rb_thread_s_report_exc_set, 1);
5456  rb_define_singleton_method(rb_cThread, "ignore_deadlock", rb_thread_s_ignore_deadlock, 0);
5457  rb_define_singleton_method(rb_cThread, "ignore_deadlock=", rb_thread_s_ignore_deadlock_set, 1);
5458  rb_define_singleton_method(rb_cThread, "handle_interrupt", rb_thread_s_handle_interrupt, 1);
5459  rb_define_singleton_method(rb_cThread, "pending_interrupt?", rb_thread_s_pending_interrupt_p, -1);
5460  rb_define_method(rb_cThread, "pending_interrupt?", rb_thread_pending_interrupt_p, -1);
5461 
5462  rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
5463  rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
5464  rb_define_method(rb_cThread, "join", thread_join_m, -1);
5465  rb_define_method(rb_cThread, "value", thread_value, 0);
5466  rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
5467  rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
5468  rb_define_method(rb_cThread, "exit", rb_thread_kill, 0);
5469  rb_define_method(rb_cThread, "run", rb_thread_run, 0);
5470  rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
5471  rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
5472  rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
5473  rb_define_method(rb_cThread, "fetch", rb_thread_fetch, -1);
5474  rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
5475  rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
5476  rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
5477  rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
5478  rb_define_method(rb_cThread, "status", rb_thread_status, 0);
5479  rb_define_method(rb_cThread, "thread_variable_get", rb_thread_variable_get, 1);
5480  rb_define_method(rb_cThread, "thread_variable_set", rb_thread_variable_set, 2);
5481  rb_define_method(rb_cThread, "thread_variables", rb_thread_variables, 0);
5482  rb_define_method(rb_cThread, "thread_variable?", rb_thread_variable_p, 1);
5483  rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
5484  rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
5485  rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
5486  rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
5487  rb_define_method(rb_cThread, "report_on_exception", rb_thread_report_exc, 0);
5488  rb_define_method(rb_cThread, "report_on_exception=", rb_thread_report_exc_set, 1);
5489  rb_define_method(rb_cThread, "group", rb_thread_group, 0);
5490  rb_define_method(rb_cThread, "backtrace", rb_thread_backtrace_m, -1);
5491  rb_define_method(rb_cThread, "backtrace_locations", rb_thread_backtrace_locations_m, -1);
5492 
5493  rb_define_method(rb_cThread, "name", rb_thread_getname, 0);
5494  rb_define_method(rb_cThread, "name=", rb_thread_setname, 1);
5495  rb_define_method(rb_cThread, "native_thread_id", rb_thread_native_thread_id, 0);
5496  rb_define_method(rb_cThread, "to_s", rb_thread_to_s, 0);
5497  rb_define_alias(rb_cThread, "inspect", "to_s");
5498 
5499  rb_vm_register_special_exception(ruby_error_stream_closed, rb_eIOError,
5500  "stream closed in another thread");
5501 
5502  cThGroup = rb_define_class("ThreadGroup", rb_cObject);
5503  rb_define_alloc_func(cThGroup, thgroup_s_alloc);
5504  rb_define_method(cThGroup, "list", thgroup_list, 0);
5505  rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
5506  rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
5507  rb_define_method(cThGroup, "add", thgroup_add, 1);
5508 
5509  {
5510  th->thgroup = th->ractor->thgroup_default = rb_obj_alloc(cThGroup);
5511  rb_define_const(cThGroup, "Default", th->thgroup);
5512  }
5513 
5515 
5516  /* init thread core */
5517  {
5518  /* main thread setting */
5519  {
5520  /* acquire global vm lock */
5521 #ifdef HAVE_PTHREAD_NP_H
5522  VM_ASSERT(TH_SCHED(th)->running == th);
5523 #endif
5524  // thread_sched_to_running() should not be called because
5525  // it assumes blocked by thread_sched_to_waiting().
5526  // thread_sched_to_running(sched, th);
5527 
5528  th->pending_interrupt_queue = rb_ary_hidden_new(0);
5529  th->pending_interrupt_queue_checked = 0;
5530  th->pending_interrupt_mask_stack = rb_ary_hidden_new(0);
5531  }
5532  }
5533 
5534  rb_thread_create_timer_thread();
5535 
5536  Init_thread_sync();
5537 
5538  // TODO: Suppress unused function warning for now
5539  // if (0) rb_thread_sched_destroy(NULL);
5540 }
5541 
5542 int
5543 ruby_native_thread_p(void)
5544 {
5545  rb_thread_t *th = ruby_thread_from_native();
5546 
5547  return th != 0;
5548 }
5549 
5550 #ifdef NON_SCALAR_THREAD_ID
5551  #define thread_id_str(th) (NULL)
5552 #else
5553  #define thread_id_str(th) ((void *)(uintptr_t)(th)->nt->thread_id)
5554 #endif
5555 
5556 static void
5557 debug_deadlock_check(rb_ractor_t *r, VALUE msg)
5558 {
5559  rb_thread_t *th = 0;
5560  VALUE sep = rb_str_new_cstr("\n ");
5561 
5562  rb_str_catf(msg, "\n%d threads, %d sleeps current:%p main thread:%p\n",
5563  rb_ractor_living_thread_num(r), rb_ractor_sleeper_thread_num(r),
5564  (void *)GET_THREAD(), (void *)r->threads.main);
5565 
5566  ccan_list_for_each(&r->threads.set, th, lt_node) {
5567  rb_str_catf(msg, "* %+"PRIsVALUE"\n rb_thread_t:%p "
5568  "native:%p int:%u",
5569  th->self, (void *)th, th->nt ? thread_id_str(th) : "N/A", th->ec->interrupt_flag);
5570 
5571  if (th->locking_mutex) {
5572  rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5573  rb_str_catf(msg, " mutex:%p cond:%"PRIuSIZE,
5574  (void *)mutex->fiber, rb_mutex_num_waiting(mutex));
5575  }
5576 
5577  {
5578  struct rb_waiting_list *list = th->join_list;
5579  while (list) {
5580  rb_str_catf(msg, "\n depended by: rb_thread_id:%p", (void *)list->thread);
5581  list = list->next;
5582  }
5583  }
5584  rb_str_catf(msg, "\n ");
5585  rb_str_concat(msg, rb_ary_join(rb_ec_backtrace_str_ary(th->ec, 0, 0), sep));
5586  rb_str_catf(msg, "\n");
5587  }
5588 }
5589 
5590 static void
5591 rb_check_deadlock(rb_ractor_t *r)
5592 {
5593  if (GET_THREAD()->vm->thread_ignore_deadlock) return;
5594 
5595 #ifdef RUBY_THREAD_PTHREAD_H
5596  if (r->threads.sched.readyq_cnt > 0) return;
5597 #endif
5598 
5599  int sleeper_num = rb_ractor_sleeper_thread_num(r);
5600  int ltnum = rb_ractor_living_thread_num(r);
5601 
5602  if (ltnum > sleeper_num) return;
5603  if (ltnum < sleeper_num) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
5604 
5605  int found = 0;
5606  rb_thread_t *th = NULL;
5607 
5608  ccan_list_for_each(&r->threads.set, th, lt_node) {
5609  if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th->ec)) {
5610  found = 1;
5611  }
5612  else if (th->locking_mutex) {
5613  rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5614  if (mutex->fiber == th->ec->fiber_ptr || (!mutex->fiber && !ccan_list_empty(&mutex->waitq))) {
5615  found = 1;
5616  }
5617  }
5618  if (found)
5619  break;
5620  }
5621 
5622  if (!found) {
5623  VALUE argv[2];
5624  argv[0] = rb_eFatal;
5625  argv[1] = rb_str_new2("No live threads left. Deadlock?");
5626  debug_deadlock_check(r, argv[1]);
5627  rb_ractor_sleeper_threads_dec(GET_RACTOR());
5628  rb_threadptr_raise(r->threads.main, 2, argv);
5629  }
5630 }
5631 
5632 // Used for VM memsize reporting. Returns the size of a list of waiting_fd
5633 // structs. Defined here because the struct definition lives here as well.
5634 size_t
5635 rb_vm_memsize_waiting_fds(struct ccan_list_head *waiting_fds)
5636 {
5637  struct waiting_fd *waitfd = 0;
5638  size_t size = 0;
5639 
5640  ccan_list_for_each(waiting_fds, waitfd, wfd_node) {
5641  size += sizeof(struct waiting_fd);
5642  }
5643 
5644  return size;
5645 }
5646 
5647 static void
5648 update_line_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5649 {
5650  const rb_control_frame_t *cfp = GET_EC()->cfp;
5651  VALUE coverage = rb_iseq_coverage(cfp->iseq);
5652  if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5653  VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
5654  if (lines) {
5655  long line = rb_sourceline() - 1;
5656  long count;
5657  VALUE num;
5658  void rb_iseq_clear_event_flags(const rb_iseq_t *iseq, size_t pos, rb_event_flag_t reset);
5659  if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
5660  rb_iseq_clear_event_flags(cfp->iseq, cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1, RUBY_EVENT_COVERAGE_LINE);
5661  rb_ary_push(lines, LONG2FIX(line + 1));
5662  return;
5663  }
5664  if (line >= RARRAY_LEN(lines)) { /* no longer tracked */
5665  return;
5666  }
5667  num = RARRAY_AREF(lines, line);
5668  if (!FIXNUM_P(num)) return;
5669  count = FIX2LONG(num) + 1;
5670  if (POSFIXABLE(count)) {
5671  RARRAY_ASET(lines, line, LONG2FIX(count));
5672  }
5673  }
5674  }
5675 }
5676 
5677 static void
5678 update_branch_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5679 {
5680  const rb_control_frame_t *cfp = GET_EC()->cfp;
5681  VALUE coverage = rb_iseq_coverage(cfp->iseq);
5682  if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5683  VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
5684  if (branches) {
5685  long pc = cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1;
5686  long idx = FIX2INT(RARRAY_AREF(ISEQ_PC2BRANCHINDEX(cfp->iseq), pc)), count;
5687  VALUE counters = RARRAY_AREF(branches, 1);
5688  VALUE num = RARRAY_AREF(counters, idx);
5689  count = FIX2LONG(num) + 1;
5690  if (POSFIXABLE(count)) {
5691  RARRAY_ASET(counters, idx, LONG2FIX(count));
5692  }
5693  }
5694  }
5695 }
5696 
5697 const rb_method_entry_t *
5698 rb_resolve_me_location(const rb_method_entry_t *me, VALUE resolved_location[5])
5699 {
5700  VALUE path, beg_pos_lineno, beg_pos_column, end_pos_lineno, end_pos_column;
5701 
5702  if (!me->def) return NULL; // negative cme
5703 
5704  retry:
5705  switch (me->def->type) {
5706  case VM_METHOD_TYPE_ISEQ: {
5707  const rb_iseq_t *iseq = me->def->body.iseq.iseqptr;
5708  rb_iseq_location_t *loc = &ISEQ_BODY(iseq)->location;
5709  path = rb_iseq_path(iseq);
5710  beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5711  beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5712  end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5713  end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5714  break;
5715  }
5716  case VM_METHOD_TYPE_BMETHOD: {
5717  const rb_iseq_t *iseq = rb_proc_get_iseq(me->def->body.bmethod.proc, 0);
5718  if (iseq) {
5719  rb_iseq_location_t *loc;
5720  rb_iseq_check(iseq);
5721  path = rb_iseq_path(iseq);
5722  loc = &ISEQ_BODY(iseq)->location;
5723  beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5724  beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5725  end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5726  end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5727  break;
5728  }
5729  return NULL;
5730  }
5731  case VM_METHOD_TYPE_ALIAS:
5732  me = me->def->body.alias.original_me;
5733  goto retry;
5734  case VM_METHOD_TYPE_REFINED:
5735  me = me->def->body.refined.orig_me;
5736  if (!me) return NULL;
5737  goto retry;
5738  default:
5739  return NULL;
5740  }
5741 
5742  /* found */
5743  if (RB_TYPE_P(path, T_ARRAY)) {
5744  path = rb_ary_entry(path, 1);
5745  if (!RB_TYPE_P(path, T_STRING)) return NULL; /* just in case... */
5746  }
5747  if (resolved_location) {
5748  resolved_location[0] = path;
5749  resolved_location[1] = beg_pos_lineno;
5750  resolved_location[2] = beg_pos_column;
5751  resolved_location[3] = end_pos_lineno;
5752  resolved_location[4] = end_pos_column;
5753  }
5754  return me;
5755 }
5756 
5757 static void
5758 update_method_coverage(VALUE me2counter, rb_trace_arg_t *trace_arg)
5759 {
5760  const rb_control_frame_t *cfp = GET_EC()->cfp;
5761  const rb_callable_method_entry_t *cme = rb_vm_frame_method_entry(cfp);
5762  const rb_method_entry_t *me = (const rb_method_entry_t *)cme;
5763  VALUE rcount;
5764  long count;
5765 
5766  me = rb_resolve_me_location(me, 0);
5767  if (!me) return;
5768 
5769  rcount = rb_hash_aref(me2counter, (VALUE) me);
5770  count = FIXNUM_P(rcount) ? FIX2LONG(rcount) + 1 : 1;
5771  if (POSFIXABLE(count)) {
5772  rb_hash_aset(me2counter, (VALUE) me, LONG2FIX(count));
5773  }
5774 }
5775 
5776 VALUE
5777 rb_get_coverages(void)
5778 {
5779  return GET_VM()->coverages;
5780 }
5781 
5782 int
5783 rb_get_coverage_mode(void)
5784 {
5785  return GET_VM()->coverage_mode;
5786 }
5787 
5788 void
5789 rb_set_coverages(VALUE coverages, int mode, VALUE me2counter)
5790 {
5791  GET_VM()->coverages = coverages;
5792  GET_VM()->me2counter = me2counter;
5793  GET_VM()->coverage_mode = mode;
5794 }
5795 
5796 void
5797 rb_resume_coverages(void)
5798 {
5799  int mode = GET_VM()->coverage_mode;
5800  VALUE me2counter = GET_VM()->me2counter;
5801  rb_add_event_hook2((rb_event_hook_func_t) update_line_coverage, RUBY_EVENT_COVERAGE_LINE, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5802  if (mode & COVERAGE_TARGET_BRANCHES) {
5803  rb_add_event_hook2((rb_event_hook_func_t) update_branch_coverage, RUBY_EVENT_COVERAGE_BRANCH, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5804  }
5805  if (mode & COVERAGE_TARGET_METHODS) {
5806  rb_add_event_hook2((rb_event_hook_func_t) update_method_coverage, RUBY_EVENT_CALL, me2counter, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5807  }
5808 }
5809 
5810 void
5811 rb_suspend_coverages(void)
5812 {
5813  rb_remove_event_hook((rb_event_hook_func_t) update_line_coverage);
5814  if (GET_VM()->coverage_mode & COVERAGE_TARGET_BRANCHES) {
5815  rb_remove_event_hook((rb_event_hook_func_t) update_branch_coverage);
5816  }
5817  if (GET_VM()->coverage_mode & COVERAGE_TARGET_METHODS) {
5818  rb_remove_event_hook((rb_event_hook_func_t) update_method_coverage);
5819  }
5820 }
5821 
5822 /* Make coverage arrays empty so old covered files are no longer tracked. */
5823 void
5824 rb_reset_coverages(void)
5825 {
5826  rb_clear_coverages();
5827  rb_iseq_remove_coverage_all();
5828  GET_VM()->coverages = Qfalse;
5829 }
5830 
5831 VALUE
5832 rb_default_coverage(int n)
5833 {
5834  VALUE coverage = rb_ary_hidden_new_fill(3);
5835  VALUE lines = Qfalse, branches = Qfalse;
5836  int mode = GET_VM()->coverage_mode;
5837 
5838  if (mode & COVERAGE_TARGET_LINES) {
5839  lines = n > 0 ? rb_ary_hidden_new_fill(n) : rb_ary_hidden_new(0);
5840  }
5841  RARRAY_ASET(coverage, COVERAGE_INDEX_LINES, lines);
5842 
5843  if (mode & COVERAGE_TARGET_BRANCHES) {
5844  branches = rb_ary_hidden_new_fill(2);
5845  /* internal data structures for branch coverage:
5846  *
5847  * { branch base node =>
5848  * [base_type, base_first_lineno, base_first_column, base_last_lineno, base_last_column, {
5849  * branch target id =>
5850  * [target_type, target_first_lineno, target_first_column, target_last_lineno, target_last_column, target_counter_index],
5851  * ...
5852  * }],
5853  * ...
5854  * }
5855  *
5856  * Example:
5857  * { NODE_CASE =>
5858  * [:case, 1, 0, 4, 3, {
5859  * NODE_WHEN => [:when, 2, 8, 2, 9, 0],
5860  * NODE_WHEN => [:when, 3, 8, 3, 9, 1],
5861  * ...
5862  * }],
5863  * ...
5864  * }
5865  */
5866  VALUE structure = rb_hash_new();
5867  rb_obj_hide(structure);
5868  RARRAY_ASET(branches, 0, structure);
5869  /* branch execution counters */
5870  RARRAY_ASET(branches, 1, rb_ary_hidden_new(0));
5871  }
5872  RARRAY_ASET(coverage, COVERAGE_INDEX_BRANCHES, branches);
5873 
5874  return coverage;
5875 }
5876 
5877 static VALUE
5878 uninterruptible_exit(VALUE v)
5879 {
5880  rb_thread_t *cur_th = GET_THREAD();
5881  rb_ary_pop(cur_th->pending_interrupt_mask_stack);
5882 
5883  cur_th->pending_interrupt_queue_checked = 0;
5884  if (!rb_threadptr_pending_interrupt_empty_p(cur_th)) {
5885  RUBY_VM_SET_INTERRUPT(cur_th->ec);
5886  }
5887  return Qnil;
5888 }
5889 
5890 VALUE
5891 rb_uninterruptible(VALUE (*b_proc)(VALUE), VALUE data)
5892 {
5893  VALUE interrupt_mask = rb_ident_hash_new();
5894  rb_thread_t *cur_th = GET_THREAD();
5895 
5896  rb_hash_aset(interrupt_mask, rb_cObject, sym_never);
5897  OBJ_FREEZE(interrupt_mask);
5898  rb_ary_push(cur_th->pending_interrupt_mask_stack, interrupt_mask);
5899 
5900  VALUE ret = rb_ensure(b_proc, data, uninterruptible_exit, Qnil);
5901 
5902  RUBY_VM_CHECK_INTS(cur_th->ec);
5903  return ret;
5904 }
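/* Editorial sketch (not part of the upstream source): rb_uninterruptible()
 * runs b_proc with asynchronous interrupts (Thread#raise, Thread#kill, ...)
 * deferred, equivalent to Thread.handle_interrupt(Object => :never), and
 * re-checks any deferred interrupt on the way out. A hypothetical caller:
 *
 *   static VALUE
 *   write_critical_record(VALUE arg)
 *   {
 *       // async interrupts are deferred while this body runs
 *       return Qnil;
 *   }
 *
 *   VALUE result = rb_uninterruptible(write_critical_record, (VALUE)0);
 */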
5905 
5906 static void
5907 thread_specific_storage_alloc(rb_thread_t *th)
5908 {
5909  VM_ASSERT(th->specific_storage == NULL);
5910 
5911  if (UNLIKELY(specific_key_count > 0)) {
5912  th->specific_storage = ZALLOC_N(void *, RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5913  }
5914 }
5915 
5916 rb_internal_thread_specific_key_t
5917 rb_internal_thread_specific_key_create(void)
5918 {
5919  rb_vm_t *vm = GET_VM();
5920 
5921  if (specific_key_count == 0 && vm->ractor.cnt > 1) {
5922  rb_raise(rb_eThreadError, "the first rb_internal_thread_specific_key_create() must be called before any additional ractor is created");
5923  }
5924  else if (specific_key_count >= RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX) {
5925  rb_raise(rb_eThreadError, "rb_internal_thread_specific_key_create() is called more than %d times", RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5926  }
5927  else {
5928  rb_internal_thread_specific_key_t key = specific_key_count++;
5929 
5930  if (key == 0) {
5931  // first key: allocate specific storage for all existing threads (a single ractor is guaranteed by the check above)
5932  rb_ractor_t *cr = GET_RACTOR();
5933  rb_thread_t *th;
5934 
5935  ccan_list_for_each(&cr->threads.set, th, lt_node) {
5936  thread_specific_storage_alloc(th);
5937  }
5938  }
5939  return key;
5940  }
5941 }
5942 
5943 // async and native thread safe.
5944 void *
5945 rb_internal_thread_specific_get(VALUE thread_val, rb_internal_thread_specific_key_t key)
5946 {
5947  rb_thread_t *th = DATA_PTR(thread_val);
5948 
5949  VM_ASSERT(rb_thread_ptr(thread_val) == th);
5950  VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5951  VM_ASSERT(th->specific_storage);
5952 
5953  return th->specific_storage[key];
5954 }
5955 
5956 // async and native thread safe.
5957 void
5958 rb_internal_thread_specific_set(VALUE thread_val, rb_internal_thread_specific_key_t key, void *data)
5959 {
5960  rb_thread_t *th = DATA_PTR(thread_val);
5961 
5962  VM_ASSERT(rb_thread_ptr(thread_val) == th);
5963  VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5964  VM_ASSERT(th->specific_storage);
5965 
5966  th->specific_storage[key] = data;
5967 }
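/* Editorial sketch (not part of the upstream source): typical use of the
 * thread-specific storage API above by a C-level tool. The first key must be
 * created while only one ractor exists; get/set are async and native thread
 * safe, so they may be called from event hooks or other native threads.
 *
 *   static rb_internal_thread_specific_key_t tool_key;
 *
 *   void
 *   tool_init(void)  // called early, before additional ractors are created
 *   {
 *       tool_key = rb_internal_thread_specific_key_create();
 *   }
 *
 *   void
 *   tool_on_event(VALUE thread)
 *   {
 *       void *buf = rb_internal_thread_specific_get(thread, tool_key);
 *       if (buf == NULL) {
 *           buf = calloc(1, 4096);  // tool-owned memory, freed by the tool
 *           rb_internal_thread_specific_set(thread, tool_key, buf);
 *       }
 *   }
 */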
5968 
5969 // interrupt_exec
5971 struct rb_interrupt_exec_task {
5972  struct ccan_list_node node;
5973 
5974  rb_interrupt_exec_func_t *func;
5975  void *data;
5976  enum rb_interrupt_exec_flag flags;
5977 };
5978 
5979 void
5980 rb_threadptr_interrupt_exec_task_mark(rb_thread_t *th)
5981 {
5982  struct rb_interrupt_exec_task *task;
5983 
5984  ccan_list_for_each(&th->interrupt_exec_tasks, task, node) {
5985  if (task->flags & rb_interrupt_exec_flag_value_data) {
5986  rb_gc_mark((VALUE)task->data);
5987  }
5988  }
5989 }
5990 
5991 // native thread safe
5992 // `th` must remain valid for the duration of this call
5993 void
5994 rb_threadptr_interrupt_exec(rb_thread_t *th, rb_interrupt_exec_func_t *func, void *data, enum rb_interrupt_exec_flag flags)
5995 {
5996  // TODO: should not use ALLOC here: ruby_xmalloc can raise and trigger GC, which is unsafe from arbitrary native threads
5997  struct rb_interrupt_exec_task *task = ALLOC(struct rb_interrupt_exec_task);
5998  *task = (struct rb_interrupt_exec_task) {
5999  .flags = flags,
6000  .func = func,
6001  .data = data,
6002  };
6003 
6004  rb_native_mutex_lock(&th->interrupt_lock);
6005  {
6006  ccan_list_add_tail(&th->interrupt_exec_tasks, &task->node);
6007  threadptr_interrupt_locked(th, true);
6008  }
6009  rb_native_mutex_unlock(&th->interrupt_lock);
6010 }
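/* Editorial sketch (not part of the upstream source): enqueueing a task for
 * another thread. `func` runs on `th` at its next interrupt check via
 * threadptr_interrupt_exec_exec() below; pass
 * rb_interrupt_exec_flag_value_data when `data` is a VALUE so that
 * rb_threadptr_interrupt_exec_task_mark() above keeps it alive. The zero
 * flag value rb_interrupt_exec_flag_none is assumed from internal/thread.h.
 *
 *   static void
 *   run_on_target(void *data)
 *   {
 *       // executed by the target thread, with the GVL held
 *   }
 *
 *   rb_threadptr_interrupt_exec(target_th, run_on_target, NULL,
 *                               rb_interrupt_exec_flag_none);
 */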
6011 
6012 static void
6013 threadptr_interrupt_exec_exec(rb_thread_t *th)
6014 {
6015  while (1) {
6016  struct rb_interrupt_exec_task *task;
6017 
6018  rb_native_mutex_lock(&th->interrupt_lock);
6019  {
6020  task = ccan_list_pop(&th->interrupt_exec_tasks, struct rb_interrupt_exec_task, node);
6021  }
6022  rb_native_mutex_unlock(&th->interrupt_lock);
6023 
6024  if (task) {
6025  (*task->func)(task->data);
6026  ruby_xfree(task);
6027  }
6028  else {
6029  break;
6030  }
6031  }
6032 }
6033 
6034 static void
6035 threadptr_interrupt_exec_cleanup(rb_thread_t *th)
6036 {
6037  rb_native_mutex_lock(&th->interrupt_lock);
6038  {
6039  struct rb_interrupt_exec_task *task;
6040 
6041  while ((task = ccan_list_pop(&th->interrupt_exec_tasks, struct rb_interrupt_exec_task, node)) != NULL) {
6042  ruby_xfree(task);
6043  }
6044  }
6045  rb_native_mutex_unlock(&th->interrupt_lock);
6046 }
6047 
6048 struct interrupt_ractor_new_thread_data {
6049  rb_interrupt_exec_func_t *func;
6050  void *data;
6051 };
6052 
6053 static VALUE
6054 interrupt_ractor_new_thread_func(void *data)
6055 {
6056  struct interrupt_ractor_new_thread_data d = *(struct interrupt_ractor_new_thread_data *)data;
6057  ruby_xfree(data);
6058 
6059  d.func(d.data);
6060  return Qnil;
6061 }
6062 
6063 static VALUE
6064 interrupt_ractor_func(void *data)
6065 {
6066  rb_thread_create(interrupt_ractor_new_thread_func, data);
6067  return Qnil;
6068 }
6069 
6070 // native thread safe
6071 // func/data should be native thread safe
6072 void
6073 rb_ractor_interrupt_exec(struct rb_ractor_struct *target_r,
6074  rb_interrupt_exec_func_t *func, void *data, enum rb_interrupt_exec_flag flags)
6075 {
6076  struct interrupt_ractor_new_thread_data *d = ALLOC(struct interrupt_ractor_new_thread_data);
6077 
6078  d->func = func;
6079  d->data = data;
6080  rb_thread_t *main_th = target_r->threads.main;
6081  rb_threadptr_interrupt_exec(main_th, interrupt_ractor_func, d, flags);
6082 
6083  // TODO/MEMO: we could create the new thread in the target ractor directly, but it is not clear how to do that yet.
6084 }
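/* Editorial sketch (not part of the upstream source): rb_ractor_interrupt_exec()
 * interrupts the target ractor's main thread, which then spawns a fresh Ruby
 * thread (interrupt_ractor_new_thread_func above) to run `func`. Both `func`
 * and `data` must be native thread safe because they cross ractor boundaries.
 *
 *   static void
 *   notify_ractor(void *data)
 *   {
 *       // runs on a new thread inside target_r
 *   }
 *
 *   rb_ractor_interrupt_exec(target_r, notify_ractor, NULL,
 *                            rb_interrupt_exec_flag_none); // flag name as assumed above
 */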