#include "ccan/list/list.h"
static VALUE rb_cMutex, rb_cQueue, rb_cSizedQueue, rb_cConditionVariable;
static VALUE rb_eClosedQueueError;
/* rb_mutex_t fields */
rb_serial_t ec_serial;
struct ccan_list_head waitq; /* protected by GVL */

/* struct sync_waiter: links one blocked thread/fiber into a waitq */
struct ccan_list_node node;

/* nonblocking_fiber(): yields NULL for a blocking fiber, which has no scheduler */
if (rb_fiberptr_blocking(fiber)) {
#define MUTEX_ALLOW_TRAP FL_USER1
static void
sync_wakeup(struct ccan_list_head *head, long max)
{
    RUBY_DEBUG_LOG("max:%ld", max);

    struct sync_waiter *cur = 0, *next;

    ccan_list_for_each_safe(head, cur, next, node) {
        ccan_list_del_init(&cur->node);

        if (cur->th->status != THREAD_KILLED) {
            if (cur->th->scheduler != Qnil && cur->fiber) {
                /* ... wake the waiter through its fiber scheduler ... */
            }
            else {
                RUBY_DEBUG_LOG("target_th:%u", rb_th_serial(cur->th));
                rb_threadptr_interrupt(cur->th);
                cur->th->status = THREAD_RUNNABLE;
            }

            if (--max == 0) return;
        }
    }
}
static void
wakeup_one(struct ccan_list_head *head)
{
    sync_wakeup(head, 1);
}

static void
wakeup_all(struct ccan_list_head *head)
{
    sync_wakeup(head, LONG_MAX);
}
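/*
 * Sketch (hypothetical caller, not from this file): the wake budget is the
 * only difference between the two helpers above, so releasing exactly three
 * waiters would be sync_wakeup(head, 3).  SizedQueue#max= uses the same
 * entry point with a computed budget: sync_wakeup(szqueue_pushq(sq), diff).
 */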
#if defined(HAVE_WORKING_FORK)
static void rb_mutex_abandon_all(rb_mutex_t *mutexes);
static void rb_mutex_abandon_keeping_mutexes(rb_thread_t *th);
static void rb_mutex_abandon_locking_mutex(rb_thread_t *th);
#endif
/* counting the waiters parked on a mutex */
ccan_list_for_each(&mutex->waitq, w, node) {
    /* ... */
}

/* mutex_locked_p(): held iff an owner's execution context serial is recorded */
return mutex->ec_serial != 0;
/* mutex_free(): force-unlock a still-locked mutex before the object dies */
if (mutex_locked_p(mutex)) {
    const char *err = rb_mutex_unlock_th(mutex, mutex->th, 0);
    if (err) rb_bug("%s", err);
}
static size_t
mutex_memsize(const void *ptr)
{
    /* ... */
}

static const rb_data_type_t mutex_data_type = {
    "mutex",
    {NULL, mutex_free, mutex_memsize,},
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY
};
VALUE
rb_obj_is_mutex(VALUE obj)
{
    return RBOOL(rb_typeddata_is_kind_of(obj, &mutex_data_type));
}

static VALUE
mutex_alloc(VALUE klass)
{
    /* ... */
    ccan_list_head_init(&mutex->waitq);
    /* ... */
}

VALUE
rb_mutex_new(void)
{
    return mutex_alloc(rb_cMutex);
}

/* Mutex#locked? */
    return RBOOL(mutex_locked_p(mutex));
/* thread_mutex_insert(): push the mutex onto the thread's keeping_mutexes list */
if (thread->keeping_mutexes) {
    mutex->next_mutex = thread->keeping_mutexes;
}
thread->keeping_mutexes = mutex;

/* thread_mutex_remove(): unlink via a pointer-to-pointer walk */
rb_mutex_t **keeping_mutexes = &thread->keeping_mutexes;

while (*keeping_mutexes && *keeping_mutexes != mutex) {
    keeping_mutexes = &(*keeping_mutexes)->next_mutex;
}

if (*keeping_mutexes) {
    *keeping_mutexes = mutex->next_mutex;
    mutex->next_mutex = NULL;
}
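/*
 * Aside: the removal above is the classic pointer-to-pointer unlink, which
 * needs no head special-case.  A self-contained sketch with hypothetical
 * types (not part of this file):
 */
#if 0
struct node { struct node *next; };

static void
unlink_node(struct node **head, struct node *target)
{
    struct node **pp = head;
    while (*pp && *pp != target) pp = &(*pp)->next; /* walk the links, not the nodes */
    if (*pp) {
        *pp = target->next; /* rewriting *pp updates head and interior links alike */
        target->next = NULL;
    }
}
#endif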
/* mutex_set_owner() */
    mutex->ec_serial = ec_serial;
    /* ... */

/* mutex_locked(): record ownership and chain the mutex on its owner thread */
    mutex_set_owner(mutex, th, ec_serial);
    thread_mutex_insert(th, mutex);

/* do_mutex_trylock(): only succeeds while no owner serial is set */
    if (mutex->ec_serial == 0) {
        RUBY_DEBUG_LOG("%p ok", mutex);
        mutex_locked(mutex, th, ec_serial);
        return true;
    }
    else {
        RUBY_DEBUG_LOG("%p ng", mutex);
        return false;
    }

/* rb_mut_trylock() */
    return RBOOL(do_mutex_trylock(mutex_ptr(self), ec->thread_ptr, rb_ec_serial(ec)));

/* rb_mutex_trylock() */
    return rb_mut_trylock(GET_EC(), self);
static VALUE
mutex_owned_p(rb_serial_t ec_serial, rb_mutex_t *mutex)
{
    return RBOOL(mutex->ec_serial == ec_serial);
}
static VALUE
call_rb_fiber_scheduler_block(VALUE mutex)
{
    /* ... */
}

static VALUE
delete_from_waitq(VALUE value)
{
    /* ... */
}

/* mutex_args_init(): fill a struct mutex_args for the given mutex */
    /* ... */
    args->mutex = mutex_ptr(mutex);
static VALUE
do_mutex_lock(struct mutex_args *args, int interruptible_p)
{
    VALUE self = args->self;
    rb_mutex_t *mutex = args->mutex;
    rb_execution_context_t *ec = args->ec;
    rb_thread_t *th = ec->thread_ptr;
    rb_fiber_t *fiber = ec->fiber_ptr;
    rb_serial_t ec_serial = rb_ec_serial(ec);
    rb_atomic_t saved_ints = 0;

    /* locking from a trap handler is refused unless explicitly allowed */
    if (!FL_TEST_RAW(self, MUTEX_ALLOW_TRAP) &&
        th->ec->interrupt_mask & TRAP_INTERRUPT_MASK) {
        rb_raise(rb_eThreadError, "can't be called from trap context");
    }

    if (!do_mutex_trylock(mutex, th, ec_serial)) {
        if (mutex->ec_serial == ec_serial) {
            rb_raise(rb_eThreadError, "deadlock; recursive locking");
        }

        while (mutex->ec_serial != ec_serial) {
            VM_ASSERT(mutex->ec_serial != 0);

            VALUE scheduler = rb_fiber_scheduler_current();
            if (scheduler != Qnil) {
                struct sync_waiter sync_waiter = {
                    .self = self,
                    .th = th,
                    .fiber = nonblocking_fiber(fiber)
                };

                ccan_list_add_tail(&mutex->waitq, &sync_waiter.node);
                /* ... block through the fiber scheduler ... */

                if (!mutex->ec_serial) {
                    mutex_set_owner(mutex, th, ec_serial);
                }
            }
            else {
                if (!th->vm->thread_ignore_deadlock && mutex->th == th) {
                    rb_raise(rb_eThreadError,
                             "deadlock; lock already owned by another fiber belonging to the same thread");
                }

                struct sync_waiter sync_waiter = {
                    .self = self,
                    .th = th,
                    .fiber = nonblocking_fiber(fiber),
                };

                RUBY_DEBUG_LOG("%p wait", mutex);

                enum rb_thread_status prev_status = th->status;
                th->status = THREAD_STOPPED_FOREVER;
                rb_ractor_sleeper_threads_inc(th->ractor);
                rb_check_deadlock(th->ractor);
                /* ... */
                th->locking_mutex = self;

                ccan_list_add_tail(&mutex->waitq, &sync_waiter.node);
                /* ... */
                native_sleep(th, NULL);
                /* ... */

                if (!mutex->ec_serial) {
                    mutex_set_owner(mutex, th, ec_serial);
                }

                rb_ractor_sleeper_threads_dec(th->ractor);
                th->status = prev_status;
                th->locking_mutex = Qfalse;

                RUBY_DEBUG_LOG("%p wakeup", mutex);
            }

            if (interruptible_p) {
                /* drop the lock before checking interrupts; the check may raise */
                if (mutex->ec_serial == ec_serial) {
                    /* ... */
                    mutex->ec_serial = 0;
                }
                RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
                if (!mutex->ec_serial) {
                    mutex_set_owner(mutex, th, ec_serial);
                }
            }
            else {
                /* uninterruptible: stash pending interrupts for later delivery */
                if (RUBY_VM_INTERRUPTED(th->ec)) {
                    if (saved_ints == 0) {
                        saved_ints = threadptr_get_interrupts(th);
                    }
                    else {
                        /* ... */
                        threadptr_get_interrupts(th);
                    }
                }
            }
        }

        if (saved_ints) th->ec->interrupt_flag = saved_ints;
        if (mutex->ec_serial == ec_serial) mutex_locked(mutex, th, ec_serial);
    }

    RUBY_DEBUG_LOG("%p locked", mutex);

    /* we must hold the lock when we leave */
    if (mutex_owned_p(ec_serial, mutex) == Qfalse) rb_bug("do_mutex_lock: mutex is not owned.");
    /* ... */
}
static VALUE
mutex_lock_uninterruptible(VALUE self)
{
    struct mutex_args args;
    mutex_args_init(&args, self);
    return do_mutex_lock(&args, 0);
}

/* interruptible lock taking an explicit ec */
    struct mutex_args args = {
        /* ... */
        .mutex = mutex_ptr(self),
        /* ... */
    };
    return do_mutex_lock(&args, 1);

/* rb_mutex_lock() */
    struct mutex_args args;
    mutex_args_init(&args, self);
    return do_mutex_lock(&args, 1);
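/*
 * Usage sketch for the public C API (hypothetical extension code; the my_*
 * names are not from this file):
 */
#if 0
static VALUE my_lock;            /* created once, e.g. my_lock = rb_mutex_new(); */

static void
my_update_shared_state(void)
{
    rb_mutex_lock(my_lock);      /* interruptible, like Mutex#lock */
    /* ... mutate state shared between Ruby threads ... */
    rb_mutex_unlock(my_lock);    /* fails loudly if we do not own the lock */
}
#endif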
/* rb_mut_owned_p() */
    return mutex_owned_p(rb_ec_serial(ec), mutex_ptr(self));

VALUE
rb_mutex_owned_p(VALUE self)
{
    return rb_mut_owned_p(GET_EC(), self);
}
/* rb_mutex_unlock_th(): returns NULL on success, else an error message */
static const char *
rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t *th, rb_serial_t ec_serial)
{
    RUBY_DEBUG_LOG("%p", mutex);

    if (mutex->ec_serial == 0) {
        return "Attempt to unlock a mutex which is not locked";
    }
    else if (ec_serial && mutex->ec_serial != ec_serial) {
        return "Attempt to unlock a mutex which is locked by another thread/fiber";
    }
    /* ... */

    mutex->ec_serial = 0;
    thread_mutex_remove(th, mutex);

    ccan_list_for_each_safe(&mutex->waitq, cur, next, node) {
        ccan_list_del_init(&cur->node);

        if (cur->th->scheduler != Qnil && cur->fiber) {
            /* ... hand the waiter back to its fiber scheduler ... */
        }
        else {
            switch (cur->th->status) {
              case THREAD_RUNNABLE:
              case THREAD_STOPPED_FOREVER:
                RUBY_DEBUG_LOG("wakeup th:%u", rb_th_serial(cur->th));
                rb_threadptr_interrupt(cur->th);
                /* ... */
              case THREAD_STOPPED:
                rb_bug("unexpected THREAD_STOPPED");
              case THREAD_KILLED:
                rb_bug("unexpected THREAD_KILLED");
            }
        }
    }
    /* ... */
}
/* do_mutex_unlock() */
    /* ... */
    err = rb_mutex_unlock_th(mutex, th, rb_ec_serial(args->ec));
    /* ... raises ThreadError when err is non-NULL ... */

static VALUE
do_mutex_unlock_safe(VALUE args)
{
    /* ... */
}

/* rb_mutex_unlock() */
    struct mutex_args args;
    mutex_args_init(&args, self);
    do_mutex_unlock(&args);

/* rb_mut_unlock(): ec-aware variant, also used by rb_mut_sleep() */
    struct mutex_args args = {
        /* ... */
        .mutex = mutex_ptr(self),
        /* ... */
    };
    do_mutex_unlock(&args);
#if defined(HAVE_WORKING_FORK)
static void
rb_mutex_abandon_keeping_mutexes(rb_thread_t *th)
{
    rb_mutex_abandon_all(th->keeping_mutexes);
    th->keeping_mutexes = NULL;
}

static void
rb_mutex_abandon_locking_mutex(rb_thread_t *th)
{
    if (th->locking_mutex) {
        rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);

        ccan_list_head_init(&mutex->waitq);
        th->locking_mutex = Qfalse;
    }
}

static void
rb_mutex_abandon_all(rb_mutex_t *mutexes)
{
    while (mutexes) {
        rb_mutex_t *mutex = mutexes;
        mutexes = mutex->next_mutex;
        mutex->ec_serial = 0;
        mutex->next_mutex = 0;
        ccan_list_head_init(&mutex->waitq);
    }
}
#endif
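/*
 * After fork() only the forking thread exists in the child, so these helpers
 * do not unlock anything: they zero the owner serial, cut the keeping_mutexes
 * chain, and reset each waitq, abandoning waiters that no longer exist.
 */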
static VALUE
mutex_sleep_begin(VALUE _arguments)
{
    /* ... */
    VALUE timeout = arguments->timeout;
    VALUE woken = Qtrue;

    VALUE scheduler = rb_fiber_scheduler_current();
    if (scheduler != Qnil) {
        rb_fiber_scheduler_kernel_sleep(scheduler, timeout);
    }
    else {
        if (NIL_P(timeout)) {
            rb_thread_sleep_deadly_allow_spurious_wakeup(arguments->self, Qnil, 0);
        }
        else {
            struct timeval timeout_value = rb_time_interval(timeout);
            rb_hrtime_t relative_timeout = rb_timeval2hrtime(&timeout_value);
            woken = RBOOL(sleep_hrtime(GET_THREAD(), relative_timeout, 0));
        }
    }

    return woken;
}

static VALUE
rb_mut_sleep(rb_execution_context_t *ec, VALUE self, VALUE timeout)
{
    if (!NIL_P(timeout)) {
        /* validate the interval before releasing the lock */
        rb_time_interval(timeout);
    }

    rb_mut_unlock(ec, self);
    time_t beg = time(0);
    /* ... */

    VALUE woken = rb_ec_ensure(ec, mutex_sleep_begin, (VALUE)&arguments, mutex_lock_uninterruptible, self);

    RUBY_VM_CHECK_INTS_BLOCKING(ec);
    if (!woken) return Qnil;

    time_t end = time(0) - beg;
    return TIMET2NUM(end);
}

VALUE
rb_mutex_sleep(VALUE self, VALUE timeout)
{
    return rb_mut_sleep(GET_EC(), self, timeout);
}
VALUE
rb_mutex_synchronize(VALUE self, VALUE (*func)(VALUE arg), VALUE arg)
{
    struct mutex_args args;

    mutex_args_init(&args, self);
    do_mutex_lock(&args, 1);
    return rb_ec_ensure(args.ec, func, arg, do_mutex_unlock_safe, (VALUE)&args);
}

static VALUE
do_ec_yield(VALUE _ec)
{
    /* ... yield the block ... */
}

/* Mutex#synchronize: the same lock/ensure-unlock shape around the block */
    struct mutex_args args = {
        /* ... */
        .mutex = mutex_ptr(self),
        /* ... */
    };

    do_mutex_lock(&args, 1);
    return rb_ec_ensure(args.ec, do_ec_yield, (VALUE)ec, do_mutex_unlock_safe, (VALUE)&args);
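/*
 * Usage sketch for rb_mutex_synchronize() (hypothetical my_* names, not
 * from this file):
 */
#if 0
static VALUE
my_critical(VALUE arg)
{
    /* runs with the lock held; a raise here still unlocks via the ensure */
    return arg;
}

/* ... rb_mutex_synchronize(my_lock, my_critical, Qnil); ... */
#endif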
void
rb_mutex_allow_trap(VALUE self, int val)
{
    Check_TypedStruct(self, &mutex_data_type);

    if (val)
        FL_SET_RAW(self, MUTEX_ALLOW_TRAP);
    else
        FL_UNSET_RAW(self, MUTEX_ALLOW_TRAP);
}
#define queue_waitq(q) UNALIGNED_MEMBER_PTR(q, waitq)
#define queue_list(q) UNALIGNED_MEMBER_PTR(q, que)
RBIMPL_ATTR_PACKED_STRUCT_UNALIGNED_BEGIN()
struct rb_queue {
    struct ccan_list_head waitq;
    rb_serial_t fork_gen;
    const VALUE que;
    int num_waiting;
} RBIMPL_ATTR_PACKED_STRUCT_UNALIGNED_END();

#define szqueue_waitq(sq) UNALIGNED_MEMBER_PTR(sq, q.waitq)
#define szqueue_list(sq) UNALIGNED_MEMBER_PTR(sq, q.que)
#define szqueue_pushq(sq) UNALIGNED_MEMBER_PTR(sq, pushq)
RBIMPL_ATTR_PACKED_STRUCT_UNALIGNED_BEGIN()
struct rb_szqueue {
    struct rb_queue q;
    int num_waiting_push;
    struct ccan_list_head pushq;
    long max;
} RBIMPL_ATTR_PACKED_STRUCT_UNALIGNED_END();
static void
queue_mark_and_move(void *ptr)
{
    struct rb_queue *q = ptr;

    /* the backing array may sit unaligned inside the packed struct */
    rb_gc_mark_and_move((VALUE *)UNALIGNED_MEMBER_PTR(q, que));
}

static size_t
queue_memsize(const void *ptr)
{
    /* ... */
}

static const rb_data_type_t queue_data_type = {
    /* ... */
    .dmark = queue_mark_and_move,
    .dfree = RUBY_TYPED_DEFAULT_FREE,
    .dsize = queue_memsize,
    .dcompact = queue_mark_and_move,
    /* ... */
    .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
};
static VALUE
queue_alloc(VALUE klass)
{
    /* ... */
    ccan_list_head_init(queue_waitq(q));
    /* ... */
}

/* queue_fork_check(): lazily reset a queue inherited across fork() */
static int
queue_fork_check(struct rb_queue *q)
{
    rb_serial_t fork_gen = GET_VM()->fork_gen;

    if (q->fork_gen == fork_gen) {
        return 0; /* same process generation: nothing to do */
    }
    /* forked child: the parent's waiters are gone, reinitialize */
    q->fork_gen = fork_gen;
    ccan_list_head_init(queue_waitq(q));
    /* ... */
    return 1;
}
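/*
 * fork_gen is a process-wide generation counter bumped by fork(); stamping
 * it per queue lets a child detect, on first use, that a queue's waiters
 * belong to the parent process and must be discarded.  SizedQueue reuses
 * this check and additionally resets its push-side queue (see szqueue_ptr()
 * below).
 */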
#define QUEUE_CLOSED FL_USER5
/* convert a Ruby timeout to an absolute deadline; 0 means "no deadline" */
static rb_hrtime_t
queue_timeout2hrtime(VALUE timeout)
{
    if (NIL_P(timeout)) {
        return (rb_hrtime_t)0;
    }
    rb_hrtime_t rel = 0;
    if (FIXNUM_P(timeout)) {
        rel = rb_sec2hrtime(NUM2TIMET(timeout));
    }
    /* ... */
    return rb_hrtime_add(rel, rb_hrtime_now());
}
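/*
 * Example: pop(timeout: 2) computes end = now + 2s once, up front; the wait
 * loops below then compare rb_hrtime_now() >= end after every wakeup.  A nil
 * timeout returns 0 here, and all callers guard the deadline check with
 * !NIL_P(timeout), so 0 never acts as a real deadline.
 */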
static void
szqueue_mark_and_move(void *ptr)
{
    struct rb_szqueue *sq = ptr;

    queue_mark_and_move(&sq->q);
}

static size_t
szqueue_memsize(const void *ptr)
{
    /* ... */
}

static const rb_data_type_t szqueue_data_type = {
    /* ... */
    .dmark = szqueue_mark_and_move,
    .dfree = RUBY_TYPED_DEFAULT_FREE,
    .dsize = szqueue_memsize,
    .dcompact = szqueue_mark_and_move,
    /* ... */
    .parent = &queue_data_type,
    .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
};

static VALUE
szqueue_alloc(VALUE klass)
{
    struct rb_szqueue *sq;
    VALUE obj = TypedData_Make_Struct(klass, struct rb_szqueue,
                                      &szqueue_data_type, sq);
    ccan_list_head_init(szqueue_waitq(sq));
    ccan_list_head_init(szqueue_pushq(sq));
    return obj;
}

static struct rb_szqueue *
szqueue_ptr(VALUE obj)
{
    /* ... */
    if (queue_fork_check(&sq->q)) {
        /* after fork: also reset the push-side wait queue */
        ccan_list_head_init(szqueue_pushq(sq));
        sq->num_waiting_push = 0;
    }
    /* ... */
}
/* check_array(): a queue must still have its backing array */
if (RB_LIKELY(ary)) {
    return ary;
}
else {
    rb_raise(rb_eTypeError, "%+"PRIsVALUE" not initialized", obj);
}
static int
queue_closed_p(VALUE self)
{
    return FL_TEST_RAW(self, QUEUE_CLOSED) != 0;
}

NORETURN(static void raise_closed_queue_error(VALUE self));

static void
raise_closed_queue_error(VALUE self)
{
    rb_raise(rb_eClosedQueueError, "queue closed");
}
static VALUE
rb_queue_initialize(int argc, VALUE *argv, VALUE self)
{
    VALUE initial;
    struct rb_queue *q = queue_ptr(self);
    if ((argc = rb_scan_args(argc, argv, "01", &initial)) == 1) {
        initial = rb_to_array(initial);
    }
    /* ... */
    ccan_list_head_init(queue_waitq(q));
    /* ... concat `initial` into the backing array when given ... */
}
/* queue_do_push(): pushing to a closed queue raises; otherwise wake one popper */
if (queue_closed_p(self)) {
    raise_closed_queue_error(self);
}
/* ... append obj to the backing array ... */
wakeup_one(queue_waitq(q));
static VALUE
rb_queue_close(VALUE self)
{
    struct rb_queue *q = queue_ptr(self);

    if (!queue_closed_p(self)) {
        FL_SET(self, QUEUE_CLOSED);
        /* ... */
        wakeup_all(queue_waitq(q));
    }
    /* ... */
}

static VALUE
rb_queue_closed_p(VALUE self)
{
    return RBOOL(queue_closed_p(self));
}
/* Queue#push */
    return queue_do_push(self, queue_ptr(self), obj);
static VALUE
queue_sleep(VALUE _args)
{
    /* ... */
    rb_thread_sleep_deadly_allow_spurious_wakeup(args->self, args->timeout, args->end);
    /* ... */
}

/* ensure-callbacks: remove the waiter from its list and drop the counter */
static VALUE
queue_sleep_done(VALUE p)
{
    /* ... */
    ccan_list_del(&qw->w.node);
    qw->as.q->num_waiting--;
    /* ... */
}

static VALUE
szqueue_sleep_done(VALUE p)
{
    /* ... */
    ccan_list_del(&qw->w.node);
    qw->as.sq->num_waiting_push--;
    /* ... */
}
/* queue_do_pop() */
    check_array(self, q->que);
    /* ... */
    if (RTEST(non_block)) {
        /* ... non-blocking: raise instead of waiting ... */
    }
    else {
        rb_hrtime_t end = queue_timeout2hrtime(timeout);
        /* wait loop, re-checked after every wakeup: */
        if (queue_closed_p(self)) {
            return queue_closed_result(self, q);
        }
        /* ... */
            .w = {.self = self, .th = ec->thread_ptr, .fiber = nonblocking_fiber(ec->fiber_ptr)},
        /* ... */
        struct ccan_list_head *waitq = queue_waitq(q);
        /* ... park on waitq; queue_sleep_done() undoes it ... */
        if (!NIL_P(timeout) && (rb_hrtime_now() >= end))
            break;
        /* ... */
    }

/* Queue#pop */
    return queue_do_pop(ec, self, queue_ptr(self), non_block, timeout);
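/*
 * The shape above is the standard condition-wait loop: re-check the
 * predicate (still empty? closed?) after every wakeup, because a wakeup
 * only means "something changed", and give up once the absolute deadline
 * computed by queue_timeout2hrtime() has passed.
 */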
static VALUE
rb_queue_empty_p(VALUE self)
{
    return RBOOL(queue_length(self, queue_ptr(self)) == 0);
}

static VALUE
rb_queue_clear(VALUE self)
{
    struct rb_queue *q = queue_ptr(self);
    /* ... */
}

static VALUE
rb_queue_length(VALUE self)
{
    return LONG2NUM(queue_length(self, queue_ptr(self)));
}
NORETURN(static VALUE rb_queue_freeze(VALUE self));

static VALUE
rb_queue_freeze(VALUE self)
{
    rb_raise(rb_eTypeError, "cannot freeze " "%+"PRIsVALUE, self);
    UNREACHABLE_RETURN(self);
}
static VALUE
rb_queue_num_waiting(VALUE self)
{
    struct rb_queue *q = queue_ptr(self);

    return INT2NUM(q->num_waiting);
}
/* rb_szqueue_initialize(): a SizedQueue must have a positive capacity */
        rb_raise(rb_eArgError, "queue size must be positive");
    /* ... */
    ccan_list_head_init(szqueue_waitq(sq));
    ccan_list_head_init(szqueue_pushq(sq));
static VALUE
rb_szqueue_close(VALUE self)
{
    if (!queue_closed_p(self)) {
        struct rb_szqueue *sq = szqueue_ptr(self);

        FL_SET(self, QUEUE_CLOSED);
        wakeup_all(szqueue_waitq(sq));
        wakeup_all(szqueue_pushq(sq));
    }
    /* ... */
}
static VALUE
rb_szqueue_max_get(VALUE self)
{
    return LONG2NUM(szqueue_ptr(self)->max);
}
/* rb_szqueue_max_set(): enlarging the queue frees up to `diff` blocked pushers */
    if (max <= 0) {
        rb_raise(rb_eArgError, "queue size must be positive");
    }
    /* ... */
    if (max > sq->max) {
        diff = max - sq->max;
    }
    /* ... */
    sync_wakeup(szqueue_pushq(sq), diff);
/* rb_szqueue_push(): block (or raise/time out) while the queue is full */
if (queue_length(self, &sq->q) >= sq->max) {
    if (RTEST(non_block)) {
        /* ... raise instead of waiting ... */
    }
    else {
        rb_hrtime_t end = queue_timeout2hrtime(timeout);
        while (queue_length(self, &sq->q) >= sq->max) {
            if (queue_closed_p(self)) {
                raise_closed_queue_error(self);
            }
            else {
                /* ... */
                    .w = {.self = self, .th = ec->thread_ptr, .fiber = nonblocking_fiber(ec->fiber_ptr)},
                /* ... */
                struct ccan_list_head *pushq = szqueue_pushq(sq);
                sq->num_waiting_push++;
                /* ... sleep on pushq; szqueue_sleep_done() undoes it ... */
                if (!NIL_P(timeout) && rb_hrtime_now() >= end) {
                    break;
                }
            }
        }
    }
}

return queue_do_push(self, &sq->q, object);
/* SizedQueue#pop: after taking an element, release one blocked pusher */
VALUE retval = queue_do_pop(ec, self, &sq->q, non_block, timeout);

if (queue_length(self, &sq->q) < sq->max) {
    wakeup_one(szqueue_pushq(sq));
}
/* ... */
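/*
 * Flow control is symmetric: queue_do_push() wakes one popper via
 * wakeup_one(queue_waitq(q)), and a successful pop frees a slot and wakes
 * one blocked pusher here.
 */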
static VALUE
rb_szqueue_clear(VALUE self)
{
    struct rb_szqueue *sq = szqueue_ptr(self);

    /* ... */
    wakeup_all(szqueue_pushq(sq));
    /* ... */
}

static VALUE
rb_szqueue_num_waiting(VALUE self)
{
    struct rb_szqueue *sq = szqueue_ptr(self);

    return INT2NUM(sq->q.num_waiting + sq->num_waiting_push);
}
/* ConditionVariable */
struct rb_condvar {
    struct ccan_list_head waitq;
    rb_serial_t fork_gen;
};

static size_t
condvar_memsize(const void *ptr)
{
    /* ... */
}

/* the rb_data_type_t for ConditionVariable */
    /* ... */
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY|RUBY_TYPED_WB_PROTECTED

static struct rb_condvar *
condvar_ptr(VALUE self)
{
    /* ... */
    rb_serial_t fork_gen = GET_VM()->fork_gen;
    /* ... */
    /* a forked child reinitializes the wait queue inherited from the parent */
    if (cv->fork_gen != fork_gen) {
        cv->fork_gen = fork_gen;
        ccan_list_head_init(&cv->waitq);
    }
    /* ... */
}

static VALUE
condvar_alloc(VALUE klass)
{
    /* ... */
    ccan_list_head_init(&cv->waitq);
    /* ... */
}
/* sleeping on the mutex: fast path for a real Mutex, else call #sleep dynamically */
if (CLASS_OF(p->mutex) == rb_cMutex) {
    return rb_mut_sleep(p->ec, p->mutex, p->timeout);
}
else {
    return rb_funcallv(p->mutex, id_sleep, 1, &p->timeout);
}

/* ConditionVariable#wait: enqueue the caller, then sleep via the mutex */
struct sync_waiter sync_waiter = {
    /* ... */
    .th = ec->thread_ptr,
    .fiber = nonblocking_fiber(ec->fiber_ptr)
};

ccan_list_add_tail(&cv->waitq, &sync_waiter.node);
/* ... */

/* ConditionVariable#signal */
    wakeup_one(&cv->waitq);

/* ConditionVariable#broadcast */
    wakeup_all(&cv->waitq);
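/*
 * ConditionVariable keeps no state beyond its waitq: #wait enqueues the
 * caller and sleeps through the mutex's own sleep path (so the lock is
 * released and retaken around the wait), #signal dequeues one waiter,
 * #broadcast all of them.  As with the queues, wakeups may be spurious,
 * so Ruby-level callers are expected to re-check their predicate in a loop.
 */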
static VALUE
undumpable(VALUE obj)
{
    /* ... raises TypeError: these objects cannot be marshaled ... */
}

static VALUE
define_thread_class(VALUE outer, const ID name, VALUE super)
{
    /* ... defines the class under `outer` and exposes it as a top-level constant ... */
}
static void
Init_thread_sync(void)
{
#if defined(TEACH_RDOC) && TEACH_RDOC == 42
    /* ... rdoc-only class stubs ... */
#endif

#define DEFINE_CLASS(name, super) \
    rb_c##name = define_thread_class(rb_cThread, rb_intern(#name), rb_c##super)
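/*
 * Expansion sketch: DEFINE_CLASS(SizedQueue, Queue) becomes
 *
 *     rb_cSizedQueue = define_thread_class(rb_cThread,
 *                                          rb_intern("SizedQueue"),
 *                                          rb_cQueue);
 *
 * i.e. each class lives under Thread (Thread::SizedQueue < Thread::Queue),
 * and define_thread_class() also exposes it as a top-level constant.
 */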
    DEFINE_CLASS(Mutex, Object);
    /* ... Mutex instance methods ... */

    DEFINE_CLASS(Queue, Object);
    /* ... Queue instance methods ... */

    DEFINE_CLASS(SizedQueue, Queue);
    /* ... */
    rb_define_method(rb_cSizedQueue, "num_waiting", rb_szqueue_num_waiting, 0);
    /* ... */

    DEFINE_CLASS(ConditionVariable, Object);
    /* ... ConditionVariable instance methods ... */

    id_sleep = rb_intern("sleep");
    /* ... */
}
#include "thread_sync.rbinc"