/* included by thread.c */
#include "ccan/list/list.h"

static VALUE rb_cMutex, rb_cQueue, rb_cSizedQueue, rb_cConditionVariable;
static VALUE rb_eClosedQueueError;

/* Mutex */
typedef struct rb_mutex_struct {
    /* ... */
    struct ccan_list_head waitq; /* protected by GVL */
} rb_mutex_t;

/* sync_waiter is always on-stack */
struct sync_waiter {
    VALUE self;
    rb_thread_t *th;
    rb_fiber_t *fiber;
    struct ccan_list_node node;
};

/* Returns NULL for blocking fibers, so callers fall back to the
 * thread-based wakeup path instead of the fiber scheduler. */
static inline rb_fiber_t *
nonblocking_fiber(rb_fiber_t *fiber)
{
    if (rb_fiberptr_blocking(fiber)) {
        return NULL;
    }

    return fiber;
}

#define MUTEX_ALLOW_TRAP FL_USER1
static void
sync_wakeup(struct ccan_list_head *head, long max)
{
    RUBY_DEBUG_LOG("max:%ld", max);

    struct sync_waiter *cur = 0, *next;

    ccan_list_for_each_safe(head, cur, next, node) {
        ccan_list_del_init(&cur->node);

        if (cur->th->status != THREAD_KILLED) {
            if (cur->th->scheduler != Qnil && cur->fiber) {
                rb_fiber_scheduler_unblock(cur->th->scheduler, cur->self, rb_fiberptr_self(cur->fiber));
            }
            else {
                RUBY_DEBUG_LOG("target_th:%u", rb_th_serial(cur->th));
                rb_threadptr_interrupt(cur->th);
                cur->th->status = THREAD_RUNNABLE;
            }

            if (--max == 0) return;
        }
    }
}

static void
wakeup_one(struct ccan_list_head *head)
{
    sync_wakeup(head, 1);
}

static void
wakeup_all(struct ccan_list_head *head)
{
    sync_wakeup(head, LONG_MAX);
}
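/* This wakeup protocol is shared by Mutex, Queue, SizedQueue and
 * ConditionVariable below: a waiter is delisted *before* it is woken, so
 * a spurious wakeup can never leave a dangling node on the list. Waiters
 * attached to a fiber scheduler are woken through
 * rb_fiber_scheduler_unblock(); plain threads are interrupted and marked
 * runnable instead. */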
 
#if defined(HAVE_WORKING_FORK)
static void rb_mutex_abandon_all(rb_mutex_t *mutexes);
static void rb_mutex_abandon_keeping_mutexes(rb_thread_t *th);
static void rb_mutex_abandon_locking_mutex(rb_thread_t *th);
#endif

static const char *rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t *th, rb_fiber_t *fiber);
 
/* Counts the waiters currently parked on the mutex's waitq. */
static size_t
rb_mutex_num_waiting(rb_mutex_t *mutex)
{
    struct sync_waiter *w = 0;
    size_t n = 0;

    ccan_list_for_each(&mutex->waitq, w, node) {
        n++;
    }

    return n;
}

static bool
locked_p(rb_mutex_t *mutex)
{
    return mutex->fiber != 0;
}
 
static void
mutex_mark(void *ptr)
{
    rb_mutex_t *mutex = ptr;

    if (locked_p(mutex)) {
        VALUE fiber = rb_fiberptr_self(mutex->fiber);
        if (fiber) rb_gc_mark_movable(fiber);
        rb_gc_mark_movable(mutex->thread);
    }
}

static void
mutex_compact(void *ptr)
{
    rb_mutex_t *mutex = ptr;

    if (locked_p(mutex)) {
        mutex->thread = rb_gc_location(mutex->thread);
    }
}

static void
mutex_free(void *ptr)
{
    rb_mutex_t *mutex = ptr;

    if (locked_p(mutex)) {
        const char *err = rb_mutex_unlock_th(mutex, rb_thread_ptr(mutex->thread), mutex->fiber);
        if (err) rb_bug("%s", err);
    }
    ruby_xfree(ptr);
}

static size_t
mutex_memsize(const void *ptr)
{
    return sizeof(rb_mutex_t);
}

static const rb_data_type_t mutex_data_type = {
    "mutex",
    {mutex_mark, mutex_free, mutex_memsize, mutex_compact,},
    0, 0, RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_FREE_IMMEDIATELY
};
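/* A minimal standalone sketch (not part of thread_sync.c) of how a C
 * extension drives this mutex through the public API from ruby.h:
 * rb_mutex_new(), rb_mutex_synchronize(), rb_mutex_lock() and
 * rb_mutex_unlock() are all public; `shared_counter` and the example_*
 * names here are hypothetical, for illustration only. */
#include <ruby.h>

static long shared_counter; /* hypothetical shared state */

static VALUE
example_locked_increment(VALUE arg)
{
    shared_counter++; /* runs while the mutex is held */
    return Qnil;
}

static VALUE
example_increment(VALUE mutex)
{
    /* Locks, runs the callback, and unlocks even if the callback raises:
     * rb_mutex_synchronize() pairs rb_mutex_lock() with rb_ensure(). */
    return rb_mutex_synchronize(mutex, example_locked_increment, Qnil);
}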
 
VALUE
rb_obj_is_mutex(VALUE obj)
{
    return RBOOL(rb_typeddata_is_kind_of(obj, &mutex_data_type));
}

static VALUE
mutex_alloc(VALUE klass)
{
    VALUE obj;
    rb_mutex_t *mutex;

    obj = TypedData_Make_Struct(klass, rb_mutex_t, &mutex_data_type, mutex);
    ccan_list_head_init(&mutex->waitq);
    return obj;
}

static VALUE
mutex_initialize(VALUE self)
{
    return self;
}

VALUE
rb_mutex_new(void)
{
    return mutex_alloc(rb_cMutex);
}

static rb_mutex_t *
mutex_ptr(VALUE obj)
{
    rb_mutex_t *mutex;

    TypedData_Get_Struct(obj, rb_mutex_t, &mutex_data_type, mutex);
    return mutex;
}

VALUE
rb_mutex_locked_p(VALUE self)
{
    rb_mutex_t *mutex = mutex_ptr(self);

    return RBOOL(locked_p(mutex));
}
 
 
static void
thread_mutex_insert(rb_thread_t *thread, rb_mutex_t *mutex)
{
    if (thread->keeping_mutexes) {
        mutex->next_mutex = thread->keeping_mutexes;
    }

    thread->keeping_mutexes = mutex;
}

static void
thread_mutex_remove(rb_thread_t *thread, rb_mutex_t *mutex)
{
    rb_mutex_t **keeping_mutexes = &thread->keeping_mutexes;

    while (*keeping_mutexes && *keeping_mutexes != mutex) {
        keeping_mutexes = &(*keeping_mutexes)->next_mutex;
    }

    if (*keeping_mutexes) {
        *keeping_mutexes = mutex->next_mutex;
        mutex->next_mutex = NULL;
    }
}

static void
mutex_set_owner(VALUE self, rb_thread_t *th, rb_fiber_t *fiber)
{
    rb_mutex_t *mutex = mutex_ptr(self);

    mutex->thread = th->self;
    mutex->fiber = fiber;
}

static void
mutex_locked(rb_thread_t *th, rb_fiber_t *fiber, VALUE self)
{
    rb_mutex_t *mutex = mutex_ptr(self);

    mutex_set_owner(self, th, fiber);
    thread_mutex_insert(th, mutex);
}
 
VALUE
rb_mutex_trylock(VALUE self)
{
    rb_mutex_t *mutex = mutex_ptr(self);

    if (mutex->fiber == 0) {
        RUBY_DEBUG_LOG("%p ok", mutex);

        rb_fiber_t *fiber = GET_EC()->fiber_ptr;
        rb_thread_t *th = GET_THREAD();
        mutex_locked(th, fiber, self);

        return Qtrue;
    }
    else {
        RUBY_DEBUG_LOG("%p ng", mutex);
        return Qfalse;
    }
}

static VALUE
mutex_owned_p(rb_fiber_t *fiber, rb_mutex_t *mutex)
{
    return RBOOL(mutex->fiber == fiber);
}
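/* Note that ownership is tracked per *fiber* (mutex->fiber), not per
 * thread: Mutex#owned? is true only on the exact fiber that took the
 * lock. rb_mutex_trylock() never blocks; it either installs the current
 * fiber as owner or returns Qfalse immediately. */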
 
static VALUE
call_rb_fiber_scheduler_block(VALUE mutex)
{
    return rb_fiber_scheduler_block(rb_fiber_scheduler_current(), mutex, Qnil);
}

static VALUE
delete_from_waitq(VALUE value)
{
    struct sync_waiter *sync_waiter = (void *)value;
    ccan_list_del(&sync_waiter->node);

    return Qnil;
}

static VALUE
do_mutex_lock(VALUE self, int interruptible_p)
{
    rb_execution_context_t *ec = GET_EC();
    rb_thread_t *th = ec->thread_ptr;
    rb_fiber_t *fiber = ec->fiber_ptr;
    rb_mutex_t *mutex = mutex_ptr(self);
    rb_atomic_t saved_ints = 0;
 
    /* When running trap handler */
    if (!FL_TEST_RAW(self, MUTEX_ALLOW_TRAP) &&
        th->ec->interrupt_mask & TRAP_INTERRUPT_MASK) {
        rb_raise(rb_eThreadError, "can't be called from trap context");
    }

    if (rb_mutex_trylock(self) == Qfalse) {
        if (mutex->fiber == fiber) {
            rb_raise(rb_eThreadError, "deadlock; recursive locking");
        }

        while (mutex->fiber != fiber) {
            VM_ASSERT(mutex->fiber != NULL);

            VALUE scheduler = rb_fiber_scheduler_current();
            if (scheduler != Qnil) {
                struct sync_waiter sync_waiter = {
                    .self = self,
                    .th = th,
                    .fiber = nonblocking_fiber(fiber)
                };

                ccan_list_add_tail(&mutex->waitq, &sync_waiter.node);

                rb_ensure(call_rb_fiber_scheduler_block, self, delete_from_waitq, (VALUE)&sync_waiter);

                if (!mutex->fiber) {
                    mutex_set_owner(self, th, fiber);
                }
            }
            else {
                if (!th->vm->thread_ignore_deadlock && rb_fiber_threadptr(mutex->fiber) == th) {
                    rb_raise(rb_eThreadError,
                             "deadlock; lock already owned by another fiber belonging to the same thread");
                }

                struct sync_waiter sync_waiter = {
                    .self = self,
                    .th = th,
                    .fiber = nonblocking_fiber(fiber),
                };

                RUBY_DEBUG_LOG("%p wait", mutex);

                /* ... */

                enum rb_thread_status prev_status = th->status;
                th->status = THREAD_STOPPED_FOREVER;
                rb_ractor_sleeper_threads_inc(th->ractor);
                rb_check_deadlock(th->ractor);

                th->locking_mutex = self;

                ccan_list_add_tail(&mutex->waitq, &sync_waiter.node);
                {
                    native_sleep(th, NULL); /* releases the GVL while sleeping */
                }
                ccan_list_del(&sync_waiter.node);

                if (!mutex->fiber) {
                    mutex_set_owner(self, th, fiber);
                }

                rb_ractor_sleeper_threads_dec(th->ractor);
                th->status = prev_status;
                th->locking_mutex = Qfalse;

                RUBY_DEBUG_LOG("%p wakeup", mutex);
            }

            if (interruptible_p) {
                /* release the mutex before checking for interrupts: the
                 * interrupt-checking code may call rb_raise() */
                if (mutex->fiber == fiber) {
                    mutex->fiber = 0;
                }
                RUBY_VM_CHECK_INTS_BLOCKING(th->ec); /* may release mutex */
                if (!mutex->fiber) {
                    mutex_set_owner(self, th, fiber);
                }
            }
            else {
                /* defer interrupts until the lock is reacquired */
                if (RUBY_VM_INTERRUPTED(th->ec)) {
                    if (saved_ints == 0) {
                        saved_ints = threadptr_get_interrupts(th);
                    }
                    else {
                        /* already have a saved set; clear and drop the rest */
                        threadptr_get_interrupts(th);
                    }
                }
            }
        }

        if (saved_ints) th->ec->interrupt_flag = saved_ints;
        if (mutex->fiber == fiber) mutex_locked(th, fiber, self);
    }

    RUBY_DEBUG_LOG("%p locked", mutex);

    /* assertion */
    if (mutex_owned_p(fiber, mutex) == Qfalse) rb_bug("do_mutex_lock: mutex is not owned.");

    return self;
}
 
static VALUE
mutex_lock_uninterruptible(VALUE self)
{
    return do_mutex_lock(self, 0);
}

VALUE
rb_mutex_lock(VALUE self)
{
    return do_mutex_lock(self, 1);
}
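/* Two entry points share do_mutex_lock(): rb_mutex_lock() (Mutex#lock)
 * services pending interrupts while waiting, whereas
 * mutex_lock_uninterruptible() defers them, stashing the interrupt flags
 * in saved_ints so they are replayed once the lock is held again. The
 * latter is what Mutex#sleep uses to win the lock back (see
 * rb_mutex_sleep below). */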
 
 
VALUE
rb_mutex_owned_p(VALUE self)
{
    rb_fiber_t *fiber = GET_EC()->fiber_ptr;
    rb_mutex_t *mutex = mutex_ptr(self);

    return mutex_owned_p(fiber, mutex);
}
 
static const char *
rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t *th, rb_fiber_t *fiber)
{
    RUBY_DEBUG_LOG("%p", mutex);

    if (mutex->fiber == 0) {
        return "Attempt to unlock a mutex which is not locked";
    }
    else if (mutex->fiber != fiber) {
        return "Attempt to unlock a mutex which is locked by another thread/fiber";
    }

    struct sync_waiter *cur = 0, *next;

    mutex->fiber = 0;
    thread_mutex_remove(th, mutex);

    ccan_list_for_each_safe(&mutex->waitq, cur, next, node) {
        ccan_list_del_init(&cur->node);

        if (cur->th->scheduler != Qnil && cur->fiber) {
            rb_fiber_scheduler_unblock(cur->th->scheduler, cur->self, rb_fiberptr_self(cur->fiber));
            return NULL;
        }
        else {
            switch (cur->th->status) {
              case THREAD_RUNNABLE: /* from someone else calling Thread#run */
              case THREAD_STOPPED_FOREVER: /* likely (rb_mutex_lock) */
                RUBY_DEBUG_LOG("wakeup th:%u", rb_th_serial(cur->th));
                rb_threadptr_interrupt(cur->th);
                return NULL;
              case THREAD_STOPPED: /* probably impossible */
                rb_bug("unexpected THREAD_STOPPED");
              case THREAD_KILLED:
                /* not sure about this case */
                rb_bug("unexpected THREAD_KILLED");
              default:
                rb_bug("unexpected THREAD_STATUS %d", cur->th->status);
            }
        }
    }

    return NULL;
}

VALUE
rb_mutex_unlock(VALUE self)
{
    const char *err;
    rb_mutex_t *mutex = mutex_ptr(self);
    rb_thread_t *th = GET_THREAD();

    err = rb_mutex_unlock_th(mutex, th, GET_EC()->fiber_ptr);
    if (err) rb_raise(rb_eThreadError, "%s", err);

    return self;
}
 
 
#if defined(HAVE_WORKING_FORK)
static void
rb_mutex_abandon_keeping_mutexes(rb_thread_t *th)
{
    rb_mutex_abandon_all(th->keeping_mutexes);
    th->keeping_mutexes = NULL;
}

static void
rb_mutex_abandon_locking_mutex(rb_thread_t *th)
{
    if (th->locking_mutex) {
        rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);

        ccan_list_head_init(&mutex->waitq);
        th->locking_mutex = Qfalse;
    }
}

static void
rb_mutex_abandon_all(rb_mutex_t *mutexes)
{
    rb_mutex_t *mutex;

    while (mutexes) {
        mutex = mutexes;
        mutexes = mutex->next_mutex;
        mutex->fiber = 0;
        mutex->next_mutex = 0;
        ccan_list_head_init(&mutex->waitq);
    }
}
#endif
 
struct rb_mutex_sleep_arguments {
    VALUE self;
    VALUE timeout;
};

static VALUE
mutex_sleep_begin(VALUE _arguments)
{
    struct rb_mutex_sleep_arguments *arguments = (struct rb_mutex_sleep_arguments *)_arguments;
    VALUE timeout = arguments->timeout;
    VALUE woken = Qtrue;

    VALUE scheduler = rb_fiber_scheduler_current();
    if (scheduler != Qnil) {
        rb_fiber_scheduler_kernel_sleep(scheduler, timeout);
    }
    else {
        if (NIL_P(timeout)) {
            rb_thread_sleep_deadly_allow_spurious_wakeup(arguments->self, Qnil, 0);
        }
        else {
            struct timeval timeout_value = rb_time_interval(timeout);
            rb_hrtime_t relative_timeout = rb_timeval2hrtime(&timeout_value);
            /* permit spurious wakeup checks */
            woken = RBOOL(sleep_hrtime(GET_THREAD(), relative_timeout, 0));
        }
    }

    return woken;
}

VALUE
rb_mutex_sleep(VALUE self, VALUE timeout)
{
    if (!NIL_P(timeout)) {
        /* validate the argument before releasing the lock */
        rb_time_interval(timeout);
    }

    rb_mutex_unlock(self);
    time_t beg = time(0);

    struct rb_mutex_sleep_arguments arguments = {
        .self = self,
        .timeout = timeout,
    };

    VALUE woken = rb_ensure(mutex_sleep_begin, (VALUE)&arguments, mutex_lock_uninterruptible, self);

    RUBY_VM_CHECK_INTS_BLOCKING(GET_EC());
    if (!woken) return Qnil;
    time_t end = time(0) - beg;
    return TIMET2NUM(end);
}
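/* A minimal sketch of the C-level counterpart of Mutex#sleep (the
 * example_* name is hypothetical): the caller must already hold the lock;
 * rb_mutex_sleep() releases it, sleeps up to the given interval, and
 * reacquires it, uninterruptibly as shown above, before returning. */
static VALUE
example_timed_wait(VALUE mutex)
{
    rb_mutex_lock(mutex);
    VALUE slept = rb_mutex_sleep(mutex, DBL2NUM(0.5));
    rb_mutex_unlock(mutex);
    return slept; /* seconds slept as an Integer, or nil if the wait timed out */
}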
 
 
VALUE
rb_mutex_synchronize(VALUE mutex, VALUE (*func)(VALUE arg), VALUE arg)
{
    rb_mutex_lock(mutex);
    return rb_ensure(func, arg, rb_mutex_unlock, mutex);
}

static VALUE
rb_mutex_synchronize_m(VALUE self)
{
    if (!rb_block_given_p()) {
        rb_raise(rb_eThreadError, "must be called with a block");
    }

    return rb_mutex_synchronize(self, rb_yield, Qundef);
}

void
rb_mutex_allow_trap(VALUE self, int val)
{
    Check_TypedStruct(self, &mutex_data_type);

    if (val)
        FL_SET_RAW(self, MUTEX_ALLOW_TRAP);
    else
        FL_UNSET_RAW(self, MUTEX_ALLOW_TRAP);
}
 
/* Queue */

#define queue_waitq(q) UNALIGNED_MEMBER_PTR(q, waitq)
#define queue_list(q) UNALIGNED_MEMBER_PTR(q, que)
RBIMPL_ATTR_PACKED_STRUCT_UNALIGNED_BEGIN()
struct rb_queue {
    struct ccan_list_head waitq;
    rb_serial_t fork_gen;
    const VALUE que;
    int num_waiting;
} RBIMPL_ATTR_PACKED_STRUCT_UNALIGNED_END();

#define szqueue_waitq(sq) UNALIGNED_MEMBER_PTR(sq, q.waitq)
#define szqueue_list(sq) UNALIGNED_MEMBER_PTR(sq, q.que)
#define szqueue_pushq(sq) UNALIGNED_MEMBER_PTR(sq, pushq)
RBIMPL_ATTR_PACKED_STRUCT_UNALIGNED_BEGIN()
struct rb_szqueue {
    struct rb_queue q;
    int num_waiting_push;
    struct ccan_list_head pushq;
    long max;
} RBIMPL_ATTR_PACKED_STRUCT_UNALIGNED_END();
 
 
static void
queue_mark_and_move(void *ptr)
{
    struct rb_queue *q = ptr;

    /* no need to mark threads in waitq, they are on stack */
    rb_gc_mark_and_move((VALUE *)UNALIGNED_MEMBER_PTR(q, que));
}

static size_t
queue_memsize(const void *ptr)
{
    return sizeof(struct rb_queue);
}

static const rb_data_type_t queue_data_type = {
    "queue",
    {queue_mark_and_move, RUBY_TYPED_DEFAULT_FREE, queue_memsize, queue_mark_and_move},
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY|RUBY_TYPED_WB_PROTECTED
};
 
static VALUE
queue_alloc(VALUE klass)
{
    VALUE obj;
    struct rb_queue *q;

    obj = TypedData_Make_Struct(klass, struct rb_queue, &queue_data_type, q);
    ccan_list_head_init(queue_waitq(q));
    return obj;
}

static int
queue_fork_check(struct rb_queue *q)
{
    rb_serial_t fork_gen = GET_VM()->fork_gen;

    if (q->fork_gen == fork_gen) {
        return 0;
    }
    /* forked children can't reach into parent thread stacks */
    q->fork_gen = fork_gen;
    ccan_list_head_init(queue_waitq(q));
    q->num_waiting = 0;
    return 1;
}

static struct rb_queue *
queue_ptr(VALUE obj)
{
    struct rb_queue *q;

    TypedData_Get_Struct(obj, struct rb_queue, &queue_data_type, q);
    queue_fork_check(q);

    return q;
}
 
#define QUEUE_CLOSED          FL_USER5

static rb_hrtime_t
queue_timeout2hrtime(VALUE timeout)
{
    if (NIL_P(timeout)) {
        return (rb_hrtime_t)0;
    }
    rb_hrtime_t rel = 0;
    if (FIXNUM_P(timeout)) {
        rel = rb_sec2hrtime(NUM2TIMET(timeout));
    }
    else {
        double2hrtime(&rel, rb_num2dbl(timeout));
    }
    return rb_hrtime_add(rel, rb_hrtime_now());
}
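/* The relative timeout is converted to an absolute deadline once, up
 * front: nil maps to 0 ("no deadline"), while e.g. a timeout of 2 becomes
 * rb_hrtime_now() + 2s. The wait loops below compare
 * rb_hrtime_now() >= end after every wakeup, so spurious wakeups cannot
 * extend the total wait. */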
 
static void
szqueue_mark_and_move(void *ptr)
{
    struct rb_szqueue *sq = ptr;

    queue_mark_and_move(&sq->q);
}

static size_t
szqueue_memsize(const void *ptr)
{
    return sizeof(struct rb_szqueue);
}

static const rb_data_type_t szqueue_data_type = {
    "sized_queue",
    {szqueue_mark_and_move, RUBY_TYPED_DEFAULT_FREE, szqueue_memsize, szqueue_mark_and_move},
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY|RUBY_TYPED_WB_PROTECTED
};

static VALUE
szqueue_alloc(VALUE klass)
{
    struct rb_szqueue *sq;
    VALUE obj = TypedData_Make_Struct(klass, struct rb_szqueue,
                                      &szqueue_data_type, sq);
    ccan_list_head_init(szqueue_waitq(sq));
    ccan_list_head_init(szqueue_pushq(sq));
    return obj;
}
 
static struct rb_szqueue *
szqueue_ptr(VALUE obj)
{
    struct rb_szqueue *sq;

    TypedData_Get_Struct(obj, struct rb_szqueue, &szqueue_data_type, sq);
    if (queue_fork_check(&sq->q)) {
        ccan_list_head_init(szqueue_pushq(sq));
        sq->num_waiting_push = 0;
    }

    return sq;
}
 
static VALUE
ary_buf_new(void)
{
    return rb_ary_hidden_new(1);
}

static VALUE
check_array(VALUE obj, VALUE ary)
{
    if (!RB_TYPE_P(ary, T_ARRAY)) {
        rb_raise(rb_eTypeError, "%+"PRIsVALUE" not initialized", obj);
    }
    return ary;
}
 
static long
queue_length(VALUE self, struct rb_queue *q)
{
    return RARRAY_LEN(check_array(self, q->que));
}

static int
queue_closed_p(VALUE self)
{
    return FL_TEST_RAW(self, QUEUE_CLOSED) != 0;
}

NORETURN(static void raise_closed_queue_error(VALUE self));

static void
raise_closed_queue_error(VALUE self)
{
    rb_raise(rb_eClosedQueueError, "queue closed");
}

static VALUE
queue_closed_result(VALUE self, struct rb_queue *q)
{
    RUBY_ASSERT(queue_length(self, q) == 0);
    return Qnil;
}
 
static VALUE
rb_queue_initialize(int argc, VALUE *argv, VALUE self)
{
    VALUE initial;
    struct rb_queue *q = queue_ptr(self);
    if ((argc = rb_scan_args(argc, argv, "01", &initial)) == 1) {
        initial = rb_to_array(initial);
    }
    RB_OBJ_WRITE(self, queue_list(q), ary_buf_new());
    ccan_list_head_init(queue_waitq(q));
    if (argc == 1) {
        rb_ary_concat(q->que, initial);
    }
    return self;
}

static VALUE
queue_do_push(VALUE self, struct rb_queue *q, VALUE obj)
{
    if (queue_closed_p(self)) {
        raise_closed_queue_error(self);
    }
    rb_ary_push(check_array(self, q->que), obj);
    wakeup_one(queue_waitq(q));
    return self;
}
 
static VALUE
rb_queue_close(VALUE self)
{
    struct rb_queue *q = queue_ptr(self);

    if (!queue_closed_p(self)) {
        FL_SET(self, QUEUE_CLOSED);

        wakeup_all(queue_waitq(q));
    }

    return self;
}

static VALUE
rb_queue_closed_p(VALUE self)
{
    return RBOOL(queue_closed_p(self));
}
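/* Closing is one-way: rb_queue_close() sets the QUEUE_CLOSED flag and
 * wakes every blocked popper. After that, queue_do_push() raises
 * ClosedQueueError, while pops keep draining whatever is left and then
 * return the closed result (see queue_closed_result in queue_do_pop). */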
 
static VALUE
rb_queue_push(VALUE self, VALUE obj)
{
    return queue_do_push(self, queue_ptr(self), obj);
}

struct queue_sleep_arg {
    VALUE self;
    VALUE timeout;
    rb_hrtime_t end;
};

static VALUE
queue_sleep(VALUE _args)
{
    struct queue_sleep_arg *args = (struct queue_sleep_arg *)_args;
    rb_thread_sleep_deadly_allow_spurious_wakeup(args->self, args->timeout, args->end);
    return Qnil;
}

struct queue_waiter {
    struct sync_waiter w;
    union {
        struct rb_queue *q;
        struct rb_szqueue *sq;
    } as;
};

static VALUE
queue_sleep_done(VALUE p)
{
    struct queue_waiter *qw = (struct queue_waiter *)p;

    ccan_list_del(&qw->w.node);
    qw->as.q->num_waiting--;

    return Qfalse;
}

static VALUE
szqueue_sleep_done(VALUE p)
{
    struct queue_waiter *qw = (struct queue_waiter *)p;

    ccan_list_del(&qw->w.node);
    qw->as.sq->num_waiting_push--;

    return Qfalse;
}
 
static VALUE
queue_do_pop(VALUE self, struct rb_queue *q, int should_block, VALUE timeout)
{
    check_array(self, q->que);
    if (RARRAY_LEN(q->que) == 0) {
        if (!should_block) {
            rb_raise(rb_eThreadError, "queue empty");
        }

        if (RTEST(rb_equal(INT2FIX(0), timeout))) {
            return Qnil;
        }
    }

    rb_hrtime_t end = queue_timeout2hrtime(timeout);
    while (RARRAY_LEN(q->que) == 0) {
        if (queue_closed_p(self)) {
            return queue_closed_result(self, q);
        }
        else {
            rb_execution_context_t *ec = GET_EC();

            struct queue_waiter queue_waiter = {
                .w = {.self = self, .th = ec->thread_ptr, .fiber = nonblocking_fiber(ec->fiber_ptr)},
                .as = {.q = q}
            };

            struct ccan_list_head *waitq = queue_waitq(q);

            ccan_list_add_tail(waitq, &queue_waiter.w.node);
            queue_waiter.as.q->num_waiting++;

            struct queue_sleep_arg queue_sleep_arg = {
                .self = self,
                .timeout = timeout,
                .end = end
            };

            rb_ensure(queue_sleep, (VALUE)&queue_sleep_arg, queue_sleep_done, (VALUE)&queue_waiter);
            if (!NIL_P(timeout) && (rb_hrtime_now() >= end))
                break;
        }
    }

    return rb_ary_shift(q->que);
}

static VALUE
rb_queue_pop(rb_execution_context_t *ec, VALUE self, VALUE non_block, VALUE timeout)
{
    return queue_do_pop(self, queue_ptr(self), !RTEST(non_block), timeout);
}
 
static VALUE
rb_queue_empty_p(VALUE self)
{
    return RBOOL(queue_length(self, queue_ptr(self)) == 0);
}

static VALUE
rb_queue_clear(VALUE self)
{
    struct rb_queue *q = queue_ptr(self);

    rb_ary_clear(check_array(self, q->que));
    return self;
}

static VALUE
rb_queue_length(VALUE self)
{
    return LONG2NUM(queue_length(self, queue_ptr(self)));
}

NORETURN(static VALUE rb_queue_freeze(VALUE self));

static VALUE
rb_queue_freeze(VALUE self)
{
    rb_raise(rb_eTypeError, "cannot freeze " "%+"PRIsVALUE, self);
    UNREACHABLE_RETURN(self);
}

static VALUE
rb_queue_num_waiting(VALUE self)
{
    struct rb_queue *q = queue_ptr(self);

    return INT2NUM(q->num_waiting);
}
 
static VALUE
rb_szqueue_initialize(VALUE self, VALUE vmax)
{
    long max;
    struct rb_szqueue *sq = szqueue_ptr(self);

    max = NUM2LONG(vmax);
    if (max <= 0) {
        rb_raise(rb_eArgError, "queue size must be positive");
    }

    RB_OBJ_WRITE(self, szqueue_list(sq), ary_buf_new());
    ccan_list_head_init(szqueue_waitq(sq));
    ccan_list_head_init(szqueue_pushq(sq));
    sq->max = max;

    return self;
}
 
static VALUE
rb_szqueue_close(VALUE self)
{
    if (!queue_closed_p(self)) {
        struct rb_szqueue *sq = szqueue_ptr(self);

        FL_SET(self, QUEUE_CLOSED);
        wakeup_all(szqueue_waitq(sq));
        wakeup_all(szqueue_pushq(sq));
    }
    return self;
}

static VALUE
rb_szqueue_max_get(VALUE self)
{
    return LONG2NUM(szqueue_ptr(self)->max);
}
 
static VALUE
rb_szqueue_max_set(VALUE self, VALUE vmax)
{
    long max = NUM2LONG(vmax);
    long diff = 0;
    struct rb_szqueue *sq = szqueue_ptr(self);

    if (max <= 0) {
        rb_raise(rb_eArgError, "queue size must be positive");
    }
    if (max > sq->max) {
        diff = max - sq->max;
    }
    sq->max = max;
    sync_wakeup(szqueue_pushq(sq), diff);
    return vmax;
}
 
static VALUE
rb_szqueue_push(rb_execution_context_t *ec, VALUE self, VALUE object, VALUE non_block, VALUE timeout)
{
    struct rb_szqueue *sq = szqueue_ptr(self);

    if (queue_length(self, &sq->q) >= sq->max) {
        if (RTEST(non_block)) {
            rb_raise(rb_eThreadError, "queue full");
        }

        if (RTEST(rb_equal(INT2FIX(0), timeout))) {
            return Qnil;
        }
    }

    rb_hrtime_t end = queue_timeout2hrtime(timeout);
    while (queue_length(self, &sq->q) >= sq->max) {
        if (queue_closed_p(self)) {
            raise_closed_queue_error(self);
        }
        else {
            struct queue_waiter queue_waiter = {
                .w = {.self = self, .th = ec->thread_ptr, .fiber = nonblocking_fiber(ec->fiber_ptr)},
                .as = {.sq = sq}
            };

            struct ccan_list_head *pushq = szqueue_pushq(sq);

            ccan_list_add_tail(pushq, &queue_waiter.w.node);
            sq->num_waiting_push++;

            struct queue_sleep_arg queue_sleep_arg = {
                .self = self,
                .timeout = timeout,
                .end = end
            };
            rb_ensure(queue_sleep, (VALUE)&queue_sleep_arg, szqueue_sleep_done, (VALUE)&queue_waiter);
            if (!NIL_P(timeout) && rb_hrtime_now() >= end) {
                return Qnil;
            }
        }
    }

    return queue_do_push(self, &sq->q, object);
}
 
static VALUE
szqueue_do_pop(VALUE self, int should_block, VALUE timeout)
{
    struct rb_szqueue *sq = szqueue_ptr(self);
    VALUE retval = queue_do_pop(self, &sq->q, should_block, timeout);

    if (queue_length(self, &sq->q) < sq->max) {
        wakeup_one(szqueue_pushq(sq));
    }

    return retval;
}

static VALUE
rb_szqueue_pop(rb_execution_context_t *ec, VALUE self, VALUE non_block, VALUE timeout)
{
    return szqueue_do_pop(self, !RTEST(non_block), timeout);
}
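/* Backpressure works in both directions: a pop that brings the queue back
 * under sq->max wakes exactly one blocked pusher (wakeup_one above), and
 * rb_szqueue_max_set() wakes as many pushers as the capacity increase
 * allows (sync_wakeup with diff). */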
 
static VALUE
rb_szqueue_clear(VALUE self)
{
    struct rb_szqueue *sq = szqueue_ptr(self);

    rb_ary_clear(check_array(self, sq->q.que));
    wakeup_all(szqueue_pushq(sq));
    return self;
}

static VALUE
rb_szqueue_length(VALUE self)
{
    struct rb_szqueue *sq = szqueue_ptr(self);

    return LONG2NUM(queue_length(self, &sq->q));
}

static VALUE
rb_szqueue_num_waiting(VALUE self)
{
    struct rb_szqueue *sq = szqueue_ptr(self);

    return INT2NUM(sq->q.num_waiting + sq->num_waiting_push);
}

static VALUE
rb_szqueue_empty_p(VALUE self)
{
    struct rb_szqueue *sq = szqueue_ptr(self);

    return RBOOL(queue_length(self, &sq->q) == 0);
}
 
/* ConditionVariable */
struct rb_condvar {
    struct ccan_list_head waitq;
    rb_serial_t fork_gen;
};

static size_t
condvar_memsize(const void *ptr)
{
    return sizeof(struct rb_condvar);
}

static const rb_data_type_t cv_data_type = {
    "condvar",
    {0, RUBY_TYPED_DEFAULT_FREE, condvar_memsize,},
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY|RUBY_TYPED_WB_PROTECTED
};

static struct rb_condvar *
condvar_ptr(VALUE self)
{
    struct rb_condvar *cv;
    rb_serial_t fork_gen = GET_VM()->fork_gen;

    TypedData_Get_Struct(self, struct rb_condvar, &cv_data_type, cv);

    /* forked children can't reach into parent thread stacks */
    if (cv->fork_gen != fork_gen) {
        cv->fork_gen = fork_gen;
        ccan_list_head_init(&cv->waitq);
    }

    return cv;
}

static VALUE
condvar_alloc(VALUE klass)
{
    struct rb_condvar *cv;
    VALUE obj;

    obj = TypedData_Make_Struct(klass, struct rb_condvar, &cv_data_type, cv);
    ccan_list_head_init(&cv->waitq);

    return obj;
}

static VALUE
rb_condvar_initialize(VALUE self)
{
    struct rb_condvar *cv = condvar_ptr(self);
    ccan_list_head_init(&cv->waitq);
    return self;
}
 
struct sleep_call {
    VALUE mutex;
    VALUE timeout;
};

static ID id_sleep;

static VALUE
do_sleep(VALUE args)
{
    struct sleep_call *p = (struct sleep_call *)args;
    return rb_funcallv(p->mutex, id_sleep, 1, &p->timeout);
}

static VALUE
rb_condvar_wait(int argc, VALUE *argv, VALUE self)
{
    rb_execution_context_t *ec = GET_EC();
    struct rb_condvar *cv = condvar_ptr(self);
    struct sleep_call args;

    rb_scan_args(argc, argv, "11", &args.mutex, &args.timeout);

    struct sync_waiter sync_waiter = {
        .self = args.mutex,
        .th = ec->thread_ptr,
        .fiber = nonblocking_fiber(ec->fiber_ptr)
    };

    ccan_list_add_tail(&cv->waitq, &sync_waiter.node);
    return rb_ensure(do_sleep, (VALUE)&args, delete_from_waitq, (VALUE)&sync_waiter);
}
 
static VALUE
rb_condvar_signal(VALUE self)
{
    struct rb_condvar *cv = condvar_ptr(self);
    wakeup_one(&cv->waitq);
    return self;
}

static VALUE
rb_condvar_broadcast(VALUE self)
{
    struct rb_condvar *cv = condvar_ptr(self);
    wakeup_all(&cv->waitq);
    return self;
}
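/* The condition variable itself holds only a waitq; it owns no lock.
 * rb_condvar_wait() enqueues a stack-allocated sync_waiter and then
 * delegates the actual sleep to mutex.sleep via do_sleep()/id_sleep,
 * which is why any object responding to #sleep can stand in for a Mutex
 * here. */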
 
NORETURN(static VALUE undumpable(VALUE obj));
/* :nodoc: */
static VALUE
undumpable(VALUE obj)
{
    rb_raise(rb_eTypeError, "can't dump %"PRIsVALUE, rb_obj_class(obj));
    UNREACHABLE_RETURN(Qnil);
}

static VALUE
define_thread_class(VALUE outer, const ID name, VALUE super)
{
    VALUE klass = rb_define_class_id_under(outer, name, super);
    rb_const_set(rb_cObject, name, klass);
    return klass;
}

static void
Init_thread_sync(void)
{
 
#if defined(TEACH_RDOC) && TEACH_RDOC == 42
    rb_cMutex = rb_define_class_under(rb_cThread, "Mutex", rb_cObject);
    rb_cQueue = rb_define_class_under(rb_cThread, "Queue", rb_cObject);
    rb_cSizedQueue = rb_define_class_under(rb_cThread, "SizedQueue", rb_cObject);
    rb_cConditionVariable = rb_define_class_under(rb_cThread, "ConditionVariable", rb_cObject);
#endif

#define DEFINE_CLASS(name, super) \
    rb_c##name = define_thread_class(rb_cThread, rb_intern(#name), rb_c##super)

    /* Mutex */
    DEFINE_CLASS(Mutex, Object);
    rb_define_alloc_func(rb_cMutex, mutex_alloc);
    /* ... Mutex instance methods elided ... */

    /* Queue */
    DEFINE_CLASS(Queue, Object);
    rb_define_alloc_func(rb_cQueue, queue_alloc);
    /* ... Queue instance methods and aliases elided ... */

    /* SizedQueue */
    DEFINE_CLASS(SizedQueue, Queue);
    rb_define_alloc_func(rb_cSizedQueue, szqueue_alloc);
    /* ... */
    rb_define_method(rb_cSizedQueue, "num_waiting", rb_szqueue_num_waiting, 0);
    /* ... */

    /* CVar */
    DEFINE_CLASS(ConditionVariable, Object);
    rb_define_alloc_func(rb_cConditionVariable, condvar_alloc);

    id_sleep = rb_intern("sleep");

    rb_define_method(rb_cConditionVariable, "initialize", rb_condvar_initialize, 0);
    /* ... wait and signal elided ... */
    rb_define_method(rb_cConditionVariable, "broadcast", rb_condvar_broadcast, 0);

    rb_provide("thread.rb");
}

#include "thread_sync.rbinc"
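/* For reference, DEFINE_CLASS(Mutex, Object) above expands to:
 *
 *   rb_cMutex = define_thread_class(rb_cThread, rb_intern("Mutex"),
 *                                   rb_cObject);
 *
 * i.e. each class is created under Thread (Thread::Mutex) and also bound
 * to a top-level constant by rb_const_set(). */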