#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

#include "internal/sanitizers.h"

#define TIME_QUANTUM_USEC (10 * 1000)
#define RB_CONDATTR_CLOCK_MONOTONIC 1

#define native_thread_yield() Sleep(0)
#define unregister_ubf_list(th)
#define ubf_wakeup_all_threads() do {} while (0)
#define ubf_threads_empty() (1)
#define ubf_timer_disarm() do {} while (0)
#define ubf_list_atfork() do {} while (0)

static volatile DWORD ruby_native_thread_key = TLS_OUT_OF_INDEXES;
static int w32_wait_events(HANDLE *events, int count, DWORD timeout, rb_thread_t *th);
rb_internal_thread_event_hook_t *
rb_internal_thread_add_event_hook(rb_internal_thread_event_callback func,
                                  rb_event_flag_t events, void *data)
{
    // not implemented on this platform
    return NULL;
}

bool
rb_internal_thread_remove_event_hook(rb_internal_thread_event_hook_t *hook)
{
    // not implemented on this platform
    return false;
}
static void
w32_error(const char *func)
{
    LPVOID lpMsgBuf;
    DWORD err = GetLastError();
    if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                      FORMAT_MESSAGE_FROM_SYSTEM |
                      FORMAT_MESSAGE_IGNORE_INSERTS,
                      NULL, err,
                      MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US),
                      (LPTSTR)&lpMsgBuf, 0, NULL) == 0)
        FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                      FORMAT_MESSAGE_FROM_SYSTEM |
                      FORMAT_MESSAGE_IGNORE_INSERTS,
                      NULL, err,
                      MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
                      (LPTSTR)&lpMsgBuf, 0, NULL);
    rb_bug("%s: %s", func, (char*)lpMsgBuf);
}
#define W32_EVENT_DEBUG 0

#if W32_EVENT_DEBUG
#define w32_event_debug printf
#else
#define w32_event_debug if (0) printf
#endif
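/* With W32_EVENT_DEBUG disabled, "if (0) printf" still lets the compiler
 * type-check the format string against its arguments, while the dead branch
 * is optimized away; a variadic no-op macro would silence those checks. */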
static int
w32_mutex_lock(HANDLE lock, bool try)
{
    DWORD result;
    while (1) {
        w32_event_debug("lock:%p\n", lock);

        result = w32_wait_events(&lock, 1, try ? 0 : INFINITE, 0);
        switch (result) {
          case WAIT_OBJECT_0:
            /* acquired the mutex object */
            w32_event_debug("locked lock:%p\n", lock);
            return 0;

          case WAIT_OBJECT_0 + 1:
            /* interrupted */
            errno = EINTR;
            w32_event_debug("interrupted lock:%p\n", lock);
            return 0;

          case WAIT_TIMEOUT:
            w32_event_debug("timeout lock:%p\n", lock);
            return EBUSY;

          case WAIT_ABANDONED:
            rb_bug("win32_mutex_lock: WAIT_ABANDONED");
            break;

          default:
            rb_bug("win32_mutex_lock: unknown result (%ld)", result);
            break;
        }
    }
    return 0;
}
static HANDLE
w32_mutex_create(void)
{
    HANDLE lock = CreateMutex(NULL, FALSE, NULL);
    if (lock == NULL) {
        w32_error("rb_native_mutex_initialize");
    }
    return lock;
}
static void
thread_sched_to_running(struct rb_thread_sched *sched, rb_thread_t *th)
{
    w32_mutex_lock(sched->lock, false);
    if (GVL_DEBUG) fprintf(stderr, "gvl acquire (%p): acquire\n", th);
}

static void
thread_sched_to_waiting(struct rb_thread_sched *sched, rb_thread_t *th, bool to_dead)
{
    ReleaseMutex(sched->lock);
}

static void
thread_sched_to_dead(struct rb_thread_sched *sched, rb_thread_t *th)
{
    thread_sched_to_waiting(sched, th, true);
}

static void
thread_sched_yield(struct rb_thread_sched *sched, rb_thread_t *th)
{
    thread_sched_to_waiting(sched, th, true);
    native_thread_yield();
    thread_sched_to_running(sched, th);
}
void
rb_thread_sched_init(struct rb_thread_sched *sched, bool atfork)
{
    if (GVL_DEBUG) fprintf(stderr, "sched init\n");
    sched->lock = w32_mutex_create();
}

void
rb_thread_sched_destroy(struct rb_thread_sched *sched)
{
    if (GVL_DEBUG) fprintf(stderr, "sched destroy\n");
    CloseHandle(sched->lock);
}
rb_thread_t *
ruby_thread_from_native(void)
{
    return TlsGetValue(ruby_native_thread_key);
}

int
ruby_thread_set_native(rb_thread_t *th)
{
    if (th && th->ec) {
        rb_ractor_set_current_ec(th->ractor, th->ec);
    }
    return TlsSetValue(ruby_native_thread_key, th);
}
void
Init_native_thread(rb_thread_t *main_th)
{
    if ((ruby_current_ec_key = TlsAlloc()) == TLS_OUT_OF_INDEXES) {
        rb_bug("TlsAlloc() for ruby_current_ec_key fails");
    }
    if ((ruby_native_thread_key = TlsAlloc()) == TLS_OUT_OF_INDEXES) {
        rb_bug("TlsAlloc() for ruby_native_thread_key fails");
    }

    // setup the main thread
    ruby_thread_set_native(main_th);
    main_th->nt->interrupt_event = CreateEvent(0, TRUE, FALSE, 0);

    DuplicateHandle(GetCurrentProcess(),
                    GetCurrentThread(),
                    GetCurrentProcess(),
                    &main_th->nt->thread_id, 0, FALSE, DUPLICATE_SAME_ACCESS);

    RUBY_DEBUG_LOG("initial thread th:%u thid:%p, event: %p",
                   rb_th_serial(main_th),
                   main_th->nt->thread_id,
                   main_th->nt->interrupt_event);
}
void
ruby_mn_threads_params(void)
{
    // the M:N thread scheduler is not supported on this platform
}
static int
w32_wait_events(HANDLE *events, int count, DWORD timeout, rb_thread_t *th)
{
    HANDLE *targets = events;
    HANDLE intr;
    const int initcount = count;
    DWORD ret;

    w32_event_debug("events:%p, count:%d, timeout:%ld, th:%u\n",
                    events, count, timeout, th ? rb_th_serial(th) : UINT_MAX);

    if (th && (intr = th->nt->interrupt_event)) {
        if (ResetEvent(intr) && (!RUBY_VM_INTERRUPTED(th->ec) || SetEvent(intr))) {
            targets = ALLOCA_N(HANDLE, count + 1);
            memcpy(targets, events, sizeof(HANDLE) * count);

            targets[count++] = intr;
            w32_event_debug("handle:%p (count:%d, intr)\n", intr, count);
        }
        else if (intr == th->nt->interrupt_event) {
            w32_error("w32_wait_events");
        }
    }

    w32_event_debug("WaitForMultipleObjects start count:%d\n", count);
    ret = WaitForMultipleObjects(count, targets, FALSE, timeout);
    w32_event_debug("WaitForMultipleObjects end ret:%lu\n", ret);

    if (ret == (DWORD)(WAIT_OBJECT_0 + initcount) && th) {
        errno = EINTR;
    }
    if (ret == WAIT_FAILED && W32_EVENT_DEBUG) {
        int i;
        DWORD dmy;
        for (i = 0; i < count; i++) {
            w32_event_debug("i:%d %s\n", i,
                            GetHandleInformation(targets[i], &dmy) ? "OK" : "NG");
        }
    }
    return ret;
}
static void ubf_handle(void *ptr);
#define ubf_select ubf_handle
int
rb_w32_wait_events_blocking(HANDLE *events, int num, DWORD timeout)
{
    return w32_wait_events(events, num, timeout, ruby_thread_from_native());
}

int
rb_w32_wait_events(HANDLE *events, int num, DWORD timeout)
{
    int ret;
    rb_thread_t *th = GET_THREAD();

    BLOCKING_REGION(th, ret = rb_w32_wait_events_blocking(events, num, timeout),
                    ubf_handle, ruby_thread_from_native(), FALSE);
    return ret;
}
static void
w32_close_handle(HANDLE handle)
{
    if (CloseHandle(handle) == 0) {
        w32_error("w32_close_handle");
    }
}

static void
w32_resume_thread(HANDLE handle)
{
    if (ResumeThread(handle) == (DWORD)-1) {
        w32_error("w32_resume_thread");
    }
}
#ifdef _MSC_VER
#define HAVE__BEGINTHREADEX 1
#else
#undef HAVE__BEGINTHREADEX
#endif

#ifdef HAVE__BEGINTHREADEX
#define start_thread (HANDLE)_beginthreadex
#define thread_errno errno
typedef unsigned long (__stdcall *w32_thread_start_func)(void*);
#else
#define start_thread CreateThread
#define thread_errno rb_w32_map_errno(GetLastError())
typedef LPTHREAD_START_ROUTINE w32_thread_start_func;
#endif
static HANDLE
w32_create_thread(DWORD stack_size, w32_thread_start_func func, void *val)
{
    return start_thread(0, stack_size, func, val,
                        CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION, 0);
}
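/* Threads are created suspended so that bookkeeping (the VM stack, the
 * interrupt event, th->nt fields) can be finished before the thread runs;
 * the creator then calls w32_resume_thread().  A minimal sketch of that
 * pairing, assuming only the helpers defined in this file:
 *
 *     HANDLE h = w32_create_thread(0, thread_start_func_1, th);
 *     if (h == 0) return thread_errno;   // creation failed
 *     // ... finish initializing *th ...
 *     w32_resume_thread(h);              // let the thread start running
 */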
int
rb_w32_sleep(unsigned long msec)
{
    return w32_wait_events(0, 0, msec, ruby_thread_from_native());
}

int WINAPI
rb_w32_Sleep(unsigned long msec)
{
    int ret;
    rb_thread_t *th = GET_THREAD();

    BLOCKING_REGION(th, ret = rb_w32_sleep(msec),
                    ubf_handle, ruby_thread_from_native(), FALSE);
    return ret;
}
static DWORD
hrtime2msec(rb_hrtime_t hrt)
{
    /* divide before narrowing so that sleeps longer than ~4.29s
     * (the DWORD range in nanoseconds) are not truncated */
    return (DWORD)(hrt / RB_HRTIME_PER_MSEC);
}
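/* Worked example: a relative timeout of 1.5s is 1,500,000,000ns as an
 * rb_hrtime_t; dividing by RB_HRTIME_PER_MSEC (1,000,000) yields the
 * 1500ms value that the Win32 wait functions expect. */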
static void
native_sleep(rb_thread_t *th, rb_hrtime_t *rel)
{
    const volatile DWORD msec = rel ? hrtime2msec(*rel) : INFINITE;

    THREAD_BLOCKING_BEGIN(th);
    {
        DWORD ret;

        rb_native_mutex_lock(&th->interrupt_lock);
        th->unblock.func = ubf_handle;
        th->unblock.arg = th;
        rb_native_mutex_unlock(&th->interrupt_lock);

        if (RUBY_VM_INTERRUPTED(th->ec)) {
            /* interrupted; return immediately */
        }
        else {
            RUBY_DEBUG_LOG("start msec:%lu", msec);
            ret = w32_wait_events(0, 0, msec, th);
            RUBY_DEBUG_LOG("done ret:%lu", ret);
            (void)ret;
        }

        rb_native_mutex_lock(&th->interrupt_lock);
        th->unblock.func = 0;
        th->unblock.arg = 0;
        rb_native_mutex_unlock(&th->interrupt_lock);
    }
    THREAD_BLOCKING_END(th);
}
void
rb_native_mutex_lock(rb_nativethread_lock_t *lock)
{
#ifdef USE_WIN32_MUTEX
    w32_mutex_lock(lock->mutex, false);
#else
    EnterCriticalSection(&lock->crit);
#endif
}

int
rb_native_mutex_trylock(rb_nativethread_lock_t *lock)
{
#ifdef USE_WIN32_MUTEX
    return w32_mutex_lock(lock->mutex, true);
#else
    return TryEnterCriticalSection(&lock->crit) == 0 ? EBUSY : 0;
#endif
}

void
rb_native_mutex_unlock(rb_nativethread_lock_t *lock)
{
#ifdef USE_WIN32_MUTEX
    RUBY_DEBUG_LOG("lock:%p", lock->mutex);
    ReleaseMutex(lock->mutex);
#else
    LeaveCriticalSection(&lock->crit);
#endif
}

void
rb_native_mutex_initialize(rb_nativethread_lock_t *lock)
{
#ifdef USE_WIN32_MUTEX
    lock->mutex = w32_mutex_create();
#else
    InitializeCriticalSection(&lock->crit);
#endif
}

void
rb_native_mutex_destroy(rb_nativethread_lock_t *lock)
{
#ifdef USE_WIN32_MUTEX
    w32_close_handle(lock->mutex);
#else
    DeleteCriticalSection(&lock->crit);
#endif
}
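/* Illustrative sketch (not part of this file): the usual pairing of the
 * rb_native_mutex_* primitives from VM or extension code.
 *
 *     rb_nativethread_lock_t lock;
 *     rb_native_mutex_initialize(&lock);
 *
 *     rb_native_mutex_lock(&lock);
 *     // ... critical section ...
 *     rb_native_mutex_unlock(&lock);
 *
 *     if (rb_native_mutex_trylock(&lock) == 0) {  // 0: acquired, EBUSY: contended
 *         rb_native_mutex_unlock(&lock);
 *     }
 *
 *     rb_native_mutex_destroy(&lock);
 */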
struct cond_event_entry {
    struct cond_event_entry* next;
    struct cond_event_entry* prev;
    HANDLE event;
};

void
rb_native_cond_signal(rb_nativethread_cond_t *cond)
{
    /* cond is guarded by mutex */
    struct cond_event_entry *e = cond->next;
    struct cond_event_entry *head = (struct cond_event_entry*)cond;

    if (e != head) {
        struct cond_event_entry *next = e->next;
        struct cond_event_entry *prev = e->prev;

        prev->next = next;
        next->prev = prev;
        e->next = e->prev = e;

        SetEvent(e->event);
    }
}

void
rb_native_cond_broadcast(rb_nativethread_cond_t *cond)
{
    /* cond is guarded by mutex */
    struct cond_event_entry *e = cond->next;
    struct cond_event_entry *head = (struct cond_event_entry*)cond;

    while (e != head) {
        struct cond_event_entry *next = e->next;
        struct cond_event_entry *prev = e->prev;

        SetEvent(e->event);

        prev->next = next;
        next->prev = prev;
        e->next = e->prev = e;

        e = next;
    }
}
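/* The waiter list is an intrusive circular doubly-linked list whose
 * sentinel head is the condition variable itself (cast to
 * struct cond_event_entry*), so rb_nativethread_cond_t must begin with the
 * same next/prev pair.  An empty list is simply cond->next == head, which
 * is why signal/broadcast need no separate emptiness check.  Unlinked
 * entries are made self-referential (e->next = e->prev = e) so a stray
 * second unlink is harmless. */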
static int
native_cond_timedwait_ms(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex,
                         unsigned long msec)
{
    DWORD r;
    struct cond_event_entry entry;
    struct cond_event_entry *head = (struct cond_event_entry*)cond;

    entry.event = CreateEvent(0, FALSE, FALSE, 0);

    /* cond is guarded by mutex */
    entry.next = head;
    entry.prev = head->prev;
    head->prev->next = &entry;
    head->prev = &entry;

    rb_native_mutex_unlock(mutex);
    {
        r = WaitForSingleObject(entry.event, msec);
        if ((r != WAIT_OBJECT_0) && (r != WAIT_TIMEOUT)) {
            rb_bug("rb_native_cond_wait: WaitForSingleObject returns %lu", r);
        }
    }
    rb_native_mutex_lock(mutex);

    entry.prev->next = entry.next;
    entry.next->prev = entry.prev;

    w32_close_handle(entry.event);
    return (r == WAIT_OBJECT_0) ? 0 : ETIMEDOUT;
}
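/* There is no lost-wakeup window in the unlock-then-wait sequence above:
 * the entry is linked into the waiter list *before* the mutex is released,
 * and the event is an auto-reset kernel object, so a SetEvent() issued by a
 * signaler between rb_native_mutex_unlock() and WaitForSingleObject()
 * simply leaves the event signaled and the wait returns at once. */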
void
rb_native_cond_wait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex)
{
    native_cond_timedwait_ms(cond, mutex, INFINITE);
}
static unsigned long
abs_timespec_to_timeout_ms(const struct timespec *ts)
{
    struct timeval tv, now;

    gettimeofday(&now, NULL);
    tv.tv_sec = ts->tv_sec;
    tv.tv_usec = ts->tv_nsec / 1000;

    if (!rb_w32_time_subtract(&tv, &now))
        return 0;

    return (tv.tv_sec * 1000) + (tv.tv_usec / 1000);
}
static int
native_cond_timedwait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex,
                      const struct timespec *ts)
{
    unsigned long timeout_ms;

    timeout_ms = abs_timespec_to_timeout_ms(ts);
    if (!timeout_ms)
        return ETIMEDOUT;

    return native_cond_timedwait_ms(cond, mutex, timeout_ms);
}
static struct timespec native_cond_timeout(rb_nativethread_cond_t *cond,
                                           struct timespec timeout_rel);

void
rb_native_cond_timedwait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex,
                         unsigned long msec)
{
    struct timespec rel = {
        .tv_sec = msec / 1000,
        .tv_nsec = (msec % 1000) * 1000 * 1000,
    };
    struct timespec ts = native_cond_timeout(cond, rel);
    native_cond_timedwait(cond, mutex, &ts);
}
static struct timespec
native_cond_timeout(rb_nativethread_cond_t *cond, struct timespec timeout_rel)
{
    int ret;
    struct timeval tv;
    struct timespec timeout;
    struct timespec now;

    ret = gettimeofday(&tv, 0);
    if (ret != 0)
        rb_sys_fail(0);
    now.tv_sec = tv.tv_sec;
    now.tv_nsec = tv.tv_usec * 1000;

    timeout.tv_sec = now.tv_sec;
    timeout.tv_nsec = now.tv_nsec;
    timeout.tv_sec += timeout_rel.tv_sec;
    timeout.tv_nsec += timeout_rel.tv_nsec;

    if (timeout.tv_nsec >= 1000*1000*1000) {
        timeout.tv_sec++;
        timeout.tv_nsec -= 1000*1000*1000;
    }

    /* clamp on overflow */
    if (timeout.tv_sec < now.tv_sec)
        timeout.tv_sec = TIMET_MAX;

    return timeout;
}
void
rb_native_cond_initialize(rb_nativethread_cond_t *cond)
{
    cond->next = (struct cond_event_entry *)cond;
    cond->prev = (struct cond_event_entry *)cond;
}
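/* Illustrative sketch (not part of this file): the standard predicate-loop
 * usage of these condition-variable primitives, which tolerates spurious
 * wakeups by re-checking the condition under the lock.  'ready' and 'lock'
 * are hypothetical caller state.
 *
 *     rb_native_mutex_lock(&lock);
 *     while (!ready) {
 *         rb_native_cond_wait(&cond, &lock);
 *     }
 *     rb_native_mutex_unlock(&lock);
 *
 *     // producer side:
 *     rb_native_mutex_lock(&lock);
 *     ready = true;
 *     rb_native_cond_signal(&cond);
 *     rb_native_mutex_unlock(&lock);
 */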
#define CHECK_ERR(expr) \
    {if (!(expr)) {rb_bug("err: %lu - %s", GetLastError(), #expr);}}
COMPILER_WARNING_PUSH
#if __has_warning("-Wmaybe-uninitialized")
COMPILER_WARNING_IGNORED(-Wmaybe-uninitialized)
#endif
static inline SIZE_T
query_memory_basic_info(PMEMORY_BASIC_INFORMATION mi, void *local_in_parent_frame)
{
    return VirtualQuery(asan_get_real_stack_addr(local_in_parent_frame), mi, sizeof(*mi));
}
COMPILER_WARNING_POP
static void
native_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame)
{
    MEMORY_BASIC_INFORMATION mi;
    char *base, *end;
    DWORD size, space;

    CHECK_ERR(query_memory_basic_info(&mi, local_in_parent_frame));
    base = mi.AllocationBase;
    end = mi.BaseAddress;
    end += mi.RegionSize;
    size = end - base;
    space = size / 5;
    if (space > 1024*1024) space = 1024*1024;
    th->ec->machine.stack_start = (VALUE *)end - 1;
    th->ec->machine.stack_maxsize = size - space;
}
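/* Worked example of the arithmetic above: VirtualQuery() on a local
 * variable reports the thread's stack reservation.  For an 8 MiB
 * reservation, size = 8 MiB and size/5 = ~1.6 MiB, which is capped at
 * 1 MiB of guard headroom, so stack_maxsize becomes 7 MiB; stack_start is
 * the highest VALUE slot because the stack grows downward. */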
#ifndef InterlockedExchangePointer
#define InterlockedExchangePointer(t, v) \
    (void *)InterlockedExchange((long *)(t), (long)(v))
#endif
static void
native_thread_destroy(struct rb_native_thread *nt)
{
    if (nt) {
        HANDLE intr = InterlockedExchangePointer(&nt->interrupt_event, 0);
        RUBY_DEBUG_LOG("close handle intr:%p, thid:%p\n", intr, nt->thread_id);
        w32_close_handle(intr);
    }
}
static unsigned long __stdcall
thread_start_func_1(void *th_ptr)
{
    rb_thread_t *th = th_ptr;
    volatile HANDLE thread_id = th->nt->thread_id;

    native_thread_init_stack(th, &th);
    th->nt->interrupt_event = CreateEvent(0, TRUE, FALSE, 0);

    /* run */
    RUBY_DEBUG_LOG("thread created th:%u, thid: %p, event: %p",
                   rb_th_serial(th), th->nt->thread_id, th->nt->interrupt_event);

    thread_sched_to_running(TH_SCHED(th), th);
    ruby_thread_set_native(th);

    thread_start_func_2(th, th->ec->machine.stack_start);

    w32_close_handle(thread_id);
    RUBY_DEBUG_LOG("thread deleted th:%u", rb_th_serial(th));
    return 0;
}
static int
native_thread_create(rb_thread_t *th)
{
    const size_t stack_size = th->vm->default_params.thread_machine_stack_size;
    th->nt = ZALLOC(struct rb_native_thread);
    th->nt->thread_id = w32_create_thread(stack_size, thread_start_func_1, th);

    // setup the vm stack
    size_t vm_stack_word_size = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE);
    void *vm_stack = ruby_xmalloc(vm_stack_word_size * sizeof(VALUE));
    th->sched.vm_stack = vm_stack;
    rb_ec_initialize_vm_stack(th->ec, vm_stack, vm_stack_word_size);

    if ((th->nt->thread_id) == 0) {
        return thread_errno;
    }

    w32_resume_thread(th->nt->thread_id);

    if (USE_RUBY_DEBUG_LOG) {
        Sleep(0);
        RUBY_DEBUG_LOG("th:%u thid:%p intr:%p, stack size: %"PRIuSIZE,
                       rb_th_serial(th), th->nt->thread_id,
                       th->nt->interrupt_event, stack_size);
    }
    return 0;
}
static void
native_thread_join(HANDLE th)
{
    w32_wait_events(&th, 1, INFINITE, 0);
}
#if USE_NATIVE_THREAD_PRIORITY

static void
native_thread_apply_priority(rb_thread_t *th)
{
    int priority = th->priority;
    if (th->priority > 0) {
        priority = THREAD_PRIORITY_ABOVE_NORMAL;
    }
    else if (th->priority < 0) {
        priority = THREAD_PRIORITY_BELOW_NORMAL;
    }
    else {
        priority = THREAD_PRIORITY_NORMAL;
    }

    SetThreadPriority(th->nt->thread_id, priority);
}

#endif /* USE_NATIVE_THREAD_PRIORITY */
int rb_w32_select_with_thread(int, fd_set *, fd_set *, fd_set *, struct timeval *, void *);

static int
native_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds,
                 struct timeval *timeout, rb_thread_t *th)
{
    fd_set *r = NULL, *w = NULL, *e = NULL;

    if (readfds) {
        rb_fd_resize(n - 1, readfds);
        r = rb_fd_ptr(readfds);
    }
    if (writefds) {
        rb_fd_resize(n - 1, writefds);
        w = rb_fd_ptr(writefds);
    }
    if (exceptfds) {
        rb_fd_resize(n - 1, exceptfds);
        e = rb_fd_ptr(exceptfds);
    }
    return rb_w32_select_with_thread(n, r, w, e, timeout, th);
}
/* @internal */
int
rb_w32_check_interrupt(rb_thread_t *th)
{
    return w32_wait_events(0, 0, 0, th);
}
static void
ubf_handle(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    RUBY_DEBUG_LOG("th:%u\n", rb_th_serial(th));

    if (!SetEvent(th->nt->interrupt_event)) {
        w32_error("ubf_handle");
    }
}
int rb_w32_set_thread_description(HANDLE th, const WCHAR *name);
int rb_w32_set_thread_description_str(HANDLE th, VALUE name);
#define native_set_another_thread_name rb_w32_set_thread_description_str

static struct {
    HANDLE id;
    HANDLE lock;
} timer_thread;
#define TIMER_THREAD_CREATED_P() (timer_thread.id != 0)
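/* TIME_QUANTUM_USEC is 10,000 microseconds, so the WaitForSingleObject()
 * timeout below (TIME_QUANTUM_USEC/1000) makes the timer thread poll every
 * 10ms: each WAIT_TIMEOUT tick checks for pending signals, and setting
 * timer_thread.lock (a manual-reset event) ends the loop. */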
static unsigned long __stdcall
timer_thread_func(void *dummy)
{
    rb_vm_t *vm = GET_VM();
    RUBY_DEBUG_LOG("start");
    rb_w32_set_thread_description(GetCurrentThread(), L"ruby-timer-thread");
    while (WaitForSingleObject(timer_thread.lock,
                               TIME_QUANTUM_USEC/1000) == WAIT_TIMEOUT) {
        rb_threadptr_check_signal(vm->ractor.main_thread);
    }
    RUBY_DEBUG_LOG("end");
    return 0;
}
void
rb_thread_wakeup_timer_thread(int sig)
{
    /* do nothing */
}
static void
rb_thread_create_timer_thread(void)
{
    if (timer_thread.id == 0) {
        if (!timer_thread.lock) {
            timer_thread.lock = CreateEvent(0, TRUE, FALSE, 0);
        }
        timer_thread.id = w32_create_thread(1024 + (USE_RUBY_DEBUG_LOG ? BUFSIZ : 0),
                                            timer_thread_func, 0);
        w32_resume_thread(timer_thread.id);
    }
}
static int
native_stop_timer_thread(void)
{
    int stopped = --system_working <= 0;
    if (stopped) {
        SetEvent(timer_thread.lock);
        native_thread_join(timer_thread.id);
        CloseHandle(timer_thread.lock);
        timer_thread.lock = 0;
    }
    return stopped;
}
static void
native_reset_timer_thread(void)
{
    if (timer_thread.id) {
        CloseHandle(timer_thread.id);
        timer_thread.id = 0;
    }
}
int
ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
{
    return rb_ec_raised_p(th->ec, RAISED_STACKOVERFLOW);
}
#if defined(__MINGW32__)
LONG WINAPI
rb_w32_stack_overflow_handler(struct _EXCEPTION_POINTERS *exception)
{
    if (exception->ExceptionRecord->ExceptionCode == EXCEPTION_STACK_OVERFLOW) {
        rb_ec_raised_set(GET_EC(), RAISED_STACKOVERFLOW);
        raise(SIGSEGV);
    }
    return EXCEPTION_CONTINUE_SEARCH;
}
#endif
#ifdef RUBY_ALLOCA_CHKSTK
void
ruby_alloca_chkstk(size_t len, void *sp)
{
    if (ruby_stack_length(NULL) * sizeof(VALUE) >= len) {
        rb_execution_context_t *ec = GET_EC();
        if (!rb_ec_raised_p(ec, RAISED_STACKOVERFLOW)) {
            rb_ec_raised_set(ec, RAISED_STACKOVERFLOW);
            rb_exc_raise(sysstack_error);
        }
    }
}
#endif
rb_nativethread_id_t
rb_nativethread_self(void)
{
    return GetCurrentThread();
}
static VALUE
native_thread_native_thread_id(rb_thread_t *th)
{
    DWORD tid = GetThreadId(th->nt->thread_id);
    if (tid == 0) rb_sys_fail("GetThreadId");
    return ULONG2NUM(tid);
}
#define USE_NATIVE_THREAD_NATIVE_THREAD_ID 1
void
rb_threadptr_sched_free(rb_thread_t *th)
{
    native_thread_destroy(th->nt);
    ruby_xfree(th->nt);
    ruby_xfree(th->sched.vm_stack);
}
void
rb_thread_sched_mark_zombies(rb_vm_t *vm)
{
    /* no zombie list on this platform; nothing to mark */
}
static bool
vm_barrier_finish_p(rb_vm_t *vm)
{
    RUBY_DEBUG_LOG("cnt:%u living:%u blocking:%u",
                   vm->ractor.sync.barrier_cnt,
                   vm->ractor.cnt,
                   vm->ractor.blocking_cnt);

    VM_ASSERT(vm->ractor.blocking_cnt <= vm->ractor.cnt);
    return vm->ractor.blocking_cnt == vm->ractor.cnt;
}
void
rb_ractor_sched_barrier_start(rb_vm_t *vm, rb_ractor_t *cr)
{
    vm->ractor.sync.barrier_waiting = true;

    RUBY_DEBUG_LOG("barrier start. cnt:%u living:%u blocking:%u",
                   vm->ractor.sync.barrier_cnt,
                   vm->ractor.cnt,
                   vm->ractor.blocking_cnt);

    rb_vm_ractor_blocking_cnt_inc(vm, cr, __FILE__, __LINE__);

    // interrupt all living ractors except the current one
    rb_ractor_t *r = 0;
    ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
        if (r != cr) {
            rb_ractor_vm_barrier_interrupt_running_thread(r);
        }
    }

    // wait for all ractors to block
    while (!vm_barrier_finish_p(vm)) {
        rb_vm_cond_wait(vm, &vm->ractor.sync.barrier_complete_cond);
    }

    RUBY_DEBUG_LOG("cnt:%u barrier success", vm->ractor.sync.barrier_cnt);

    rb_vm_ractor_blocking_cnt_dec(vm, cr, __FILE__, __LINE__);

    vm->ractor.sync.barrier_waiting = false;
    vm->ractor.sync.barrier_cnt++;

    // release the blocked ractors
    rb_native_cond_broadcast(&vm->ractor.sync.barrier_release_cond);
}
void
rb_ractor_sched_barrier_join(rb_vm_t *vm, rb_ractor_t *cr)
{
    vm->ractor.sync.lock_owner = cr;
    unsigned int barrier_cnt = vm->ractor.sync.barrier_cnt;
    rb_thread_t *th = GET_THREAD();
    bool running;

    RB_VM_SAVE_MACHINE_CONTEXT(th);

    if (rb_ractor_status_p(cr, ractor_running)) {
        rb_vm_ractor_blocking_cnt_inc(vm, cr, __FILE__, __LINE__);
        running = true;
    }
    else {
        running = false;
    }
    VM_ASSERT(rb_ractor_status_p(cr, ractor_blocking));

    if (vm_barrier_finish_p(vm)) {
        RUBY_DEBUG_LOG("wakeup barrier owner");
        rb_native_cond_signal(&vm->ractor.sync.barrier_complete_cond);
    }
    else {
        RUBY_DEBUG_LOG("wait for barrier finish");
    }

    // wait for the barrier to be released
    while (barrier_cnt == vm->ractor.sync.barrier_cnt) {
        vm->ractor.sync.lock_owner = NULL;
        rb_vm_cond_wait(vm, &vm->ractor.sync.barrier_release_cond);
        vm->ractor.sync.lock_owner = cr;
    }

    RUBY_DEBUG_LOG("barrier is released. Acquire vm_lock");

    if (running) {
        rb_vm_ractor_blocking_cnt_dec(vm, cr, __FILE__, __LINE__);
    }

    vm->ractor.sync.lock_owner = NULL;
}
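/* Barrier protocol in brief: the initiating ractor sets barrier_waiting,
 * interrupts every other running ractor, and sleeps on
 * barrier_complete_cond until blocking_cnt == cnt (vm_barrier_finish_p).
 * Each joining ractor counts itself as blocking, signals the initiator if
 * it is the last one needed, then sleeps on barrier_release_cond.  The
 * initiator finally bumps barrier_cnt and broadcasts; joiners use their
 * saved barrier_cnt to detect the release even across spurious wakeups. */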
// there is no fork() on Windows, so no fork protection is needed
void *
rb_thread_prevent_fork(void *(*func)(void *), void *data)
{
    return func(data);
}

void
rb_thread_malloc_stack_set(rb_thread_t *th, void *stack, size_t stack_size)
{
    // not used on this platform
}

#endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */