Ruby 3.5.0dev (2025-01-10 revision 5fab31b15e32622c4b71d1d347a41937e9f9c212)
thread_win32.c (5fab31b15e32622c4b71d1d347a41937e9f9c212)
/* -*-c-*- */
/**********************************************************************

  thread_win32.c -

  $Author$

  Copyright (C) 2004-2007 Koichi Sasada

**********************************************************************/

#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

#include "internal/sanitizers.h"
#include <process.h>

#define TIME_QUANTUM_USEC (10 * 1000)
#define RB_CONDATTR_CLOCK_MONOTONIC 1 /* no effect */

#undef Sleep

#define native_thread_yield() Sleep(0)
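/* The ubf_* (unblocking function) bookkeeping below is stubbed out on
 * Windows: blocked threads are woken through their own interrupt_event
 * (see ubf_handle() below), so no global ubf list is needed. */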
#define unregister_ubf_list(th)
#define ubf_wakeup_all_threads() do {} while (0)
#define ubf_threads_empty() (1)
#define ubf_timer_disarm() do {} while (0)
#define ubf_list_atfork() do {} while (0)

static volatile DWORD ruby_native_thread_key = TLS_OUT_OF_INDEXES;

static int w32_wait_events(HANDLE *events, int count, DWORD timeout, rb_thread_t *th);

rb_internal_thread_event_hook_t *
rb_internal_thread_add_event_hook(rb_internal_thread_event_callback callback, rb_event_flag_t internal_event, void *user_data)
{
    // not implemented
    return NULL;
}

bool
rb_internal_thread_remove_event_hook(rb_internal_thread_event_hook_t * hook)
{
    // not implemented
    return false;
}

RBIMPL_ATTR_NORETURN()
static void
w32_error(const char *func)
{
    LPVOID lpMsgBuf;
    DWORD err = GetLastError();
    if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                      FORMAT_MESSAGE_FROM_SYSTEM |
                      FORMAT_MESSAGE_IGNORE_INSERTS,
                      NULL,
                      err,
                      MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US),
                      (LPTSTR) & lpMsgBuf, 0, NULL) == 0)
        FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                      FORMAT_MESSAGE_FROM_SYSTEM |
                      FORMAT_MESSAGE_IGNORE_INSERTS,
                      NULL,
                      err,
                      MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
                      (LPTSTR) & lpMsgBuf, 0, NULL);
    rb_bug("%s: %s", func, (char*)lpMsgBuf);
    UNREACHABLE;
}

#define W32_EVENT_DEBUG 0

#if W32_EVENT_DEBUG
#define w32_event_debug printf
#else
#define w32_event_debug if (0) printf
#endif

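/* The scheduler lock is a Win32 kernel mutex rather than a
 * CRITICAL_SECTION, so waiting for it can be combined with the per-thread
 * interrupt event in a single WaitForMultipleObjects() call inside
 * w32_wait_events(); blocking on the lock therefore stays interruptible. */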
static int
w32_mutex_lock(HANDLE lock, bool try)
{
    DWORD result;
    while (1) {
        // RUBY_DEBUG_LOG() cannot be used here because RUBY_DEBUG_LOG() itself calls this function.
        w32_event_debug("lock:%p\n", lock);

        result = w32_wait_events(&lock, 1, try ? 0 : INFINITE, 0);
        switch (result) {
          case WAIT_OBJECT_0:
            /* get mutex object */
            w32_event_debug("locked lock:%p\n", lock);
            return 0;

          case WAIT_OBJECT_0 + 1:
            /* interrupt */
            errno = EINTR;
            w32_event_debug("interrupted lock:%p\n", lock);
            return 0;

          case WAIT_TIMEOUT:
            w32_event_debug("timeout lock:%p\n", lock);
            return EBUSY;

          case WAIT_ABANDONED:
            rb_bug("win32_mutex_lock: WAIT_ABANDONED");
            break;

          default:
            rb_bug("win32_mutex_lock: unknown result (%ld)", result);
            break;
        }
    }
    return 0;
}

static HANDLE
w32_mutex_create(void)
{
    HANDLE lock = CreateMutex(NULL, FALSE, NULL);
    if (lock == NULL) {
        w32_error("rb_native_mutex_initialize");
    }
    return lock;
}

#define GVL_DEBUG 0

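/* sched->lock effectively serves as the GVL here: a thread executes Ruby
 * code only while holding it, and releases it before blocking. */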
static void
thread_sched_to_running(struct rb_thread_sched *sched, rb_thread_t *th)
{
    w32_mutex_lock(sched->lock, false);
    if (GVL_DEBUG) fprintf(stderr, "gvl acquire (%p): acquire\n", th);
}

#define thread_sched_to_dead thread_sched_to_waiting

static void
thread_sched_to_waiting(struct rb_thread_sched *sched, rb_thread_t *th)
{
    ReleaseMutex(sched->lock);
}

static void
thread_sched_yield(struct rb_thread_sched *sched, rb_thread_t *th)
{
    thread_sched_to_waiting(sched, th);
    native_thread_yield();
    thread_sched_to_running(sched, th);
}

void
rb_thread_sched_init(struct rb_thread_sched *sched, bool atfork)
{
    if (GVL_DEBUG) fprintf(stderr, "sched init\n");
    sched->lock = w32_mutex_create();
}

#if 0
// per-ractor
void
rb_thread_sched_destroy(struct rb_thread_sched *sched)
{
    if (GVL_DEBUG) fprintf(stderr, "sched destroy\n");
    CloseHandle(sched->lock);
}
#endif

static rb_thread_t *
ruby_thread_from_native(void)
{
    return TlsGetValue(ruby_native_thread_key);
}

int
ruby_thread_set_native(rb_thread_t *th)
{
    if (th && th->ec) {
        rb_ractor_set_current_ec(th->ractor, th->ec);
    }
    return TlsSetValue(ruby_native_thread_key, th);
}

void
Init_native_thread(rb_thread_t *main_th)
{
    if ((ruby_current_ec_key = TlsAlloc()) == TLS_OUT_OF_INDEXES) {
        rb_bug("TlsAlloc() for ruby_current_ec_key fails");
    }
    if ((ruby_native_thread_key = TlsAlloc()) == TLS_OUT_OF_INDEXES) {
        rb_bug("TlsAlloc() for ruby_native_thread_key fails");
    }

    // setup main thread

    ruby_thread_set_native(main_th);
    main_th->nt->interrupt_event = CreateEvent(0, TRUE, FALSE, 0);

    DuplicateHandle(GetCurrentProcess(),
                    GetCurrentThread(),
                    GetCurrentProcess(),
                    &main_th->nt->thread_id, 0, FALSE, DUPLICATE_SAME_ACCESS);

    RUBY_DEBUG_LOG("initial thread th:%u thid:%p, event: %p",
                   rb_th_serial(main_th),
                   main_th->nt->thread_id,
                   main_th->nt->interrupt_event);
}

void
ruby_mn_threads_params(void)
{
}

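/* Central waiting primitive. When `th` is given, its interrupt_event is
 * appended to the handle array so the wait can be woken early via
 * ubf_handle(). A return value of WAIT_OBJECT_0 + initcount therefore
 * means "interrupted", and errno is set to EINTR in that case. */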
static int
w32_wait_events(HANDLE *events, int count, DWORD timeout, rb_thread_t *th)
{
    HANDLE *targets = events;
    HANDLE intr;
    const int initcount = count;
    DWORD ret;

    w32_event_debug("events:%p, count:%d, timeout:%ld, th:%u\n",
                    events, count, timeout, th ? rb_th_serial(th) : UINT_MAX);

    if (th && (intr = th->nt->interrupt_event)) {
        if (ResetEvent(intr) && (!RUBY_VM_INTERRUPTED(th->ec) || SetEvent(intr))) {
            targets = ALLOCA_N(HANDLE, count + 1);
            memcpy(targets, events, sizeof(HANDLE) * count);

            targets[count++] = intr;
            w32_event_debug("handle:%p (count:%d, intr)\n", intr, count);
        }
        else if (intr == th->nt->interrupt_event) {
            w32_error("w32_wait_events");
        }
    }

    w32_event_debug("WaitForMultipleObjects start count:%d\n", count);
    ret = WaitForMultipleObjects(count, targets, FALSE, timeout);
    w32_event_debug("WaitForMultipleObjects end ret:%lu\n", ret);

    if (ret == (DWORD)(WAIT_OBJECT_0 + initcount) && th) {
        errno = EINTR;
    }
    if (ret == WAIT_FAILED && W32_EVENT_DEBUG) {
        int i;
        DWORD dmy;
        for (i = 0; i < count; i++) {
            w32_event_debug("i:%d %s\n", i, GetHandleInformation(targets[i], &dmy) ? "OK" : "NG");
        }
    }
    return ret;
}

static void ubf_handle(void *ptr);
#define ubf_select ubf_handle

int
rb_w32_wait_events_blocking(HANDLE *events, int num, DWORD timeout)
{
    return w32_wait_events(events, num, timeout, ruby_thread_from_native());
}

int
rb_w32_wait_events(HANDLE *events, int num, DWORD timeout)
{
    int ret;
    rb_thread_t *th = GET_THREAD();

    BLOCKING_REGION(th, ret = rb_w32_wait_events_blocking(events, num, timeout),
                    ubf_handle, ruby_thread_from_native(), FALSE);
    return ret;
}

static void
w32_close_handle(HANDLE handle)
{
    if (CloseHandle(handle) == 0) {
        w32_error("w32_close_handle");
    }
}

static void
w32_resume_thread(HANDLE handle)
{
    if (ResumeThread(handle) == (DWORD)-1) {
        w32_error("w32_resume_thread");
    }
}

#ifdef _MSC_VER
#define HAVE__BEGINTHREADEX 1
#else
#undef HAVE__BEGINTHREADEX
#endif

#ifdef HAVE__BEGINTHREADEX
#define start_thread (HANDLE)_beginthreadex
#define thread_errno errno
typedef unsigned long (__stdcall *w32_thread_start_func)(void*);
#else
#define start_thread CreateThread
#define thread_errno rb_w32_map_errno(GetLastError())
typedef LPTHREAD_START_ROUTINE w32_thread_start_func;
#endif

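/* New threads are created suspended so the creator can finish initializing
 * th->nt before the thread body runs; they are started later with
 * w32_resume_thread(). STACK_SIZE_PARAM_IS_A_RESERVATION makes stack_size
 * a reservation rather than an initial commit. */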
static HANDLE
w32_create_thread(DWORD stack_size, w32_thread_start_func func, void *val)
{
    return start_thread(0, stack_size, func, val, CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION, 0);
}

int
rb_w32_sleep(unsigned long msec)
{
    return w32_wait_events(0, 0, msec, ruby_thread_from_native());
}

int WINAPI
rb_w32_Sleep(unsigned long msec)
{
    int ret;
    rb_thread_t *th = GET_THREAD();

    BLOCKING_REGION(th, ret = rb_w32_sleep(msec),
                    ubf_handle, ruby_thread_from_native(), FALSE);
    return ret;
}

static DWORD
hrtime2msec(rb_hrtime_t hrt)
{
    return (DWORD)hrt / (DWORD)RB_HRTIME_PER_MSEC;
}

static void
native_sleep(rb_thread_t *th, rb_hrtime_t *rel)
{
    const volatile DWORD msec = rel ? hrtime2msec(*rel) : INFINITE;

    THREAD_BLOCKING_BEGIN(th);
    {
        DWORD ret;

        rb_native_mutex_lock(&th->interrupt_lock);
        th->unblock.func = ubf_handle;
        th->unblock.arg = th;
        rb_native_mutex_unlock(&th->interrupt_lock);

        if (RUBY_VM_INTERRUPTED(th->ec)) {
            /* interrupted. return immediately */
        }
        else {
            RUBY_DEBUG_LOG("start msec:%lu", msec);
            ret = w32_wait_events(0, 0, msec, th);
            RUBY_DEBUG_LOG("done ret:%lu", ret);
            (void)ret;
        }

        rb_native_mutex_lock(&th->interrupt_lock);
        th->unblock.func = 0;
        th->unblock.arg = 0;
        rb_native_mutex_unlock(&th->interrupt_lock);
    }
    THREAD_BLOCKING_END(th);
}

void
rb_native_mutex_lock(rb_nativethread_lock_t *lock)
{
#ifdef USE_WIN32_MUTEX
    w32_mutex_lock(lock->mutex, false);
#else
    EnterCriticalSection(&lock->crit);
#endif
}

int
rb_native_mutex_trylock(rb_nativethread_lock_t *lock)
{
#ifdef USE_WIN32_MUTEX
    return w32_mutex_lock(lock->mutex, true);
#else
    return TryEnterCriticalSection(&lock->crit) == 0 ? EBUSY : 0;
#endif
}

void
rb_native_mutex_unlock(rb_nativethread_lock_t *lock)
{
#ifdef USE_WIN32_MUTEX
    RUBY_DEBUG_LOG("lock:%p", lock->mutex);
    ReleaseMutex(lock->mutex);
#else
    LeaveCriticalSection(&lock->crit);
#endif
}

void
rb_native_mutex_initialize(rb_nativethread_lock_t *lock)
{
#ifdef USE_WIN32_MUTEX
    lock->mutex = w32_mutex_create();
    /* thread_debug("initialize mutex: %p\n", lock->mutex); */
#else
    InitializeCriticalSection(&lock->crit);
#endif
}

void
rb_native_mutex_destroy(rb_nativethread_lock_t *lock)
{
#ifdef USE_WIN32_MUTEX
    w32_close_handle(lock->mutex);
#else
    DeleteCriticalSection(&lock->crit);
#endif
}

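/* Condition variables are implemented from scratch: each waiter links a
 * cond_event_entry carrying its own auto-reset event into a circular
 * doubly-linked list whose sentinel head is the condition variable itself.
 * All list manipulation happens while the associated mutex is held. */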
struct cond_event_entry {
    struct cond_event_entry* next;
    struct cond_event_entry* prev;
    HANDLE event;
};

void
rb_native_cond_signal(rb_nativethread_cond_t *cond)
{
    /* cond is guarded by mutex */
    struct cond_event_entry *e = cond->next;
    struct cond_event_entry *head = (struct cond_event_entry*)cond;

    if (e != head) {
        struct cond_event_entry *next = e->next;
        struct cond_event_entry *prev = e->prev;

        prev->next = next;
        next->prev = prev;
        e->next = e->prev = e;

        SetEvent(e->event);
    }
}

void
rb_native_cond_broadcast(rb_nativethread_cond_t *cond)
{
    /* cond is guarded by mutex */
    struct cond_event_entry *e = cond->next;
    struct cond_event_entry *head = (struct cond_event_entry*)cond;

    while (e != head) {
        struct cond_event_entry *next = e->next;
        struct cond_event_entry *prev = e->prev;

        SetEvent(e->event);

        prev->next = next;
        next->prev = prev;
        e->next = e->prev = e;

        e = next;
    }
}

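/* Timed wait: enqueue a fresh event, drop the mutex while waiting on it,
 * then retake the mutex and unlink the entry (unlinking is harmless if a
 * signal already removed it, since a signalled entry is left self-linked). */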
static int
native_cond_timedwait_ms(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex, unsigned long msec)
{
    DWORD r;
    struct cond_event_entry entry;
    struct cond_event_entry *head = (struct cond_event_entry*)cond;

    entry.event = CreateEvent(0, FALSE, FALSE, 0);

    /* cond is guarded by mutex */
    entry.next = head;
    entry.prev = head->prev;
    head->prev->next = &entry;
    head->prev = &entry;

    rb_native_mutex_unlock(mutex);
    {
        r = WaitForSingleObject(entry.event, msec);
        if ((r != WAIT_OBJECT_0) && (r != WAIT_TIMEOUT)) {
            rb_bug("rb_native_cond_wait: WaitForSingleObject returns %lu", r);
        }
    }
    rb_native_mutex_lock(mutex);

    entry.prev->next = entry.next;
    entry.next->prev = entry.prev;

    w32_close_handle(entry.event);
    return (r == WAIT_OBJECT_0) ? 0 : ETIMEDOUT;
}

void
rb_native_cond_wait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex)
{
    native_cond_timedwait_ms(cond, mutex, INFINITE);
}

static unsigned long
abs_timespec_to_timeout_ms(const struct timespec *ts)
{
    struct timeval tv;
    struct timeval now;

    gettimeofday(&now, NULL);
    tv.tv_sec = ts->tv_sec;
    tv.tv_usec = ts->tv_nsec / 1000;

    if (!rb_w32_time_subtract(&tv, &now))
        return 0;

    return (tv.tv_sec * 1000) + (tv.tv_usec / 1000);
}

static int
native_cond_timedwait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex, const struct timespec *ts)
{
    unsigned long timeout_ms;

    timeout_ms = abs_timespec_to_timeout_ms(ts);
    if (!timeout_ms)
        return ETIMEDOUT;

    return native_cond_timedwait_ms(cond, mutex, timeout_ms);
}

static struct timespec native_cond_timeout(rb_nativethread_cond_t *cond, struct timespec timeout_rel);

void
rb_native_cond_timedwait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex, unsigned long msec)
{
    struct timespec rel = {
        .tv_sec = msec / 1000,
        .tv_nsec = (msec % 1000) * 1000 * 1000,
    };
    struct timespec ts = native_cond_timeout(cond, rel);
    native_cond_timedwait(cond, mutex, &ts);
}

static struct timespec
native_cond_timeout(rb_nativethread_cond_t *cond, struct timespec timeout_rel)
{
    int ret;
    struct timeval tv;
    struct timespec timeout;
    struct timespec now;

    ret = gettimeofday(&tv, 0);
    if (ret != 0)
        rb_sys_fail(0);
    now.tv_sec = tv.tv_sec;
    now.tv_nsec = tv.tv_usec * 1000;

    timeout.tv_sec = now.tv_sec;
    timeout.tv_nsec = now.tv_nsec;
    timeout.tv_sec += timeout_rel.tv_sec;
    timeout.tv_nsec += timeout_rel.tv_nsec;

    if (timeout.tv_nsec >= 1000*1000*1000) {
        timeout.tv_sec++;
        timeout.tv_nsec -= 1000*1000*1000;
    }

    if (timeout.tv_sec < now.tv_sec)
        timeout.tv_sec = TIMET_MAX;

    return timeout;
}

void
rb_native_cond_initialize(rb_nativethread_cond_t *cond)
{
    cond->next = (struct cond_event_entry *)cond;
    cond->prev = (struct cond_event_entry *)cond;
}

void
rb_native_cond_destroy(rb_nativethread_cond_t *cond)
{
    /* */
}

#define CHECK_ERR(expr) \
    {if (!(expr)) {rb_bug("err: %lu - %s", GetLastError(), #expr);}}

COMPILER_WARNING_PUSH
#if __has_warning("-Wmaybe-uninitialized")
COMPILER_WARNING_IGNORED(-Wmaybe-uninitialized)
#endif
static inline SIZE_T
query_memory_basic_info(PMEMORY_BASIC_INFORMATION mi, void *local_in_parent_frame)
{
    return VirtualQuery(asan_get_real_stack_addr(local_in_parent_frame), mi, sizeof(*mi));
}
COMPILER_WARNING_POP

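/* Derive the machine stack bounds from the memory region containing a
 * local variable of the caller: VirtualQuery() reports the allocation base
 * and the extent of the region. One fifth of the stack (capped at 1MB) is
 * held back as headroom for stack-overflow handling. */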
static void
native_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame)
{
    MEMORY_BASIC_INFORMATION mi;
    char *base, *end;
    DWORD size, space;

    CHECK_ERR(query_memory_basic_info(&mi, local_in_parent_frame));
    base = mi.AllocationBase;
    end = mi.BaseAddress;
    end += mi.RegionSize;
    size = end - base;
    space = size / 5;
    if (space > 1024*1024) space = 1024*1024;
    th->ec->machine.stack_start = (VALUE *)end - 1;
    th->ec->machine.stack_maxsize = size - space;
}

#ifndef InterlockedExchangePointer
#define InterlockedExchangePointer(t, v) \
    (void *)InterlockedExchange((long *)(t), (long)(v))
#endif
static void
native_thread_destroy(struct rb_native_thread *nt)
{
    if (nt) {
        HANDLE intr = InterlockedExchangePointer(&nt->interrupt_event, 0);
        RUBY_DEBUG_LOG("close handle intr:%p, thid:%p\n", intr, nt->thread_id);
        w32_close_handle(intr);
    }
}

static unsigned long __stdcall
thread_start_func_1(void *th_ptr)
{
    rb_thread_t *th = th_ptr;
    volatile HANDLE thread_id = th->nt->thread_id;

    native_thread_init_stack(th, &th);
    th->nt->interrupt_event = CreateEvent(0, TRUE, FALSE, 0);

    /* run */
    RUBY_DEBUG_LOG("thread created th:%u, thid: %p, event: %p",
                   rb_th_serial(th), th->nt->thread_id, th->nt->interrupt_event);

    thread_sched_to_running(TH_SCHED(th), th);
    ruby_thread_set_native(th);

    // kick threads
    thread_start_func_2(th, th->ec->machine.stack_start);

    w32_close_handle(thread_id);
    RUBY_DEBUG_LOG("thread deleted th:%u", rb_th_serial(th));

    return 0;
}

static int
native_thread_create(rb_thread_t *th)
{
    // setup nt
    const size_t stack_size = th->vm->default_params.thread_machine_stack_size;
    th->nt = ZALLOC(struct rb_native_thread);
    th->nt->thread_id = w32_create_thread(stack_size, thread_start_func_1, th);

    // setup vm stack
    size_t vm_stack_word_size = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE);
    void *vm_stack = ruby_xmalloc(vm_stack_word_size * sizeof(VALUE));
    th->sched.vm_stack = vm_stack;
    rb_ec_initialize_vm_stack(th->ec, vm_stack, vm_stack_word_size);

    if ((th->nt->thread_id) == 0) {
        return thread_errno;
    }

    w32_resume_thread(th->nt->thread_id);

    if (USE_RUBY_DEBUG_LOG) {
        Sleep(0);
        RUBY_DEBUG_LOG("th:%u thid:%p intr:%p), stack size: %"PRIuSIZE"",
                       rb_th_serial(th), th->nt->thread_id,
                       th->nt->interrupt_event, stack_size);
    }
    return 0;
}

static void
native_thread_join(HANDLE th)
{
    w32_wait_events(&th, 1, INFINITE, 0);
}

#if USE_NATIVE_THREAD_PRIORITY

static void
native_thread_apply_priority(rb_thread_t *th)
{
    int priority = th->priority;
    if (th->priority > 0) {
        priority = THREAD_PRIORITY_ABOVE_NORMAL;
    }
    else if (th->priority < 0) {
        priority = THREAD_PRIORITY_BELOW_NORMAL;
    }
    else {
        priority = THREAD_PRIORITY_NORMAL;
    }

    SetThreadPriority(th->nt->thread_id, priority);
}

#endif /* USE_NATIVE_THREAD_PRIORITY */

int rb_w32_select_with_thread(int, fd_set *, fd_set *, fd_set *, struct timeval *, void *); /* @internal */

static int
native_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout, rb_thread_t *th)
{
    fd_set *r = NULL, *w = NULL, *e = NULL;
    if (readfds) {
        rb_fd_resize(n - 1, readfds);
        r = rb_fd_ptr(readfds);
    }
    if (writefds) {
        rb_fd_resize(n - 1, writefds);
        w = rb_fd_ptr(writefds);
    }
    if (exceptfds) {
        rb_fd_resize(n - 1, exceptfds);
        e = rb_fd_ptr(exceptfds);
    }
    return rb_w32_select_with_thread(n, r, w, e, timeout, th);
}

/* @internal */
int
rb_w32_check_interrupt(rb_thread_t *th)
{
    return w32_wait_events(0, 0, 0, th);
}

static void
ubf_handle(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    RUBY_DEBUG_LOG("th:%u\n", rb_th_serial(th));

    if (!SetEvent(th->nt->interrupt_event)) {
        w32_error("ubf_handle");
    }
}

int rb_w32_set_thread_description(HANDLE th, const WCHAR *name);
int rb_w32_set_thread_description_str(HANDLE th, VALUE name);
#define native_set_another_thread_name rb_w32_set_thread_description_str

static struct {
    HANDLE id;
    HANDLE lock;
} timer_thread;
#define TIMER_THREAD_CREATED_P() (timer_thread.id != 0)

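/* The timer thread wakes up every TIME_QUANTUM_USEC (10 ms), incrementing
 * vm->clock and checking for pending signals on the main thread, until
 * native_stop_timer_thread() signals timer_thread.lock. */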
static unsigned long __stdcall
timer_thread_func(void *dummy)
{
    rb_vm_t *vm = GET_VM();
    RUBY_DEBUG_LOG("start");
    rb_w32_set_thread_description(GetCurrentThread(), L"ruby-timer-thread");
    while (WaitForSingleObject(timer_thread.lock,
                               TIME_QUANTUM_USEC/1000) == WAIT_TIMEOUT) {
        vm->clock++;
        rb_threadptr_check_signal(vm->ractor.main_thread);
    }
    RUBY_DEBUG_LOG("end");
    return 0;
}

void
rb_thread_wakeup_timer_thread(int sig)
{
    /* do nothing */
}

static void
rb_thread_create_timer_thread(void)
{
    if (timer_thread.id == 0) {
        if (!timer_thread.lock) {
            timer_thread.lock = CreateEvent(0, TRUE, FALSE, 0);
        }
        timer_thread.id = w32_create_thread(1024 + (USE_RUBY_DEBUG_LOG ? BUFSIZ : 0),
                                            timer_thread_func, 0);
        w32_resume_thread(timer_thread.id);
    }
}

static int
native_stop_timer_thread(void)
{
    int stopped = --system_working <= 0;
    if (stopped) {
        SetEvent(timer_thread.lock);
        native_thread_join(timer_thread.id);
        CloseHandle(timer_thread.lock);
        timer_thread.lock = 0;
    }
    return stopped;
}

static void
native_reset_timer_thread(void)
{
    if (timer_thread.id) {
        CloseHandle(timer_thread.id);
        timer_thread.id = 0;
    }
}

int
ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
{
    return rb_ec_raised_p(th->ec, RAISED_STACKOVERFLOW);
}

#if defined(__MINGW32__)
LONG WINAPI
rb_w32_stack_overflow_handler(struct _EXCEPTION_POINTERS *exception)
{
    if (exception->ExceptionRecord->ExceptionCode == EXCEPTION_STACK_OVERFLOW) {
        rb_ec_raised_set(GET_EC(), RAISED_STACKOVERFLOW);
        raise(SIGSEGV);
    }
    return EXCEPTION_CONTINUE_SEARCH;
}
#endif

#ifdef RUBY_ALLOCA_CHKSTK
void
ruby_alloca_chkstk(size_t len, void *sp)
{
    if (ruby_stack_length(NULL) * sizeof(VALUE) >= len) {
        rb_execution_context_t *ec = GET_EC();
        if (!rb_ec_raised_p(ec, RAISED_STACKOVERFLOW)) {
            rb_ec_raised_set(ec, RAISED_STACKOVERFLOW);
            rb_exc_raise(sysstack_error);
        }
    }
}
#endif

int
rb_reserved_fd_p(int fd)
{
    return 0;
}

rb_nativethread_id_t
rb_nativethread_self(void)
{
    return GetCurrentThread();
}

static void
native_set_thread_name(rb_thread_t *th)
{
}

static VALUE
native_thread_native_thread_id(rb_thread_t *th)
{
    DWORD tid = GetThreadId(th->nt->thread_id);
    if (tid == 0) rb_sys_fail("GetThreadId");
    return ULONG2NUM(tid);
}
#define USE_NATIVE_THREAD_NATIVE_THREAD_ID 1

void
rb_add_running_thread(rb_thread_t *th)
{
    // do nothing
}

void
rb_del_running_thread(rb_thread_t *th)
{
    // do nothing
}

static bool
th_has_dedicated_nt(const rb_thread_t *th)
{
    return true;
}

void
rb_threadptr_sched_free(rb_thread_t *th)
{
    native_thread_destroy(th->nt);
    ruby_xfree(th->nt);
    ruby_xfree(th->sched.vm_stack);
}

void
rb_threadptr_remove(rb_thread_t *th)
{
    // do nothing
}

void
rb_thread_sched_mark_zombies(rb_vm_t *vm)
{
    // do nothing
}

static bool
vm_barrier_finish_p(rb_vm_t *vm)
{
    RUBY_DEBUG_LOG("cnt:%u living:%u blocking:%u",
                   vm->ractor.sync.barrier_cnt,
                   vm->ractor.cnt,
                   vm->ractor.blocking_cnt);

    VM_ASSERT(vm->ractor.blocking_cnt <= vm->ractor.cnt);
    return vm->ractor.blocking_cnt == vm->ractor.cnt;
}

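/* Ractor barrier: the initiating ractor interrupts all other ractors and
 * waits on barrier_cond until every ractor is blocking; joining ractors
 * wait on their own barrier_wait_cond until barrier_cnt is advanced by the
 * initiator. */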
void
rb_ractor_sched_barrier_start(rb_vm_t *vm, rb_ractor_t *cr)
{
    vm->ractor.sync.barrier_waiting = true;

    RUBY_DEBUG_LOG("barrier start. cnt:%u living:%u blocking:%u",
                   vm->ractor.sync.barrier_cnt,
                   vm->ractor.cnt,
                   vm->ractor.blocking_cnt);

    rb_vm_ractor_blocking_cnt_inc(vm, cr, __FILE__, __LINE__);

    // send signal
    rb_ractor_t *r = 0;
    ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
        if (r != cr) {
            rb_ractor_vm_barrier_interrupt_running_thread(r);
        }
    }

    // wait
    while (!vm_barrier_finish_p(vm)) {
        rb_vm_cond_wait(vm, &vm->ractor.sync.barrier_cond);
    }

    RUBY_DEBUG_LOG("cnt:%u barrier success", vm->ractor.sync.barrier_cnt);

    rb_vm_ractor_blocking_cnt_dec(vm, cr, __FILE__, __LINE__);

    vm->ractor.sync.barrier_waiting = false;
    vm->ractor.sync.barrier_cnt++;

    ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
        rb_native_cond_signal(&r->barrier_wait_cond);
    }
}

void
rb_ractor_sched_barrier_join(rb_vm_t *vm, rb_ractor_t *cr)
{
    vm->ractor.sync.lock_owner = cr;
    unsigned int barrier_cnt = vm->ractor.sync.barrier_cnt;
    rb_thread_t *th = GET_THREAD();
    bool running;

    RB_VM_SAVE_MACHINE_CONTEXT(th);

    if (rb_ractor_status_p(cr, ractor_running)) {
        rb_vm_ractor_blocking_cnt_inc(vm, cr, __FILE__, __LINE__);
        running = true;
    }
    else {
        running = false;
    }
    VM_ASSERT(rb_ractor_status_p(cr, ractor_blocking));

    if (vm_barrier_finish_p(vm)) {
        RUBY_DEBUG_LOG("wakeup barrier owner");
        rb_native_cond_signal(&vm->ractor.sync.barrier_cond);
    }
    else {
        RUBY_DEBUG_LOG("wait for barrier finish");
    }

    // wait for restart
    while (barrier_cnt == vm->ractor.sync.barrier_cnt) {
        vm->ractor.sync.lock_owner = NULL;
        rb_native_cond_wait(&cr->barrier_wait_cond, &vm->ractor.sync.lock);
        VM_ASSERT(vm->ractor.sync.lock_owner == NULL);
        vm->ractor.sync.lock_owner = cr;
    }

    RUBY_DEBUG_LOG("barrier is released. Acquire vm_lock");

    if (running) {
        rb_vm_ractor_blocking_cnt_dec(vm, cr, __FILE__, __LINE__);
    }

    vm->ractor.sync.lock_owner = NULL;
}

bool
rb_thread_lock_native_thread(void)
{
    return false;
}

void *
rb_thread_prevent_fork(void *(*func)(void *), void *data)
{
    return func(data);
}

#endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */