Ruby 4.1.0dev (2026-01-08 revision 080d66beca71d6cc290a8be4acd49e5a70594f9c)
thread_sync.c (080d66beca71d6cc290a8be4acd49e5a70594f9c)
1/* included by thread.c */
2#include "ccan/list/list.h"
3#include "builtin.h"
4
5static VALUE rb_cMutex, rb_eClosedQueueError;
6
7/* Mutex */
8typedef struct rb_mutex_struct {
9 rb_serial_t ec_serial;
10 rb_thread_t *th; // even if the fiber is collected, we might need access to the thread in mutex_free
11 struct rb_mutex_struct *next_mutex;
12 struct ccan_list_head waitq; /* protected by GVL */
13} rb_mutex_t;
14
15/* sync_waiter is always on-stack */
16struct sync_waiter {
17 VALUE self;
18 rb_thread_t *th;
19 rb_fiber_t *fiber;
20 struct ccan_list_node node;
21};
22
23static inline rb_fiber_t*
24nonblocking_fiber(rb_fiber_t *fiber)
25{
26 if (rb_fiberptr_blocking(fiber)) {
27 return NULL;
28 }
29
30 return fiber;
31}
32
33struct queue_sleep_arg {
34 VALUE self;
35 VALUE timeout;
36 rb_hrtime_t end;
37};
38
39#define MUTEX_ALLOW_TRAP FL_USER1
40
41static void
42sync_wakeup(struct ccan_list_head *head, long max)
43{
44 RUBY_DEBUG_LOG("max:%ld", max);
45
46 struct sync_waiter *cur = 0, *next;
47
48 ccan_list_for_each_safe(head, cur, next, node) {
49 ccan_list_del_init(&cur->node);
50
51 if (cur->th->status != THREAD_KILLED) {
52 if (cur->th->scheduler != Qnil && cur->fiber) {
53 rb_fiber_scheduler_unblock(cur->th->scheduler, cur->self, rb_fiberptr_self(cur->fiber));
54 }
55 else {
56 RUBY_DEBUG_LOG("target_th:%u", rb_th_serial(cur->th));
57 rb_threadptr_interrupt(cur->th);
58 cur->th->status = THREAD_RUNNABLE;
59 }
60
61 if (--max == 0) return;
62 }
63 }
64}
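/*
 * sync_wakeup() detaches up to `max` waiters from the given wait list.
 * For each waiter whose thread is still alive it either defers to the fiber
 * scheduler (rb_fiber_scheduler_unblock) when the waiter is a non-blocking
 * fiber, or interrupts the native thread and marks it THREAD_RUNNABLE.
 * wakeup_one() and wakeup_all() below are thin wrappers (max = 1 / LONG_MAX).
 */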
65
66static void
67wakeup_one(struct ccan_list_head *head)
68{
69 sync_wakeup(head, 1);
70}
71
72static void
73wakeup_all(struct ccan_list_head *head)
74{
75 sync_wakeup(head, LONG_MAX);
76}
77
78#if defined(HAVE_WORKING_FORK)
79static void rb_mutex_abandon_all(rb_mutex_t *mutexes);
80static void rb_mutex_abandon_keeping_mutexes(rb_thread_t *th);
81static void rb_mutex_abandon_locking_mutex(rb_thread_t *th);
82#endif
83static const char* rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t *th, rb_serial_t ec_serial);
84
85static size_t
86rb_mutex_num_waiting(rb_mutex_t *mutex)
87{
88 struct sync_waiter *w = 0;
89 size_t n = 0;
90
91 ccan_list_for_each(&mutex->waitq, w, node) {
92 n++;
93 }
94
95 return n;
96}
97
98rb_thread_t* rb_fiber_threadptr(const rb_fiber_t *fiber);
99
100static bool
101mutex_locked_p(rb_mutex_t *mutex)
102{
103 return mutex->ec_serial != 0;
104}
105
106static void
107mutex_free(void *ptr)
108{
109 rb_mutex_t *mutex = ptr;
110 if (mutex_locked_p(mutex)) {
111 const char *err = rb_mutex_unlock_th(mutex, mutex->th, 0);
112 if (err) rb_bug("%s", err);
113 }
114 ruby_xfree(ptr);
115}
116
117static size_t
118mutex_memsize(const void *ptr)
119{
120 return sizeof(rb_mutex_t);
121}
122
123static const rb_data_type_t mutex_data_type = {
124 "mutex",
125 {NULL, mutex_free, mutex_memsize,},
126 0, 0, RUBY_TYPED_FREE_IMMEDIATELY|RUBY_TYPED_WB_PROTECTED
127};
128
129static rb_mutex_t *
130mutex_ptr(VALUE obj)
131{
132 rb_mutex_t *mutex;
133
134 TypedData_Get_Struct(obj, rb_mutex_t, &mutex_data_type, mutex);
135
136 return mutex;
137}
138
139VALUE
140rb_obj_is_mutex(VALUE obj)
141{
142 return RBOOL(rb_typeddata_is_kind_of(obj, &mutex_data_type));
143}
144
145static VALUE
146mutex_alloc(VALUE klass)
147{
148 VALUE obj;
149 rb_mutex_t *mutex;
150
151 obj = TypedData_Make_Struct(klass, rb_mutex_t, &mutex_data_type, mutex);
152
153 ccan_list_head_init(&mutex->waitq);
154 return obj;
155}
156
157VALUE
158rb_mutex_new(void)
159{
160 return mutex_alloc(rb_cMutex);
161}
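/*
 * The rb_mutex_* functions in this file (rb_mutex_new, rb_mutex_lock,
 * rb_mutex_unlock, ...) form the public C API; each fetches the current
 * execution context with GET_EC() and forwards to a static helper. The
 * rb_mut_* variants that take an explicit rb_execution_context_t * appear to
 * be the builtin bindings used by thread_sync.rb (pulled in through
 * "thread_sync.rbinc" at the bottom of this file).
 */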
162
163VALUE
164rb_mutex_locked_p(VALUE self)
165{
166 rb_mutex_t *mutex = mutex_ptr(self);
167
168 return RBOOL(mutex_locked_p(mutex));
169}
170
171static void
172thread_mutex_insert(rb_thread_t *thread, rb_mutex_t *mutex)
173{
174 RUBY_ASSERT(!mutex->next_mutex);
175 if (thread->keeping_mutexes) {
176 mutex->next_mutex = thread->keeping_mutexes;
177 }
178
179 thread->keeping_mutexes = mutex;
180}
181
182static void
183thread_mutex_remove(rb_thread_t *thread, rb_mutex_t *mutex)
184{
185 rb_mutex_t **keeping_mutexes = &thread->keeping_mutexes;
186
187 while (*keeping_mutexes && *keeping_mutexes != mutex) {
188 // Move to the next mutex in the list:
189 keeping_mutexes = &(*keeping_mutexes)->next_mutex;
190 }
191
192 if (*keeping_mutexes) {
193 *keeping_mutexes = mutex->next_mutex;
194 mutex->next_mutex = NULL;
195 }
196}
197
198static void
199mutex_set_owner(rb_mutex_t *mutex, rb_thread_t *th, rb_serial_t ec_serial)
200{
201 mutex->th = th;
202 mutex->ec_serial = ec_serial;
203}
204
205static void
206mutex_locked(rb_mutex_t *mutex, rb_thread_t *th, rb_serial_t ec_serial)
207{
208 mutex_set_owner(mutex, th, ec_serial);
209 thread_mutex_insert(th, mutex);
210}
211
212static inline bool
213do_mutex_trylock(rb_mutex_t *mutex, rb_thread_t *th, rb_serial_t ec_serial)
214{
215 if (mutex->ec_serial == 0) {
216 RUBY_DEBUG_LOG("%p ok", mutex);
217
218 mutex_locked(mutex, th, ec_serial);
219 return true;
220 }
221 else {
222 RUBY_DEBUG_LOG("%p ng", mutex);
223 return false;
224 }
225}
226
227static VALUE
228rb_mut_trylock(rb_execution_context_t *ec, VALUE self)
229{
230 return RBOOL(do_mutex_trylock(mutex_ptr(self), ec->thread_ptr, rb_ec_serial(ec)));
231}
232
233VALUE
234rb_mutex_trylock(VALUE self)
235{
236 return rb_mut_trylock(GET_EC(), self);
237}
238
239static VALUE
240mutex_owned_p(rb_serial_t ec_serial, rb_mutex_t *mutex)
241{
242 return RBOOL(mutex->ec_serial == ec_serial);
243}
244
245static VALUE
246call_rb_fiber_scheduler_block(VALUE mutex)
247{
248 return rb_fiber_scheduler_block(rb_fiber_scheduler_current(), mutex, Qnil);
249}
250
251static VALUE
252delete_from_waitq(VALUE value)
253{
254 struct sync_waiter *sync_waiter = (void *)value;
255 ccan_list_del(&sync_waiter->node);
256
257 return Qnil;
258}
259
260static inline rb_atomic_t threadptr_get_interrupts(rb_thread_t *th);
261
262struct mutex_args {
263 VALUE self;
264 rb_mutex_t *mutex;
265 rb_execution_context_t *ec;
266};
267
268static inline void
269mutex_args_init(struct mutex_args *args, VALUE mutex)
270{
271 args->self = mutex;
272 args->mutex = mutex_ptr(mutex);
273 args->ec = GET_EC();
274}
275
276static VALUE
277do_mutex_lock(struct mutex_args *args, int interruptible_p)
278{
279 VALUE self = args->self;
280 rb_execution_context_t *ec = args->ec;
281 rb_thread_t *th = ec->thread_ptr;
282 rb_fiber_t *fiber = ec->fiber_ptr;
283 rb_serial_t ec_serial = rb_ec_serial(ec);
284 rb_mutex_t *mutex = args->mutex;
285 rb_atomic_t saved_ints = 0;
286
287 /* When running trap handler */
288 if (!FL_TEST_RAW(self, MUTEX_ALLOW_TRAP) &&
289 th->ec->interrupt_mask & TRAP_INTERRUPT_MASK) {
290 rb_raise(rb_eThreadError, "can't be called from trap context");
291 }
292
293 if (!do_mutex_trylock(mutex, th, ec_serial)) {
294 if (mutex->ec_serial == ec_serial) {
295 rb_raise(rb_eThreadError, "deadlock; recursive locking");
296 }
297
298 while (mutex->ec_serial != ec_serial) {
299 VM_ASSERT(mutex->ec_serial != 0);
300
301 VALUE scheduler = rb_fiber_scheduler_current();
302 if (scheduler != Qnil) {
303 struct sync_waiter sync_waiter = {
304 .self = self,
305 .th = th,
306 .fiber = nonblocking_fiber(fiber)
307 };
308
309 ccan_list_add_tail(&mutex->waitq, &sync_waiter.node);
310
311 rb_ensure(call_rb_fiber_scheduler_block, self, delete_from_waitq, (VALUE)&sync_waiter);
312
313 if (!mutex->ec_serial) {
314 mutex_set_owner(mutex, th, ec_serial);
315 }
316 }
317 else {
318 if (!th->vm->thread_ignore_deadlock && mutex->th == th) {
319 rb_raise(rb_eThreadError, "deadlock; lock already owned by another fiber belonging to the same thread");
320 }
321
322 struct sync_waiter sync_waiter = {
323 .self = self,
324 .th = th,
325 .fiber = nonblocking_fiber(fiber),
326 };
327
328 RUBY_DEBUG_LOG("%p wait", mutex);
329
330 // Similar code to `sleep_forever`, but
331 // sleep_forever(SLEEP_DEADLOCKABLE) raises an exception.
332 // An ensure clause like the following is needed, but `rb_ensure` is a bit slow:
333 //
334 // begin
335 // sleep_forever(th, SLEEP_DEADLOCKABLE);
336 // ensure
337 // ccan_list_del(&sync_waiter.node);
338 // end
339 enum rb_thread_status prev_status = th->status;
340 th->status = THREAD_STOPPED_FOREVER;
341 rb_ractor_sleeper_threads_inc(th->ractor);
342 rb_check_deadlock(th->ractor);
343
344 RUBY_ASSERT(!th->locking_mutex);
345 th->locking_mutex = self;
346
347 ccan_list_add_tail(&mutex->waitq, &sync_waiter.node);
348 {
349 native_sleep(th, NULL);
350 }
351 ccan_list_del(&sync_waiter.node);
352
353 // unlocked by another thread while sleeping
354 if (!mutex->ec_serial) {
355 mutex_set_owner(mutex, th, ec_serial);
356 }
357
358 rb_ractor_sleeper_threads_dec(th->ractor);
359 th->status = prev_status;
360 th->locking_mutex = Qfalse;
361
362 RUBY_DEBUG_LOG("%p wakeup", mutex);
363 }
364
365 if (interruptible_p) {
366 /* release mutex before checking for interrupts...as interrupt checking
367 * code might call rb_raise() */
368 if (mutex->ec_serial == ec_serial) {
369 mutex->th = NULL;
370 mutex->ec_serial = 0;
371 }
372 RUBY_VM_CHECK_INTS_BLOCKING(th->ec); /* may release mutex */
373 if (!mutex->ec_serial) {
374 mutex_set_owner(mutex, th, ec_serial);
375 }
376 }
377 else {
378 // clear interrupt information
379 if (RUBY_VM_INTERRUPTED(th->ec)) {
380 // reset interrupts
381 if (saved_ints == 0) {
382 saved_ints = threadptr_get_interrupts(th);
383 }
384 else {
385 // ignore additional interrupts
386 threadptr_get_interrupts(th);
387 }
388 }
389 }
390 }
391
392 if (saved_ints) th->ec->interrupt_flag = saved_ints;
393 if (mutex->ec_serial == ec_serial) mutex_locked(mutex, th, ec_serial);
394 }
395
396 RUBY_DEBUG_LOG("%p locked", mutex);
397
398 // assertion
399 if (mutex_owned_p(ec_serial, mutex) == Qfalse) rb_bug("do_mutex_lock: mutex is not owned.");
400
401 return self;
402}
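/*
 * Locking algorithm used by do_mutex_lock() above, in brief:
 *   1. Fast path: do_mutex_trylock() succeeds when the mutex is unowned.
 *   2. Scheduler path: with a fiber scheduler installed, the waiter is queued
 *      on mutex->waitq and parked via rb_fiber_scheduler_block(); the ensure
 *      callback removes it from the queue again.
 *   3. Native path: the thread is queued, marked THREAD_STOPPED_FOREVER and
 *      put to sleep with native_sleep(); rb_mutex_unlock_th() wakes it.
 *   4. Interruptible callers briefly drop the lock so that
 *      RUBY_VM_CHECK_INTS_BLOCKING() may raise; non-interruptible callers
 *      save pending interrupt flags and restore them once the lock is held.
 */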
403
404static VALUE
405mutex_lock_uninterruptible(VALUE self)
406{
407 struct mutex_args args;
408 mutex_args_init(&args, self);
409 return do_mutex_lock(&args, 0);
410}
411
412static VALUE
413rb_mut_lock(rb_execution_context_t *ec, VALUE self)
414{
415 struct mutex_args args = {
416 .self = self,
417 .mutex = mutex_ptr(self),
418 .ec = ec,
419 };
420 return do_mutex_lock(&args, 1);
421}
422
423VALUE
424rb_mutex_lock(VALUE self)
425{
426 struct mutex_args args;
427 mutex_args_init(&args, self);
428 return do_mutex_lock(&args, 1);
429}
430
431static VALUE
432rb_mut_owned_p(rb_execution_context_t *ec, VALUE self)
433{
434 return mutex_owned_p(rb_ec_serial(ec), mutex_ptr(self));
435}
436
437VALUE
438rb_mutex_owned_p(VALUE self)
439{
440 return rb_mut_owned_p(GET_EC(), self);
441}
442
443static const char *
444rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t *th, rb_serial_t ec_serial)
445{
446 RUBY_DEBUG_LOG("%p", mutex);
447
448 if (mutex->ec_serial == 0) {
449 return "Attempt to unlock a mutex which is not locked";
450 }
451 else if (ec_serial && mutex->ec_serial != ec_serial) {
452 return "Attempt to unlock a mutex which is locked by another thread/fiber";
453 }
454
455 struct sync_waiter *cur = 0, *next;
456
457 mutex->ec_serial = 0;
458 thread_mutex_remove(th, mutex);
459
460 ccan_list_for_each_safe(&mutex->waitq, cur, next, node) {
461 ccan_list_del_init(&cur->node);
462
463 if (cur->th->scheduler != Qnil && cur->fiber) {
464 rb_fiber_scheduler_unblock(cur->th->scheduler, cur->self, rb_fiberptr_self(cur->fiber));
465 return NULL;
466 }
467 else {
468 switch (cur->th->status) {
469 case THREAD_RUNNABLE: /* from someone else calling Thread#run */
470 case THREAD_STOPPED_FOREVER: /* likely (rb_mutex_lock) */
471 RUBY_DEBUG_LOG("wakeup th:%u", rb_th_serial(cur->th));
472 rb_threadptr_interrupt(cur->th);
473 return NULL;
474 case THREAD_STOPPED: /* probably impossible */
475 rb_bug("unexpected THREAD_STOPPED");
476 case THREAD_KILLED:
477 /* not sure about this, possible in exit GC? */
478 rb_bug("unexpected THREAD_KILLED");
479 continue;
480 }
481 }
482 }
483
484 // We did not find any threads to wake up, so we can just return with no error:
485 return NULL;
486}
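/*
 * Note on rb_mutex_unlock_th(): ownership is cleared and the mutex is removed
 * from the owner's keeping_mutexes list before any waiter is woken, so the
 * woken thread or fiber can claim the mutex as soon as it runs. Only the
 * first eligible waiter is woken (scheduler unblock or thread interrupt);
 * the rest stay queued until subsequent unlocks.
 */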
487
488static void
489do_mutex_unlock(struct mutex_args *args)
490{
491 const char *err;
492 rb_mutex_t *mutex = args->mutex;
493 rb_thread_t *th = rb_ec_thread_ptr(args->ec);
494
495 err = rb_mutex_unlock_th(mutex, th, rb_ec_serial(args->ec));
496 if (err) rb_raise(rb_eThreadError, "%s", err);
497}
498
499static VALUE
500do_mutex_unlock_safe(VALUE args)
501{
502 do_mutex_unlock((struct mutex_args *)args);
503 return Qnil;
504}
505
506/*
507 * call-seq:
508 * mutex.unlock -> self
509 *
510 * Releases the lock.
511 * Raises +ThreadError+ if +mutex+ wasn't locked by the current thread.
512 */
513VALUE
514rb_mutex_unlock(VALUE self)
515{
516 struct mutex_args args;
517 mutex_args_init(&args, self);
518 do_mutex_unlock(&args);
519 return self;
520}
521
522static VALUE
523rb_mut_unlock(rb_execution_context_t *ec, VALUE self)
524{
525 struct mutex_args args = {
526 .self = self,
527 .mutex = mutex_ptr(self),
528 .ec = ec,
529 };
530 do_mutex_unlock(&args);
531 return self;
532}
533
534#if defined(HAVE_WORKING_FORK)
535static void
536rb_mutex_abandon_keeping_mutexes(rb_thread_t *th)
537{
538 rb_mutex_abandon_all(th->keeping_mutexes);
539 th->keeping_mutexes = NULL;
540}
541
542static void
543rb_mutex_abandon_locking_mutex(rb_thread_t *th)
544{
545 if (th->locking_mutex) {
546 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
547
548 ccan_list_head_init(&mutex->waitq);
549 th->locking_mutex = Qfalse;
550 }
551}
552
553static void
554rb_mutex_abandon_all(rb_mutex_t *mutexes)
555{
556 rb_mutex_t *mutex;
557
558 while (mutexes) {
559 mutex = mutexes;
560 mutexes = mutex->next_mutex;
561 mutex->ec_serial = 0;
562 mutex->next_mutex = 0;
563 ccan_list_head_init(&mutex->waitq);
564 }
565}
566#endif
567
568struct rb_mutex_sleep_arguments {
569 VALUE self;
570 VALUE timeout;
571};
572
573static VALUE
574mutex_sleep_begin(VALUE _arguments)
575{
576 struct rb_mutex_sleep_arguments *arguments = (struct rb_mutex_sleep_arguments *)_arguments;
577 VALUE timeout = arguments->timeout;
578 VALUE woken = Qtrue;
579
580 VALUE scheduler = rb_fiber_scheduler_current();
581 if (scheduler != Qnil) {
582 rb_fiber_scheduler_kernel_sleep(scheduler, timeout);
583 }
584 else {
585 if (NIL_P(timeout)) {
586 rb_thread_sleep_deadly_allow_spurious_wakeup(arguments->self, Qnil, 0);
587 }
588 else {
589 struct timeval timeout_value = rb_time_interval(timeout);
590 rb_hrtime_t relative_timeout = rb_timeval2hrtime(&timeout_value);
591 /* permit spurious check */
592 woken = RBOOL(sleep_hrtime(GET_THREAD(), relative_timeout, 0));
593 }
594 }
595
596 return woken;
597}
598
599static VALUE
600rb_mut_sleep(rb_execution_context_t *ec, VALUE self, VALUE timeout)
601{
602 if (!NIL_P(timeout)) {
603 // Validate the argument:
604 rb_time_interval(timeout);
605 }
606
607 rb_mut_unlock(ec, self);
608 time_t beg = time(0);
609
610 struct rb_mutex_sleep_arguments arguments = {
611 .self = self,
612 .timeout = timeout,
613 };
614
615 VALUE woken = rb_ec_ensure(ec, mutex_sleep_begin, (VALUE)&arguments, mutex_lock_uninterruptible, self);
616
617 RUBY_VM_CHECK_INTS_BLOCKING(ec);
618 if (!woken) return Qnil;
619 time_t end = time(0) - beg;
620 return TIMET2NUM(end);
621}
622
623VALUE
624rb_mutex_sleep(VALUE self, VALUE timeout)
625{
626 return rb_mut_sleep(GET_EC(), self, timeout);
627}
628
629VALUE
630rb_mutex_synchronize(VALUE self, VALUE (*func)(VALUE arg), VALUE arg)
631{
632 struct mutex_args args;
633 mutex_args_init(&args, self);
634 do_mutex_lock(&args, 1);
635 return rb_ec_ensure(args.ec, func, arg, do_mutex_unlock_safe, (VALUE)&args);
636}
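/*
 * Illustrative usage sketch (not part of this file): rb_mutex_synchronize()
 * is the exception-safe way to hold the lock from C, since the unlock runs in
 * an ensure even if the callback raises. The function names below are made up
 * for the example.
 *
 *   static VALUE
 *   with_lock_body(VALUE job)
 *   {
 *       // Critical section; may raise, the mutex is still released.
 *       return rb_funcall(job, rb_intern("call"), 0);
 *   }
 *
 *   static VALUE
 *   ext_process_locked(VALUE self, VALUE mutex, VALUE job)
 *   {
 *       return rb_mutex_synchronize(mutex, with_lock_body, job);
 *   }
 */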
637
638static VALUE
639do_ec_yield(VALUE _ec)
640{
641 return rb_ec_yield((rb_execution_context_t *)_ec, Qundef);
642}
643
644VALUE
645rb_mut_synchronize(rb_execution_context_t *ec, VALUE self)
646{
647 struct mutex_args args = {
648 .self = self,
649 .mutex = mutex_ptr(self),
650 .ec = ec,
651 };
652 do_mutex_lock(&args, 1);
653 return rb_ec_ensure(args.ec, do_ec_yield, (VALUE)ec, do_mutex_unlock_safe, (VALUE)&args);
654}
655
656void
657rb_mutex_allow_trap(VALUE self, int val)
658{
659 Check_TypedStruct(self, &mutex_data_type);
660
661 if (val)
662 FL_SET_RAW(self, MUTEX_ALLOW_TRAP);
663 else
664 FL_UNSET_RAW(self, MUTEX_ALLOW_TRAP);
665}
666
667/* Queue */
668
669struct rb_queue {
670 struct ccan_list_head waitq;
671 rb_serial_t fork_gen;
672 long capa;
673 long len;
674 long offset;
675 VALUE *buffer;
676 int num_waiting;
677};
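/*
 * rb_queue stores its elements in a circular buffer: `buffer` holds `capa`
 * slots, `len` of which are in use starting at index `offset` and wrapping
 * modulo `capa`. `waitq` holds the sync_waiters blocked in pop (`num_waiting`
 * of them), and `fork_gen` lets a forked child detect and discard waiters
 * inherited from the parent process (see queue_fork_check below).
 */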
678
679#define szqueue_waitq(sq) &sq->q.waitq
680#define szqueue_pushq(sq) &sq->pushq
681
682struct rb_szqueue {
683 struct rb_queue q;
684 int num_waiting_push;
685 struct ccan_list_head pushq;
686 long max;
687};
688
689static void
690queue_mark_and_move(void *ptr)
691{
692 struct rb_queue *q = ptr;
693 /* no need to mark threads in waitq, they are on stack */
694 for (long index = 0; index < q->len; index++) {
695 rb_gc_mark_and_move(&q->buffer[((q->offset + index) % q->capa)]);
696 }
697}
698
699static void
700queue_free(void *ptr)
701{
702 struct rb_queue *q = ptr;
703 if (q->buffer) {
704 ruby_sized_xfree(q->buffer, q->capa * sizeof(VALUE));
705 }
706}
707
708static size_t
709queue_memsize(const void *ptr)
710{
711 const struct rb_queue *q = ptr;
712 return sizeof(struct rb_queue) + (q->capa * sizeof(VALUE));
713}
714
715static const rb_data_type_t queue_data_type = {
716 .wrap_struct_name = "Thread::Queue",
717 .function = {
718 .dmark = queue_mark_and_move,
719 .dfree = queue_free,
720 .dsize = queue_memsize,
721 .dcompact = queue_mark_and_move,
722 },
723 .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
724};
725
726static VALUE
727queue_alloc(VALUE klass)
728{
729 VALUE obj;
730 struct rb_queue *q;
731
732 obj = TypedData_Make_Struct(klass, struct rb_queue, &queue_data_type, q);
733 ccan_list_head_init(&q->waitq);
734 return obj;
735}
736
737static inline bool
738queue_fork_check(struct rb_queue *q)
739{
740 rb_serial_t fork_gen = GET_VM()->fork_gen;
741
742 if (RB_LIKELY(q->fork_gen == fork_gen)) {
743 return false;
744 }
745 /* forked children can't reach into parent thread stacks */
746 q->fork_gen = fork_gen;
747 ccan_list_head_init(&q->waitq);
748 q->num_waiting = 0;
749 return true;
750}
751
752static inline struct rb_queue *
753raw_queue_ptr(VALUE obj)
754{
755 struct rb_queue *q;
756
757 TypedData_Get_Struct(obj, struct rb_queue, &queue_data_type, q);
758 queue_fork_check(q);
759
760 return q;
761}
762
763static inline void
764check_queue(VALUE obj, struct rb_queue *q)
765{
766 if (RB_UNLIKELY(q->buffer == NULL)) {
767 rb_raise(rb_eTypeError, "%+"PRIsVALUE" not initialized", obj);
768 }
769}
770
771static inline struct rb_queue *
772queue_ptr(VALUE obj)
773{
774 struct rb_queue *q = raw_queue_ptr(obj);
775 check_queue(obj, q);
776 return q;
777}
778
779#define QUEUE_CLOSED FL_USER5
780
781static rb_hrtime_t
782queue_timeout2hrtime(VALUE timeout)
783{
784 if (NIL_P(timeout)) {
785 return (rb_hrtime_t)0;
786 }
787 rb_hrtime_t rel = 0;
788 if (FIXNUM_P(timeout)) {
789 rel = rb_sec2hrtime(NUM2TIMET(timeout));
790 }
791 else {
792 double2hrtime(&rel, rb_num2dbl(timeout));
793 }
794 return rb_hrtime_add(rel, rb_hrtime_now());
795}
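/*
 * queue_timeout2hrtime() turns a user-supplied timeout (nil, a Fixnum number
 * of seconds, or any other numeric coerced with rb_num2dbl) into an absolute
 * deadline relative to rb_hrtime_now(); nil maps to 0, which the wait loops
 * treat as "no deadline" because they check NIL_P(timeout) first.
 */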
796
797static void
798szqueue_mark_and_move(void *ptr)
799{
800 struct rb_szqueue *sq = ptr;
801
802 queue_mark_and_move(&sq->q);
803}
804
805static void
806szqueue_free(void *ptr)
807{
808 struct rb_szqueue *sq = ptr;
809 queue_free(&sq->q);
810}
811
812static size_t
813szqueue_memsize(const void *ptr)
814{
815 const struct rb_szqueue *sq = ptr;
816 return sizeof(struct rb_szqueue) + (sq->q.capa * sizeof(VALUE));
817}
818
819static const rb_data_type_t szqueue_data_type = {
820 .wrap_struct_name = "Thread::SizedQueue",
821 .function = {
822 .dmark = szqueue_mark_and_move,
823 .dfree = szqueue_free,
824 .dsize = szqueue_memsize,
825 .dcompact = szqueue_mark_and_move,
826 },
827 .parent = &queue_data_type,
828 .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
829};
830
831static VALUE
832szqueue_alloc(VALUE klass)
833{
834 struct rb_szqueue *sq;
835 VALUE obj = TypedData_Make_Struct(klass, struct rb_szqueue,
836 &szqueue_data_type, sq);
837 ccan_list_head_init(szqueue_waitq(sq));
838 ccan_list_head_init(szqueue_pushq(sq));
839 return obj;
840}
841
842static inline struct rb_szqueue *
843raw_szqueue_ptr(VALUE obj)
844{
845 struct rb_szqueue *sq;
846
847 TypedData_Get_Struct(obj, struct rb_szqueue, &szqueue_data_type, sq);
848 if (RB_UNLIKELY(queue_fork_check(&sq->q))) {
849 ccan_list_head_init(szqueue_pushq(sq));
850 sq->num_waiting_push = 0;
851 }
852
853 return sq;
854}
855
856static inline struct rb_szqueue *
857szqueue_ptr(VALUE obj)
858{
859 struct rb_szqueue *sq = raw_szqueue_ptr(obj);
860 check_queue(obj, &sq->q);
861 return sq;
862}
863
864static inline bool
865queue_closed_p(VALUE self)
866{
867 return FL_TEST_RAW(self, QUEUE_CLOSED) != 0;
868}
869
870/*
871 * Document-class: ClosedQueueError
872 *
873 * The exception class which will be raised when pushing into a closed
874 * Queue. See Thread::Queue#close and Thread::SizedQueue#close.
875 */
876
877NORETURN(static void raise_closed_queue_error(VALUE self));
878
879static void
880raise_closed_queue_error(VALUE self)
881{
882 rb_raise(rb_eClosedQueueError, "queue closed");
883}
884
885static VALUE
886queue_closed_result(VALUE self, struct rb_queue *q)
887{
888 RUBY_ASSERT(q->len == 0);
889 return Qnil;
890}
891
892#define QUEUE_INITIAL_CAPA 8
893
894static inline void
895ring_buffer_init(struct rb_queue *q, long initial_capa)
896{
897 q->buffer = ALLOC_N(VALUE, initial_capa);
898 q->capa = initial_capa;
899}
900
901static inline void
902ring_buffer_expand(struct rb_queue *q)
903{
904 RUBY_ASSERT(q->capa > 0);
905 VALUE *new_buffer = ALLOC_N(VALUE, q->capa * 2);
906 MEMCPY(new_buffer, q->buffer + q->offset, VALUE, q->capa - q->offset);
907 MEMCPY(new_buffer + (q->capa - q->offset), q->buffer, VALUE, q->offset);
908 VALUE *old_buffer = q->buffer;
909 q->buffer = new_buffer;
910 q->offset = 0;
911 ruby_sized_xfree(old_buffer, q->capa * sizeof(VALUE));
912 q->capa *= 2;
913}
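/*
 * ring_buffer_expand() doubles the capacity and "unwraps" the ring while
 * copying: the first MEMCPY moves the elements from `offset` to the end of
 * the old buffer, the second moves the wrapped-around prefix, so afterwards
 * the live elements are contiguous and `offset` can be reset to 0.
 */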
914
915static void
916ring_buffer_push(VALUE self, struct rb_queue *q, VALUE obj)
917{
918 if (RB_UNLIKELY(q->len >= q->capa)) {
919 ring_buffer_expand(q);
920 }
921 RUBY_ASSERT(q->capa > q->len);
922 long index = (q->offset + q->len) % q->capa;
923 q->len++;
924 RB_OBJ_WRITE(self, &q->buffer[index], obj);
925}
926
927static VALUE
928ring_buffer_shift(struct rb_queue *q)
929{
930 if (!q->len) {
931 return Qnil;
932 }
933
934 VALUE obj = q->buffer[q->offset];
935 q->len--;
936 if (q->len == 0) {
937 q->offset = 0;
938 }
939 else {
940 q->offset = (q->offset + 1) % q->capa;
941 }
942 return obj;
943}
944
945static VALUE
946queue_initialize(rb_execution_context_t *ec, VALUE self, VALUE initial)
947{
948 struct rb_queue *q = raw_queue_ptr(self);
949 ccan_list_head_init(&q->waitq);
950 if (NIL_P(initial)) {
951 ring_buffer_init(q, QUEUE_INITIAL_CAPA);
952 }
953 else {
954 initial = rb_to_array(initial);
955 long len = RARRAY_LEN(initial);
956 long initial_capa = QUEUE_INITIAL_CAPA;
957 while (initial_capa < len) {
958 initial_capa *= 2;
959 }
960 ring_buffer_init(q, initial_capa);
961 MEMCPY(q->buffer, RARRAY_CONST_PTR(initial), VALUE, len);
962 q->len = len;
963 }
964 return self;
965}
966
967static VALUE
968queue_do_push(VALUE self, struct rb_queue *q, VALUE obj)
969{
970 check_queue(self, q);
971 if (queue_closed_p(self)) {
972 raise_closed_queue_error(self);
973 }
974 ring_buffer_push(self, q, obj);
975 wakeup_one(&q->waitq);
976 return self;
977}
978
979static VALUE
980queue_sleep(VALUE _args)
981{
982 struct queue_sleep_arg *args = (struct queue_sleep_arg *)_args;
983 rb_thread_sleep_deadly_allow_spurious_wakeup(args->self, args->timeout, args->end);
984 return Qnil;
985}
986
987struct queue_waiter {
988 struct sync_waiter w;
989 union {
990 struct rb_queue *q;
991 struct rb_szqueue *sq;
992 } as;
993};
994
995static VALUE
996queue_sleep_done(VALUE p)
997{
998 struct queue_waiter *qw = (struct queue_waiter *)p;
999
1000 ccan_list_del(&qw->w.node);
1001 qw->as.q->num_waiting--;
1002
1003 return Qfalse;
1004}
1005
1006static VALUE
1007szqueue_sleep_done(VALUE p)
1008{
1009 struct queue_waiter *qw = (struct queue_waiter *)p;
1010
1011 ccan_list_del(&qw->w.node);
1012 qw->as.sq->num_waiting_push--;
1013
1014 return Qfalse;
1015}
1016
1017static inline VALUE
1018queue_do_pop(rb_execution_context_t *ec, VALUE self, struct rb_queue *q, VALUE non_block, VALUE timeout)
1019{
1020 if (q->len == 0) {
1021 if (RTEST(non_block)) {
1022 rb_raise(rb_eThreadError, "queue empty");
1023 }
1024
1025 if (RTEST(rb_equal(INT2FIX(0), timeout))) {
1026 return Qnil;
1027 }
1028 }
1029
1030 rb_hrtime_t end = queue_timeout2hrtime(timeout);
1031 while (q->len == 0) {
1032 if (queue_closed_p(self)) {
1033 return queue_closed_result(self, q);
1034 }
1035 else {
1036 RUBY_ASSERT(q->len == 0);
1037 RUBY_ASSERT(queue_closed_p(self) == 0);
1038
1039 struct queue_waiter queue_waiter = {
1040 .w = {.self = self, .th = ec->thread_ptr, .fiber = nonblocking_fiber(ec->fiber_ptr)},
1041 .as = {.q = q}
1042 };
1043
1044 struct ccan_list_head *waitq = &q->waitq;
1045
1046 ccan_list_add_tail(waitq, &queue_waiter.w.node);
1047 queue_waiter.as.q->num_waiting++;
1048
1049 struct queue_sleep_arg queue_sleep_arg = {
1050 .self = self,
1051 .timeout = timeout,
1052 .end = end
1053 };
1054
1055 rb_ensure(queue_sleep, (VALUE)&queue_sleep_arg, queue_sleep_done, (VALUE)&queue_waiter);
1056 if (!NIL_P(timeout) && (rb_hrtime_now() >= end))
1057 break;
1058 }
1059 }
1060
1061 return ring_buffer_shift(q);
1062}
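/*
 * Pop protocol in queue_do_pop() above: a non-blocking pop raises ThreadError
 * immediately and a zero timeout returns nil without waiting. Otherwise the
 * caller registers a queue_waiter on q->waitq, sleeps (scheduler-aware and
 * allowing spurious wakeups), and rechecks the queue each time it wakes until
 * an element is available, the queue is closed (=> nil), or the deadline
 * computed from `timeout` has passed.
 */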
1063
1064static VALUE
1065rb_queue_pop(rb_execution_context_t *ec, VALUE self, VALUE non_block, VALUE timeout)
1066{
1067 return queue_do_pop(ec, self, queue_ptr(self), non_block, timeout);
1068}
1069
1070static void
1071queue_clear(struct rb_queue *q)
1072{
1073 q->len = 0;
1074 q->offset = 0;
1075}
1076
1077static VALUE
1078szqueue_initialize(rb_execution_context_t *ec, VALUE self, VALUE vmax)
1079{
1080 long max = NUM2LONG(vmax);
1081 struct rb_szqueue *sq = raw_szqueue_ptr(self);
1082
1083 if (max <= 0) {
1084 rb_raise(rb_eArgError, "queue size must be positive");
1085 }
1086 ring_buffer_init(&sq->q, QUEUE_INITIAL_CAPA);
1087 ccan_list_head_init(szqueue_waitq(sq));
1088 ccan_list_head_init(szqueue_pushq(sq));
1089 sq->max = max;
1090
1091 return self;
1092}
1093
1094static VALUE
1095rb_szqueue_push(rb_execution_context_t *ec, VALUE self, VALUE object, VALUE non_block, VALUE timeout)
1096{
1097 struct rb_szqueue *sq = szqueue_ptr(self);
1098
1099 if (sq->q.len >= sq->max) {
1100 if (RTEST(non_block)) {
1101 rb_raise(rb_eThreadError, "queue full");
1102 }
1103
1104 if (RTEST(rb_equal(INT2FIX(0), timeout))) {
1105 return Qnil;
1106 }
1107 }
1108
1109 rb_hrtime_t end = queue_timeout2hrtime(timeout);
1110 while (sq->q.len >= sq->max) {
1111 if (queue_closed_p(self)) {
1112 raise_closed_queue_error(self);
1113 }
1114 else {
1115 struct queue_waiter queue_waiter = {
1116 .w = {.self = self, .th = ec->thread_ptr, .fiber = nonblocking_fiber(ec->fiber_ptr)},
1117 .as = {.sq = sq}
1118 };
1119
1120 struct ccan_list_head *pushq = szqueue_pushq(sq);
1121
1122 ccan_list_add_tail(pushq, &queue_waiter.w.node);
1123 sq->num_waiting_push++;
1124
1125 struct queue_sleep_arg queue_sleep_arg = {
1126 .self = self,
1127 .timeout = timeout,
1128 .end = end
1129 };
1130 rb_ensure(queue_sleep, (VALUE)&queue_sleep_arg, szqueue_sleep_done, (VALUE)&queue_waiter);
1131 if (!NIL_P(timeout) && rb_hrtime_now() >= end) {
1132 return Qnil;
1133 }
1134 }
1135 }
1136
1137 return queue_do_push(self, &sq->q, object);
1138}
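/*
 * rb_szqueue_push() mirrors the pop side: while the queue is at `max` the
 * producer waits on the dedicated pushq list (num_waiting_push), returns nil
 * if the timeout expires, and raises ClosedQueueError if the queue is closed.
 * Once there is room, queue_do_push() appends the object and wakes one
 * consumer; rb_szqueue_pop() below wakes one blocked producer whenever the
 * length drops under `max`.
 */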
1139
1140static VALUE
1141rb_szqueue_pop(rb_execution_context_t *ec, VALUE self, VALUE non_block, VALUE timeout)
1142{
1143 struct rb_szqueue *sq = szqueue_ptr(self);
1144 VALUE retval = queue_do_pop(ec, self, &sq->q, non_block, timeout);
1145
1146 if (sq->q.len < sq->max) {
1147 wakeup_one(szqueue_pushq(sq));
1148 }
1149
1150 return retval;
1151}
1152
1153/* ConditionVariable */
1154struct rb_condvar {
1155 struct ccan_list_head waitq;
1156 rb_serial_t fork_gen;
1157};
1158
1159static size_t
1160condvar_memsize(const void *ptr)
1161{
1162 return sizeof(struct rb_condvar);
1163}
1164
1165static const rb_data_type_t cv_data_type = {
1166 "condvar",
1167 {0, RUBY_TYPED_DEFAULT_FREE, condvar_memsize,},
1168 0, 0, RUBY_TYPED_FREE_IMMEDIATELY|RUBY_TYPED_WB_PROTECTED
1169};
1170
1171static struct rb_condvar *
1172condvar_ptr(VALUE self)
1173{
1174 struct rb_condvar *cv;
1175 rb_serial_t fork_gen = GET_VM()->fork_gen;
1176
1177 TypedData_Get_Struct(self, struct rb_condvar, &cv_data_type, cv);
1178
1179 /* forked children can't reach into parent thread stacks */
1180 if (cv->fork_gen != fork_gen) {
1181 cv->fork_gen = fork_gen;
1182 ccan_list_head_init(&cv->waitq);
1183 }
1184
1185 return cv;
1186}
1187
1188static VALUE
1189condvar_alloc(VALUE klass)
1190{
1191 struct rb_condvar *cv;
1192 VALUE obj;
1193
1194 obj = TypedData_Make_Struct(klass, struct rb_condvar, &cv_data_type, cv);
1195 ccan_list_head_init(&cv->waitq);
1196
1197 return obj;
1198}
1199
1200struct sleep_call {
1201 rb_execution_context_t *ec;
1202 VALUE mutex;
1203 VALUE timeout;
1204};
1205
1206static ID id_sleep;
1207
1208static VALUE
1209do_sleep(VALUE args)
1210{
1211 struct sleep_call *p = (struct sleep_call *)args;
1212 if (CLASS_OF(p->mutex) == rb_cMutex) {
1213 return rb_mut_sleep(p->ec, p->mutex, p->timeout);
1214 }
1215 else {
1216 return rb_funcallv(p->mutex, id_sleep, 1, &p->timeout);
1217 }
1218}
1219
1220static VALUE
1221rb_condvar_wait(rb_execution_context_t *ec, VALUE self, VALUE mutex, VALUE timeout)
1222{
1223 struct rb_condvar *cv = condvar_ptr(self);
1224 struct sleep_call args = {
1225 .ec = ec,
1226 .mutex = mutex,
1227 .timeout = timeout,
1228 };
1229
1230 struct sync_waiter sync_waiter = {
1231 .self = mutex,
1232 .th = ec->thread_ptr,
1233 .fiber = nonblocking_fiber(ec->fiber_ptr)
1234 };
1235
1236 ccan_list_add_tail(&cv->waitq, &sync_waiter.node);
1237 return rb_ec_ensure(ec, do_sleep, (VALUE)&args, delete_from_waitq, (VALUE)&sync_waiter);
1238}
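/*
 * rb_condvar_wait() queues the caller on the condition variable's waitq and
 * then sleeps through mutex.sleep (rb_mut_sleep for a plain Mutex, or the
 * object's own #sleep otherwise), which releases the mutex while waiting and
 * reacquires it on wakeup. rb_ec_ensure() guarantees the waiter is unlinked
 * from the waitq even if the sleep raises. signal/broadcast simply call
 * wakeup_one/wakeup_all on the same list.
 */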
1239
1240static VALUE
1241rb_condvar_signal(rb_execution_context_t *ec, VALUE self)
1242{
1243 struct rb_condvar *cv = condvar_ptr(self);
1244 wakeup_one(&cv->waitq);
1245 return self;
1246}
1247
1248static VALUE
1249rb_condvar_broadcast(rb_execution_context_t *ec, VALUE self)
1250{
1251 struct rb_condvar *cv = condvar_ptr(self);
1252 wakeup_all(&cv->waitq);
1253 return self;
1254}
1255
1256static void
1257Init_thread_sync(void)
1258{
1259 /* Mutex */
1260 rb_cMutex = rb_define_class_id_under(rb_cThread, rb_intern("Mutex"), rb_cObject);
1261 rb_define_alloc_func(rb_cMutex, mutex_alloc);
1262
1263 /* Queue */
1264 VALUE rb_cQueue = rb_define_class_id_under_no_pin(rb_cThread, rb_intern("Queue"), rb_cObject);
1265 rb_define_alloc_func(rb_cQueue, queue_alloc);
1266
1267 rb_eClosedQueueError = rb_define_class("ClosedQueueError", rb_eStopIteration);
1268
1269 VALUE rb_cSizedQueue = rb_define_class_id_under_no_pin(rb_cThread, rb_intern("SizedQueue"), rb_cQueue);
1270 rb_define_alloc_func(rb_cSizedQueue, szqueue_alloc);
1271
1272 /* CVar */
1273 VALUE rb_cConditionVariable = rb_define_class_id_under_no_pin(rb_cThread, rb_intern("ConditionVariable"), rb_cObject);
1274 rb_define_alloc_func(rb_cConditionVariable, condvar_alloc);
1275
1276 id_sleep = rb_intern("sleep");
1277
1278 rb_provide("thread.rb");
1279}
1280
1281#include "thread_sync.rbinc"