Ruby 3.5.0dev (2025-01-10 revision 5fab31b15e32622c4b71d1d347a41937e9f9c212)
vm_sync.c (5fab31b15e32622c4b71d1d347a41937e9f9c212)
1#include "internal/gc.h"
2#include "internal/thread.h"
3#include "vm_core.h"
4#include "vm_sync.h"
5#include "ractor_core.h"
6#include "vm_debug.h"
7
8void rb_ractor_sched_barrier_start(rb_vm_t *vm, rb_ractor_t *cr);
9void rb_ractor_sched_barrier_join(rb_vm_t *vm, rb_ractor_t *cr);
10
11static bool
12vm_locked(rb_vm_t *vm)
13{
14 return vm->ractor.sync.lock_owner == GET_RACTOR();
15}
16
#if RUBY_DEBUG > 0
// Debug assertion: the current Ractor must hold the VM lock. Only enforced
// in multi-Ractor mode; with a single Ractor there is no contention to check.
void
RUBY_ASSERT_vm_locking(void)
{
    if (!rb_multi_ractor_p()) {
        return;
    }
    rb_vm_t *vm = GET_VM();
    VM_ASSERT(vm_locked(vm));
}

// Debug assertion: the current Ractor must NOT hold the VM lock.
// Only enforced in multi-Ractor mode.
void
RUBY_ASSERT_vm_unlocking(void)
{
    if (!rb_multi_ractor_p()) {
        return;
    }
    rb_vm_t *vm = GET_VM();
    VM_ASSERT(!vm_locked(vm));
}
#endif
36
37bool
38rb_vm_locked_p(void)
39{
40 return vm_locked(GET_VM());
41}
42
// Whether a Ractor barrier is currently pending. The flag lives in a
// different struct field depending on the thread model selected at build
// time (pthread scheduler vs. others).
static bool
vm_need_barrier_waiting(const rb_vm_t *vm)
{
#ifdef RUBY_THREAD_PTHREAD_H
    return vm->ractor.sched.barrier_waiting;
#else
    return vm->ractor.sync.barrier_waiting;
#endif
}
52
53static bool
54vm_need_barrier(bool no_barrier, const rb_ractor_t *cr, const rb_vm_t *vm)
55{
56#ifdef RUBY_THREAD_PTHREAD_H
57 return !no_barrier && cr->threads.sched.running != NULL && vm_need_barrier_waiting(vm); // ractor has running threads.
58#else
59 return !no_barrier && vm_need_barrier_waiting(vm);
60#endif
61}
62
// Acquire (or re-enter) the global VM lock on behalf of Ractor `cr`.
// `locked` is true when the current Ractor already holds the lock
// (recursive entry); `no_barrier` skips joining a pending Ractor barrier.
// On return, *lev holds the new recursion depth so the matching
// vm_lock_leave() can verify balanced enter/leave pairs.
static void
vm_lock_enter(rb_ractor_t *cr, rb_vm_t *vm, bool locked, bool no_barrier, unsigned int *lev APPEND_LOCATION_ARGS)
{
    RUBY_DEBUG_LOG2(file, line, "start locked:%d", locked);

    if (locked) {
        // Recursive entry: only the recursion counter below is bumped.
        ASSERT_vm_locking();
    }
    else {
#if RACTOR_CHECK_MODE
        // Taking the VM lock while holding our own Ractor's lock would deadlock.
        VM_ASSERT(cr->sync.locked_by != rb_ractor_self(cr));
#endif
        // lock
        rb_native_mutex_lock(&vm->ractor.sync.lock);
        VM_ASSERT(vm->ractor.sync.lock_owner == NULL);
        VM_ASSERT(vm->ractor.sync.lock_rec == 0);

        // barrier: if another Ractor started a barrier while we were waiting
        // on the mutex, join it before claiming ownership. Loop because a new
        // barrier may start again by the time the join returns.
        if (vm_need_barrier(no_barrier, cr, vm)) {
            rb_execution_context_t *ec = GET_EC();
            RB_VM_SAVE_MACHINE_CONTEXT(rb_ec_thread_ptr(ec));

            do {
                VM_ASSERT(vm_need_barrier_waiting(vm));
                RUBY_DEBUG_LOG("barrier serial:%u", vm->ractor.sched.barrier_serial);
                rb_ractor_sched_barrier_join(vm, cr);
            } while (vm_need_barrier_waiting(vm));
        }

        VM_ASSERT(vm->ractor.sync.lock_rec == 0);
        VM_ASSERT(vm->ractor.sync.lock_owner == NULL);
        vm->ractor.sync.lock_owner = cr;
    }

    vm->ractor.sync.lock_rec++;
    *lev = vm->ractor.sync.lock_rec;

    RUBY_DEBUG_LOG2(file, line, "rec:%u owner:%u", vm->ractor.sync.lock_rec,
                    (unsigned int)rb_ractor_id(vm->ractor.sync.lock_owner));
}
104
// Release one recursion level of the VM lock. *lev must equal the current
// recursion depth (sanity check against unbalanced enter/leave); when the
// depth drops to zero the lock is actually released.
static void
vm_lock_leave(rb_vm_t *vm, unsigned int *lev APPEND_LOCATION_ARGS)
{
    RUBY_DEBUG_LOG2(file, line, "rec:%u owner:%u%s", vm->ractor.sync.lock_rec,
                    (unsigned int)rb_ractor_id(vm->ractor.sync.lock_owner),
                    vm->ractor.sync.lock_rec == 1 ? " (leave)" : "");

    ASSERT_vm_locking();
    VM_ASSERT(vm->ractor.sync.lock_rec > 0);
    VM_ASSERT(vm->ractor.sync.lock_rec == *lev);

    vm->ractor.sync.lock_rec--;
    *lev = vm->ractor.sync.lock_rec;

    if (vm->ractor.sync.lock_rec == 0) {
        // Clear ownership before unlocking so no other Ractor can observe
        // an unlocked mutex with a stale owner.
        vm->ractor.sync.lock_owner = NULL;
        rb_native_mutex_unlock(&vm->ractor.sync.lock);
    }
}
124
125void
126rb_vm_lock_enter_body(unsigned int *lev APPEND_LOCATION_ARGS)
127{
128 rb_vm_t *vm = GET_VM();
129 if (vm_locked(vm)) {
130 vm_lock_enter(NULL, vm, true, false, lev APPEND_LOCATION_PARAMS);
131 }
132 else {
133 vm_lock_enter(GET_RACTOR(), vm, false, false, lev APPEND_LOCATION_PARAMS);
134 }
135}
136
137void
138rb_vm_lock_enter_body_nb(unsigned int *lev APPEND_LOCATION_ARGS)
139{
140 rb_vm_t *vm = GET_VM();
141 if (vm_locked(vm)) {
142 vm_lock_enter(NULL, vm, true, true, lev APPEND_LOCATION_PARAMS);
143 }
144 else {
145 vm_lock_enter(GET_RACTOR(), vm, false, true, lev APPEND_LOCATION_PARAMS);
146 }
147}
148
149void
150rb_vm_lock_enter_body_cr(rb_ractor_t *cr, unsigned int *lev APPEND_LOCATION_ARGS)
151{
152 rb_vm_t *vm = GET_VM();
153 vm_lock_enter(cr, vm, vm_locked(vm), false, lev APPEND_LOCATION_PARAMS);
154}
155
156void
157rb_vm_lock_leave_body(unsigned int *lev APPEND_LOCATION_ARGS)
158{
159 vm_lock_leave(GET_VM(), lev APPEND_LOCATION_PARAMS);
160}
161
162void
163rb_vm_lock_body(LOCATION_ARGS)
164{
165 rb_vm_t *vm = GET_VM();
166 ASSERT_vm_unlocking();
167
168 vm_lock_enter(GET_RACTOR(), vm, false, false, &vm->ractor.sync.lock_rec APPEND_LOCATION_PARAMS);
169}
170
171void
172rb_vm_unlock_body(LOCATION_ARGS)
173{
174 rb_vm_t *vm = GET_VM();
175 ASSERT_vm_locking();
176 VM_ASSERT(vm->ractor.sync.lock_rec == 1);
177 vm_lock_leave(vm, &vm->ractor.sync.lock_rec APPEND_LOCATION_PARAMS);
178}
179
180static void
181vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec)
182{
183 ASSERT_vm_locking();
184 unsigned int lock_rec = vm->ractor.sync.lock_rec;
185 rb_ractor_t *cr = vm->ractor.sync.lock_owner;
186
187 vm->ractor.sync.lock_rec = 0;
188 vm->ractor.sync.lock_owner = NULL;
189 if (msec > 0) {
190 rb_native_cond_timedwait(cond, &vm->ractor.sync.lock, msec);
191 }
192 else {
193 rb_native_cond_wait(cond, &vm->ractor.sync.lock);
194 }
195 vm->ractor.sync.lock_rec = lock_rec;
196 vm->ractor.sync.lock_owner = cr;
197}
198
// Wait on `cond` with no timeout; the VM lock must be held by the caller.
void
rb_vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond)
{
    vm_cond_wait(vm, cond, 0);
}
204
// Wait on `cond` with a timeout in milliseconds; the VM lock must be held.
// Note: msec == 0 degenerates to an untimed wait (see vm_cond_wait).
void
rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec)
{
    vm_cond_wait(vm, cond, msec);
}
210
211void
212rb_vm_barrier(void)
213{
214 RB_DEBUG_COUNTER_INC(vm_sync_barrier);
215
216 if (!rb_multi_ractor_p()) {
217 // no other ractors
218 return;
219 }
220 else {
221 rb_vm_t *vm = GET_VM();
222 VM_ASSERT(!vm->ractor.sched.barrier_waiting);
223 ASSERT_vm_locking();
224 rb_ractor_t *cr = vm->ractor.sync.lock_owner;
225 VM_ASSERT(cr == GET_RACTOR());
226 VM_ASSERT(rb_ractor_status_p(cr, ractor_running));
227
228 rb_ractor_sched_barrier_start(vm, cr);
229 }
230}
231
232void
233rb_ec_vm_lock_rec_release(const rb_execution_context_t *ec,
234 unsigned int recorded_lock_rec,
235 unsigned int current_lock_rec)
236{
237 VM_ASSERT(recorded_lock_rec != current_lock_rec);
238
239 if (UNLIKELY(recorded_lock_rec > current_lock_rec)) {
240 rb_bug("unexpected situation - recordd:%u current:%u",
241 recorded_lock_rec, current_lock_rec);
242 }
243 else {
244 while (recorded_lock_rec < current_lock_rec) {
245 RB_VM_LOCK_LEAVE_LEV(&current_lock_rec);
246 }
247 }
248
249 VM_ASSERT(recorded_lock_rec == rb_ec_vm_lock_rec(ec));
250}
void rb_native_mutex_lock(rb_nativethread_lock_t *lock)
Just another name for rb_nativethread_lock_lock.
void rb_native_mutex_unlock(rb_nativethread_lock_t *lock)
Just another name for rb_nativethread_lock_unlock.
void rb_native_cond_wait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex)
Waits for the passed condition variable to be signalled.
void rb_native_cond_timedwait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex, unsigned long msec)
Identical to rb_native_cond_wait(), except it additionally takes a timeout, expressed in milliseconds.