Ruby 3.5.0dev (2025-07-16 revision 37d088ad24d700db4b59f2c44dee2d202faf122b)
vm_sync.c (37d088ad24d700db4b59f2c44dee2d202faf122b)
1#include "internal/gc.h"
2#include "internal/thread.h"
3#include "vm_core.h"
4#include "vm_sync.h"
5#include "ractor_core.h"
6#include "vm_debug.h"
7
8void rb_ractor_sched_barrier_start(rb_vm_t *vm, rb_ractor_t *cr);
9void rb_ractor_sched_barrier_join(rb_vm_t *vm, rb_ractor_t *cr);
10void rb_ractor_sched_barrier_end(rb_vm_t *vm, rb_ractor_t *cr);
11
12static bool
13vm_locked(rb_vm_t *vm)
14{
15 return vm->ractor.sync.lock_owner == GET_RACTOR();
16}
17
#if RUBY_DEBUG > 0
// Debug-only check: abort (via VM_ASSERT) unless the current ractor holds
// the VM lock. A no-op when only one ractor exists.
void
RUBY_ASSERT_vm_locking(void)
{
    if (!rb_multi_ractor_p()) {
        return;
    }
    rb_vm_t *vm = GET_VM();
    VM_ASSERT(vm_locked(vm));
}

// Debug-only check: abort unless the current ractor does NOT hold the VM
// lock. A no-op when only one ractor exists.
void
RUBY_ASSERT_vm_unlocking(void)
{
    if (!rb_multi_ractor_p()) {
        return;
    }
    rb_vm_t *vm = GET_VM();
    VM_ASSERT(!vm_locked(vm));
}
#endif
37
38bool
39rb_vm_locked_p(void)
40{
41 return vm_locked(GET_VM());
42}
43
// Whether some ractor has started a VM barrier and is waiting for the
// others to join. The flag lives in a different struct depending on the
// thread model: the pthread scheduler keeps it under ractor.sched, other
// models under ractor.sync.
static bool
vm_need_barrier_waiting(const rb_vm_t *vm)
{
#ifdef RUBY_THREAD_PTHREAD_H
    return vm->ractor.sched.barrier_waiting;
#else
    return vm->ractor.sync.barrier_waiting;
#endif
}
53
// Whether the lock-acquisition path must join a pending barrier.
// `no_barrier` callers explicitly opt out of barrier participation.
// On the pthread model, a ractor with no running threads also skips
// joining.
static bool
vm_need_barrier(bool no_barrier, const rb_ractor_t *cr, const rb_vm_t *vm)
{
#ifdef RUBY_THREAD_PTHREAD_H
    return !no_barrier && cr->threads.sched.running != NULL && vm_need_barrier_waiting(vm); // ractor has running threads.
#else
    return !no_barrier && vm_need_barrier_waiting(vm);
#endif
}
63
// Acquire (or re-enter) the VM-wide lock on behalf of ractor `cr`.
//
// locked:     true when the caller already holds the lock; then only the
//             recursion counter is bumped.
// no_barrier: when true, skip joining a pending barrier (see
//             vm_need_barrier()).
// lev:        out parameter; receives the new recursion level, verified
//             by the matching vm_lock_leave().
static void
vm_lock_enter(rb_ractor_t *cr, rb_vm_t *vm, bool locked, bool no_barrier, unsigned int *lev APPEND_LOCATION_ARGS)
{
    RUBY_DEBUG_LOG2(file, line, "start locked:%d", locked);

    if (locked) {
        ASSERT_vm_locking();
    }
    else {
#if RACTOR_CHECK_MODE
        // locking ractor and acquire VM lock will cause deadlock
        VM_ASSERT(cr->sync.locked_by != rb_ractor_self(cr));
#endif
        // lock
        rb_native_mutex_lock(&vm->ractor.sync.lock);
        VM_ASSERT(vm->ractor.sync.lock_owner == NULL);
        VM_ASSERT(vm->ractor.sync.lock_rec == 0);

        // barrier
        if (vm_need_barrier(no_barrier, cr, vm)) {
            rb_execution_context_t *ec = GET_EC();
            RB_VM_SAVE_MACHINE_CONTEXT(rb_ec_thread_ptr(ec));

            // Re-check after each join: another barrier may already be
            // pending by the time this ractor resumes.
            do {
                VM_ASSERT(vm_need_barrier_waiting(vm));
                RUBY_DEBUG_LOG("barrier serial:%u", vm->ractor.sched.barrier_serial);
                rb_ractor_sched_barrier_join(vm, cr);
            } while (vm_need_barrier_waiting(vm));
        }

        VM_ASSERT(vm->ractor.sync.lock_rec == 0);
        VM_ASSERT(vm->ractor.sync.lock_owner == NULL);
        vm->ractor.sync.lock_owner = cr;
    }

    vm->ractor.sync.lock_rec++;
    *lev = vm->ractor.sync.lock_rec;

    RUBY_DEBUG_LOG2(file, line, "rec:%u owner:%u", vm->ractor.sync.lock_rec,
                    (unsigned int)rb_ractor_id(vm->ractor.sync.lock_owner));
}
105
// Release one recursion level of the VM-wide lock. When the counter
// drops to zero, ownership is cleared and the underlying mutex released.
// `*lev` must equal the current recursion level (sanity-checked); it is
// updated to the new level on return.
static void
vm_lock_leave(rb_vm_t *vm, bool no_barrier, unsigned int *lev APPEND_LOCATION_ARGS)
{
    MAYBE_UNUSED(rb_ractor_t *cr = vm->ractor.sync.lock_owner);

    RUBY_DEBUG_LOG2(file, line, "rec:%u owner:%u%s", vm->ractor.sync.lock_rec,
                    (unsigned int)rb_ractor_id(cr),
                    vm->ractor.sync.lock_rec == 1 ? " (leave)" : "");

    ASSERT_vm_locking();
    VM_ASSERT(vm->ractor.sync.lock_rec > 0);
    VM_ASSERT(vm->ractor.sync.lock_rec == *lev);
    VM_ASSERT(cr == GET_RACTOR());

#ifdef RUBY_THREAD_PTHREAD_H
    // End the barrier if the current ractor started one at exactly this
    // lock level (see rb_vm_barrier()).
    if (vm->ractor.sched.barrier_ractor == cr &&
        vm->ractor.sched.barrier_lock_rec == vm->ractor.sync.lock_rec) {
        VM_ASSERT(!no_barrier);
        rb_ractor_sched_barrier_end(vm, cr);
    }
#endif

    vm->ractor.sync.lock_rec--;
    *lev = vm->ractor.sync.lock_rec;

    if (vm->ractor.sync.lock_rec == 0) {
        vm->ractor.sync.lock_owner = NULL;
        rb_native_mutex_unlock(&vm->ractor.sync.lock);
    }
}
136
137void
138rb_vm_lock_enter_body(unsigned int *lev APPEND_LOCATION_ARGS)
139{
140 rb_vm_t *vm = GET_VM();
141 if (vm_locked(vm)) {
142 vm_lock_enter(NULL, vm, true, false, lev APPEND_LOCATION_PARAMS);
143 }
144 else {
145 vm_lock_enter(GET_RACTOR(), vm, false, false, lev APPEND_LOCATION_PARAMS);
146 }
147}
148
149void
150rb_vm_lock_enter_body_nb(unsigned int *lev APPEND_LOCATION_ARGS)
151{
152 rb_vm_t *vm = GET_VM();
153 if (vm_locked(vm)) {
154 vm_lock_enter(NULL, vm, true, true, lev APPEND_LOCATION_PARAMS);
155 }
156 else {
157 vm_lock_enter(GET_RACTOR(), vm, false, true, lev APPEND_LOCATION_PARAMS);
158 }
159}
160
161void
162rb_vm_lock_enter_body_cr(rb_ractor_t *cr, unsigned int *lev APPEND_LOCATION_ARGS)
163{
164 rb_vm_t *vm = GET_VM();
165 vm_lock_enter(cr, vm, vm_locked(vm), false, lev APPEND_LOCATION_PARAMS);
166}
167
168void
169rb_vm_lock_leave_body_nb(unsigned int *lev APPEND_LOCATION_ARGS)
170{
171 vm_lock_leave(GET_VM(), true, lev APPEND_LOCATION_PARAMS);
172}
173
174void
175rb_vm_lock_leave_body(unsigned int *lev APPEND_LOCATION_ARGS)
176{
177 vm_lock_leave(GET_VM(), false, lev APPEND_LOCATION_PARAMS);
178}
179
// Take the VM lock from a fully unlocked state (non-recursive entry).
// The recursion level is stored directly into vm->ractor.sync.lock_rec
// instead of a caller-provided slot.
void
rb_vm_lock_body(LOCATION_ARGS)
{
    rb_vm_t *vm = GET_VM();
    ASSERT_vm_unlocking();

    vm_lock_enter(GET_RACTOR(), vm, false, false, &vm->ractor.sync.lock_rec APPEND_LOCATION_PARAMS);
}
188
// Fully release the VM lock taken by rb_vm_lock_body(). The lock must be
// held at exactly recursion level 1.
void
rb_vm_unlock_body(LOCATION_ARGS)
{
    rb_vm_t *vm = GET_VM();
    ASSERT_vm_locking();
    VM_ASSERT(vm->ractor.sync.lock_rec == 1);
    vm_lock_leave(vm, false, &vm->ractor.sync.lock_rec APPEND_LOCATION_PARAMS);
}
197
// Wait on `cond` while temporarily surrendering the VM lock. The lock's
// recursion count and owner are saved and zeroed so another ractor can
// acquire the lock while this one sleeps on the condition variable, then
// restored once the wait returns. `msec == 0` means wait indefinitely.
static void
vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec)
{
    ASSERT_vm_locking();
    unsigned int lock_rec = vm->ractor.sync.lock_rec;
    rb_ractor_t *cr = vm->ractor.sync.lock_owner;

    vm->ractor.sync.lock_rec = 0;
    vm->ractor.sync.lock_owner = NULL;
    if (msec > 0) {
        rb_native_cond_timedwait(cond, &vm->ractor.sync.lock, msec);
    }
    else {
        rb_native_cond_wait(cond, &vm->ractor.sync.lock);
    }
    // The native wait re-acquired vm->ractor.sync.lock; restore the saved
    // recursion state.
    vm->ractor.sync.lock_rec = lock_rec;
    vm->ractor.sync.lock_owner = cr;
}
216
// Wait on `cond` with no timeout, releasing the VM lock for the duration.
void
rb_vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond)
{
    vm_cond_wait(vm, cond, 0);
}
222
// Wait on `cond` for at most `msec` milliseconds, releasing the VM lock
// for the duration. Note: msec == 0 degenerates to an untimed wait (see
// vm_cond_wait()).
void
rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec)
{
    vm_cond_wait(vm, cond, msec);
}
228
// Whether ractor `cr` has already started barrier synchronization.
// Only the pthread scheduler records the barrier-owning ractor; other
// thread models always report false here.
static bool
vm_barrier_acquired_p(const rb_vm_t *vm, const rb_ractor_t *cr)
{
#ifdef RUBY_THREAD_PTHREAD_H
    return vm->ractor.sched.barrier_ractor == cr;
#else
    return false;
#endif
}
238
// Begin barrier synchronization: stop all other ractors so the caller
// can run exclusively. Caller must hold the VM lock. A no-op when only
// one ractor exists, or when this ractor already owns the barrier.
void
rb_vm_barrier(void)
{
    RB_DEBUG_COUNTER_INC(vm_sync_barrier);

    if (!rb_multi_ractor_p()) {
        // no other ractors
        return;
    }
    else {
        rb_vm_t *vm = GET_VM();
        rb_ractor_t *cr = vm->ractor.sync.lock_owner;

        ASSERT_vm_locking();
        VM_ASSERT(cr == GET_RACTOR());
        VM_ASSERT(rb_ractor_status_p(cr, ractor_running));

        if (vm_barrier_acquired_p(vm, cr)) {
            // already in barrier synchronization
            return;
        }
        else {
            VM_ASSERT(!vm->ractor.sched.barrier_waiting);
            rb_ractor_sched_barrier_start(vm, cr);
        }
    }
}
266
267void
268rb_ec_vm_lock_rec_release(const rb_execution_context_t *ec,
269 unsigned int recorded_lock_rec,
270 unsigned int current_lock_rec)
271{
272 VM_ASSERT(recorded_lock_rec != current_lock_rec);
273
274 if (UNLIKELY(recorded_lock_rec > current_lock_rec)) {
275 rb_bug("unexpected situation - recordd:%u current:%u",
276 recorded_lock_rec, current_lock_rec);
277 }
278 else {
279 while (recorded_lock_rec < current_lock_rec) {
280 RB_VM_LOCK_LEAVE_LEV(&current_lock_rec);
281 }
282 }
283
284 VM_ASSERT(recorded_lock_rec == rb_ec_vm_lock_rec(ec));
285}
void rb_native_mutex_lock(rb_nativethread_lock_t *lock)
An alias of rb_nativethread_lock_lock().
void rb_native_mutex_unlock(rb_nativethread_lock_t *lock)
An alias of rb_nativethread_lock_unlock().
void rb_native_cond_wait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex)
Waits for the passed condition variable to be signalled.
void rb_native_cond_timedwait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex, unsigned long msec)
Identical to rb_native_cond_wait(), except that it additionally takes a timeout in milliseconds.