Ruby 3.5.0dev (2025-10-23 revision add78e76cedbe9ce430a0219dd80cbee734080b3)
vm_sync.c (add78e76cedbe9ce430a0219dd80cbee734080b3)
1#include "internal/gc.h"
2#include "internal/thread.h"
3#include "vm_core.h"
4#include "vm_sync.h"
5#include "ractor_core.h"
6#include "vm_debug.h"
7
8void rb_ractor_sched_barrier_start(rb_vm_t *vm, rb_ractor_t *cr);
9void rb_ractor_sched_barrier_join(rb_vm_t *vm, rb_ractor_t *cr);
10void rb_ractor_sched_barrier_end(rb_vm_t *vm, rb_ractor_t *cr);
11
12static bool
13vm_locked(rb_vm_t *vm)
14{
15 return vm_locked_by_ractor_p(vm, GET_RACTOR());
16}
17
18#if RUBY_DEBUG > 0
/* Debug assertion: the VM lock must be held by the current ractor.
 * In single-ractor mode the lock is not required, so the check only
 * fires once multiple ractors exist. */
void
RUBY_ASSERT_vm_locking(void)
{
    if (!rb_multi_ractor_p()) {
        return;
    }
    VM_ASSERT(vm_locked(GET_VM()));
}
27
28void
29RUBY_ASSERT_vm_locking_with_barrier(void)
30{
31 if (rb_multi_ractor_p()) {
32 rb_vm_t *vm = GET_VM();
33 VM_ASSERT(vm_locked(vm));
34
35 if (vm->ractor.cnt > 1) {
36 /* Written to only when holding both ractor.sync and ractor.sched lock */
37 VM_ASSERT(vm->ractor.sched.barrier_waiting);
38 }
39 }
40}
41
/* Debug assertion: the current ractor must NOT hold the VM lock
 * (only meaningful once multiple ractors exist). */
void
RUBY_ASSERT_vm_unlocking(void)
{
    if (!rb_multi_ractor_p()) {
        return;
    }
    VM_ASSERT(!vm_locked(GET_VM()));
}
50#endif
51
52bool
53rb_vm_locked_p(void)
54{
55 return vm_locked(GET_VM());
56}
57
/* Returns true when a ractor barrier is pending and threads should
 * join it.  The flag lives in different places depending on the thread
 * model: the pthread (M:N) scheduler keeps it on ractor.sched, other
 * models on ractor.sync. */
static bool
vm_need_barrier_waiting(const rb_vm_t *vm)
{
#ifdef RUBY_THREAD_PTHREAD_H
    return vm->ractor.sched.barrier_waiting;
#else
    return vm->ractor.sync.barrier_waiting;
#endif
}
67
/* Decides whether this lock acquisition must join a pending barrier.
 * `no_barrier` lets callers opt out explicitly.  Under the pthread
 * scheduler the ractor additionally needs a running thread for the
 * barrier to apply to it. */
static bool
vm_need_barrier(bool no_barrier, const rb_ractor_t *cr, const rb_vm_t *vm)
{
#ifdef RUBY_THREAD_PTHREAD_H
    return !no_barrier && cr->threads.sched.running != NULL && vm_need_barrier_waiting(vm); // ractor has running threads.
#else
    return !no_barrier && vm_need_barrier_waiting(vm);
#endif
}
77
/* Acquires (or re-enters) the VM-wide lock.
 *
 *   cr         - the ractor acquiring the lock (unused when `locked`)
 *   locked     - true if the caller already holds the lock (recursive entry)
 *   no_barrier - skip joining a pending ractor barrier
 *   lev        - out: the recursion level after this entry
 *
 * On a fresh acquisition the mutex is taken first, then any pending
 * barrier is joined before ownership is recorded. */
static void
vm_lock_enter(rb_ractor_t *cr, rb_vm_t *vm, bool locked, bool no_barrier, unsigned int *lev APPEND_LOCATION_ARGS)
{
    RUBY_DEBUG_LOG2(file, line, "start locked:%d", locked);

    if (locked) {
        // Recursive entry: only the recursion count below is bumped.
        ASSERT_vm_locking();
    }
    else {
#if RACTOR_CHECK_MODE
        // locking ractor and acquire VM lock will cause deadlock
        VM_ASSERT(cr->sync.locked_by != rb_ractor_self(cr));
#endif
        // lock
        rb_native_mutex_lock(&vm->ractor.sync.lock);
        VM_ASSERT(vm->ractor.sync.lock_owner == NULL);
        VM_ASSERT(vm->ractor.sync.lock_rec == 0);

        // barrier
        if (vm_need_barrier(no_barrier, cr, vm)) {
            rb_execution_context_t *ec = GET_EC();
            // Save machine registers/stack bounds so GC can scan this
            // thread while it is parked at the barrier.
            RB_VM_SAVE_MACHINE_CONTEXT(rb_ec_thread_ptr(ec));

            // A new barrier may start while waking from the previous
            // one, hence the loop.
            do {
                VM_ASSERT(vm_need_barrier_waiting(vm));
                RUBY_DEBUG_LOG("barrier serial:%u", vm->ractor.sched.barrier_serial);
                rb_ractor_sched_barrier_join(vm, cr);
            } while (vm_need_barrier_waiting(vm));
        }

        VM_ASSERT(vm->ractor.sync.lock_rec == 0);
        VM_ASSERT(vm->ractor.sync.lock_owner == NULL);
        vm->ractor.sync.lock_owner = cr;
    }

    vm->ractor.sync.lock_rec++;
    *lev = vm->ractor.sync.lock_rec;

    RUBY_DEBUG_LOG2(file, line, "rec:%u owner:%u", vm->ractor.sync.lock_rec,
                    (unsigned int)rb_ractor_id(vm->ractor.sync.lock_owner));
}
119
/* Releases one recursion level of the VM lock.  On the outermost level
 * the owner is cleared and the mutex is unlocked.  Under the pthread
 * scheduler, if this ractor started a barrier at exactly this recursion
 * depth, the barrier is ended before the level is dropped.
 *
 *   no_barrier - caller promises no barrier was started at this level
 *   lev        - out: the recursion level after this leave */
static void
vm_lock_leave(rb_vm_t *vm, bool no_barrier, unsigned int *lev APPEND_LOCATION_ARGS)
{
    MAYBE_UNUSED(rb_ractor_t *cr = vm->ractor.sync.lock_owner);

    RUBY_DEBUG_LOG2(file, line, "rec:%u owner:%u%s", vm->ractor.sync.lock_rec,
                    (unsigned int)rb_ractor_id(cr),
                    vm->ractor.sync.lock_rec == 1 ? " (leave)" : "");

    ASSERT_vm_locking();
    VM_ASSERT(vm->ractor.sync.lock_rec > 0);
    VM_ASSERT(vm->ractor.sync.lock_rec == *lev);
    VM_ASSERT(cr == GET_RACTOR());

#ifdef RUBY_THREAD_PTHREAD_H
    // Leaving the depth at which this ractor acquired the barrier must
    // also finish the barrier.
    if (vm->ractor.sched.barrier_ractor == cr &&
        vm->ractor.sched.barrier_lock_rec == vm->ractor.sync.lock_rec) {
        VM_ASSERT(!no_barrier);
        rb_ractor_sched_barrier_end(vm, cr);
    }
#endif

    vm->ractor.sync.lock_rec--;
    *lev = vm->ractor.sync.lock_rec;

    if (vm->ractor.sync.lock_rec == 0) {
        vm->ractor.sync.lock_owner = NULL;
        rb_native_mutex_unlock(&vm->ractor.sync.lock);
    }
}
150
151void
152rb_vm_lock_enter_body(unsigned int *lev APPEND_LOCATION_ARGS)
153{
154 rb_vm_t *vm = GET_VM();
155 if (vm_locked(vm)) {
156 vm_lock_enter(NULL, vm, true, false, lev APPEND_LOCATION_PARAMS);
157 }
158 else {
159 vm_lock_enter(GET_RACTOR(), vm, false, false, lev APPEND_LOCATION_PARAMS);
160 }
161}
162
163void
164rb_vm_lock_enter_body_nb(unsigned int *lev APPEND_LOCATION_ARGS)
165{
166 rb_vm_t *vm = GET_VM();
167 if (vm_locked(vm)) {
168 vm_lock_enter(NULL, vm, true, true, lev APPEND_LOCATION_PARAMS);
169 }
170 else {
171 vm_lock_enter(GET_RACTOR(), vm, false, true, lev APPEND_LOCATION_PARAMS);
172 }
173}
174
175void
176rb_vm_lock_enter_body_cr(rb_ractor_t *cr, unsigned int *lev APPEND_LOCATION_ARGS)
177{
178 rb_vm_t *vm = GET_VM();
179 vm_lock_enter(cr, vm, vm_locked(vm), false, lev APPEND_LOCATION_PARAMS);
180}
181
182void
183rb_vm_lock_leave_body_nb(unsigned int *lev APPEND_LOCATION_ARGS)
184{
185 vm_lock_leave(GET_VM(), true, lev APPEND_LOCATION_PARAMS);
186}
187
188void
189rb_vm_lock_leave_body(unsigned int *lev APPEND_LOCATION_ARGS)
190{
191 vm_lock_leave(GET_VM(), false, lev APPEND_LOCATION_PARAMS);
192}
193
/* Acquires the VM lock from a fully unlocked state.  Unlike the
 * *_enter_body variants, the recursion level is recorded directly into
 * vm->ractor.sync.lock_rec itself rather than a caller-provided slot. */
void
rb_vm_lock_body(LOCATION_ARGS)
{
    rb_vm_t *vm = GET_VM();
    ASSERT_vm_unlocking();

    vm_lock_enter(GET_RACTOR(), vm, false, false, &vm->ractor.sync.lock_rec APPEND_LOCATION_PARAMS);
}
202
/* Releases the VM lock taken by rb_vm_lock_body().  Must be called at
 * the outermost (and only) recursion level. */
void
rb_vm_unlock_body(LOCATION_ARGS)
{
    rb_vm_t *vm = GET_VM();
    ASSERT_vm_locking();
    VM_ASSERT(vm->ractor.sync.lock_rec == 1);
    vm_lock_leave(vm, false, &vm->ractor.sync.lock_rec APPEND_LOCATION_PARAMS);
}
211
/* Waits on `cond` using the VM lock's mutex.  The recursion count and
 * owner are stashed and zeroed across the wait so that other ractors
 * see the lock as free while this thread is blocked, then restored
 * once the mutex is re-held on wakeup.  msec == 0 waits indefinitely. */
static void
vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec)
{
    ASSERT_vm_locking();
    unsigned int lock_rec = vm->ractor.sync.lock_rec;
    rb_ractor_t *cr = vm->ractor.sync.lock_owner;

    // Make the lock appear released while the mutex is surrendered to
    // the condition variable.
    vm->ractor.sync.lock_rec = 0;
    vm->ractor.sync.lock_owner = NULL;
    if (msec > 0) {
        rb_native_cond_timedwait(cond, &vm->ractor.sync.lock, msec);
    }
    else {
        rb_native_cond_wait(cond, &vm->ractor.sync.lock);
    }
    // Mutex is held again here; restore the recorded ownership state.
    vm->ractor.sync.lock_rec = lock_rec;
    vm->ractor.sync.lock_owner = cr;
}
230
231void
232rb_vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond)
233{
234 vm_cond_wait(vm, cond, 0);
235}
236
/* Like rb_vm_cond_wait(), but gives up after `msec` milliseconds. */
void
rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec)
{
    vm_cond_wait(vm, cond, msec);
}
242
/* Checks whether `cr` already owns the barrier.  Only the pthread
 * (M:N) scheduler records a barrier-owning ractor; other thread models
 * never report ownership here. */
static bool
vm_barrier_acquired_p(const rb_vm_t *vm, const rb_ractor_t *cr)
{
#ifdef RUBY_THREAD_PTHREAD_H
    return vm->ractor.sched.barrier_ractor == cr;
#else
    return false;
#endif
}
252
/* Starts a ractor barrier: stops the other ractors at a safe point so
 * the caller can run exclusively.  Requires the VM lock.  A no-op when
 * only one ractor exists or when this ractor already holds a barrier. */
void
rb_vm_barrier(void)
{
    RB_DEBUG_COUNTER_INC(vm_sync_barrier);

    if (!rb_multi_ractor_p()) {
        // no other ractors
        return;
    }
    else {
        rb_vm_t *vm = GET_VM();
        rb_ractor_t *cr = vm->ractor.sync.lock_owner;

        ASSERT_vm_locking();
        VM_ASSERT(cr == GET_RACTOR());
        VM_ASSERT(rb_ractor_status_p(cr, ractor_running));

        if (vm_barrier_acquired_p(vm, cr)) {
            // already in barrier synchronization
            return;
        }
        else {
            VM_ASSERT(!vm->ractor.sched.barrier_waiting);
            rb_ractor_sched_barrier_start(vm, cr);
        }
    }
}
280
281void
282rb_ec_vm_lock_rec_release(const rb_execution_context_t *ec,
283 unsigned int recorded_lock_rec,
284 unsigned int current_lock_rec)
285{
286 VM_ASSERT(recorded_lock_rec != current_lock_rec);
287
288 if (UNLIKELY(recorded_lock_rec > current_lock_rec)) {
289 rb_bug("unexpected situation - recordd:%u current:%u",
290 recorded_lock_rec, current_lock_rec);
291 }
292 else {
293 while (recorded_lock_rec < current_lock_rec) {
294 RB_VM_LOCK_LEAVE_LEV(&current_lock_rec);
295 }
296 }
297
298 VM_ASSERT(recorded_lock_rec == rb_ec_vm_lock_rec(ec));
299}
void rb_native_mutex_lock(rb_nativethread_lock_t *lock)
An alias of rb_nativethread_lock_lock().
void rb_native_mutex_unlock(rb_nativethread_lock_t *lock)
An alias of rb_nativethread_lock_unlock().
void rb_native_cond_wait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex)
Waits for the passed condition variable to be signalled.
void rb_native_cond_timedwait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex, unsigned long msec)
Identical to rb_native_cond_wait(), except that it additionally takes a timeout in milliseconds.