#include "internal/gc.h"
#include "internal/concurrent_set.h"
enum concurrent_set_special_values {
    CONCURRENT_SET_EMPTY = 0,
    CONCURRENT_SET_DELETED,
    CONCURRENT_SET_MOVED,
    CONCURRENT_SET_SPECIAL_VALUE_COUNT
};
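/*
 * Key values below CONCURRENT_SET_SPECIAL_VALUE_COUNT are reserved as
 * sentinels: an empty slot reads as CONCURRENT_SET_EMPTY (zero), deleted
 * slots become CONCURRENT_SET_DELETED tombstones, and CONCURRENT_SET_MOVED
 * marks slots whose contents have been migrated to a resized table. The
 * RUBY_ASSERT(key >= CONCURRENT_SET_SPECIAL_VALUE_COUNT) checks below keep
 * real keys out of this range.
 */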
struct concurrent_set {
    /* ... */
    unsigned int capacity;
    unsigned int deleted_entries;
    /* ... */
};
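/*
 * Each slot stores a key and its cached hash; from the accesses below
 * (entry->key, entry->hash, set->entries[i].key, set->size, set->funcs) the
 * elided parts of the layout are presumably close to the following sketch.
 * The entry struct name and the field order are assumptions:
 *
 *     struct concurrent_set_entry {
 *         VALUE hash;
 *         VALUE key;
 *     };
 *
 * plus, in struct concurrent_set itself: an atomically accessed `size`, a
 * pointer to the caller-supplied funcs table, and the `entries` array.
 */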
static void
concurrent_set_free(void *ptr) { /* ... */ }

static size_t
concurrent_set_size(const void *ptr) { /* ... */ }

static void
concurrent_set_mark(void *ptr) { /* ... */ }
    .dmark = concurrent_set_mark,
    .dfree = concurrent_set_free,
    .dsize = concurrent_set_size,
    /* ... */
    .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_EMBEDDABLE
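/*
 * These designated initializers belong to the file's rb_data_type_t, which is
 * what lets the set be wrapped as a TypedData object. A plausible full
 * definition is sketched below; the variable name and the wrap_struct_name
 * string are assumptions not shown in this excerpt:
 *
 *     static const rb_data_type_t concurrent_set_data_type = {
 *         .wrap_struct_name = "concurrent_set",
 *         .function = {
 *             .dmark = concurrent_set_mark,
 *             .dfree = concurrent_set_free,
 *             .dsize = concurrent_set_size,
 *         },
 *         .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_EMBEDDABLE,
 *     };
 */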
    set->capacity = capacity;    /* in rb_concurrent_set_new() */

rb_concurrent_set_size(VALUE set_obj)
{
    /* ... */
}
    /* concurrent_set_probe_start(): the capacity must be a power of two so
     * that masking below is equivalent to taking the index modulo capacity. */
    RUBY_ASSERT((set->capacity & (set->capacity - 1)) == 0);
    probe->mask = set->capacity - 1;
    probe->idx = hash & probe->mask;

    /* concurrent_set_probe_next(): step to the next slot and wrap around. */
    probe->idx = (probe->idx + probe->d) & probe->mask;
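/*
 * A minimal sketch of the probe state implied by the fields used above (idx,
 * d, mask); the struct name, the field types, and how `d` is initialized or
 * advanced between probes are not visible in this excerpt and are assumptions:
 *
 *     struct concurrent_set_probe {
 *         int idx;   // current slot index
 *         int d;     // probe step
 *         int mask;  // capacity - 1 (capacity is a power of two)
 *     };
 *
 * Because the capacity is a power of two, `& mask` wraps the index exactly
 * like `% capacity`: with capacity 8 (mask 7), (6 + 3) & 7 wraps to 1.
 */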
concurrent_set_try_resize_without_locking(VALUE old_set_obj, VALUE *set_obj_ptr)
{
    /* Another thread already published a resized table; nothing to do. */
    if (rbimpl_atomic_value_load(set_obj_ptr, RBIMPL_ATOMIC_ACQUIRE) != old_set_obj) {
        return;
    }

    struct concurrent_set *old_set = RTYPEDDATA_GET_DATA(old_set_obj);

    /* Live entries: total size minus tombstones. */
    int expected_size = rbimpl_atomic_load(&old_set->size, RBIMPL_ATOMIC_RELAXED) - old_set->deleted_entries;

    int old_capacity = old_set->capacity;
    int new_capacity = old_capacity * 2;
    if (new_capacity > expected_size * 8) {
        new_capacity = old_capacity / 2;
    }
    else if (new_capacity > expected_size * 4) {
        new_capacity = old_capacity;
    }

    VALUE new_set_obj = rb_concurrent_set_new(old_set->funcs, new_capacity);
    struct concurrent_set *new_set = RTYPEDDATA_GET_DATA(new_set_obj);

    for (int i = 0; i < old_capacity; i++) {
        /* Mark the old slot as moved so concurrent operations can tell the table is being migrated. */
        VALUE key = rbimpl_atomic_value_exchange(&entry->key, CONCURRENT_SET_MOVED, RBIMPL_ATOMIC_ACQUIRE);

        /* Skip empty slots and tombstones. */
        if (key < CONCURRENT_SET_SPECIAL_VALUE_COUNT) continue;

        VALUE hash = rbimpl_atomic_value_load(&entry->hash, RBIMPL_ATOMIC_RELAXED);
        /* ... */
            hash = old_set->funcs->hash(key);
        /* ... */

        /* Reinsert the surviving key into the new table. */
        int idx = concurrent_set_probe_start(&probe, new_set, hash);
        /* ... */
            if (entry->key == CONCURRENT_SET_EMPTY) {
                /* ... */
                RUBY_ASSERT(new_set->size <= new_set->capacity / 2);
                /* ... */
            }

            RUBY_ASSERT(entry->key >= CONCURRENT_SET_SPECIAL_VALUE_COUNT);
            idx = concurrent_set_probe_next(&probe);
        /* ... */
    }

    /* Publish the new table. */
    rbimpl_atomic_value_store(set_obj_ptr, new_set_obj, RBIMPL_ATOMIC_RELEASE);
}
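/*
 * The sizing heuristic above, restated: the default is to double the
 * capacity. If doubling would leave the table less than 1/8 full of live
 * entries it shrinks to half instead, and if it would be less than 1/4 full
 * the capacity stays the same. For example, with old_capacity = 64 and
 * expected_size = 6, new_capacity starts at 128; since 128 > 6 * 8 = 48, the
 * new table is allocated at 32 slots.
 */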
concurrent_set_try_resize(VALUE old_set_obj, VALUE *set_obj_ptr)
{
    /* ... */
    concurrent_set_try_resize_without_locking(old_set_obj, set_obj_ptr);
    /* ... */
}
rb_concurrent_set_find(VALUE *set_obj_ptr, VALUE key)
{
    RUBY_ASSERT(key >= CONCURRENT_SET_SPECIAL_VALUE_COUNT);
    /* ... */
    set_obj = rbimpl_atomic_value_load(set_obj_ptr, RBIMPL_ATOMIC_ACQUIRE);
    /* ... */
    hash = set->funcs->hash(key);
    /* ... */
    int idx = concurrent_set_probe_start(&probe, set, hash);
    /* ... */
        VALUE curr_key = rbimpl_atomic_value_load(&entry->key, RBIMPL_ATOMIC_ACQUIRE);

        switch (curr_key) {
          case CONCURRENT_SET_EMPTY:
            /* ... */
          case CONCURRENT_SET_DELETED:
            /* ... */
          case CONCURRENT_SET_MOVED:
            /* ... */
          default: {
            VALUE curr_hash = rbimpl_atomic_value_load(&entry->hash, RBIMPL_ATOMIC_RELAXED);
            if (curr_hash != 0 && curr_hash != hash) break;

            /* Keys already collected by the GC are lazily turned into tombstones. */
            if (UNLIKELY(!RB_SPECIAL_CONST_P(curr_key) && rb_objspace_garbage_object_p(curr_key))) {
                rbimpl_atomic_value_cas(&entry->key, curr_key, CONCURRENT_SET_DELETED, RBIMPL_ATOMIC_RELEASE, RBIMPL_ATOMIC_RELAXED);
                /* ... */
            }

            if (set->funcs->cmp(key, curr_key)) {
                /* ... */
            }
          }
        }

        idx = concurrent_set_probe_next(&probe);
    /* ... */
}
rb_concurrent_set_find_or_insert(VALUE *set_obj_ptr, VALUE key, void *data)
{
    RUBY_ASSERT(key >= CONCURRENT_SET_SPECIAL_VALUE_COUNT);

    bool inserting = false;
    /* ... */
    set_obj = rbimpl_atomic_value_load(set_obj_ptr, RBIMPL_ATOMIC_ACQUIRE);
    /* ... */
    hash = set->funcs->hash(key);
    /* ... */
    int idx = concurrent_set_probe_start(&probe, set, hash);
    /* ... */
        VALUE curr_key = rbimpl_atomic_value_load(&entry->key, RBIMPL_ATOMIC_ACQUIRE);

        switch (curr_key) {
          case CONCURRENT_SET_EMPTY: {
            /* First empty slot: create the key to insert and optimistically bump the size. */
            /* ... */
                key = set->funcs->create(key, data);
            /* ... */
                rb_atomic_t prev_size = rbimpl_atomic_fetch_add(&set->size, 1, RBIMPL_ATOMIC_RELAXED);

                /* Resize once the table is more than half full. */
                if (UNLIKELY(prev_size > set->capacity / 2)) {
                    concurrent_set_try_resize(set_obj, set_obj_ptr);
                    /* ... */
                }
            /* ... */

            /* Try to claim the slot; on success, publish the hash as well. */
            curr_key = rbimpl_atomic_value_cas(&entry->key, CONCURRENT_SET_EMPTY, key, RBIMPL_ATOMIC_RELEASE, RBIMPL_ATOMIC_RELAXED);
            if (curr_key == CONCURRENT_SET_EMPTY) {
                rbimpl_atomic_value_store(&entry->hash, hash, RBIMPL_ATOMIC_RELAXED);
                /* ... */
            }

            /* Another thread claimed the slot first: undo the optimistic size increment. */
            rbimpl_atomic_sub(&set->size, 1, RBIMPL_ATOMIC_RELAXED);
            /* ... */
          }
          case CONCURRENT_SET_DELETED:
            /* ... */
          case CONCURRENT_SET_MOVED:
            /* ... */
          default: {
            VALUE curr_hash = rbimpl_atomic_value_load(&entry->hash, RBIMPL_ATOMIC_RELAXED);
            if (curr_hash != 0 && curr_hash != hash) break;

            if (UNLIKELY(!RB_SPECIAL_CONST_P(curr_key) && rb_objspace_garbage_object_p(curr_key))) {
                rbimpl_atomic_value_cas(&entry->key, curr_key, CONCURRENT_SET_DELETED, RBIMPL_ATOMIC_RELEASE, RBIMPL_ATOMIC_RELAXED);
                /* ... */
            }

            if (set->funcs->cmp(key, curr_key)) {
                /* An equivalent key is already present: discard the one we created, if any. */
                /* ... */
                if (set->funcs->free) set->funcs->free(key);
                /* ... */
            }
          }
        }

        idx = concurrent_set_probe_next(&probe);
    /* ... */
}
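/*
 * A minimal usage sketch. The funcs table and its hash/cmp/create/free
 * members are taken from the call sites above; the struct name
 * rb_concurrent_set_funcs, the exact callback signatures, and all of the
 * example_* names below are assumptions for illustration only.
 *
 *     static VALUE
 *     example_hash(VALUE key)
 *     {
 *         return (VALUE)rb_str_hash(key);
 *     }
 *
 *     static bool
 *     example_cmp(VALUE a, VALUE b)
 *     {
 *         return RTEST(rb_str_equal(a, b));
 *     }
 *
 *     static VALUE
 *     example_create(VALUE key, void *data)
 *     {
 *         (void)data;
 *         return rb_str_dup(key);  // the optional free callback is omitted in this sketch
 *     }
 *
 *     static const struct rb_concurrent_set_funcs example_funcs = {
 *         .hash = example_hash,
 *         .cmp = example_cmp,
 *         .create = example_create,
 *     };
 *
 *     // One VALUE slot holds the set so resizes can atomically swap in a new
 *     // wrapper object; callers always pass its address.
 *     static VALUE example_set;
 *
 *     void
 *     example_init(void)
 *     {
 *         example_set = rb_concurrent_set_new(&example_funcs, 32); // power-of-two initial capacity
 *         rb_gc_register_address(&example_set);
 *     }
 *
 *     VALUE
 *     example_intern(VALUE str)
 *     {
 *         return rb_concurrent_set_find_or_insert(&example_set, str, NULL);
 *     }
 */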
rb_concurrent_set_delete_by_identity(VALUE set_obj, VALUE key)
{
    /* Runs under the VM lock with barrier, so plain (non-atomic) accesses are safe. */
    ASSERT_vm_locking_with_barrier();
    /* ... */
    VALUE hash = set->funcs->hash(key);
    /* ... */
    int idx = concurrent_set_probe_start(&probe, set, hash);
    /* ... */
        VALUE curr_key = entry->key;

        switch (curr_key) {
          case CONCURRENT_SET_EMPTY:
            /* ... */
          case CONCURRENT_SET_DELETED:
            /* ... */
          case CONCURRENT_SET_MOVED:
            rb_bug("rb_concurrent_set_delete_by_identity: moved entry");
          default:
            if (key == curr_key) {
                entry->key = CONCURRENT_SET_DELETED;
                set->deleted_entries++;
                /* ... */
            }
            /* ... */
        }

        idx = concurrent_set_probe_next(&probe);
    /* ... */
}
rb_concurrent_set_foreach_with_replace(VALUE set_obj, int (*callback)(VALUE *key, void *data), void *data)
{
    ASSERT_vm_locking_with_barrier();
    /* ... */
    for (unsigned int i = 0; i < set->capacity; i++) {
        VALUE key = set->entries[i].key;

        switch (key) {
          case CONCURRENT_SET_EMPTY:
          case CONCURRENT_SET_DELETED:
            /* ... */
          case CONCURRENT_SET_MOVED:
            rb_bug("rb_concurrent_set_foreach_with_replace: moved entry");
          default: {
            /* The callback may rewrite the key in place through the pointer. */
            int ret = callback(&entry->key, data);
            /* ... */
            set->entries[i].key = CONCURRENT_SET_DELETED;
            set->deleted_entries++;
            /* ... */
          }
        }
    }
}
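/*
 * A sketch of a callback for rb_concurrent_set_foreach_with_replace(). The
 * code above shows that the callback receives a pointer to the stored key
 * (so it can rewrite it in place) and that some return value causes the slot
 * to be tombstoned; the exact return-value convention is not visible in this
 * excerpt, so the st.h-style ST_CONTINUE used below is an assumption.
 *
 *     static int
 *     example_update_key(VALUE *key, void *data)
 *     {
 *         (void)data;
 *         // Follow objects moved by GC compaction.
 *         *key = rb_gc_location(*key);
 *         return ST_CONTINUE;
 *     }
 *
 *     // rb_concurrent_set_foreach_with_replace(set_obj, example_update_key, NULL);
 */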