Ruby 3.5.0dev (2025-08-09 revision 2a6345e957c01f4495323723c7a3d7ac0d4ac339)
id_table.c (2a6345e957c01f4495323723c7a3d7ac0d4ac339)
/* This file is included by symbol.c */

#include "id_table.h"

#ifndef ID_TABLE_DEBUG
#define ID_TABLE_DEBUG 0
#endif

#if ID_TABLE_DEBUG == 0
#undef NDEBUG
#define NDEBUG
#endif
#include "ruby_assert.h"

typedef rb_id_serial_t id_key_t;

static inline ID
key2id(id_key_t key)
{
    return rb_id_serial_to_id(key);
}

static inline id_key_t
id2key(ID id)
{
    return rb_id_to_serial(id);
}

/* simple open addressing with quadratic probing.
   uses mark-bit on collisions - need extra 1 bit,
   ID is strictly 3 bits larger than rb_id_serial_t */

typedef struct rb_id_item {
    id_key_t key;
#if SIZEOF_VALUE == 8
    int collision;
#endif
    VALUE val;
} item_t;

struct rb_id_table {
    int capa;
    int num;
    int used;
    item_t *items;
};

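/* Item accessors.  On 64-bit VALUE builds the collision mark lives in its own
 * field, so the key is stored as-is.  On 32-bit builds the mark is packed into
 * the low bit of the key, which is why keys are shifted left by one and
 * ITEM_KEY_ISSET() tests for a stored value greater than 1. */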
#if SIZEOF_VALUE == 8
#define ITEM_GET_KEY(tbl, i) ((tbl)->items[i].key)
#define ITEM_KEY_ISSET(tbl, i) ((tbl)->items && (tbl)->items[i].key)
#define ITEM_COLLIDED(tbl, i) ((tbl)->items[i].collision)
#define ITEM_SET_COLLIDED(tbl, i) ((tbl)->items[i].collision = 1)
static inline void
ITEM_SET_KEY(struct rb_id_table *tbl, int i, id_key_t key)
{
    tbl->items[i].key = key;
}
#else
#define ITEM_GET_KEY(tbl, i) ((tbl)->items[i].key >> 1)
#define ITEM_KEY_ISSET(tbl, i) ((tbl)->items[i].key > 1)
#define ITEM_COLLIDED(tbl, i) ((tbl)->items[i].key & 1)
#define ITEM_SET_COLLIDED(tbl, i) ((tbl)->items[i].key |= 1)
static inline void
ITEM_SET_KEY(struct rb_id_table *tbl, int i, id_key_t key)
{
    tbl->items[i].key = (key << 1) | ITEM_COLLIDED(tbl, i);
}
#endif

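/* round_capa() rounds the requested capacity up to a power of two of at least
 * 4, so that "capa - 1" can be used as a bit mask when probing.  For example,
 * round_capa(0) == 4 and round_capa(20) == 32. */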
static inline int
round_capa(int capa)
{
    /* minsize is 4 */
    capa >>= 2;
    capa |= capa >> 1;
    capa |= capa >> 2;
    capa |= capa >> 4;
    capa |= capa >> 8;
    capa |= capa >> 16;
    return (capa + 1) << 2;
}

struct rb_id_table *
rb_id_table_init(struct rb_id_table *tbl, size_t s_capa)
{
    int capa = (int)s_capa;
    MEMZERO(tbl, struct rb_id_table, 1);
    if (capa > 0) {
        capa = round_capa(capa);
        tbl->capa = (int)capa;
        tbl->items = ZALLOC_N(item_t, capa);
    }
    return tbl;
}

struct rb_id_table *
rb_id_table_create(size_t capa)
{
    struct rb_id_table *tbl = ALLOC(struct rb_id_table);
    return rb_id_table_init(tbl, capa);
}

void
rb_id_table_free_items(struct rb_id_table *tbl)
{
    xfree(tbl->items);
}

void
rb_id_table_free(struct rb_id_table *tbl)
{
    xfree(tbl->items);
    xfree(tbl);
}

void
rb_id_table_clear(struct rb_id_table *tbl)
{
    tbl->num = 0;
    tbl->used = 0;
    MEMZERO(tbl->items, item_t, tbl->capa);
}

size_t
rb_id_table_size(const struct rb_id_table *tbl)
{
    return (size_t)tbl->num;
}

size_t
rb_id_table_memsize(const struct rb_id_table *tbl)
{
    return sizeof(item_t) * tbl->capa + sizeof(struct rb_id_table);
}

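/* hash_table_index() returns the slot holding `key`, or -1 if it is absent.
 * Probing starts at (key & mask) and then advances by 1, 2, 3, ... slots
 * (triangular-number offsets), which visits every slot when capa is a power
 * of two.  The search stops at the first slot that carries no collision mark. */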
static int
hash_table_index(struct rb_id_table* tbl, id_key_t key)
{
    if (tbl->capa > 0) {
        int mask = tbl->capa - 1;
        int ix = key & mask;
        int d = 1;
        while (key != ITEM_GET_KEY(tbl, ix)) {
            if (!ITEM_COLLIDED(tbl, ix))
                return -1;
            ix = (ix + d) & mask;
            d++;
        }
        return ix;
    }
    return -1;
}

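/* hash_table_raw_insert() assumes `key` is not already present.  Every
 * occupied slot it passes over is marked as collided so that later lookups
 * know to keep probing.  `num` counts live entries; `used` is the load figure
 * checked by hash_table_extend() and, unlike `num`, is not reduced when a
 * collision-marked slot is later emptied. */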
static void
hash_table_raw_insert(struct rb_id_table *tbl, id_key_t key, VALUE val)
{
    int mask = tbl->capa - 1;
    int ix = key & mask;
    int d = 1;
    RUBY_ASSERT(key != 0);
    while (ITEM_KEY_ISSET(tbl, ix)) {
        ITEM_SET_COLLIDED(tbl, ix);
        ix = (ix + d) & mask;
        d++;
    }
    tbl->num++;
    if (!ITEM_COLLIDED(tbl, ix)) {
        tbl->used++;
    }
    ITEM_SET_KEY(tbl, ix, key);
    tbl->items[ix].val = val;
}

static int
hash_delete_index(struct rb_id_table *tbl, int ix)
{
    if (ix >= 0) {
        if (!ITEM_COLLIDED(tbl, ix)) {
            tbl->used--;
        }
        tbl->num--;
        ITEM_SET_KEY(tbl, ix, 0);
        tbl->items[ix].val = 0;
        return TRUE;
    }
    else {
        return FALSE;
    }
}

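/* hash_table_extend() rebuilds the table once `used` reaches roughly two
 * thirds of `capa`.  Rehashing every live key into a fresh array sized for
 * about 1.5 * num entries also discards the stale collision marks left
 * behind by deletions. */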
static void
hash_table_extend(struct rb_id_table* tbl)
{
    if (tbl->used + (tbl->used >> 1) >= tbl->capa) {
        int new_cap = round_capa(tbl->num + (tbl->num >> 1));
        int i;
        item_t* old;
        struct rb_id_table tmp_tbl = {0, 0, 0};
        if (new_cap < tbl->capa) {
            new_cap = round_capa(tbl->used + (tbl->used >> 1));
        }
        tmp_tbl.capa = new_cap;
        tmp_tbl.items = ZALLOC_N(item_t, new_cap);
        for (i = 0; i < tbl->capa; i++) {
            id_key_t key = ITEM_GET_KEY(tbl, i);
            if (key != 0) {
                hash_table_raw_insert(&tmp_tbl, key, tbl->items[i].val);
            }
        }
        old = tbl->items;
        *tbl = tmp_tbl;
        xfree(old);
    }
}

#if ID_TABLE_DEBUG && 0
static void
hash_table_show(struct rb_id_table *tbl)
{
    const id_key_t *keys = tbl->keys;
    const int capa = tbl->capa;
    int i;

    fprintf(stderr, "tbl: %p (capa: %d, num: %d, used: %d)\n", tbl, tbl->capa, tbl->num, tbl->used);
    for (i=0; i<capa; i++) {
        if (ITEM_KEY_ISSET(tbl, i)) {
            fprintf(stderr, " -> [%d] %s %d\n", i, rb_id2name(key2id(keys[i])), (int)keys[i]);
        }
    }
}
#endif

int
rb_id_table_lookup(struct rb_id_table *tbl, ID id, VALUE *valp)
{
    id_key_t key = id2key(id);
    int index = hash_table_index(tbl, key);

    if (index >= 0) {
        *valp = tbl->items[index].val;
        return TRUE;
    }
    else {
        return FALSE;
    }
}

static int
rb_id_table_insert_key(struct rb_id_table *tbl, const id_key_t key, const VALUE val)
{
    const int index = hash_table_index(tbl, key);

    if (index >= 0) {
        tbl->items[index].val = val;
    }
    else {
        hash_table_extend(tbl);
        hash_table_raw_insert(tbl, key, val);
    }
    return TRUE;
}

int
rb_id_table_insert(struct rb_id_table *tbl, ID id, VALUE val)
{
    return rb_id_table_insert_key(tbl, id2key(id), val);
}

int
rb_id_table_delete(struct rb_id_table *tbl, ID id)
{
    const id_key_t key = id2key(id);
    int index = hash_table_index(tbl, key);
    return hash_delete_index(tbl, index);
}

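/* Iteration protocol: the callback's enum rb_id_table_iterator_result controls
 * what happens next.  ID_TABLE_CONTINUE keeps going, ID_TABLE_STOP ends the
 * walk, ID_TABLE_DELETE removes the current entry in place, and (in the
 * *_with_replace variant) ID_TABLE_REPLACE lets the replace callback rewrite
 * the current value. */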
void
rb_id_table_foreach(struct rb_id_table *tbl, rb_id_table_foreach_func_t *func, void *data)
{
    int i, capa = tbl->capa;

    for (i=0; i<capa; i++) {
        if (ITEM_KEY_ISSET(tbl, i)) {
            const id_key_t key = ITEM_GET_KEY(tbl, i);
            enum rb_id_table_iterator_result ret = (*func)(key2id(key), tbl->items[i].val, data);
            RUBY_ASSERT(key != 0);

            if (ret == ID_TABLE_DELETE)
                hash_delete_index(tbl, i);
            else if (ret == ID_TABLE_STOP)
                return;
        }
    }
}

void
rb_id_table_foreach_values(struct rb_id_table *tbl, rb_id_table_foreach_values_func_t *func, void *data)
{
    int i, capa = tbl->capa;

    if (!tbl->items) {
        return;
    }

    for (i=0; i<capa; i++) {
        if (ITEM_KEY_ISSET(tbl, i)) {
            enum rb_id_table_iterator_result ret = (*func)(tbl->items[i].val, data);

            if (ret == ID_TABLE_DELETE)
                hash_delete_index(tbl, i);
            else if (ret == ID_TABLE_STOP)
                return;
        }
    }
}

void
rb_id_table_foreach_values_with_replace(struct rb_id_table *tbl, rb_id_table_foreach_values_func_t *func, rb_id_table_update_value_callback_func_t *replace, void *data)
{
    int i, capa = tbl->capa;

    for (i = 0; i < capa; i++) {
        if (ITEM_KEY_ISSET(tbl, i)) {
            enum rb_id_table_iterator_result ret = (*func)(tbl->items[i].val, data);

            if (ret == ID_TABLE_REPLACE) {
                VALUE val = tbl->items[i].val;
                ret = (*replace)(&val, data, TRUE);
                tbl->items[i].val = val;
            }

            if (ret == ID_TABLE_STOP)
                return;
        }
    }
}

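/* "Managed" id tables wrap the same struct rb_id_table in a TypedData VALUE
 * (RUBY_TYPED_EMBEDDABLE, so the struct is stored inside the object), letting
 * the GC own the table's lifetime.  Only the items array needs an explicit
 * dfree; the struct itself is released together with the wrapping object. */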
static void
managed_id_table_free(void *data)
{
    struct rb_id_table *tbl = (struct rb_id_table *)data;
    rb_id_table_free_items(tbl);
}

static size_t
managed_id_table_memsize(const void *data)
{
    const struct rb_id_table *tbl = (const struct rb_id_table *)data;
    return rb_id_table_memsize(tbl) - sizeof(struct rb_id_table);
}

const rb_data_type_t rb_managed_id_table_type = {
    .wrap_struct_name = "VM/managed_id_table",
    .function = {
        .dmark = NULL, // Nothing to mark
        .dfree = (RUBY_DATA_FUNC)managed_id_table_free,
        .dsize = managed_id_table_memsize,
    },
    .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE,
};

static inline struct rb_id_table *
managed_id_table_ptr(VALUE obj)
{
    RUBY_ASSERT(RB_TYPE_P(obj, T_DATA));
    RUBY_ASSERT(rb_typeddata_inherited_p(RTYPEDDATA_TYPE(obj), &rb_managed_id_table_type));

    return RTYPEDDATA_GET_DATA(obj);
}

VALUE
rb_managed_id_table_create(const rb_data_type_t *type, size_t capa)
{
    struct rb_id_table *tbl;
    VALUE obj = TypedData_Make_Struct(0, struct rb_id_table, type, tbl);
    rb_id_table_init(tbl, capa);
    return obj;
}

VALUE
rb_managed_id_table_new(size_t capa)
{
    return rb_managed_id_table_create(&rb_managed_id_table_type, capa);
}

static enum rb_id_table_iterator_result
managed_id_table_dup_i(ID id, VALUE val, void *data)
{
    struct rb_id_table *new_tbl = (struct rb_id_table *)data;
    rb_id_table_insert(new_tbl, id, val);
    return ID_TABLE_CONTINUE;
}

VALUE
rb_managed_id_table_dup(VALUE old_table)
{
    struct rb_id_table *new_tbl;
    VALUE obj = TypedData_Make_Struct(0, struct rb_id_table, RTYPEDDATA_TYPE(old_table), new_tbl);
    struct rb_id_table *old_tbl = managed_id_table_ptr(old_table);
    rb_id_table_init(new_tbl, old_tbl->num + 1);
    rb_id_table_foreach(old_tbl, managed_id_table_dup_i, new_tbl);
    return obj;
}

int
rb_managed_id_table_lookup(VALUE table, ID id, VALUE *valp)
{
    return rb_id_table_lookup(managed_id_table_ptr(table), id, valp);
}

int
rb_managed_id_table_insert(VALUE table, ID id, VALUE val)
{
    return rb_id_table_insert(managed_id_table_ptr(table), id, val);
}

size_t
rb_managed_id_table_size(VALUE table)
{
    return rb_id_table_size(managed_id_table_ptr(table));
}

void
rb_managed_id_table_foreach(VALUE table, rb_id_table_foreach_func_t *func, void *data)
{
    rb_id_table_foreach(managed_id_table_ptr(table), func, data);
}

void
rb_managed_id_table_foreach_values(VALUE table, rb_id_table_foreach_values_func_t *func, void *data)
{
    rb_id_table_foreach_values(managed_id_table_ptr(table), func, data);
}

int
rb_managed_id_table_delete(VALUE table, ID id)
{
    return rb_id_table_delete(managed_id_table_ptr(table), id);
}
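
/* Usage sketch (illustrative only, not part of id_table.c): one way the raw,
 * caller-owned API above might be exercised from other core code, assuming
 * "id_table.h" is included and an ID (e.g. from rb_intern()) is at hand.
 * The function and variable names below are hypothetical.
 *
 *     static enum rb_id_table_iterator_result
 *     count_i(ID id, VALUE val, void *data)
 *     {
 *         int *n = (int *)data;       // caller-supplied accumulator
 *         (*n)++;
 *         return ID_TABLE_CONTINUE;   // keep iterating
 *     }
 *
 *     static void
 *     id_table_demo(ID some_id)
 *     {
 *         VALUE found;
 *         int n = 0;
 *         struct rb_id_table *tbl = rb_id_table_create(0); // grows on first insert
 *
 *         rb_id_table_insert(tbl, some_id, INT2FIX(42));
 *         if (rb_id_table_lookup(tbl, some_id, &found)) {
 *             // found == INT2FIX(42)
 *         }
 *         rb_id_table_foreach(tbl, count_i, &n);           // n == 1 afterwards
 *         rb_id_table_delete(tbl, some_id);
 *         rb_id_table_free(tbl);
 *     }
 */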