Ruby 3.5.0dev (2025-02-20 revision 34098b669c0cbc024cd08e686891f1dfe0a10aaf)
id_table.c (34098b669c0cbc024cd08e686891f1dfe0a10aaf)
/* This file is included by symbol.c */

#include "id_table.h"

#ifndef ID_TABLE_DEBUG
#define ID_TABLE_DEBUG 0
#endif

#if ID_TABLE_DEBUG == 0
#undef NDEBUG
#define NDEBUG
#endif
#include "ruby_assert.h"

typedef rb_id_serial_t id_key_t;

static inline ID
key2id(id_key_t key)
{
    return rb_id_serial_to_id(key);
}

static inline id_key_t
id2key(ID id)
{
    return rb_id_to_serial(id);
}

/* simple open addressing with quadratic probing.
   uses a mark bit on collisions - needs 1 extra bit;
   ID is strictly 3 bits larger than rb_id_serial_t */

typedef struct rb_id_item {
    id_key_t key;
#if SIZEOF_VALUE == 8
    int collision;
#endif
    VALUE val;
} item_t;

struct rb_id_table {
    int capa;
    int num;
    int used;
    item_t *items;
};
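/* Descriptive note, inferred from the code below: capa is zero or a power
 * of two, so `key & (capa - 1)` serves as the hash mask; num counts live
 * entries; used counts slots that are no longer pristine - a slot stays
 * "used" once it has held a key or a collision mark, because collision
 * marks are never cleared on delete (they act as tombstones that keep
 * probe chains intact). */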

#if SIZEOF_VALUE == 8
#define ITEM_GET_KEY(tbl, i) ((tbl)->items[i].key)
#define ITEM_KEY_ISSET(tbl, i) ((tbl)->items[i].key)
#define ITEM_COLLIDED(tbl, i) ((tbl)->items[i].collision)
#define ITEM_SET_COLLIDED(tbl, i) ((tbl)->items[i].collision = 1)
static inline void
ITEM_SET_KEY(struct rb_id_table *tbl, int i, id_key_t key)
{
    tbl->items[i].key = key;
}
#else
#define ITEM_GET_KEY(tbl, i) ((tbl)->items[i].key >> 1)
#define ITEM_KEY_ISSET(tbl, i) ((tbl)->items[i].key > 1)
#define ITEM_COLLIDED(tbl, i) ((tbl)->items[i].key & 1)
#define ITEM_SET_COLLIDED(tbl, i) ((tbl)->items[i].key |= 1)
static inline void
ITEM_SET_KEY(struct rb_id_table *tbl, int i, id_key_t key)
{
    tbl->items[i].key = (key << 1) | ITEM_COLLIDED(tbl, i);
}
#endif
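/* Descriptive note: the two variants store the collision mark differently.
 * With 8-byte VALUEs a dedicated int fits next to the 4-byte key without
 * growing the item, so the mark gets its own field. On 32-bit builds the
 * mark is packed into bit 0 of the key word instead: keys are stored
 * shifted left by one, ITEM_SET_KEY preserves the mark via ITEM_COLLIDED,
 * and ITEM_KEY_ISSET tests `> 1` so a bare mark (stored value 1) does not
 * count as a live key. */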

static inline int
round_capa(int capa)
{
    /* minsize is 4 */
    capa >>= 2;
    capa |= capa >> 1;
    capa |= capa >> 2;
    capa |= capa >> 4;
    capa |= capa >> 8;
    capa |= capa >> 16;
    return (capa + 1) << 2;
}
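/* Worked examples (descriptive note): the bit-smearing rounds up to a
 * power of two, never below 4 and always strictly above the input:
 * round_capa(0) == 4, round_capa(5) == 8, round_capa(9) == 16, and
 * round_capa(16) == 32. The result keeps `capa - 1` usable as a mask. */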

static struct rb_id_table *
rb_id_table_init(struct rb_id_table *tbl, int capa)
{
    MEMZERO(tbl, struct rb_id_table, 1);
    if (capa > 0) {
        capa = round_capa(capa);
        tbl->capa = (int)capa;
        tbl->items = ZALLOC_N(item_t, capa);
    }
    return tbl;
}

struct rb_id_table *
rb_id_table_create(size_t capa)
{
    struct rb_id_table *tbl = ALLOC(struct rb_id_table);
    return rb_id_table_init(tbl, (int)capa);
}
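/* Illustrative lifecycle sketch, shown as a comment rather than live code;
 * it only uses the public functions defined in this file, and `id` and
 * `payload` are hypothetical placeholders:
 *
 *   struct rb_id_table *tbl = rb_id_table_create(0);
 *   rb_id_table_insert(tbl, id, (VALUE)payload);
 *   VALUE found;
 *   if (rb_id_table_lookup(tbl, id, &found)) {
 *       // found now holds the stored value
 *   }
 *   rb_id_table_delete(tbl, id);
 *   rb_id_table_free(tbl);
 */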

void
rb_id_table_free(struct rb_id_table *tbl)
{
    xfree(tbl->items);
    xfree(tbl);
}

void
rb_id_table_clear(struct rb_id_table *tbl)
{
    tbl->num = 0;
    tbl->used = 0;
    MEMZERO(tbl->items, item_t, tbl->capa);
}

size_t
rb_id_table_size(const struct rb_id_table *tbl)
{
    return (size_t)tbl->num;
}

size_t
rb_id_table_memsize(const struct rb_id_table *tbl)
{
    return sizeof(item_t) * tbl->capa + sizeof(struct rb_id_table);
}

static int
hash_table_index(struct rb_id_table* tbl, id_key_t key)
{
    if (tbl->capa > 0) {
        int mask = tbl->capa - 1;
        int ix = key & mask;
        int d = 1;
        while (key != ITEM_GET_KEY(tbl, ix)) {
            if (!ITEM_COLLIDED(tbl, ix))
                return -1;
            ix = (ix + d) & mask;
            d++;
        }
        return ix;
    }
    return -1;
}
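/* Descriptive note on the probe sequence: the step d grows by one on each
 * miss, so the probed indexes are ix, ix+1, ix+3, ix+6, ... - the home slot
 * plus the triangular numbers n(n+1)/2, all taken mod capa. With a
 * power-of-two capa this sequence visits every slot before repeating, so a
 * probe cannot cycle forever without finding the key or an unmarked slot. */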

static void
hash_table_raw_insert(struct rb_id_table *tbl, id_key_t key, VALUE val)
{
    int mask = tbl->capa - 1;
    int ix = key & mask;
    int d = 1;
    RUBY_ASSERT(key != 0);
    while (ITEM_KEY_ISSET(tbl, ix)) {
        ITEM_SET_COLLIDED(tbl, ix);
        ix = (ix + d) & mask;
        d++;
    }
    tbl->num++;
    if (!ITEM_COLLIDED(tbl, ix)) {
        tbl->used++;
    }
    ITEM_SET_KEY(tbl, ix, key);
    tbl->items[ix].val = val;
}
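/* Descriptive note: raw insert assumes the key is not already present and
 * that a free slot exists (callers go through hash_table_extend first).
 * Every occupied slot crossed on the way gets its collision mark set; the
 * mark is never cleared, which is why `used` only grows until the next
 * rebuild. */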

static int
hash_delete_index(struct rb_id_table *tbl, int ix)
{
    if (ix >= 0) {
        if (!ITEM_COLLIDED(tbl, ix)) {
            tbl->used--;
        }
        tbl->num--;
        ITEM_SET_KEY(tbl, ix, 0);
        tbl->items[ix].val = 0;
        return TRUE;
    }
    else {
        return FALSE;
    }
}

static void
hash_table_extend(struct rb_id_table* tbl)
{
    if (tbl->used + (tbl->used >> 1) >= tbl->capa) {
        int new_cap = round_capa(tbl->num + (tbl->num >> 1));
        int i;
        item_t* old;
        struct rb_id_table tmp_tbl = {0, 0, 0};
        if (new_cap < tbl->capa) {
            new_cap = round_capa(tbl->used + (tbl->used >> 1));
        }
        tmp_tbl.capa = new_cap;
        tmp_tbl.items = ZALLOC_N(item_t, new_cap);
        for (i = 0; i < tbl->capa; i++) {
            id_key_t key = ITEM_GET_KEY(tbl, i);
            if (key != 0) {
                hash_table_raw_insert(&tmp_tbl, key, tbl->items[i].val);
            }
        }
        old = tbl->items;
        *tbl = tmp_tbl;
        xfree(old);
    }
}
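/* Descriptive note: the rebuild runs once used slots reach two thirds of
 * capacity (used * 1.5 >= capa). Re-inserting only live keys drops the
 * accumulated collision marks; the new size is derived from num, falling
 * back to the (larger) used-based size when the num-based size would be
 * smaller than the current capacity. */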

#if ID_TABLE_DEBUG && 0
static void
hash_table_show(struct rb_id_table *tbl)
{
    const int capa = tbl->capa;
    int i;

    fprintf(stderr, "tbl: %p (capa: %d, num: %d, used: %d)\n", (void *)tbl, tbl->capa, tbl->num, tbl->used);
    for (i=0; i<capa; i++) {
        if (ITEM_KEY_ISSET(tbl, i)) {
            const id_key_t key = ITEM_GET_KEY(tbl, i);
            fprintf(stderr, " -> [%d] %s %d\n", i, rb_id2name(key2id(key)), (int)key);
        }
    }
}
#endif

int
rb_id_table_lookup(struct rb_id_table *tbl, ID id, VALUE *valp)
{
    id_key_t key = id2key(id);
    int index = hash_table_index(tbl, key);

    if (index >= 0) {
        *valp = tbl->items[index].val;
        return TRUE;
    }
    else {
        return FALSE;
    }
}

static int
rb_id_table_insert_key(struct rb_id_table *tbl, const id_key_t key, const VALUE val)
{
    const int index = hash_table_index(tbl, key);

    if (index >= 0) {
        tbl->items[index].val = val;
    }
    else {
        hash_table_extend(tbl);
        hash_table_raw_insert(tbl, key, val);
    }
    return TRUE;
}

int
rb_id_table_insert(struct rb_id_table *tbl, ID id, VALUE val)
{
    return rb_id_table_insert_key(tbl, id2key(id), val);
}

int
rb_id_table_delete(struct rb_id_table *tbl, ID id)
{
    const id_key_t key = id2key(id);
    int index = hash_table_index(tbl, key);
    return hash_delete_index(tbl, index);
}

void
rb_id_table_foreach(struct rb_id_table *tbl, rb_id_table_foreach_func_t *func, void *data)
{
    int i, capa = tbl->capa;

    for (i=0; i<capa; i++) {
        if (ITEM_KEY_ISSET(tbl, i)) {
            const id_key_t key = ITEM_GET_KEY(tbl, i);
            enum rb_id_table_iterator_result ret = (*func)(key2id(key), tbl->items[i].val, data);
            RUBY_ASSERT(key != 0);

            if (ret == ID_TABLE_DELETE)
                hash_delete_index(tbl, i);
            else if (ret == ID_TABLE_STOP)
                return;
        }
    }
}
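/* Illustrative callback sketch, shown as a comment; it assumes the
 * ID_TABLE_CONTINUE result declared alongside the other iterator results
 * in id_table.h, and `should_drop` is a hypothetical predicate:
 *
 *   static enum rb_id_table_iterator_result
 *   prune_cb(ID id, VALUE val, void *data)
 *   {
 *       if (should_drop(id, val)) return ID_TABLE_DELETE;  // remove entry
 *       return ID_TABLE_CONTINUE;                          // keep going
 *   }
 *
 *   rb_id_table_foreach(tbl, prune_cb, NULL);
 */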

void
rb_id_table_foreach_values(struct rb_id_table *tbl, rb_id_table_foreach_values_func_t *func, void *data)
{
    int i, capa = tbl->capa;

    for (i=0; i<capa; i++) {
        if (ITEM_KEY_ISSET(tbl, i)) {
            enum rb_id_table_iterator_result ret = (*func)(tbl->items[i].val, data);

            if (ret == ID_TABLE_DELETE)
                hash_delete_index(tbl, i);
            else if (ret == ID_TABLE_STOP)
                return;
        }
    }
}

void
rb_id_table_foreach_values_with_replace(struct rb_id_table *tbl, rb_id_table_foreach_values_func_t *func, rb_id_table_update_value_callback_func_t *replace, void *data)
{
    int i, capa = tbl->capa;

    for (i = 0; i < capa; i++) {
        if (ITEM_KEY_ISSET(tbl, i)) {
            enum rb_id_table_iterator_result ret = (*func)(tbl->items[i].val, data);

            if (ret == ID_TABLE_REPLACE) {
                VALUE val = tbl->items[i].val;
                ret = (*replace)(&val, data, TRUE);
                tbl->items[i].val = val;
            }

            if (ret == ID_TABLE_STOP)
                return;
        }
    }
}
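
/* Descriptive note on the replace protocol: when the value callback returns
 * ID_TABLE_REPLACE, the update callback receives a pointer to a copy of the
 * slot's value (the TRUE flag signals an existing entry), may rewrite it
 * through the pointer, and the result is stored back into the slot; its own
 * return value can still request ID_TABLE_STOP. */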