/* Ruby 3.5.0dev (2025-05-21 revision 3487117e55f09e710cdf338ce5b87607b0b51d6d)
 * siphash.c (3487117e55f09e710cdf338ce5b87607b0b51d6d) */
1#include <string.h>
2#include <stdio.h>
3#include "siphash.h"
4#ifndef SIP_HASH_STREAMING
5 #define SIP_HASH_STREAMING 1
6#endif
7
8#if defined(__MINGW32__)
9 #include <sys/param.h>
10
11 /* MinGW only defines LITTLE_ENDIAN and BIG_ENDIAN macros */
12 #define __LITTLE_ENDIAN LITTLE_ENDIAN
13 #define __BIG_ENDIAN BIG_ENDIAN
14#elif defined(_WIN32)
15 #define BYTE_ORDER __LITTLE_ENDIAN
16#elif !defined(BYTE_ORDER)
17 #include <endian.h>
18#endif
19
20#ifndef LITTLE_ENDIAN
21#define LITTLE_ENDIAN __LITTLE_ENDIAN
22#endif
23#ifndef BIG_ENDIAN
24#define BIG_ENDIAN __BIG_ENDIAN
25#endif
26
27#if BYTE_ORDER == LITTLE_ENDIAN
28 #define lo u32[0]
29 #define hi u32[1]
30#elif BYTE_ORDER == BIG_ENDIAN
31 #define hi u32[0]
32 #define lo u32[1]
33#else
34 #error "Only strictly little or big endian supported"
35#endif
36
37/* __POWERPC__ added to accommodate Darwin case. */
38#ifndef UNALIGNED_WORD_ACCESS
39# if defined(__i386) || defined(__i386__) || defined(_M_IX86) || \
40 defined(__x86_64) || defined(__x86_64__) || defined(_M_AMD64) || \
41 defined(__powerpc64__) || defined(__POWERPC__) || defined(__aarch64__) || \
42 defined(__mc68020__)
43# define UNALIGNED_WORD_ACCESS 1
44# endif
45#endif
46#ifndef UNALIGNED_WORD_ACCESS
47# define UNALIGNED_WORD_ACCESS 0
48#endif
49
/* Read a 32-bit value from 4 bytes in little-endian order.  Byte-wise,
 * so it is safe for any alignment and any host byte order. */
#define U8TO32_LE(p)         \
    (((uint32_t)((p)[0])       ) | ((uint32_t)((p)[1]) <<  8) |  \
     ((uint32_t)((p)[2]) << 16) | ((uint32_t)((p)[3]) << 24))    \

/* Store a 32-bit value to 4 bytes in little-endian order. */
#define U32TO8_LE(p, v)      \
do { \
    (p)[0] = (uint8_t)((v)      );   \
    (p)[1] = (uint8_t)((v) >>  8);   \
    (p)[2] = (uint8_t)((v) >> 16);   \
    (p)[3] = (uint8_t)((v) >> 24);   \
} while (0)
61
#ifdef HAVE_UINT64_T
/* Native 64-bit integer available: 64-bit helpers are thin macros. */

/* Read a 64-bit value from 8 bytes in little-endian order. */
#define U8TO64_LE(p) \
    ((uint64_t)U8TO32_LE(p) | ((uint64_t)U8TO32_LE((p) + 4)) << 32 )

/* Store a 64-bit value to 8 bytes in little-endian order. */
#define U64TO8_LE(p, v) \
do { \
    U32TO8_LE((p),     (uint32_t)((v)      )); \
    U32TO8_LE((p) + 4, (uint32_t)((v) >> 32)); \
} while (0)

/* Rotate left by s.  Callers in this file only pass 0 < s < 64, so the
 * (64 - s) shift never hits the undefined s == 0 / s == 64 cases. */
#define ROTL64(v, s) \
    ((v) << (s)) | ((v) >> (64 - (s)))

#define ROTL64_TO(v, s) ((v) = ROTL64((v), (s)))

#define ADD64_TO(v, s) ((v) += (s))
#define XOR64_TO(v, s) ((v) ^= (s))
#define XOR64_INT(v, x) ((v) ^= (x))
#else
/* No native 64-bit integer: here uint64_t is a two-word aggregate
 * (presumably declared in siphash.h — its .lo/.hi members come from the
 * byte-order-dependent macros above) and the 64-bit operations are
 * emulated on the 32-bit halves. */

#define U8TO64_LE(p) u8to64_le(p)
/* Read a 64-bit little-endian value into the two-word representation. */
static inline uint64_t
u8to64_le(const uint8_t *p)
{
    uint64_t ret;
    ret.lo = U8TO32_LE(p);
    ret.hi = U8TO32_LE(p + 4);
    return ret;
}

#define U64TO8_LE(p, v) u64to8_le(p, v)
/* Store the two-word value at p in little-endian byte order. */
static inline void
u64to8_le(uint8_t *p, uint64_t v)
{
    U32TO8_LE(p, v.lo);
    U32TO8_LE(p + 4, v.hi);
}

/* Rotate left by s.  Rotations of 32 or more are reduced by swapping
 * the halves first (a half-swap is exactly a 32-bit rotation), so
 * rotl64_to only ever sees 0 < s < 32. */
#define ROTL64_TO(v, s) ((s) > 32 ? rotl64_swap(rotl64_to(&(v), (s) - 32)) : \
                         (s) == 32 ? rotl64_swap(&(v)) : rotl64_to(&(v), (s)))
/* Rotate left by 0 < s < 32 across the two halves. */
static inline uint64_t *
rotl64_to(uint64_t *v, unsigned int s)
{
    uint32_t uhi = (v->hi << s) | (v->lo >> (32 - s));
    uint32_t ulo = (v->lo << s) | (v->hi >> (32 - s));
    v->hi = uhi;
    v->lo = ulo;
    return v;
}

/* Swap the halves: equivalent to a rotation by exactly 32. */
static inline uint64_t *
rotl64_swap(uint64_t *v)
{
    uint32_t t = v->lo;
    v->lo = v->hi;
    v->hi = t;
    return v;
}

#define ADD64_TO(v, s) add64_to(&(v), (s))
/* 64-bit addition with a manual carry out of the low half. */
static inline uint64_t *
add64_to(uint64_t *v, const uint64_t s)
{
    v->lo += s.lo;
    v->hi += s.hi;
    if (v->lo < s.lo) v->hi++;  /* unsigned wrap detected => carry */
    return v;
}

#define XOR64_TO(v, s) xor64_to(&(v), (s))
/* 64-bit XOR, half by half. */
static inline uint64_t *
xor64_to(uint64_t *v, const uint64_t s)
{
    v->lo ^= s.lo;
    v->hi ^= s.hi;
    return v;
}

/* XOR with a small (< 2^32) integer constant: only the low half changes. */
#define XOR64_INT(v, x) ((v).lo ^= (x))
#endif
141
/* SipHash initialization vector: the bytes of
 * "somepseudorandomlygeneratedbytes", with each 8-byte chunk stored
 * reversed ("uespemos" is "somepseu" backwards) so that little-endian
 * interpretation of u64[0..3] yields the constants used below.
 * The 32 initializer characters exactly fill bin[] with no room for a
 * terminating NUL, hence the nonstring attribute when available. */
static const union {
#if defined(__has_attribute) && __has_attribute(nonstring)
    __attribute__((nonstring))
#endif
    char bin[32];
    uint64_t u64[4];
} sip_init_state_bin = {"uespemos""modnarod""arenegyl""setybdet"};
#define sip_init_state sip_init_state_bin.u64
150
#if SIP_HASH_STREAMING
/* Function table for a streaming-hash implementation: initialize a
 * state from a key, absorb data incrementally, and produce the final
 * 64-bit digest. */
struct sip_interface_st {
    void (*init)(sip_state *s, const uint8_t *key);
    void (*update)(sip_state *s, const uint8_t *data, size_t len);
    void (*final)(sip_state *s, uint64_t *digest);
};

static void int_sip_init(sip_state *state, const uint8_t *key);
static void int_sip_update(sip_state *state, const uint8_t *data, size_t len);
static void int_sip_final(sip_state *state, uint64_t *digest);

/* The single implementation provided here (int_sip_*), installed by
 * sip_hash_init(). */
static const sip_interface sip_methods = {
    int_sip_init,
    int_sip_update,
    int_sip_final
};
#endif /* SIP_HASH_STREAMING */
168
/* One SipRound: the core ARX (add-rotate-xor) permutation of the four
 * 64-bit state words, as specified by SipHash. */
#define SIP_COMPRESS(v0, v1, v2, v3) \
do { \
    ADD64_TO((v0), (v1)); \
    ADD64_TO((v2), (v3)); \
    ROTL64_TO((v1), 13); \
    ROTL64_TO((v3), 16); \
    XOR64_TO((v1), (v0)); \
    XOR64_TO((v3), (v2)); \
    ROTL64_TO((v0), 32); \
    ADD64_TO((v2), (v1)); \
    ADD64_TO((v0), (v3)); \
    ROTL64_TO((v1), 17); \
    ROTL64_TO((v3), 21); \
    XOR64_TO((v1), (v2)); \
    XOR64_TO((v3), (v0)); \
    ROTL64_TO((v2), 32); \
} while(0)

#if SIP_HASH_STREAMING
188static void
189int_sip_dump(sip_state *state)
190{
191 int v;
192
193 for (v = 0; v < 4; v++) {
194#ifdef HAVE_UINT64_T
195 printf("v%d: %" PRIx64 "\n", v, state->v[v]);
196#else
197 printf("v%d: %" PRIx32 "%.8" PRIx32 "\n", v, state->v[v].hi, state->v[v].lo);
198#endif
199 }
200}
201
202static void
203int_sip_init(sip_state *state, const uint8_t key[16])
204{
205 uint64_t k0, k1;
206
207 k0 = U8TO64_LE(key);
208 k1 = U8TO64_LE(key + sizeof(uint64_t));
209
210 state->v[0] = k0; XOR64_TO(state->v[0], sip_init_state[0]);
211 state->v[1] = k1; XOR64_TO(state->v[1], sip_init_state[1]);
212 state->v[2] = k0; XOR64_TO(state->v[2], sip_init_state[2]);
213 state->v[3] = k1; XOR64_TO(state->v[3], sip_init_state[3]);
214}
215
216static inline void
217int_sip_round(sip_state *state, int n)
218{
219 int i;
220
221 for (i = 0; i < n; i++) {
222 SIP_COMPRESS(state->v[0], state->v[1], state->v[2], state->v[3]);
223 }
224}
225
226static inline void
227int_sip_update_block(sip_state *state, uint64_t m)
228{
229 XOR64_TO(state->v[3], m);
230 int_sip_round(state, state->c);
231 XOR64_TO(state->v[0], m);
232}
233
234static inline void
235int_sip_pre_update(sip_state *state, const uint8_t **pdata, size_t *plen)
236{
237 int to_read;
238 uint64_t m;
239
240 if (!state->buflen) return;
241
242 to_read = sizeof(uint64_t) - state->buflen;
243 memcpy(state->buf + state->buflen, *pdata, to_read);
244 m = U8TO64_LE(state->buf);
245 int_sip_update_block(state, m);
246 *pdata += to_read;
247 *plen -= to_read;
248 state->buflen = 0;
249}
250
251static inline void
252int_sip_post_update(sip_state *state, const uint8_t *data, size_t len)
253{
254 uint8_t r = len % sizeof(uint64_t);
255 if (r) {
256 memcpy(state->buf, data + len - r, r);
257 state->buflen = r;
258 }
259}
260
261static void
262int_sip_update(sip_state *state, const uint8_t *data, size_t len)
263{
264 uint64_t *end;
265 uint64_t *data64;
266
267 state->msglen_byte = state->msglen_byte + (len % 256);
268 data64 = (uint64_t *) data;
269
270 int_sip_pre_update(state, &data, &len);
271
272 end = data64 + (len / sizeof(uint64_t));
273
274#if BYTE_ORDER == LITTLE_ENDIAN
275 while (data64 != end) {
276 int_sip_update_block(state, *data64++);
277 }
278#elif BYTE_ORDER == BIG_ENDIAN
279 {
280 uint64_t m;
281 uint8_t *data8 = data;
282 for (; data8 != (uint8_t *) end; data8 += sizeof(uint64_t)) {
283 m = U8TO64_LE(data8);
284 int_sip_update_block(state, m);
285 }
286 }
287#endif
288
289 int_sip_post_update(state, data, len);
290}
291
292static inline void
293int_sip_pad_final_block(sip_state *state)
294{
295 int i;
296 /* pad with 0's and finalize with msg_len mod 256 */
297 for (i = state->buflen; i < sizeof(uint64_t); i++) {
298 state->buf[i] = 0x00;
299 }
300 state->buf[sizeof(uint64_t) - 1] = state->msglen_byte;
301}
302
303static void
304int_sip_final(sip_state *state, uint64_t *digest)
305{
306 uint64_t m;
307
308 int_sip_pad_final_block(state);
309
310 m = U8TO64_LE(state->buf);
311 int_sip_update_block(state, m);
312
313 XOR64_INT(state->v[2], 0xff);
314
315 int_sip_round(state, state->d);
316
317 *digest = state->v[0];
318 XOR64_TO(*digest, state->v[1]);
319 XOR64_TO(*digest, state->v[2]);
320 XOR64_TO(*digest, state->v[3]);
321}
322
323sip_hash *
324sip_hash_new(const uint8_t key[16], int c, int d)
325{
326 sip_hash *h = NULL;
327
328 if (!(h = (sip_hash *) malloc(sizeof(sip_hash)))) return NULL;
329 return sip_hash_init(h, key, c, d);
330}
331
332sip_hash *
333sip_hash_init(sip_hash *h, const uint8_t key[16], int c, int d)
334{
335 h->state->c = c;
336 h->state->d = d;
337 h->state->buflen = 0;
338 h->state->msglen_byte = 0;
339 h->methods = &sip_methods;
340 h->methods->init(h->state, key);
341 return h;
342}
343
/*
 * Feed len bytes at msg into the streaming hash h.
 * Always returns 1 (success).
 */
int
sip_hash_update(sip_hash *h, const uint8_t *msg, size_t len)
{
    h->methods->update(h->state, msg, len);
    return 1;
}
350
351int
352sip_hash_final(sip_hash *h, uint8_t **digest, size_t* len)
353{
354 uint64_t digest64;
355 uint8_t *ret;
356
357 h->methods->final(h->state, &digest64);
358 if (!(ret = (uint8_t *)malloc(sizeof(uint64_t)))) return 0;
359 U64TO8_LE(ret, digest64);
360 *len = sizeof(uint64_t);
361 *digest = ret;
362
363 return 1;
364}
365
/*
 * Finish the hash and write the 64-bit digest to *digest.
 * Always returns 1 (success).
 */
int
sip_hash_final_integer(sip_hash *h, uint64_t *digest)
{
    h->methods->final(h->state, digest);
    return 1;
}
372
/*
 * Convenience: absorb data_len bytes of data, then finalize into a
 * freshly allocated byte digest (see sip_hash_final for ownership).
 * Returns 1 on success, 0 on failure.
 */
int
sip_hash_digest(sip_hash *h, const uint8_t *data, size_t data_len, uint8_t **digest, size_t *digest_len)
{
    if (!sip_hash_update(h, data, data_len)) return 0;
    return sip_hash_final(h, digest, digest_len);
}
379
/*
 * Convenience: absorb data_len bytes of data, then finalize into the
 * 64-bit *digest.  Returns 1 on success, 0 on failure.
 */
int
sip_hash_digest_integer(sip_hash *h, const uint8_t *data, size_t data_len, uint64_t *digest)
{
    if (!sip_hash_update(h, data, data_len)) return 0;
    return sip_hash_final_integer(h, digest);
}
386
/* Release a context allocated by sip_hash_new(). */
void
sip_hash_free(sip_hash *h)
{
    free(h);
}
392
/* Debugging aid: print h's internal state words to stdout. */
void
sip_hash_dump(sip_hash *h)
{
    int_sip_dump(h->state);
}
#endif /* SIP_HASH_STREAMING */
399
/* Absorb one message word m into the state: v3 ^= m, one compression
 * round, v0 ^= m.  Non-streaming counterpart of int_sip_update_block. */
#define SIP_ROUND(m, v0, v1, v2, v3) \
do { \
    XOR64_TO((v3), (m)); \
    SIP_COMPRESS(v0, v1, v2, v3); \
    XOR64_TO((v0), (m)); \
} while (0)
406
/*
 * One-shot SipHash-1-3: hash `len` bytes at `data` with the 16-byte
 * `key`, using 1 compression round per word (SIP_ROUND) and 3
 * finalization rounds, and return the 64-bit digest.
 */
uint64_t
sip_hash13(const uint8_t key[16], const uint8_t *data, size_t len)
{
    uint64_t k0, k1;
    uint64_t v0, v1, v2, v3;
    uint64_t m, last;
    /* End of the whole-word region; the 0..7 leftover bytes start here. */
    const uint8_t *end = data + len - (len % sizeof(uint64_t));

    k0 = U8TO64_LE(key);
    k1 = U8TO64_LE(key + sizeof(uint64_t));

    /* State = key halves XORed with the initialization constants. */
    v0 = k0; XOR64_TO(v0, sip_init_state[0]);
    v1 = k1; XOR64_TO(v1, sip_init_state[1]);
    v2 = k0; XOR64_TO(v2, sip_init_state[2]);
    v3 = k1; XOR64_TO(v3, sip_init_state[3]);

#if BYTE_ORDER == LITTLE_ENDIAN && UNALIGNED_WORD_ACCESS
    /* Fast path: load words straight from the message.
       NOTE(review): relies on the platform tolerating unaligned,
       type-punned 64-bit loads — gated by UNALIGNED_WORD_ACCESS. */
    {
        uint64_t *data64 = (uint64_t *)data;
        while (data64 != (uint64_t *) end) {
            m = *data64++;
            SIP_ROUND(m, v0, v1, v2, v3);
        }
    }
#else
    /* Portable path: byte-wise little-endian loads. */
    for (; data != end; data += sizeof(uint64_t)) {
        m = U8TO64_LE(data);
        SIP_ROUND(m, v0, v1, v2, v3);
    }
#endif

    /* Build the final word: leftover bytes in the low positions and
       (len mod 256) in the top byte, per the SipHash padding rule. */
#ifdef HAVE_UINT64_T
    last = (uint64_t)len << 56;
#define OR_BYTE(n) (last |= ((uint64_t) end[n]) << ((n) * 8))
#else
    last.hi = len << 24;
    last.lo = 0;
#define OR_BYTE(n) do { \
        if (n >= 4) \
            last.hi |= ((uint32_t) end[n]) << ((n) >= 4 ? (n) * 8 - 32 : 0); \
        else \
            last.lo |= ((uint32_t) end[n]) << ((n) >= 4 ? 0 : (n) * 8); \
    } while (0)
#endif

    /* Deliberate cascade: each case falls through to OR in the bytes
       below it. */
    switch (len % sizeof(uint64_t)) {
        case 7:
            OR_BYTE(6);
            /* fall through */
        case 6:
            OR_BYTE(5);
            /* fall through */
        case 5:
            OR_BYTE(4);
            /* fall through */
        case 4:
#if BYTE_ORDER == LITTLE_ENDIAN && UNALIGNED_WORD_ACCESS
    #ifdef HAVE_UINT64_T
            /* Remaining 4 bytes in one aligned-enough 32-bit load. */
            last |= (uint64_t) ((uint32_t *) end)[0];
    #else
            last.lo |= ((uint32_t *) end)[0];
    #endif
            break;
#else
            OR_BYTE(3);
            /* fall through */
#endif
        case 3:
            OR_BYTE(2);
            /* fall through */
        case 2:
            OR_BYTE(1);
            /* fall through */
        case 1:
            OR_BYTE(0);
            break;
        case 0:
            break;
    }

    SIP_ROUND(last, v0, v1, v2, v3);

    /* Finalization: domain separation, then d = 3 rounds. */
    XOR64_INT(v2, 0xff);

    SIP_COMPRESS(v0, v1, v2, v3);
    SIP_COMPRESS(v0, v1, v2, v3);
    SIP_COMPRESS(v0, v1, v2, v3);

    /* Fold the state into the digest: v0 ^ v1 ^ v2 ^ v3. */
    XOR64_TO(v0, v1);
    XOR64_TO(v0, v2);
    XOR64_TO(v0, v3);
    return v0;
}