author     Lennart Poettering <lennart@poettering.net>   2013-12-22 19:59:12 +0100
committer  Lennart Poettering <lennart@poettering.net>   2013-12-22 21:12:25 +0100
commit     9bf3b53533cdc9b95c921b71da755401f223f765
tree       812e99b25cc09f5d5d3b130d25a02754283ff7a7 /src/journal
parent     14f862a508ee64466fa8b3f036797d472f4d03ed
shared: switch our hash table implementation over to SipHash
SipHash appears to be the new gold standard for hashing smaller strings for hashtables these days, so let's make use of it.
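The keyed-hash helpers this change builds on (siphash24(), HASH_KEY_SIZE and the new two-argument hash function signature) live in src/shared and are therefore outside the diff below, which is limited to src/journal. By analogy with the catalog_hash_func() rewrite in catalog.c, the shared string_hash_func() presumably reduces to something like the following sketch; only the names and signatures are taken from the diff, while the function body and the siphash24() prototype are assumptions:

/* Sketch only: a keyed string hash in the style of the catalog_hash_func()
 * rewrite below. siphash24() produces an 8-byte digest of the input keyed
 * with a 16-byte secret, so hash values depend on a random per-object key
 * and cannot be precomputed by an attacker to force hash table collisions. */

#include <stdint.h>
#include <string.h>

#define HASH_KEY_SIZE 16  /* assumed; matches the uint8_t hash_key[16] field added below */

/* Prototype as implied by the call in catalog_hash_func(); the real
 * declaration and implementation live in src/shared/siphash24.[ch]. */
void siphash24(uint8_t out[8], const void *in, size_t inlen,
               const uint8_t k[HASH_KEY_SIZE]);

unsigned long string_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
        uint64_t u;

        /* Hash the NUL-terminated string with the caller-supplied key. */
        siphash24((uint8_t*) &u, p, strlen(p), hash_key);

        return (unsigned long) u;
}

The same pattern shows up twice in the diff itself: catalog.c packs the 128-bit message id and the language string into one buffer before hashing, and journald-rate-limit.c seeds a per-JournalRateLimit hash_key with random_bytes() and passes it to string_hash_func().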
Diffstat (limited to 'src/journal')
-rw-r--r--  src/journal/catalog.c              34
-rw-r--r--  src/journal/catalog.h               2
-rw-r--r--  src/journal/journal-file.c          4
-rw-r--r--  src/journal/journald-rate-limit.c  12
4 files changed, 25 insertions, 27 deletions
diff --git a/src/journal/catalog.c b/src/journal/catalog.c
index e3a3354ab..2823232cb 100644
--- a/src/journal/catalog.c
+++ b/src/journal/catalog.c
@@ -39,6 +39,7 @@
#include "conf-files.h"
#include "mkdir.h"
#include "catalog.h"
+#include "siphash24.h"
const char * const catalog_file_dirs[] = {
"/usr/local/lib/systemd/catalog/",
@@ -63,28 +64,21 @@ typedef struct CatalogItem {
le64_t offset;
} CatalogItem;
-unsigned catalog_hash_func(const void *p) {
+unsigned long catalog_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
const CatalogItem *i = p;
+ uint64_t u;
+ size_t l, sz;
+ void *v;
- assert_cc(sizeof(unsigned) == sizeof(uint8_t)*4);
-
- return (((unsigned) i->id.bytes[0] << 24) |
- ((unsigned) i->id.bytes[1] << 16) |
- ((unsigned) i->id.bytes[2] << 8) |
- ((unsigned) i->id.bytes[3])) ^
- (((unsigned) i->id.bytes[4] << 24) |
- ((unsigned) i->id.bytes[5] << 16) |
- ((unsigned) i->id.bytes[6] << 8) |
- ((unsigned) i->id.bytes[7])) ^
- (((unsigned) i->id.bytes[8] << 24) |
- ((unsigned) i->id.bytes[9] << 16) |
- ((unsigned) i->id.bytes[10] << 8) |
- ((unsigned) i->id.bytes[11])) ^
- (((unsigned) i->id.bytes[12] << 24) |
- ((unsigned) i->id.bytes[13] << 16) |
- ((unsigned) i->id.bytes[14] << 8) |
- ((unsigned) i->id.bytes[15])) ^
- string_hash_func(i->language);
+ l = strlen(i->language);
+ sz = sizeof(i->id) + l;
+ v = alloca(sz);
+
+ memcpy(mempcpy(v, &i->id, sizeof(i->id)), i->language, l);
+
+ siphash24((uint8_t*) &u, v, sz, hash_key);
+
+ return (unsigned long) u;
}
int catalog_compare_func(const void *a, const void *b) {
diff --git a/src/journal/catalog.h b/src/journal/catalog.h
index 5e15b99ac..fdde67aee 100644
--- a/src/journal/catalog.h
+++ b/src/journal/catalog.h
@@ -28,7 +28,7 @@
#include "strbuf.h"
int catalog_import_file(Hashmap *h, struct strbuf *sb, const char *path);
-unsigned catalog_hash_func(const void *p);
+unsigned long catalog_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]);
int catalog_compare_func(const void *a, const void *b) _pure_;
int catalog_update(const char* database, const char* root, const char* const* dirs);
int catalog_get(const char* database, sd_id128_t id, char **data);
diff --git a/src/journal/journal-file.c b/src/journal/journal-file.c
index b7e5cf0ab..121b40a58 100644
--- a/src/journal/journal-file.c
+++ b/src/journal/journal-file.c
@@ -2696,10 +2696,10 @@ int journal_file_open_reliably(
/* The file is corrupted. Rotate it away and try it again (but only once) */
l = strlen(fname);
- if (asprintf(&p, "%.*s@%016llx-%016llx.journal~",
+ if (asprintf(&p, "%.*s@%016llx-%016" PRIx64 ".journal~",
(int) l - 8, fname,
(unsigned long long) now(CLOCK_REALTIME),
- random_ull()) < 0)
+ random_u64()) < 0)
return -ENOMEM;
r = rename(fname, p);
diff --git a/src/journal/journald-rate-limit.c b/src/journal/journald-rate-limit.c
index 4b7622152..6d779d296 100644
--- a/src/journal/journald-rate-limit.c
+++ b/src/journal/journald-rate-limit.c
@@ -56,7 +56,7 @@ struct JournalRateLimitGroup {
char *id;
JournalRateLimitPool pools[POOLS_MAX];
- unsigned hash;
+ unsigned long hash;
LIST_FIELDS(JournalRateLimitGroup, bucket);
LIST_FIELDS(JournalRateLimitGroup, lru);
@@ -70,6 +70,8 @@ struct JournalRateLimit {
JournalRateLimitGroup *lru, *lru_tail;
unsigned n_groups;
+
+ uint8_t hash_key[16];
};
JournalRateLimit *journal_rate_limit_new(usec_t interval, unsigned burst) {
@@ -84,6 +86,8 @@ JournalRateLimit *journal_rate_limit_new(usec_t interval, unsigned burst) {
r->interval = interval;
r->burst = burst;
+ random_bytes(r->hash_key, sizeof(r->hash_key));
+
return r;
}
@@ -152,7 +156,7 @@ static JournalRateLimitGroup* journal_rate_limit_group_new(JournalRateLimit *r,
if (!g->id)
goto fail;
- g->hash = string_hash_func(g->id);
+ g->hash = string_hash_func(g->id, r->hash_key);
journal_rate_limit_vacuum(r, ts);
@@ -199,7 +203,7 @@ static unsigned burst_modulate(unsigned burst, uint64_t available) {
}
int journal_rate_limit_test(JournalRateLimit *r, const char *id, int priority, uint64_t available) {
- unsigned h;
+ unsigned long h;
JournalRateLimitGroup *g;
JournalRateLimitPool *p;
unsigned burst;
@@ -217,7 +221,7 @@ int journal_rate_limit_test(JournalRateLimit *r, const char *id, int priority, u
ts = now(CLOCK_MONOTONIC);
- h = string_hash_func(id);
+ h = string_hash_func(id, r->hash_key);
g = r->buckets[h % BUCKETS_MAX];
LIST_FOREACH(bucket, g, g)