Subject: Re: [PATCH v2 07/67] fscache: Implement a hash function
Linus Torvalds <torvalds@linux-foundation.org> wrote:

> > What I'm trying to get at is that the hash needs to be consistent, no matter
> > the endianness of the cpu, for any particular input blob.
>
> Yeah, if that's the case, then you should probably make that "unsigned
> int *data" argument just be "void *" and then:
>
> > 		a = *data++;			<<<<<<<
> > 		HASH_MIX(x, y, a);
> > 	}
> > 	return fold_hash(x, y);
> > }
> >
> > The marked line should probably use something like le/be32_to_cpu().
>
> Yes, it should be using a '__le32 *' inside that function and you
> should use le32_to_cpu(). Obviously, BE would work too, but cause
> unnecessary work on common hardware.

Okay, how about I make the attached change to make the hashing stable? This
will make fscache_hash() take an opaque buffer and a length (the length must
be a multiple of four bytes).
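
For illustration only (not part of the patch), here's a minimal userspace
sketch of the idea: reading the key as little-endian 32-bit words yields the
same sequence of values on LE and BE hosts, so the hash comes out the same.
le32toh() from <endian.h> stands in for the kernel's le32_to_cpu(), and
mix_step() is just a placeholder mixer, not the real HASH_MIX()/fold_hash():

#define _DEFAULT_SOURCE
#include <endian.h>
#include <inttypes.h>
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Placeholder mixer for illustration; not the kernel's HASH_MIX(). */
static uint32_t mix_step(uint32_t h, uint32_t a)
{
	h = (h ^ a) * 0x9e3779b1u;
	return (h << 13) | (h >> 19);
}

/* Hash an opaque buffer as little-endian 32-bit words. */
static uint32_t stable_hash(uint32_t salt, const void *data, size_t len)
{
	const uint32_t *p = data;
	uint32_t h = salt;
	size_t n = len / sizeof(uint32_t);	/* len must be a multiple of 4 */

	for (; n; n--)
		h = mix_step(h, le32toh(*p++));	/* same word values on LE and BE */
	return h;
}

int main(void)
{
	uint32_t key[2] = { 0 };

	memcpy(key, "example", 7);		/* NUL-padded to a 4-byte multiple */
	printf("%08" PRIx32 "\n", stable_hash(0, key, sizeof(key)));
	return 0;
}

On a big-endian box le32toh() does the byte swap; on little-endian it's a
no-op, which is why LE is preferred over BE here - it avoids unnecessary work
on common hardware.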

David
---
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index e287952292c5..65cf2ae22a70 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -269,22 +269,23 @@ EXPORT_SYMBOL(fscache_caching_failed);
 static int fscache_set_key(struct fscache_cookie *cookie,
 			   const void *index_key, size_t index_key_len)
 {
-	u32 *buf;
-	int bufs;
+	void *buf;
+	size_t buf_size;
 
-	bufs = DIV_ROUND_UP(index_key_len, sizeof(*buf));
+	buf_size = round_up(index_key_len, sizeof(__le32));
 
 	if (index_key_len > sizeof(cookie->inline_key)) {
-		buf = kcalloc(bufs, sizeof(*buf), GFP_KERNEL);
+		buf = kzalloc(buf_size, GFP_KERNEL);
 		if (!buf)
 			return -ENOMEM;
 		cookie->key = buf;
 	} else {
-		buf = (u32 *)cookie->inline_key;
+		buf = cookie->inline_key;
 	}
 
 	memcpy(buf, index_key, index_key_len);
-	cookie->key_hash = fscache_hash(cookie->volume->key_hash, buf, bufs);
+	cookie->key_hash = fscache_hash(cookie->volume->key_hash,
+					buf, buf_size);
 	return 0;
 }
 
diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
index 87884f4b34fb..f121c21590dc 100644
--- a/fs/fscache/internal.h
+++ b/fs/fscache/internal.h
@@ -86,7 +86,7 @@ static inline void fscache_end_operation(struct netfs_cache_resources *cres)
  */
 extern unsigned fscache_debug;
 
-extern unsigned int fscache_hash(unsigned int salt, unsigned int *data, unsigned int n);
+extern unsigned int fscache_hash(unsigned int salt, const void *data, size_t len);
 
 /*
  * proc.c
diff --git a/fs/fscache/main.c b/fs/fscache/main.c
index 01d57433702c..dad85fd84f6f 100644
--- a/fs/fscache/main.c
+++ b/fs/fscache/main.c
@@ -53,15 +53,16 @@ static inline unsigned int fold_hash(unsigned long x, unsigned long y)
 /*
  * Generate a hash. This is derived from full_name_hash(), but we want to be
  * sure it is arch independent and that it doesn't change as bits of the
- * computed hash value might appear on disk. The caller also guarantees that
- * the hashed data will be a series of aligned 32-bit words.
+ * computed hash value might appear on disk. The caller must guarantee that
+ * the source data is a multiple of four bytes in size.
  */
-unsigned int fscache_hash(unsigned int salt, unsigned int *data, unsigned int n)
+unsigned int fscache_hash(unsigned int salt, const void *data, size_t len)
 {
-	unsigned int a, x = 0, y = salt;
+	const __le32 *p = data;
+	unsigned int a, x = 0, y = salt, n = len / sizeof(__le32);
 
 	for (; n; n--) {
-		a = *data++;
+		a = le32_to_cpu(*p++);
 		HASH_MIX(x, y, a);
 	}
 	return fold_hash(x, y);
diff --git a/fs/fscache/volume.c b/fs/fscache/volume.c
index edd3c245010e..26a6b8f315e1 100644
--- a/fs/fscache/volume.c
+++ b/fs/fscache/volume.c
@@ -131,7 +131,7 @@ static long fscache_compare_volume(const struct fscache_volume *a,
 	if (a->key[0] != b->key[0])
 		return (long)a->key[0] - (long)b->key[0];
 
-	klen = round_up(a->key[0] + 1, sizeof(unsigned int));
+	klen = round_up(a->key[0] + 1, sizeof(__le32));
 	return memcmp(a->key, b->key, klen);
 }
 
@@ -225,7 +225,7 @@ static struct fscache_volume *fscache_alloc_volume(const char *volume_key,
 	 * hashing easier.
 	 */
 	klen = strlen(volume_key);
-	hlen = round_up(1 + klen + 1, sizeof(unsigned int));
+	hlen = round_up(1 + klen + 1, sizeof(__le32));
 	key = kzalloc(hlen, GFP_KERNEL);
 	if (!key)
 		goto err_vol;
@@ -233,8 +233,7 @@ static struct fscache_volume *fscache_alloc_volume(const char *volume_key,
 	memcpy(key + 1, volume_key, klen);
 
 	volume->key = key;
-	volume->key_hash = fscache_hash(0, (unsigned int *)key,
-					hlen / sizeof(unsigned int));
+	volume->key_hash = fscache_hash(0, key, hlen);
 
 	volume->debug_id = atomic_inc_return(&fscache_volume_debug_id);
 	down_write(&fscache_addremove_sem);