author	Stephen Rothwell	2019-04-16 17:27:20 +1000
committer	Greg Kroah-Hartman	2019-04-25 19:47:24 +0200
commit	ba2e544075c282a5bb21df7752efad3b42d6077b (patch)
tree	fc81c9eb298ff422f10d5686756e3a807675b663 /lib
parent	ae0c2d725512f32a0d1a25f0cf2f07616d33a72e (diff)
lib/siphash.c: mark expected switch fall-throughs
In preparation for enabling -Wimplicit-fallthrough, mark the switch cases
where we expect to fall through. This patch aims to suppress up to 18
missing-break-in-switch false positives on some architectures.

Cc: Gustavo A. R. Silva <gustavo@embeddedor.com>
Cc: Kees Cook <keescook@chromium.org>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Reviewed-by: Jason A. Donenfeld <Jason@zx2c4.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
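For illustration only (not part of the patch): a minimal standalone sketch of the convention being applied. When built with gcc -Wimplicit-fallthrough, an unannotated drop from one case into the next triggers a warning, while a comment matching the compiler's "fall through" pattern placed immediately before the next label silences it. The helper name and values below are hypothetical; later kernels express the same intent with the fallthrough pseudo-keyword instead of a comment.

#include <stdint.h>

/* Hypothetical example, not kernel code. */
static uint32_t accumulate(uint32_t x, int stage)
{
	switch (stage) {
	case 2:
		x += 20;
		/* fall through */
	case 1:
		x += 10;
		/* fall through */
	case 0:
		x += 1;
		break;
	}
	return x;
}

Called as accumulate(0, 2), every stage runs and the result is 31; removing either comment changes nothing at run time but reintroduces the warning.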
Diffstat (limited to 'lib')
-rw-r--r--	lib/siphash.c	36
1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/lib/siphash.c b/lib/siphash.c
index 3ae58b4edad6..c47bb6ff2149 100644
--- a/lib/siphash.c
+++ b/lib/siphash.c
@@ -68,11 +68,11 @@ u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
bytemask_from_count(left)));
#else
switch (left) {
- case 7: b |= ((u64)end[6]) << 48;
- case 6: b |= ((u64)end[5]) << 40;
- case 5: b |= ((u64)end[4]) << 32;
+ case 7: b |= ((u64)end[6]) << 48; /* fall through */
+ case 6: b |= ((u64)end[5]) << 40; /* fall through */
+ case 5: b |= ((u64)end[4]) << 32; /* fall through */
case 4: b |= le32_to_cpup(data); break;
- case 3: b |= ((u64)end[2]) << 16;
+ case 3: b |= ((u64)end[2]) << 16; /* fall through */
case 2: b |= le16_to_cpup(data); break;
case 1: b |= end[0];
}
@@ -101,11 +101,11 @@ u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
bytemask_from_count(left)));
#else
switch (left) {
- case 7: b |= ((u64)end[6]) << 48;
- case 6: b |= ((u64)end[5]) << 40;
- case 5: b |= ((u64)end[4]) << 32;
+ case 7: b |= ((u64)end[6]) << 48; /* fall through */
+ case 6: b |= ((u64)end[5]) << 40; /* fall through */
+ case 5: b |= ((u64)end[4]) << 32; /* fall through */
case 4: b |= get_unaligned_le32(end); break;
- case 3: b |= ((u64)end[2]) << 16;
+ case 3: b |= ((u64)end[2]) << 16; /* fall through */
case 2: b |= get_unaligned_le16(end); break;
case 1: b |= end[0];
}
@@ -268,11 +268,11 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
bytemask_from_count(left)));
#else
switch (left) {
- case 7: b |= ((u64)end[6]) << 48;
- case 6: b |= ((u64)end[5]) << 40;
- case 5: b |= ((u64)end[4]) << 32;
+ case 7: b |= ((u64)end[6]) << 48; /* fall through */
+ case 6: b |= ((u64)end[5]) << 40; /* fall through */
+ case 5: b |= ((u64)end[4]) << 32; /* fall through */
case 4: b |= le32_to_cpup(data); break;
- case 3: b |= ((u64)end[2]) << 16;
+ case 3: b |= ((u64)end[2]) << 16; /* fall through */
case 2: b |= le16_to_cpup(data); break;
case 1: b |= end[0];
}
@@ -301,11 +301,11 @@ u32 __hsiphash_unaligned(const void *data, size_t len,
bytemask_from_count(left)));
#else
switch (left) {
- case 7: b |= ((u64)end[6]) << 48;
- case 6: b |= ((u64)end[5]) << 40;
- case 5: b |= ((u64)end[4]) << 32;
+ case 7: b |= ((u64)end[6]) << 48; /* fall through */
+ case 6: b |= ((u64)end[5]) << 40; /* fall through */
+ case 5: b |= ((u64)end[4]) << 32; /* fall through */
case 4: b |= get_unaligned_le32(end); break;
- case 3: b |= ((u64)end[2]) << 16;
+ case 3: b |= ((u64)end[2]) << 16; /* fall through */
case 2: b |= get_unaligned_le16(end); break;
case 1: b |= end[0];
}
@@ -431,7 +431,7 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
v0 ^= m;
}
switch (left) {
- case 3: b |= ((u32)end[2]) << 16;
+ case 3: b |= ((u32)end[2]) << 16; /* fall through */
case 2: b |= le16_to_cpup(data); break;
case 1: b |= end[0];
}
@@ -454,7 +454,7 @@ u32 __hsiphash_unaligned(const void *data, size_t len,
v0 ^= m;
}
switch (left) {
- case 3: b |= ((u32)end[2]) << 16;
+ case 3: b |= ((u32)end[2]) << 16; /* fall through */
case 2: b |= get_unaligned_le16(end); break;
case 1: b |= end[0];
}
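Each switch block touched above implements the same tail-handling idea: the final left bytes of the message (1-7, or 1-3 in the 32-bit variants) are OR-ed into the word b at increasing shifts, with every case deliberately falling through to the shorter cases below it so one dispatch covers every possible tail length. A rough, hypothetical standalone equivalent of the 64-bit path, simplified to byte loads only (the real code uses le16/le32 loads for the 2- and 4-byte cases):

#include <stddef.h>
#include <stdint.h>

/* Hypothetical sketch, not the kernel implementation. */
static uint64_t siphash_tail(const uint8_t *end, size_t left)
{
	uint64_t b = 0;

	switch (left) {
	case 7: b |= ((uint64_t)end[6]) << 48;	/* fall through */
	case 6: b |= ((uint64_t)end[5]) << 40;	/* fall through */
	case 5: b |= ((uint64_t)end[4]) << 32;	/* fall through */
	case 4: b |= ((uint64_t)end[3]) << 24;	/* fall through */
	case 3: b |= ((uint64_t)end[2]) << 16;	/* fall through */
	case 2: b |= ((uint64_t)end[1]) << 8;	/* fall through */
	case 1: b |= end[0];
	}
	return b;
}

For example, with left == 3 and end pointing at bytes {0xaa, 0xbb, 0xcc}, the result is 0xccbbaa, the same little-endian value the patched code assembles from end[2] plus a 16-bit load.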