From e73aaae2fa9024832e1f42e30c787c7baf61d014 Mon Sep 17 00:00:00 2001 From: Jason A. Donenfeld Date: Sat, 7 May 2022 14:03:46 +0200 Subject: siphash: use one source of truth for siphash permutations The SipHash family of permutations is currently used in three places: - siphash.c itself, used in the ordinary way it was intended. - random32.c, in a construction from an anonymous contributor. - random.c, as part of its fast_mix function. Each one of these places reinvents the wheel with the same C code, same rotation constants, and same symmetry-breaking constants. This commit tidies things up a bit by placing macros for the permutations and constants into siphash.h, where each of the three .c users can access them. It also leaves a note dissuading more users of them from emerging. Signed-off-by: Jason A. Donenfeld --- lib/siphash.c | 32 ++++++++++---------------------- 1 file changed, 10 insertions(+), 22 deletions(-) (limited to 'lib') diff --git a/lib/siphash.c b/lib/siphash.c index 72b9068ab57b..71d315a6ad62 100644 --- a/lib/siphash.c +++ b/lib/siphash.c @@ -18,19 +18,13 @@ #include <asm/word-at-a-time.h> #endif -#define SIPROUND \ - do { \ - v0 += v1; v1 = rol64(v1, 13); v1 ^= v0; v0 = rol64(v0, 32); \ - v2 += v3; v3 = rol64(v3, 16); v3 ^= v2; \ - v0 += v3; v3 = rol64(v3, 21); v3 ^= v0; \ - v2 += v1; v1 = rol64(v1, 17); v1 ^= v2; v2 = rol64(v2, 32); \ - } while (0) +#define SIPROUND SIPHASH_PERMUTATION(v0, v1, v2, v3) #define PREAMBLE(len) \ - u64 v0 = 0x736f6d6570736575ULL; \ - u64 v1 = 0x646f72616e646f6dULL; \ - u64 v2 = 0x6c7967656e657261ULL; \ - u64 v3 = 0x7465646279746573ULL; \ + u64 v0 = SIPHASH_CONST_0; \ + u64 v1 = SIPHASH_CONST_1; \ + u64 v2 = SIPHASH_CONST_2; \ + u64 v3 = SIPHASH_CONST_3; \ u64 b = ((u64)(len)) << 56; \ v3 ^= key->key[1]; \ v2 ^= key->key[0]; \ @@ -389,19 +383,13 @@ u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third, } EXPORT_SYMBOL(hsiphash_4u32); #else -#define HSIPROUND \ - do { \ - v0 += v1; v1 = rol32(v1, 5); v1 ^= v0; v0 = rol32(v0, 16); \ - v2 += v3; v3 = rol32(v3, 8); v3 ^= v2; \ - v0 += v3; v3 = rol32(v3, 7); v3 ^= v0; \ - v2 += v1; v1 = rol32(v1, 13); v1 ^= v2; v2 = rol32(v2, 16); \ - } while (0) +#define HSIPROUND HSIPHASH_PERMUTATION(v0, v1, v2, v3) #define HPREAMBLE(len) \ - u32 v0 = 0; \ - u32 v1 = 0; \ - u32 v2 = 0x6c796765U; \ - u32 v3 = 0x74656462U; \ + u32 v0 = HSIPHASH_CONST_0; \ + u32 v1 = HSIPHASH_CONST_1; \ + u32 v2 = HSIPHASH_CONST_2; \ + u32 v3 = HSIPHASH_CONST_3; \ u32 b = ((u32)(len)) << 24; \ v3 ^= key->key[1]; \ v2 ^= key->key[0]; \ -- cgit v1.2.3 From d4150779e60fb6c49be25572596b2cdfc5d46a09 Mon Sep 17 00:00:00 2001 From: Jason A. Donenfeld Date: Wed, 11 May 2022 16:11:29 +0200 Subject: random32: use real rng for non-deterministic randomness random32.c has two random number generators in it: one that is meant to be used deterministically, with some predefined seed, and one that does the same exact thing as random.c, except does it poorly. The first one has some use cases. The second one no longer does and can be replaced with calls to random.c's proper random number generator. The relatively recent siphash-based bad random32.c code was added in response to concerns that the prior random32.c was too deterministic. Out of fears that random.c was (at the time) too slow, this code was anonymously contributed. Then out of that emerged a kind of shadow entropy gathering system, with its own tentacles throughout various net code, added willy nilly. Stop👏making👏bespoke👏random👏number👏generators👏.
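[For reference while reading the hunks above and below: the include/linux/siphash.h half of the first patch is not quoted in this 'lib'-limited log. Reconstructed from the rounds and constants it deletes from lib/siphash.c, the shared macros would read roughly as follows — treat this as a sketch of the header, not a verbatim quote. rol64()/rol32() come from <linux/bitops.h>.]

/* Exposed only so siphash.c, random32.c, and random.c can share them;
 * the patch notes that new users are discouraged. */
#define SIPHASH_PERMUTATION(a, b, c, d) ( \
	(a) += (b), (b) = rol64((b), 13), (b) ^= (a), (a) = rol64((a), 32), \
	(c) += (d), (d) = rol64((d), 16), (d) ^= (c), \
	(a) += (d), (d) = rol64((d), 21), (d) ^= (a), \
	(c) += (b), (b) = rol64((b), 17), (b) ^= (c), (c) = rol64((c), 32))

#define SIPHASH_CONST_0 0x736f6d6570736575ULL
#define SIPHASH_CONST_1 0x646f72616e646f6dULL
#define SIPHASH_CONST_2 0x6c7967656e657261ULL
#define SIPHASH_CONST_3 0x7465646279746573ULL

#define HSIPHASH_PERMUTATION(a, b, c, d) ( \
	(a) += (b), (b) = rol32((b), 5), (b) ^= (a), (a) = rol32((a), 16), \
	(c) += (d), (d) = rol32((d), 8), (d) ^= (c), \
	(a) += (d), (d) = rol32((d), 7), (d) ^= (a), \
	(c) += (b), (b) = rol32((b), 13), (b) ^= (c), (c) = rol32((c), 16))

#define HSIPHASH_CONST_0 0U
#define HSIPHASH_CONST_1 0U
#define HSIPHASH_CONST_2 0x6c796765U
#define HSIPHASH_CONST_3 0x74656462U

[With these in one place, SIPROUND, HSIPROUND, random32.c's PRND_SIPROUND, and random.c's fast_mix can all expand the same permutation instead of restating it.]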
Fortunately, recent advances in random.c mean that we can stop playing with this sketchiness, and just use get_random_u32(), which is now fast enough. In micro benchmarks using RDPMC, I'm seeing the same median cycle count between the two functions, with the mean being _slightly_ higher due to batches refilling (which we can optimize further if need be). However, when doing *real* benchmarks of the net functions that actually use these random numbers, the mean cycles actually *decreased* slightly (with the median still staying the same), likely because the additional prandom code means icache misses and complexity, whereas random.c is generally already being used by something else nearby. The biggest benefit of this is that there are many users of prandom who probably should be using cryptographically secure random numbers. This makes all of those accidental cases become secure by just flipping a switch. Later on, we can do a tree-wide cleanup to remove the static inline wrapper functions that this commit adds. There are also some low-ish hanging fruits for making this even faster in the future: a get_random_u16() function for use in the networking stack will give a 2x performance boost there, using SIMD for ChaCha20 will let us compute 4 or 8 or 16 blocks of output in parallel, instead of just one, giving us large buffers for cheap, and introducing a get_random_*_bh() function that assumes irqs are already disabled will shave off a few cycles for ordinary calls. These are things we can chip away at down the road. Acked-by: Jakub Kicinski Acked-by: Theodore Ts'o Signed-off-by: Jason A. Donenfeld --- include/linux/prandom.h | 52 +------- kernel/time/timer.c | 2 - lib/random32.c | 347 +----------------------------------------------- net/core/dev.c | 3 - net/ipv4/devinet.c | 4 +- net/ipv6/addrconf.c | 2 - 6 files changed, 15 insertions(+), 395 deletions(-) (limited to 'lib') diff --git a/include/linux/prandom.h b/include/linux/prandom.h index a4aadd2dc153..deace5fb4e62 100644 --- a/include/linux/prandom.h +++ b/include/linux/prandom.h @@ -10,53 +10,16 @@ #include <linux/types.h> #include <linux/percpu.h> -#include <linux/siphash.h> +#include <linux/random.h> -u32 prandom_u32(void); -void prandom_bytes(void *buf, size_t nbytes); -void prandom_seed(u32 seed); -void prandom_reseed_late(void); - -DECLARE_PER_CPU(unsigned long, net_rand_noise); - -#define PRANDOM_ADD_NOISE(a, b, c, d) \ - prandom_u32_add_noise((unsigned long)(a), (unsigned long)(b), \ - (unsigned long)(c), (unsigned long)(d)) - -#if BITS_PER_LONG == 64 -/* - * The core SipHash round function. Each line can be executed in - * parallel given enough CPU resources. - */ -#define PRND_SIPROUND(v0, v1, v2, v3) SIPHASH_PERMUTATION(v0, v1, v2, v3) - -#define PRND_K0 (SIPHASH_CONST_0 ^ SIPHASH_CONST_2) -#define PRND_K1 (SIPHASH_CONST_1 ^ SIPHASH_CONST_3) - -#elif BITS_PER_LONG == 32 -/* - * On 32-bit machines, we use HSipHash, a reduced-width version of SipHash. - * This is weaker, but 32-bit machines are not used for high-traffic - * applications, so there is less output for an attacker to analyze.
- */ -#define PRND_SIPROUND(v0, v1, v2, v3) HSIPHASH_PERMUTATION(v0, v1, v2, v3) -#define PRND_K0 (HSIPHASH_CONST_0 ^ HSIPHASH_CONST_2) -#define PRND_K1 (HSIPHASH_CONST_1 ^ HSIPHASH_CONST_3) - -#else -#error Unsupported BITS_PER_LONG -#endif +static inline u32 prandom_u32(void) +{ + return get_random_u32(); +} -static inline void prandom_u32_add_noise(unsigned long a, unsigned long b, - unsigned long c, unsigned long d) +static inline void prandom_bytes(void *buf, size_t nbytes) { - /* - * This is not used cryptographically; it's just - * a convenient 4-word hash function. (3 xor, 2 add, 2 rol) - */ - a ^= raw_cpu_read(net_rand_noise); - PRND_SIPROUND(a, b, c, d); - raw_cpu_write(net_rand_noise, d); + return get_random_bytes(buf, nbytes); } struct rnd_state { @@ -108,7 +71,6 @@ static inline void prandom_seed_state(struct rnd_state *state, u64 seed) state->s2 = __seed(i, 8U); state->s3 = __seed(i, 16U); state->s4 = __seed(i, 128U); - PRANDOM_ADD_NOISE(state, i, 0, 0); } /* Pseudo random number generator from numerical recipes. */ diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 9dd2a39cb3b0..c12fe329c9ff 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -1780,8 +1780,6 @@ void update_process_times(int user_tick) { struct task_struct *p = current; - PRANDOM_ADD_NOISE(jiffies, user_tick, p, 0); - /* Note: this timer irq context must be accounted for as well. */ account_process_tick(p, user_tick); run_local_timers(); diff --git a/lib/random32.c b/lib/random32.c index 976632003ec6..d5d9029362cb 100644 --- a/lib/random32.c +++ b/lib/random32.c @@ -245,25 +245,13 @@ static struct prandom_test2 { { 407983964U, 921U, 728767059U }, }; -static u32 __extract_hwseed(void) -{ - unsigned int val = 0; - - (void)(arch_get_random_seed_int(&val) || - arch_get_random_int(&val)); - - return val; -} - -static void prandom_seed_early(struct rnd_state *state, u32 seed, - bool mix_with_hwseed) +static void prandom_state_selftest_seed(struct rnd_state *state, u32 seed) { #define LCG(x) ((x) * 69069U) /* super-duper LCG */ -#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0) - state->s1 = __seed(HWSEED() ^ LCG(seed), 2U); - state->s2 = __seed(HWSEED() ^ LCG(state->s1), 8U); - state->s3 = __seed(HWSEED() ^ LCG(state->s2), 16U); - state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U); + state->s1 = __seed(LCG(seed), 2U); + state->s2 = __seed(LCG(state->s1), 8U); + state->s3 = __seed(LCG(state->s2), 16U); + state->s4 = __seed(LCG(state->s3), 128U); } static int __init prandom_state_selftest(void) @@ -274,7 +262,7 @@ static int __init prandom_state_selftest(void) for (i = 0; i < ARRAY_SIZE(test1); i++) { struct rnd_state state; - prandom_seed_early(&state, test1[i].seed, false); + prandom_state_selftest_seed(&state, test1[i].seed); prandom_warmup(&state); if (test1[i].result != prandom_u32_state(&state)) @@ -289,7 +277,7 @@ static int __init prandom_state_selftest(void) for (i = 0; i < ARRAY_SIZE(test2); i++) { struct rnd_state state; - prandom_seed_early(&state, test2[i].seed, false); + prandom_state_selftest_seed(&state, test2[i].seed); prandom_warmup(&state); for (j = 0; j < test2[i].iteration - 1; j++) @@ -310,324 +298,3 @@ static int __init prandom_state_selftest(void) } core_initcall(prandom_state_selftest); #endif - -/* - * The prandom_u32() implementation is now completely separate from the - * prandom_state() functions, which are retained (for now) for compatibility. 
- * - * Because of (ab)use in the networking code for choosing random TCP/UDP port - * numbers, which open DoS possibilities if guessable, we want something - * stronger than a standard PRNG. But the performance requirements of - * the network code do not allow robust crypto for this application. - * - * So this is a homebrew Junior Spaceman implementation, based on the - * lowest-latency trustworthy crypto primitive available, SipHash. - * (The authors of SipHash have not been consulted about this abuse of - * their work.) - * - * Standard SipHash-2-4 uses 2n+4 rounds to hash n words of input to - * one word of output. This abbreviated version uses 2 rounds per word - * of output. - */ - -struct siprand_state { - unsigned long v0; - unsigned long v1; - unsigned long v2; - unsigned long v3; -}; - -static DEFINE_PER_CPU(struct siprand_state, net_rand_state) __latent_entropy; -DEFINE_PER_CPU(unsigned long, net_rand_noise); -EXPORT_PER_CPU_SYMBOL(net_rand_noise); - -/* - * This is the core CPRNG function. As "pseudorandom", this is not used - * for truly valuable things, just intended to be a PITA to guess. - * For maximum speed, we do just two SipHash rounds per word. This is - * the same rate as 4 rounds per 64 bits that SipHash normally uses, - * so hopefully it's reasonably secure. - * - * There are two changes from the official SipHash finalization: - * - We omit some constants XORed with v2 in the SipHash spec as irrelevant; - * they are there only to make the output rounds distinct from the input - * rounds, and this application has no input rounds. - * - Rather than returning v0^v1^v2^v3, return v1+v3. - * If you look at the SipHash round, the last operation on v3 is - * "v3 ^= v0", so "v0 ^ v3" just undoes that, a waste of time. - * Likewise "v1 ^= v2". (The rotate of v2 makes a difference, but - * it still cancels out half of the bits in v2 for no benefit.) - * Second, since the last combining operation was xor, continue the - * pattern of alternating xor/add for a tiny bit of extra non-linearity. - */ -static inline u32 siprand_u32(struct siprand_state *s) -{ - unsigned long v0 = s->v0, v1 = s->v1, v2 = s->v2, v3 = s->v3; - unsigned long n = raw_cpu_read(net_rand_noise); - - v3 ^= n; - PRND_SIPROUND(v0, v1, v2, v3); - PRND_SIPROUND(v0, v1, v2, v3); - v0 ^= n; - s->v0 = v0; s->v1 = v1; s->v2 = v2; s->v3 = v3; - return v1 + v3; -} - - -/** - * prandom_u32 - pseudo random number generator - * - * A 32 bit pseudo-random number is generated using a fast - * algorithm suitable for simulation. This algorithm is NOT - * considered safe for cryptographic use. 
- */ -u32 prandom_u32(void) -{ - struct siprand_state *state = get_cpu_ptr(&net_rand_state); - u32 res = siprand_u32(state); - - put_cpu_ptr(&net_rand_state); - return res; -} -EXPORT_SYMBOL(prandom_u32); - -/** - * prandom_bytes - get the requested number of pseudo-random bytes - * @buf: where to copy the pseudo-random bytes to - * @bytes: the requested number of bytes - */ -void prandom_bytes(void *buf, size_t bytes) -{ - struct siprand_state *state = get_cpu_ptr(&net_rand_state); - u8 *ptr = buf; - - while (bytes >= sizeof(u32)) { - put_unaligned(siprand_u32(state), (u32 *)ptr); - ptr += sizeof(u32); - bytes -= sizeof(u32); - } - - if (bytes > 0) { - u32 rem = siprand_u32(state); - - do { - *ptr++ = (u8)rem; - rem >>= BITS_PER_BYTE; - } while (--bytes > 0); - } - put_cpu_ptr(&net_rand_state); -} -EXPORT_SYMBOL(prandom_bytes); - -/** - * prandom_seed - add entropy to pseudo random number generator - * @entropy: entropy value - * - * Add some additional seed material to the prandom pool. - * The "entropy" is actually our IP address (the only caller is - * the network code), not for unpredictability, but to ensure that - * different machines are initialized differently. - */ -void prandom_seed(u32 entropy) -{ - int i; - - add_device_randomness(&entropy, sizeof(entropy)); - - for_each_possible_cpu(i) { - struct siprand_state *state = per_cpu_ptr(&net_rand_state, i); - unsigned long v0 = state->v0, v1 = state->v1; - unsigned long v2 = state->v2, v3 = state->v3; - - do { - v3 ^= entropy; - PRND_SIPROUND(v0, v1, v2, v3); - PRND_SIPROUND(v0, v1, v2, v3); - v0 ^= entropy; - } while (unlikely(!v0 || !v1 || !v2 || !v3)); - - WRITE_ONCE(state->v0, v0); - WRITE_ONCE(state->v1, v1); - WRITE_ONCE(state->v2, v2); - WRITE_ONCE(state->v3, v3); - } -} -EXPORT_SYMBOL(prandom_seed); - -/* - * Generate some initially weak seeding values to allow - * the prandom_u32() engine to be started. - */ -static int __init prandom_init_early(void) -{ - int i; - unsigned long v0, v1, v2, v3; - - if (!arch_get_random_long(&v0)) - v0 = jiffies; - if (!arch_get_random_long(&v1)) - v1 = random_get_entropy(); - v2 = v0 ^ PRND_K0; - v3 = v1 ^ PRND_K1; - - for_each_possible_cpu(i) { - struct siprand_state *state; - - v3 ^= i; - PRND_SIPROUND(v0, v1, v2, v3); - PRND_SIPROUND(v0, v1, v2, v3); - v0 ^= i; - - state = per_cpu_ptr(&net_rand_state, i); - state->v0 = v0; state->v1 = v1; - state->v2 = v2; state->v3 = v3; - } - - return 0; -} -core_initcall(prandom_init_early); - - -/* Stronger reseeding when available, and periodically thereafter. */ -static void prandom_reseed(struct timer_list *unused); - -static DEFINE_TIMER(seed_timer, prandom_reseed); - -static void prandom_reseed(struct timer_list *unused) -{ - unsigned long expires; - int i; - - /* - * Reinitialize each CPU's PRNG with 128 bits of key. - * No locking on the CPUs, but then somewhat random results are, - * well, expected. - */ - for_each_possible_cpu(i) { - struct siprand_state *state; - unsigned long v0 = get_random_long(), v2 = v0 ^ PRND_K0; - unsigned long v1 = get_random_long(), v3 = v1 ^ PRND_K1; -#if BITS_PER_LONG == 32 - int j; - - /* - * On 32-bit machines, hash in two extra words to - * approximate 128-bit key length. Not that the hash - * has that much security, but this prevents a trivial - * 64-bit brute force. 
- */ - for (j = 0; j < 2; j++) { - unsigned long m = get_random_long(); - - v3 ^= m; - PRND_SIPROUND(v0, v1, v2, v3); - PRND_SIPROUND(v0, v1, v2, v3); - v0 ^= m; - } -#endif - /* - * Probably impossible in practice, but there is a - * theoretical risk that a race between this reseeding - * and the target CPU writing its state back could - * create the all-zero SipHash fixed point. - * - * To ensure that never happens, ensure the state - * we write contains no zero words. - */ - state = per_cpu_ptr(&net_rand_state, i); - WRITE_ONCE(state->v0, v0 ? v0 : -1ul); - WRITE_ONCE(state->v1, v1 ? v1 : -1ul); - WRITE_ONCE(state->v2, v2 ? v2 : -1ul); - WRITE_ONCE(state->v3, v3 ? v3 : -1ul); - } - - /* reseed every ~60 seconds, in [40 .. 80) interval with slack */ - expires = round_jiffies(jiffies + 40 * HZ + prandom_u32_max(40 * HZ)); - mod_timer(&seed_timer, expires); -} - -/* - * The random ready callback can be called from almost any interrupt. - * To avoid worrying about whether it's safe to delay that interrupt - * long enough to seed all CPUs, just schedule an immediate timer event. - */ -static int prandom_timer_start(struct notifier_block *nb, - unsigned long action, void *data) -{ - mod_timer(&seed_timer, jiffies); - return 0; -} - -#ifdef CONFIG_RANDOM32_SELFTEST -/* Principle: True 32-bit random numbers will all have 16 differing bits on - * average. For each 32-bit number, there are 601M numbers differing by 16 - * bits, and 89% of the numbers differ by at least 12 bits. Note that more - * than 16 differing bits also implies a correlation with inverted bits. Thus - * we take 1024 random numbers and compare each of them to the other ones, - * counting the deviation of correlated bits to 16. Constants report 32, - * counters 32-log2(TEST_SIZE), and pure randoms, around 6 or lower. With the - * u32 total, TEST_SIZE may be as large as 4096 samples. - */ -#define TEST_SIZE 1024 -static int __init prandom32_state_selftest(void) -{ - unsigned int x, y, bits, samples; - u32 xor, flip; - u32 total; - u32 *data; - - data = kmalloc(sizeof(*data) * TEST_SIZE, GFP_KERNEL); - if (!data) - return 0; - - for (samples = 0; samples < TEST_SIZE; samples++) - data[samples] = prandom_u32(); - - flip = total = 0; - for (x = 0; x < samples; x++) { - for (y = 0; y < samples; y++) { - if (x == y) - continue; - xor = data[x] ^ data[y]; - flip |= xor; - bits = hweight32(xor); - total += (bits - 16) * (bits - 16); - } - } - - /* We'll return the average deviation as 2*sqrt(corr/samples), which - * is also sqrt(4*corr/samples) which provides a better resolution. - */ - bits = int_sqrt(total / (samples * (samples - 1)) * 4); - if (bits > 6) - pr_warn("prandom32: self test failed (at least %u bits" - " correlated, fixed_mask=%#x fixed_value=%#x\n", - bits, ~flip, data[0] & ~flip); - else - pr_info("prandom32: self test passed (less than %u bits" - " correlated)\n", - bits+1); - kfree(data); - return 0; -} -core_initcall(prandom32_state_selftest); -#endif /* CONFIG_RANDOM32_SELFTEST */ - -/* - * Start periodic full reseeding as soon as strong - * random numbers are available. 
- */ -static int __init prandom_init_late(void) -{ - static struct notifier_block random_ready = { - .notifier_call = prandom_timer_start - }; - int ret = register_random_ready_notifier(&random_ready); - - if (ret == -EALREADY) { - prandom_timer_start(&random_ready, 0, NULL); - ret = 0; - } - return ret; -} -late_initcall(prandom_init_late); diff --git a/net/core/dev.c b/net/core/dev.c index 1461c2d9dec8..19c9beb1136b 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3527,7 +3527,6 @@ static int xmit_one(struct sk_buff *skb, struct net_device *dev, dev_queue_xmit_nit(skb, dev); len = skb->len; - PRANDOM_ADD_NOISE(skb, dev, txq, len + jiffies); trace_net_dev_start_xmit(skb, dev); rc = netdev_start_xmit(skb, dev, txq, more); trace_net_dev_xmit(skb, rc, dev, len); @@ -4168,7 +4167,6 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev) if (!skb) goto out; - PRANDOM_ADD_NOISE(skb, dev, txq, jiffies); HARD_TX_LOCK(dev, txq, cpu); if (!netif_xmit_stopped(txq)) { @@ -4234,7 +4232,6 @@ int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id) skb_set_queue_mapping(skb, queue_id); txq = skb_get_tx_queue(dev, skb); - PRANDOM_ADD_NOISE(skb, dev, txq, jiffies); local_bh_disable(); diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 53a6b14dc50a..3d6d33ac20cc 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -536,10 +536,8 @@ static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh, return ret; } - if (!(ifa->ifa_flags & IFA_F_SECONDARY)) { - prandom_seed((__force u32) ifa->ifa_local); + if (!(ifa->ifa_flags & IFA_F_SECONDARY)) ifap = last_primary; - } rcu_assign_pointer(ifa->ifa_next, *ifap); rcu_assign_pointer(*ifap, ifa); diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index b22504176588..e7c68fa12fae 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -3972,8 +3972,6 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp) addrconf_join_solict(dev, &ifp->addr); - prandom_seed((__force u32) ifp->addr.s6_addr32[3]); - read_lock_bh(&idev->lock); spin_lock(&ifp->lock); if (ifp->state == INET6_IFADDR_STATE_DEAD) -- cgit v1.2.3 From cc1e127bfa95b5fb2f9307e7168bf8b2b45b4c5e Mon Sep 17 00:00:00 2001 From: Jason A. Donenfeld Date: Mon, 9 May 2022 16:13:18 +0200 Subject: random: remove ratelimiting for in-kernel unseeded randomness The CONFIG_WARN_ALL_UNSEEDED_RANDOM debug option controls whether the kernel warns about all unseeded randomness or just the first instance. There's some complicated rate limiting and comparison to the previous caller, such that even with CONFIG_WARN_ALL_UNSEEDED_RANDOM enabled, developers still don't see all the messages or even an accurate count of how many were missed. This is the result of basically parallel mechanisms aimed at accomplishing more or less the same thing, added at different points in random.c history, which sort of compete with the first-instance-only limiting we have now. It turns out, however, that nobody cares about the first unseeded randomness instance of in-kernel users. The same first user has been there for ages now, and nobody is doing anything about it. It isn't even clear that anybody _can_ do anything about it. 
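[Aside: for init-time code that genuinely needs seeded randomness and can afford to sleep, the sanctioned pattern is the blocking interface named in the next paragraph. A minimal sketch — the driver name, key buffer, and initcall level here are invented for illustration, not taken from the patch:]

#include <linux/init.h>
#include <linux/random.h>

static u8 example_key[32]; /* hypothetical secret needed at boot */

static int __init example_keyed_init(void)
{
	/* Sleeps until the CRNG is seeded; nonzero only if interrupted. */
	int ret = get_random_bytes_wait(example_key, sizeof(example_key));

	if (unlikely(ret))
		return ret;
	return 0;
}
late_initcall(example_keyed_init);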
Most places that can do something about it have switched over to using get_random_bytes_wait() or wait_for_random_bytes(), which is the right thing to do, but there is still much code that needs randomness sometimes during init, and as a general rule, if you're not using one of the _wait functions or the readiness notifier callback, you're bound to be doing it wrong just based on that fact alone. So warning about this same first user that can't easily change is simply not an effective mechanism for anything at all. Users can't do anything about it, as the Kconfig text points out -- the problem isn't in userspace code -- and kernel developers don't or more often can't react to it. Instead, show the warning for all instances when CONFIG_WARN_ALL_UNSEEDED_RANDOM is set, so that developers can debug things as needed, or if it isn't set, don't show a warning at all. At the same time, CONFIG_WARN_ALL_UNSEEDED_RANDOM now implies setting random.ratelimit_disable=1 by default, since if you care about one you probably care about the other too. And we can clean up usage around the related urandom_warning ratelimiter as well (whose behavior isn't changing), so that it properly counts missed messages after the 10 message threshold is reached. Cc: Theodore Ts'o Cc: Dominik Brodowski Signed-off-by: Jason A. Donenfeld --- drivers/char/random.c | 62 +++++++++++++++------------------------------------ lib/Kconfig.debug | 3 +-- 2 files changed, 19 insertions(+), 46 deletions(-) (limited to 'lib') diff --git a/drivers/char/random.c b/drivers/char/random.c index 07200b8f4591..3860d534cf05 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -87,11 +87,10 @@ static DEFINE_SPINLOCK(random_ready_chain_lock); static RAW_NOTIFIER_HEAD(random_ready_chain); /* Control how we warn userspace.
*/ -static struct ratelimit_state unseeded_warning = - RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3); static struct ratelimit_state urandom_warning = RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3); -static int ratelimit_disable __read_mostly; +static int ratelimit_disable __read_mostly = + IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM); module_param_named(ratelimit_disable, ratelimit_disable, int, 0644); MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression"); @@ -182,27 +181,15 @@ static void process_random_ready_list(void) spin_unlock_irqrestore(&random_ready_chain_lock, flags); } -#define warn_unseeded_randomness(previous) \ - _warn_unseeded_randomness(__func__, (void *)_RET_IP_, (previous)) +#define warn_unseeded_randomness() \ + _warn_unseeded_randomness(__func__, (void *)_RET_IP_) -static void _warn_unseeded_randomness(const char *func_name, void *caller, void **previous) +static void _warn_unseeded_randomness(const char *func_name, void *caller) { -#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM - const bool print_once = false; -#else - static bool print_once __read_mostly; -#endif - - if (print_once || crng_ready() || - (previous && (caller == READ_ONCE(*previous)))) + if (!IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) || crng_ready()) return; - WRITE_ONCE(*previous, caller); -#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM - print_once = true; -#endif - if (__ratelimit(&unseeded_warning)) - printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", - func_name, caller, crng_init); + printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", + func_name, caller, crng_init); } @@ -455,9 +442,7 @@ static void _get_random_bytes(void *buf, size_t nbytes) */ void get_random_bytes(void *buf, size_t nbytes) { - static void *previous; - - warn_unseeded_randomness(&previous); + warn_unseeded_randomness(); _get_random_bytes(buf, nbytes); } EXPORT_SYMBOL(get_random_bytes); @@ -553,10 +538,9 @@ u64 get_random_u64(void) u64 ret; unsigned long flags; struct batched_entropy *batch; - static void *previous; unsigned long next_gen; - warn_unseeded_randomness(&previous); + warn_unseeded_randomness(); if (!crng_ready()) { _get_random_bytes(&ret, sizeof(ret)); @@ -592,10 +576,9 @@ u32 get_random_u32(void) u32 ret; unsigned long flags; struct batched_entropy *batch; - static void *previous; unsigned long next_gen; - warn_unseeded_randomness(&previous); + warn_unseeded_randomness(); if (!crng_ready()) { _get_random_bytes(&ret, sizeof(ret)); @@ -822,16 +805,9 @@ static void credit_init_bits(size_t nbits) wake_up_interruptible(&crng_init_wait); kill_fasync(&fasync, SIGIO, POLL_IN); pr_notice("crng init done\n"); - if (unseeded_warning.missed) { - pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n", - unseeded_warning.missed); - unseeded_warning.missed = 0; - } - if (urandom_warning.missed) { + if (urandom_warning.missed) pr_notice("%d urandom warning(s) missed due to ratelimiting\n", urandom_warning.missed); - urandom_warning.missed = 0; - } } else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) { spin_lock_irqsave(&base_crng.lock, flags); /* Check if crng_init is CRNG_EMPTY, to avoid race with crng_reseed(). 
*/ @@ -976,11 +952,6 @@ int __init rand_initialize(void) else if (arch_init && trust_cpu) credit_init_bits(BLAKE2S_BLOCK_SIZE * 8); - if (ratelimit_disable) { - urandom_warning.interval = 0; - unseeded_warning.interval = 0; - } - WARN_ON(register_pm_notifier(&pm_notifier)); WARN(!random_get_entropy(), "Missing cycle counter and fallback timer; RNG " @@ -1487,11 +1458,14 @@ static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes, if (!crng_ready()) try_to_generate_entropy(); - if (!crng_ready() && maxwarn > 0) { - maxwarn--; - if (__ratelimit(&urandom_warning)) + if (!crng_ready()) { + if (!ratelimit_disable && maxwarn <= 0) + ++urandom_warning.missed; + else if (ratelimit_disable || __ratelimit(&urandom_warning)) { + --maxwarn; pr_notice("%s: uninitialized urandom read (%zd bytes read)\n", current->comm, nbytes); + } } return get_random_bytes_user(buf, nbytes); diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 075cd25363ac..7e282970177a 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1616,8 +1616,7 @@ config WARN_ALL_UNSEEDED_RANDOM so architecture maintainers really need to do what they can to get the CRNG seeded sooner after the system is booted. However, since users cannot do anything actionable to - address this, by default the kernel will issue only a single - warning for the first use of unseeded randomness. + address this, by default this option is disabled. Say Y here if you want to receive warnings for all uses of unseeded randomness. This will be of use primarily for -- cgit v1.2.3 From 248561ad25a8ba4ecbc7df42f9a5a82fd5fbb4f6 Mon Sep 17 00:00:00 2001 From: Jason A. Donenfeld Date: Sat, 14 May 2022 13:09:17 +0200 Subject: random: remove get_random_bytes_arch() and add rng_has_arch_random() The RNG incorporates RDRAND into its state at boot and every time it reseeds, so there's no reason for callers to use it directly. The hashing that the RNG does on it is preferable to using the bytes raw. The only current use case of get_random_bytes_arch() is vsprintf's siphash key for pointer hashing, which uses it to initialize the pointer secret earlier than usual if RDRAND is available. In order to replace this narrow use case, just expose whether RDRAND is mixed into the RNG, with a new function called rng_has_arch_random(). With that taken care of, there are no users of get_random_bytes_arch() left, so it can be removed. Later, if trust_cpu gets turned on by default (as most distros are doing), this one use of rng_has_arch_random() can probably go away as well. Cc: Steven Rostedt Cc: Sergey Senozhatsky Acked-by: Petr Mladek # for vsprintf.c Signed-off-by: Jason A. Donenfeld --- drivers/char/random.c | 49 ++++++++++++++++--------------------------------- include/linux/random.h | 2 +- lib/vsprintf.c | 7 +++---- 3 files changed, 20 insertions(+), 38 deletions(-) (limited to 'lib') diff --git a/drivers/char/random.c b/drivers/char/random.c index 7ec700683e42..6b8c89378954 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -433,12 +433,9 @@ static void _get_random_bytes(void *buf, size_t len) /* * This function is the exported kernel interface. It returns some * number of good random numbers, suitable for key generation, seeding - * TCP sequence numbers, etc. It does not rely on the hardware random - * number generator. For random bytes direct from the hardware RNG - * (when available), use get_random_bytes_arch(). 
In order to ensure - * that the randomness provided by this function is okay, the function - * wait_for_random_bytes() should be called and return 0 at least once - * at any point prior. + * TCP sequence numbers, etc. In order to ensure that the randomness + * provided by this function is okay, the function wait_for_random_bytes() + * should be called and return 0 at least once at any point prior. */ void get_random_bytes(void *buf, size_t len) { @@ -655,33 +652,6 @@ unsigned long randomize_page(unsigned long start, unsigned long range) return start + (get_random_long() % range << PAGE_SHIFT); } -/* - * This function will use the architecture-specific hardware random - * number generator if it is available. It is not recommended for - * use. Use get_random_bytes() instead. It returns the number of - * bytes filled in. - */ -size_t __must_check get_random_bytes_arch(void *buf, size_t len) -{ - size_t left = len; - u8 *p = buf; - - while (left) { - unsigned long v; - size_t block_len = min_t(size_t, left, sizeof(unsigned long)); - - if (!arch_get_random_long(&v)) - break; - - memcpy(p, &v, block_len); - p += block_len; - left -= block_len; - } - - return len - left; -} -EXPORT_SYMBOL(get_random_bytes_arch); - /********************************************************************** * @@ -879,6 +849,7 @@ static void __cold _credit_init_bits(size_t bits) * **********************************************************************/ +static bool used_arch_random; static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU); static bool trust_bootloader __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER); static int __init parse_trust_cpu(char *arg) @@ -956,6 +927,7 @@ int __init random_init(const char *command_line) crng_reseed(); else if (trust_cpu) credit_init_bits(arch_bytes * 8); + used_arch_random = arch_bytes * 8 >= POOL_READY_BITS; WARN_ON(register_pm_notifier(&pm_notifier)); @@ -964,6 +936,17 @@ int __init random_init(const char *command_line) return 0; } +/* + * Returns whether arch randomness has been mixed into the initial + * state of the RNG, regardless of whether or not that randomness + * was credited. Knowing this is only good for a very limited set + * of uses, such as early init printk pointer obfuscation. + */ +bool rng_has_arch_random(void) +{ + return used_arch_random; +} + /* * Add device- or boot-specific data to the input pool to help * initialize it.
diff --git a/include/linux/random.h b/include/linux/random.h index fc82f1dc36f1..6af130c6edb9 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -38,7 +38,6 @@ static inline int unregister_random_vmfork_notifier(struct notifier_block *nb) { #endif void get_random_bytes(void *buf, size_t len); -size_t __must_check get_random_bytes_arch(void *buf, size_t len); u32 get_random_u32(void); u64 get_random_u64(void); static inline unsigned int get_random_int(void) @@ -77,6 +76,7 @@ unsigned long randomize_page(unsigned long start, unsigned long range); int __init random_init(const char *command_line); bool rng_is_initialized(void); +bool rng_has_arch_random(void); int wait_for_random_bytes(void); int register_random_ready_notifier(struct notifier_block *nb); int unregister_random_ready_notifier(struct notifier_block *nb); diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 40d26a07a133..20e9887faaaa 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -776,12 +776,11 @@ static struct notifier_block random_ready = { static int __init initialize_ptr_random(void) { - int key_size = sizeof(ptr_key); int ret; - /* Use hw RNG if available. */ - if (get_random_bytes_arch(&ptr_key, key_size) == key_size) { - static_branch_disable(&not_filled_random_ptr_key); + /* Don't bother waiting for RNG to be ready if RDRAND is mixed in already. */ + if (rng_has_arch_random()) { + enable_ptr_key_workfn(&enable_ptr_key_work); return 0; } -- cgit v1.2.3 From 6701de6c51c172b5de5633374479503c81fefc0b Mon Sep 17 00:00:00 2001 From: Jason A. Donenfeld Date: Sun, 15 May 2022 15:06:18 +0200 Subject: random: remove mostly unused async readiness notifier The register_random_ready_notifier() notifier is somewhat complicated, and was already recently rewritten to use notifier blocks. It is only used now by one consumer in the kernel, vsprintf.c, for which the async mechanism is really overly complex for what it actually needs. This commit removes register_random_ready_notifier() and unregister_random_ready_notifier(), because it just adds complication with little utility, and changes vsprintf.c to just check on `!rng_is_initialized() && !rng_has_arch_random()`, which will eventually be true. Performance-wise, that code was already using a static branch, so there's basically no overhead at all to this change. Cc: Steven Rostedt Cc: Sergey Senozhatsky Acked-by: Petr Mladek # for vsprintf.c Reviewed-by: Petr Mladek Signed-off-by: Jason A. Donenfeld --- drivers/char/random.c | 48 ------------------------------ include/linux/random.h | 2 -- lib/vsprintf.c | 66 +++++++++++++++++--------------------------- 3 files changed, 22 insertions(+), 94 deletions(-) (limited to 'lib') diff --git a/drivers/char/random.c b/drivers/char/random.c index 6b8c89378954..16b39d2dead7 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -84,8 +84,6 @@ static DEFINE_STATIC_KEY_FALSE(crng_is_ready); /* Various types of waiters for crng_init->CRNG_READY transition. */ static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait); static struct fasync_struct *fasync; -static DEFINE_SPINLOCK(random_ready_chain_lock); -static RAW_NOTIFIER_HEAD(random_ready_chain); /* Control how we warn userspace. */ static struct ratelimit_state urandom_warning = RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3); @@ -142,51 +140,6 @@ int wait_for_random_bytes(void) } EXPORT_SYMBOL(wait_for_random_bytes); -/* - * Add a callback function that will be invoked when the input - * pool is initialised.
- * - * returns: 0 if callback is successfully added - * -EALREADY if pool is already initialised (callback not called) - */ -int __cold register_random_ready_notifier(struct notifier_block *nb) -{ - unsigned long flags; - int ret = -EALREADY; - - if (crng_ready()) - return ret; - - spin_lock_irqsave(&random_ready_chain_lock, flags); - if (!crng_ready()) - ret = raw_notifier_chain_register(&random_ready_chain, nb); - spin_unlock_irqrestore(&random_ready_chain_lock, flags); - return ret; -} - -/* - * Delete a previously registered readiness callback function. - */ -int __cold unregister_random_ready_notifier(struct notifier_block *nb) -{ - unsigned long flags; - int ret; - - spin_lock_irqsave(&random_ready_chain_lock, flags); - ret = raw_notifier_chain_unregister(&random_ready_chain, nb); - spin_unlock_irqrestore(&random_ready_chain_lock, flags); - return ret; -} - -static void __cold process_random_ready_list(void) -{ - unsigned long flags; - - spin_lock_irqsave(&random_ready_chain_lock, flags); - raw_notifier_call_chain(&random_ready_chain, 0, NULL); - spin_unlock_irqrestore(&random_ready_chain_lock, flags); -} - #define warn_unseeded_randomness() \ if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \ printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \ @@ -775,7 +728,6 @@ static void __cold _credit_init_bits(size_t bits) if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) { crng_reseed(); /* Sets crng_init to CRNG_READY under base_crng.lock. */ execute_in_process_context(crng_set_ready, &set_ready); - process_random_ready_list(); wake_up_interruptible(&crng_init_wait); kill_fasync(&fasync, SIGIO, POLL_IN); pr_notice("crng init done\n"); diff --git a/include/linux/random.h b/include/linux/random.h index 6af130c6edb9..d2360b2825b6 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -78,8 +78,6 @@ int __init random_init(const char *command_line); bool rng_is_initialized(void); bool rng_has_arch_random(void); int wait_for_random_bytes(void); -int register_random_ready_notifier(struct notifier_block *nb); -int unregister_random_ready_notifier(struct notifier_block *nb); /* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes). * Returns the result of the call to wait_for_random_bytes. */ diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 20e9887faaaa..fb77f7bfd126 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -750,60 +750,38 @@ static int __init debug_boot_weak_hash_enable(char *str) } early_param("debug_boot_weak_hash", debug_boot_weak_hash_enable); -static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key); -static siphash_key_t ptr_key __read_mostly; +static DEFINE_STATIC_KEY_FALSE(filled_random_ptr_key); static void enable_ptr_key_workfn(struct work_struct *work) { - get_random_bytes(&ptr_key, sizeof(ptr_key)); - /* Needs to run from preemptible context */ - static_branch_disable(¬_filled_random_ptr_key); + static_branch_enable(&filled_random_ptr_key); } -static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn); - -static int fill_random_ptr_key(struct notifier_block *nb, - unsigned long action, void *data) -{ - /* This may be in an interrupt handler. */ - queue_work(system_unbound_wq, &enable_ptr_key_work); - return 0; -} - -static struct notifier_block random_ready = { - .notifier_call = fill_random_ptr_key -}; - -static int __init initialize_ptr_random(void) -{ - int ret; - - /* Don't bother waiting for RNG to be ready if RDRAND is mixed in already. 
*/ - if (rng_has_arch_random()) { - enable_ptr_key_workfn(&enable_ptr_key_work); - return 0; - } - - ret = register_random_ready_notifier(&random_ready); - if (!ret) { - return 0; - } else if (ret == -EALREADY) { - /* This is in preemptible context */ - enable_ptr_key_workfn(&enable_ptr_key_work); - return 0; - } - - return ret; -} -early_initcall(initialize_ptr_random); - /* Maps a pointer to a 32 bit unique identifier. */ static inline int __ptr_to_hashval(const void *ptr, unsigned long *hashval_out) { + static siphash_key_t ptr_key __read_mostly; unsigned long hashval; - if (static_branch_unlikely(¬_filled_random_ptr_key)) - return -EAGAIN; + if (!static_branch_likely(&filled_random_ptr_key)) { + static bool filled = false; + static DEFINE_SPINLOCK(filling); + static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn); + unsigned long flags; + + if (!system_unbound_wq || + (!rng_is_initialized() && !rng_has_arch_random()) || + !spin_trylock_irqsave(&filling, flags)) + return -EAGAIN; + + if (!filled) { + get_random_bytes(&ptr_key, sizeof(ptr_key)); + queue_work(system_unbound_wq, &enable_ptr_key_work); + filled = true; + } + spin_unlock_irqrestore(&filling, flags); + } + #ifdef CONFIG_64BIT hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key); -- cgit v1.2.3
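[A closing observation on that last vsprintf.c hunk: the lazy-init idiom it lands on — a static key tested on the fast path, a trylock guarding the one-time setup, and the possibly-sleeping static_branch_enable() bounced to a workqueue — is a generally useful shape for secrets that cannot be generated until the RNG is ready. A condensed, self-contained sketch of the same idea follows; every name in it is illustrative, not from the patch:]

#include <linux/jump_label.h>
#include <linux/random.h>
#include <linux/siphash.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

static DEFINE_STATIC_KEY_FALSE(example_key_filled);
static siphash_key_t example_key __read_mostly;

static void example_enable_workfn(struct work_struct *work)
{
	/* static_branch_enable() can sleep, so flip it from process context. */
	static_branch_enable(&example_key_filled);
}

static int example_hash_ptr(const void *ptr, unsigned long *hashval)
{
	if (!static_branch_likely(&example_key_filled)) {
		static bool filled;
		static DEFINE_SPINLOCK(filling);
		static DECLARE_WORK(enable_work, example_enable_workfn);
		unsigned long flags;

		/* Bail out rather than sleep: callers may be atomic. */
		if (!rng_is_initialized() ||
		    !spin_trylock_irqsave(&filling, flags))
			return -EAGAIN;

		if (!filled) {
			get_random_bytes(&example_key, sizeof(example_key));
			queue_work(system_unbound_wq, &enable_work);
			filled = true;
		}
		spin_unlock_irqrestore(&filling, flags);
	}

	*hashval = (unsigned long)siphash_1u64((u64)(uintptr_t)ptr,
					       &example_key);
	return 0;
}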