From 4b758eda851eb9336ca86a0041a4d3da55f66511 Mon Sep 17 00:00:00 2001 From: Jason A. Donenfeld Date: Tue, 12 Apr 2022 19:59:57 +0200 Subject: random: insist on random_get_entropy() existing in order to simplify All platforms are now guaranteed to provide some value for random_get_entropy(). In case some bug leads to this not being so, we print a warning, because that indicates that something is really very wrong (and likely other things are impacted too). This should never be hit, but it's a good and cheap way of finding out if something ever is problematic. Since we now have viable fallback code for random_get_entropy() on all platforms, which is, in the worst case, not worse than jiffies, we can count on getting the best possible value out of it. That means there's no longer a use for using jiffies as entropy input. It also means we no longer have a reason for doing the round-robin register flow in the IRQ handler, which was always of fairly dubious value. Instead we can greatly simplify the IRQ handler inputs and also unify the construction between 64-bits and 32-bits. We now collect the cycle counter and the return address, since those are the two things that matter. Because the return address and the irq number are likely related, to the extent we mix in the irq number, we can just xor it into the top unchanging bytes of the return address, rather than the bottom changing bytes of the cycle counter as before. Then, we can do a fixed 2 rounds of SipHash/HSipHash. Finally, we use the same construction of hashing only half of the [H]SipHash state on 32-bit and 64-bit. We're not actually discarding any entropy, since that entropy is carried through until the next time. And more importantly, it lets us do the same sponge-like construction everywhere. Cc: Theodore Ts'o Signed-off-by: Jason A. 
Donenfeld --- drivers/char/random.c | 89 +++++++++++++++++---------------------------------- 1 file changed, 29 insertions(+), 60 deletions(-) (limited to 'drivers') diff --git a/drivers/char/random.c b/drivers/char/random.c index 4c9adb4f3d5d..6229d7d3c5b5 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1012,6 +1012,9 @@ int __init rand_initialize(void) urandom_warning.interval = 0; unseeded_warning.interval = 0; } + + WARN(!random_get_entropy(), "Missing cycle counter and fallback timer; RNG " + "entropy collection will consequently suffer."); return 0; } @@ -1025,15 +1028,14 @@ int __init rand_initialize(void) */ void add_device_randomness(const void *buf, size_t size) { - unsigned long cycles = random_get_entropy(); - unsigned long flags, now = jiffies; + unsigned long entropy = random_get_entropy(); + unsigned long flags; if (crng_init == 0 && size) crng_pre_init_inject(buf, size, false); spin_lock_irqsave(&input_pool.lock, flags); - _mix_pool_bytes(&cycles, sizeof(cycles)); - _mix_pool_bytes(&now, sizeof(now)); + _mix_pool_bytes(&entropy, sizeof(entropy)); _mix_pool_bytes(buf, size); spin_unlock_irqrestore(&input_pool.lock, flags); } @@ -1056,12 +1058,11 @@ struct timer_rand_state { */ static void add_timer_randomness(struct timer_rand_state *state, unsigned int num) { - unsigned long cycles = random_get_entropy(), now = jiffies, flags; + unsigned long entropy = random_get_entropy(), now = jiffies, flags; long delta, delta2, delta3; spin_lock_irqsave(&input_pool.lock, flags); - _mix_pool_bytes(&cycles, sizeof(cycles)); - _mix_pool_bytes(&now, sizeof(now)); + _mix_pool_bytes(&entropy, sizeof(entropy)); _mix_pool_bytes(&num, sizeof(num)); spin_unlock_irqrestore(&input_pool.lock, flags); @@ -1223,7 +1224,6 @@ struct fast_pool { unsigned long pool[4]; unsigned long last; unsigned int count; - u16 reg_idx; }; static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = { @@ -1241,13 +1241,13 @@ static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = { * This is [Half]SipHash-1-x, starting from an empty key. Because * the key is fixed, it assumes that its inputs are non-malicious, * and therefore this has no security on its own. s represents the - * 128 or 256-bit SipHash state, while v represents a 128-bit input. + * four-word SipHash state, while v represents a two-word input. */ -static void fast_mix(unsigned long s[4], const unsigned long *v) +static void fast_mix(unsigned long s[4], const unsigned long v[2]) { size_t i; - for (i = 0; i < 16 / sizeof(long); ++i) { + for (i = 0; i < 2; ++i) { s[3] ^= v[i]; #ifdef CONFIG_64BIT s[0] += s[1]; s[1] = rol64(s[1], 13); s[1] ^= s[0]; s[0] = rol64(s[0], 32); @@ -1287,33 +1287,17 @@ int random_online_cpu(unsigned int cpu) } #endif -static unsigned long get_reg(struct fast_pool *f, struct pt_regs *regs) -{ - unsigned long *ptr = (unsigned long *)regs; - unsigned int idx; - - if (regs == NULL) - return 0; - idx = READ_ONCE(f->reg_idx); - if (idx >= sizeof(struct pt_regs) / sizeof(unsigned long)) - idx = 0; - ptr += idx++; - WRITE_ONCE(f->reg_idx, idx); - return *ptr; -} - static void mix_interrupt_randomness(struct work_struct *work) { struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix); /* - * The size of the copied stack pool is explicitly 16 bytes so that we - * tax mix_pool_byte()'s compression function the same amount on all - * platforms. This means on 64-bit we copy half the pool into this, - * while on 32-bit we copy all of it. 
The entropy is supposed to be - * sufficiently dispersed between bits that in the sponge-like - * half case, on average we don't wind up "losing" some. + * The size of the copied stack pool is explicitly 2 longs so that we + * only ever ingest half of the siphash output each time, retaining + * the other half as the next "key" that carries over. The entropy is + * supposed to be sufficiently dispersed between bits so on average + * we don't wind up "losing" some. */ - u8 pool[16]; + unsigned long pool[2]; /* Check to see if we're running on the wrong CPU due to hotplug. */ local_irq_disable(); @@ -1345,36 +1329,21 @@ static void mix_interrupt_randomness(struct work_struct *work) void add_interrupt_randomness(int irq) { enum { MIX_INFLIGHT = 1U << 31 }; - unsigned long cycles = random_get_entropy(), now = jiffies; + unsigned long entropy = random_get_entropy(); struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness); struct pt_regs *regs = get_irq_regs(); unsigned int new_count; - union { - u32 u32[4]; - u64 u64[2]; - unsigned long longs[16 / sizeof(long)]; - } irq_data; - - if (cycles == 0) - cycles = get_reg(fast_pool, regs); - - if (sizeof(unsigned long) == 8) { - irq_data.u64[0] = cycles ^ rol64(now, 32) ^ irq; - irq_data.u64[1] = regs ? instruction_pointer(regs) : _RET_IP_; - } else { - irq_data.u32[0] = cycles ^ irq; - irq_data.u32[1] = now; - irq_data.u32[2] = regs ? instruction_pointer(regs) : _RET_IP_; - irq_data.u32[3] = get_reg(fast_pool, regs); - } - fast_mix(fast_pool->pool, irq_data.longs); + fast_mix(fast_pool->pool, (unsigned long[2]){ + entropy, + (regs ? instruction_pointer(regs) : _RET_IP_) ^ swab(irq) + }); new_count = ++fast_pool->count; if (new_count & MIX_INFLIGHT) return; - if (new_count < 64 && (!time_after(now, fast_pool->last + HZ) || + if (new_count < 64 && (!time_is_before_jiffies(fast_pool->last + HZ) || unlikely(crng_init == 0))) return; @@ -1410,28 +1379,28 @@ static void entropy_timer(struct timer_list *t) static void try_to_generate_entropy(void) { struct { - unsigned long cycles; + unsigned long entropy; struct timer_list timer; } stack; - stack.cycles = random_get_entropy(); + stack.entropy = random_get_entropy(); /* Slow counter - or none. Don't even bother */ - if (stack.cycles == random_get_entropy()) + if (stack.entropy == random_get_entropy()) return; timer_setup_on_stack(&stack.timer, entropy_timer, 0); while (!crng_ready() && !signal_pending(current)) { if (!timer_pending(&stack.timer)) mod_timer(&stack.timer, jiffies + 1); - mix_pool_bytes(&stack.cycles, sizeof(stack.cycles)); + mix_pool_bytes(&stack.entropy, sizeof(stack.entropy)); schedule(); - stack.cycles = random_get_entropy(); + stack.entropy = random_get_entropy(); } del_timer_sync(&stack.timer); destroy_timer_on_stack(&stack.timer); - mix_pool_bytes(&stack.cycles, sizeof(stack.cycles)); + mix_pool_bytes(&stack.entropy, sizeof(stack.entropy)); } -- cgit v1.2.3 From 78c768e619fbd5157b3544915aad5158af0c5809 Mon Sep 17 00:00:00 2001 From: Jason A. Donenfeld Date: Fri, 22 Apr 2022 14:47:42 +0200 Subject: random: vary jitter iterations based on cycle counter speed Currently, we do the jitter dance if two consecutive reads to the cycle counter return different values. If they do, then we consider the cycle counter to be fast enough that one trip through the scheduler will yield one "bit" of credited entropy. If those two reads return the same value, then we assume the cycle counter is too slow to show meaningful differences. 
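To make the flaw concrete, the pre-patch heuristic amounts to roughly the following — a sketch with an illustrative name, not the literal driver code:

static bool cycle_counter_looks_fast(void)
{
	unsigned long a = random_get_entropy();
	unsigned long b = random_get_entropy();

	/* Fooled whenever the first read lands just before a slow counter ticks. */
	return a != b;
}

The table below shows exactly how this two-read test gets fooled.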
This methodology is flawed for a variety of reasons, one of which Eric posted a patch to fix in [1]. The issue that patch solves is that on a system with a slow counter, you might be [un]lucky and read the counter _just_ before it changes, so that the second cycle counter you read differs from the first, even though there's usually quite a large period of time in between the two. For example:

| real time | cycle counter |
| --------- | ------------- |
| 3         | 5             |
| 4         | 5             |
| 5         | 5             |
| 6         | 5             |
| 7         | 5             | <--- a
| 8         | 6             | <--- b
| 9         | 6             | <--- c

If we read the counter at (a) and compare it to (b), we might be fooled into thinking that it's a fast counter, when in reality it is not. The solution in [1] is to also compare counter (b) to counter (c), on the theory that if the counter is _actually_ slow, and (a)!=(b), then certainly (b)==(c).

This helps solve this particular issue, in one sense, but in another sense, it mostly functions to disallow jitter entropy on these systems, rather than simply taking more samples in that case.

Instead, this patch takes a different approach. Right now we assume that a difference in one set of consecutive samples means one "bit" of credited entropy per scheduler trip. We can extend this so that a difference in two sets of consecutive samples means one "bit" of credited entropy per /two/ scheduler trips, and three for three, and four for four. In other words, we can increase the amount of jitter "work" we require for each "bit", depending on how slow the cycle counter is.

So this patch takes a whole bunch of samples, sees how many of them are different, and divides to find the amount of work required per "bit", and also requires that at least some minimum of them are different in order to attempt any jitter entropy.

Note that this approach is still far from perfect. It's not a real statistical estimate of how much these samples vary; it's not a real-time analysis of the relevant input data. That remains a project for another time. However, it makes the same (partly flawed) assumptions as the code that's there now, so it's probably not worse than the status quo, and it handles the issue Eric mentioned in [1]. But, again, it's probably a far cry from whatever a really robust version of this would be.

[1] https://lore.kernel.org/lkml/20220421233152.58522-1-ebiggers@kernel.org/
    https://lore.kernel.org/lkml/20220421192939.250680-1-ebiggers@kernel.org/

Cc: Eric Biggers Cc: Theodore Ts'o Cc: Linus Torvalds Signed-off-by: Jason A. Donenfeld --- drivers/char/random.c | 36 ++++++++++++++++++++++++++---------- 1 file changed, 26 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/char/random.c b/drivers/char/random.c index 6229d7d3c5b5..bd6fdb624861 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1354,6 +1354,12 @@ void add_interrupt_randomness(int irq) } EXPORT_SYMBOL_GPL(add_interrupt_randomness); +struct entropy_timer_state { + unsigned long entropy; + struct timer_list timer; + unsigned int samples, samples_per_bit; +}; + /* * Each time the timer fires, we expect that we got an unpredictable * jump in the cycle counter. Even if the timer is running on another @@ -1367,9 +1373,14 @@ EXPORT_SYMBOL_GPL(add_interrupt_randomness); * * So the re-arming always happens in the entropy loop itself.
*/ -static void entropy_timer(struct timer_list *t) +static void entropy_timer(struct timer_list *timer) { - credit_entropy_bits(1); + struct entropy_timer_state *state = container_of(timer, struct entropy_timer_state, timer); + + if (++state->samples == state->samples_per_bit) { + credit_entropy_bits(1); + state->samples = 0; + } } /* @@ -1378,17 +1389,22 @@ static void entropy_timer(struct timer_list *t) */ static void try_to_generate_entropy(void) { - struct { - unsigned long entropy; - struct timer_list timer; - } stack; - - stack.entropy = random_get_entropy(); + enum { NUM_TRIAL_SAMPLES = 8192, MAX_SAMPLES_PER_BIT = 32 }; + struct entropy_timer_state stack; + unsigned int i, num_different = 0; + unsigned long last = random_get_entropy(); - /* Slow counter - or none. Don't even bother */ - if (stack.entropy == random_get_entropy()) + for (i = 0; i < NUM_TRIAL_SAMPLES - 1; ++i) { + stack.entropy = random_get_entropy(); + if (stack.entropy != last) + ++num_different; + last = stack.entropy; + } + stack.samples_per_bit = DIV_ROUND_UP(NUM_TRIAL_SAMPLES, num_different + 1); + if (stack.samples_per_bit > MAX_SAMPLES_PER_BIT) return; + stack.samples = 0; timer_setup_on_stack(&stack.timer, entropy_timer, 0); while (!crng_ready() && !signal_pending(current)) { if (!timer_pending(&stack.timer)) -- cgit v1.2.3 From b7b67d1391a831eb9de133e85a2230e2e81a2ce4 Mon Sep 17 00:00:00 2001 From: Jason A. Donenfeld Date: Sun, 1 May 2022 13:51:34 +0200 Subject: random: mix in timestamps and reseed on system restore Since the RNG loses freshness with system suspend/hibernation, when we resume, immediately reseed using whatever data we can, which for this particular case is the various timestamps regarding system suspend time, in addition to more generally the RDSEED/RDRAND/RDTSC values that happen whenever the crng reseeds. On systems that suspend and resume automatically all the time -- such as Android -- we skip the reseeding on suspend resumption, since that could wind up being far too busy. This is the same trade-off made in WireGuard. In addition to reseeding upon resumption always mix into the pool these various stamps on every power notification event. Cc: Theodore Ts'o Signed-off-by: Jason A. Donenfeld --- drivers/char/random.c | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) (limited to 'drivers') diff --git a/drivers/char/random.c b/drivers/char/random.c index bd6fdb624861..2d10942ba534 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -53,6 +53,7 @@ #include #include #include +#include #include #include #include @@ -970,6 +971,33 @@ static int __init parse_trust_bootloader(char *arg) early_param("random.trust_cpu", parse_trust_cpu); early_param("random.trust_bootloader", parse_trust_bootloader); +static int random_pm_notification(struct notifier_block *nb, unsigned long action, void *data) +{ + unsigned long flags, entropy = random_get_entropy(); + + /* + * Encode a representation of how long the system has been suspended, + * in a way that is distinct from prior system suspends. 
+ */ + ktime_t stamps[] = { ktime_get(), ktime_get_boottime(), ktime_get_real() }; + + spin_lock_irqsave(&input_pool.lock, flags); + _mix_pool_bytes(&action, sizeof(action)); + _mix_pool_bytes(stamps, sizeof(stamps)); + _mix_pool_bytes(&entropy, sizeof(entropy)); + spin_unlock_irqrestore(&input_pool.lock, flags); + + if (crng_ready() && (action == PM_RESTORE_PREPARE || + (action == PM_POST_SUSPEND && + !IS_ENABLED(CONFIG_PM_AUTOSLEEP) && !IS_ENABLED(CONFIG_ANDROID)))) { + crng_reseed(true); + pr_notice("crng reseeded on system resumption\n"); + } + return 0; +} + +static struct notifier_block pm_notifier = { .notifier_call = random_pm_notification }; + /* * The first collection of entropy occurs at system boot while interrupts * are still turned off. Here we push in RDSEED, a timestamp, and utsname(). @@ -1013,6 +1041,8 @@ int __init rand_initialize(void) unseeded_warning.interval = 0; } + WARN_ON(register_pm_notifier(&pm_notifier)); + WARN(!random_get_entropy(), "Missing cycle counter and fallback timer; RNG " "entropy collection will consequently suffer."); return 0; -- cgit v1.2.3 From cbe89e5a375a51bbb952929b93fa973416fea74e Mon Sep 17 00:00:00 2001 From: Jason A. Donenfeld Date: Tue, 3 May 2022 14:14:32 +0200 Subject: random: do not use batches when !crng_ready() It's too hard to keep the batches synchronized, and pointless anyway, since in !crng_ready(), we're updating the base_crng key really often, where batching only hurts. So instead, if the crng isn't ready, just call into get_random_bytes(). At this stage nothing is performance critical anyhow. Cc: Theodore Ts'o Reviewed-by: Dominik Brodowski Signed-off-by: Jason A. Donenfeld --- drivers/char/random.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/char/random.c b/drivers/char/random.c index 2d10942ba534..a9f887b92ba2 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -466,10 +466,8 @@ static void crng_pre_init_inject(const void *input, size_t len, bool account) if (account) { crng_init_cnt += min_t(size_t, len, CRNG_INIT_CNT_THRESH - crng_init_cnt); - if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) { - ++base_crng.generation; + if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) crng_init = 1; - } } spin_unlock_irqrestore(&base_crng.lock, flags); @@ -625,6 +623,11 @@ u64 get_random_u64(void) warn_unseeded_randomness(&previous); + if (!crng_ready()) { + _get_random_bytes(&ret, sizeof(ret)); + return ret; + } + local_lock_irqsave(&batched_entropy_u64.lock, flags); batch = raw_cpu_ptr(&batched_entropy_u64); @@ -659,6 +662,11 @@ u32 get_random_u32(void) warn_unseeded_randomness(&previous); + if (!crng_ready()) { + _get_random_bytes(&ret, sizeof(ret)); + return ret; + } + local_lock_irqsave(&batched_entropy_u32.lock, flags); batch = raw_cpu_ptr(&batched_entropy_u32); -- cgit v1.2.3 From 5c3b747ef54fa2a7318776777f6044540d99f721 Mon Sep 17 00:00:00 2001 From: Jason A. Donenfeld Date: Sat, 30 Apr 2022 15:08:20 +0200 Subject: random: use first 128 bits of input as fast init Before, the first 64 bytes of input, regardless of how entropic it was, would be used to mutate the crng base key directly, and none of those bytes would be credited as having entropy. Then 256 bits of credited input would be accumulated, and only then would the rng transition from the earlier "fast init" phase into being actually initialized. 
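For reference, the old fast-init accounting described here — visible in the crng_pre_init_inject() removal in the diff below — boils down to this sketch (hypothetical helper name; the real logic runs under base_crng.lock):

static void fast_init_account_sketch(size_t len)
{
	static size_t crng_init_cnt;

	/* The first 2 * CHACHA_KEY_SIZE == 64 bytes of input, regardless of
	 * how entropic they are, flip crng_init from 0 to 1; no entropy
	 * crediting is involved at all. */
	crng_init_cnt += min_t(size_t, len, 2 * CHACHA_KEY_SIZE - crng_init_cnt);
	if (crng_init_cnt >= 2 * CHACHA_KEY_SIZE)
		crng_init = 1;
}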
The thinking was that by mixing and matching fast init and real init, an attacker who compromised the fast init state, considered easy to do given how little entropy might be in those first 64 bytes, would then be able to bruteforce bits from the actual initialization. By keeping these separate, bruteforcing became impossible. However, by not crediting potentially creditable bits from those first 64 bytes of input, we delay initialization, and actually make the problem worse, because it means the user is drawing worse random numbers for a longer period of time. Instead, we can take the first 128 bits as fast init, and allow them to be credited, and then hold off on the next 128 bits until they've accumulated. This is still a wide enough margin to prevent bruteforcing the rng state, while still initializing much faster. Then, rather than trying to piecemeal inject into the base crng key at various points, instead just extract from the pool when we need it, for the crng_init==0 phase. Performance may even be better for the various inputs here, since there are likely more calls to mix_pool_bytes() than there are to get_random_bytes() during this phase of system execution. Since the preinit injection code is gone, bootloader randomness can then do something significantly more straightforward, removing the weird system_wq hack in hwgenerator randomness. Cc: Theodore Ts'o Cc: Dominik Brodowski Signed-off-by: Jason A. Donenfeld --- drivers/char/random.c | 146 +++++++++++++++++--------------------------------- 1 file changed, 49 insertions(+), 97 deletions(-) (limited to 'drivers') diff --git a/drivers/char/random.c b/drivers/char/random.c index a9f887b92ba2..a2a032ceb108 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -232,10 +232,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller, void * *********************************************************************/ -enum { - CRNG_RESEED_INTERVAL = 300 * HZ, - CRNG_INIT_CNT_THRESH = 2 * CHACHA_KEY_SIZE -}; +enum { CRNG_RESEED_INTERVAL = 300 * HZ }; static struct { u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long)); @@ -259,6 +256,8 @@ static DEFINE_PER_CPU(struct crng, crngs) = { /* Used by crng_reseed() to extract a new seed from the input pool. */ static bool drain_entropy(void *buf, size_t nbytes, bool force); +/* Used by crng_make_state() to extract a new seed when crng_init==0. */ +static void extract_entropy(void *buf, size_t nbytes); /* * This extracts a new crng key from the input pool, but only if there is a * sufficient amount of entropy available or force is true, in order to * mitigate bruteforcing of newly added bits. @@ -383,17 +382,20 @@ static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS], /* * For the fast path, we check whether we're ready, unlocked first, and * then re-check once locked later. In the case where we're really not - * ready, we do fast key erasure with the base_crng directly, because - * this is what crng_pre_init_inject() mutates during early init. + * ready, we do fast key erasure with the base_crng directly, extracting + * when crng_init==0.
*/ if (!crng_ready()) { bool ready; spin_lock_irqsave(&base_crng.lock, flags); ready = crng_ready(); - if (!ready) + if (!ready) { + if (crng_init == 0) + extract_entropy(base_crng.key, sizeof(base_crng.key)); crng_fast_key_erasure(base_crng.key, chacha_state, random_data, random_data_len); + } spin_unlock_irqrestore(&base_crng.lock, flags); if (!ready) return; @@ -434,48 +436,6 @@ static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS], local_unlock_irqrestore(&crngs.lock, flags); } -/* - * This function is for crng_init == 0 only. It loads entropy directly - * into the crng's key, without going through the input pool. It is, - * generally speaking, not very safe, but we use this only at early - * boot time when it's better to have something there rather than - * nothing. - * - * If account is set, then the crng_init_cnt counter is incremented. - * This shouldn't be set by functions like add_device_randomness(), - * where we can't trust the buffer passed to it is guaranteed to be - * unpredictable (so it might not have any entropy at all). - */ -static void crng_pre_init_inject(const void *input, size_t len, bool account) -{ - static int crng_init_cnt = 0; - struct blake2s_state hash; - unsigned long flags; - - blake2s_init(&hash, sizeof(base_crng.key)); - - spin_lock_irqsave(&base_crng.lock, flags); - if (crng_init != 0) { - spin_unlock_irqrestore(&base_crng.lock, flags); - return; - } - - blake2s_update(&hash, base_crng.key, sizeof(base_crng.key)); - blake2s_update(&hash, input, len); - blake2s_final(&hash, base_crng.key); - - if (account) { - crng_init_cnt += min_t(size_t, len, CRNG_INIT_CNT_THRESH - crng_init_cnt); - if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) - crng_init = 1; - } - - spin_unlock_irqrestore(&base_crng.lock, flags); - - if (crng_init == 1) - pr_notice("fast init done\n"); -} - static void _get_random_bytes(void *buf, size_t nbytes) { u32 chacha_state[CHACHA_STATE_WORDS]; @@ -788,7 +748,8 @@ EXPORT_SYMBOL(get_random_bytes_arch); enum { POOL_BITS = BLAKE2S_HASH_SIZE * 8, - POOL_MIN_BITS = POOL_BITS /* No point in settling for less. */ + POOL_MIN_BITS = POOL_BITS, /* No point in settling for less. */ + POOL_FAST_INIT_BITS = POOL_MIN_BITS / 2 }; /* For notifying userspace should write into /dev/random. */ @@ -825,24 +786,6 @@ static void mix_pool_bytes(const void *in, size_t nbytes) spin_unlock_irqrestore(&input_pool.lock, flags); } -static void credit_entropy_bits(size_t nbits) -{ - unsigned int entropy_count, orig, add; - - if (!nbits) - return; - - add = min_t(size_t, nbits, POOL_BITS); - - do { - orig = READ_ONCE(input_pool.entropy_count); - entropy_count = min_t(unsigned int, POOL_BITS, orig + add); - } while (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig); - - if (!crng_ready() && entropy_count >= POOL_MIN_BITS) - crng_reseed(false); -} - /* * This is an HKDF-like construction for using the hashed collected entropy * as a PRF key, that's then expanded block-by-block. 
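The extract-then-expand construction that comment describes can be sketched as follows, using the same blake2s_*() calls seen elsewhere in this file. Locking, the RDSEED words mixed into each block, and error paths of the real extract_entropy() are omitted, and the helper name is illustrative only:

static void extract_entropy_sketch(u8 *buf, size_t nbytes)
{
	u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE], block[BLAKE2S_HASH_SIZE];
	size_t counter = 0, i;

	/* Extract: finalize the pool hash into a PRF seed... */
	blake2s_final(&input_pool.hash, seed);

	/* ...and immediately re-key the pool with PRF(seed, 0), so the seed
	 * cannot be recovered from the pool state afterwards. */
	blake2s(next_key, (u8 *)&counter, seed, sizeof(next_key), sizeof(counter), sizeof(seed));
	blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key));

	/* Expand: emit PRF(seed, counter) block-by-block, like HKDF-Expand. */
	while (nbytes) {
		++counter;
		blake2s(block, (u8 *)&counter, seed, sizeof(block), sizeof(counter), sizeof(seed));
		i = min_t(size_t, nbytes, sizeof(block));
		memcpy(buf, block, i);
		buf += i;
		nbytes -= i;
	}

	memzero_explicit(seed, sizeof(seed));
	memzero_explicit(next_key, sizeof(next_key));
	memzero_explicit(block, sizeof(block));
}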
@@ -908,6 +851,33 @@ static bool drain_entropy(void *buf, size_t nbytes, bool force) return true; } +static void credit_entropy_bits(size_t nbits) +{ + unsigned int entropy_count, orig, add; + unsigned long flags; + + if (!nbits) + return; + + add = min_t(size_t, nbits, POOL_BITS); + + do { + orig = READ_ONCE(input_pool.entropy_count); + entropy_count = min_t(unsigned int, POOL_BITS, orig + add); + } while (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig); + + if (!crng_ready() && entropy_count >= POOL_MIN_BITS) + crng_reseed(false); + else if (unlikely(crng_init == 0 && entropy_count >= POOL_FAST_INIT_BITS)) { + spin_lock_irqsave(&base_crng.lock, flags); + if (crng_init == 0) { + extract_entropy(base_crng.key, sizeof(base_crng.key)); + crng_init = 1; + } + spin_unlock_irqrestore(&base_crng.lock, flags); + } +} + /********************************************************************** * @@ -951,9 +921,9 @@ static bool drain_entropy(void *buf, size_t nbytes, bool force) * entropy as specified by the caller. If the entropy pool is full it will * block until more entropy is needed. * - * add_bootloader_randomness() is the same as add_hwgenerator_randomness() or - * add_device_randomness(), depending on whether or not the configuration - * option CONFIG_RANDOM_TRUST_BOOTLOADER is set. + * add_bootloader_randomness() is called by bootloader drivers, such as EFI + * and device tree, and credits its input depending on whether or not the + * configuration option CONFIG_RANDOM_TRUST_BOOTLOADER is set. * * add_vmfork_randomness() adds a unique (but not necessarily secret) ID * representing the current instance of a VM to the pool, without crediting, @@ -1069,9 +1039,6 @@ void add_device_randomness(const void *buf, size_t size) unsigned long entropy = random_get_entropy(); unsigned long flags; - if (crng_init == 0 && size) - crng_pre_init_inject(buf, size, false); - spin_lock_irqsave(&input_pool.lock, flags); _mix_pool_bytes(&entropy, sizeof(entropy)); _mix_pool_bytes(buf, size); @@ -1187,12 +1154,6 @@ void rand_initialize_disk(struct gendisk *disk) void add_hwgenerator_randomness(const void *buffer, size_t count, size_t entropy) { - if (unlikely(crng_init == 0 && entropy < POOL_MIN_BITS)) { - crng_pre_init_inject(buffer, count, true); - mix_pool_bytes(buffer, count); - return; - } - /* * Throttle writing if we're above the trickle threshold. * We'll be woken up again once below POOL_MIN_BITS, when @@ -1200,7 +1161,7 @@ void add_hwgenerator_randomness(const void *buffer, size_t count, * CRNG_RESEED_INTERVAL has elapsed. */ wait_event_interruptible_timeout(random_write_wait, - !system_wq || kthread_should_stop() || + kthread_should_stop() || input_pool.entropy_count < POOL_MIN_BITS, CRNG_RESEED_INTERVAL); mix_pool_bytes(buffer, count); @@ -1209,17 +1170,14 @@ void add_hwgenerator_randomness(const void *buffer, size_t count, EXPORT_SYMBOL_GPL(add_hwgenerator_randomness); /* - * Handle random seed passed by bootloader. - * If the seed is trustworthy, it would be regarded as hardware RNGs. Otherwise - * it would be regarded as device data. - * The decision is controlled by CONFIG_RANDOM_TRUST_BOOTLOADER. + * Handle random seed passed by bootloader, and credit it if + * CONFIG_RANDOM_TRUST_BOOTLOADER is set. 
*/ void add_bootloader_randomness(const void *buf, size_t size) { + mix_pool_bytes(buf, size); if (trust_bootloader) - add_hwgenerator_randomness(buf, size, size * 8); - else - add_device_randomness(buf, size); + credit_entropy_bits(size * 8); } EXPORT_SYMBOL_GPL(add_bootloader_randomness); @@ -1353,13 +1311,8 @@ static void mix_interrupt_randomness(struct work_struct *work) fast_pool->last = jiffies; local_irq_enable(); - if (unlikely(crng_init == 0)) { - crng_pre_init_inject(pool, sizeof(pool), true); - mix_pool_bytes(pool, sizeof(pool)); - } else { - mix_pool_bytes(pool, sizeof(pool)); - credit_entropy_bits(1); - } + mix_pool_bytes(pool, sizeof(pool)); + credit_entropy_bits(1); memzero_explicit(pool, sizeof(pool)); } @@ -1381,8 +1334,7 @@ void add_interrupt_randomness(int irq) if (new_count & MIX_INFLIGHT) return; - if (new_count < 64 && (!time_is_before_jiffies(fast_pool->last + HZ) || - unlikely(crng_init == 0))) + if (new_count < 64 && !time_is_before_jiffies(fast_pool->last + HZ)) return; if (unlikely(!fast_pool->mix.func)) -- cgit v1.2.3 From e85c0fc1d94c52483a603651748d4c76d6aa1c6b Mon Sep 17 00:00:00 2001 From: Jason A. Donenfeld Date: Sat, 30 Apr 2022 22:03:29 +0200 Subject: random: do not pretend to handle premature next security model Per the thread linked below, "premature next" is not considered to be a realistic threat model, and leads to more serious security problems. "Premature next" is the scenario in which: - Attacker compromises the current state of a fully initialized RNG via some kind of infoleak. - New bits of entropy are added directly to the key used to generate the /dev/urandom stream, without any buffering or pooling. - Attacker then, somehow having read access to /dev/urandom, samples RNG output and brute forces the individual new bits that were added. - Result: the RNG never "recovers" from the initial compromise, a so-called violation of what academics term "post-compromise security". The usual solutions to this involve some form of delaying when entropy gets mixed into the crng. With Fortuna, this involves multiple input buckets. With what the Linux RNG was trying to do prior, this involves entropy estimation. However, by delaying when entropy gets mixed in, it also means that RNG compromises are extremely dangerous during the window of time before the RNG has gathered enough entropy, during which time nonces may become predictable (or repeated), ephemeral keys may not be secret, and so forth. Moreover, it's unclear how realistic "premature next" is from an attack perspective, if these attacks even make sense in practice. Put together -- and discussed in more detail in the thread below -- these constitute grounds for just doing away with the current code that pretends to handle premature next. I say "pretends" because it wasn't doing an especially great job at it either; should we change our mind about this direction, we would probably implement Fortuna to "fix" the "problem", in which case, removing the pretend solution still makes sense. This also reduces the crng reseed period from 5 minutes down to 1 minute. The rationale from the thread might lead us toward reducing that even further in the future (or even eliminating it), but that remains a topic of a future commit. At a high level, this patch changes semantics from: Before: Seed for the first time after 256 "bits" of estimated entropy have been accumulated since the system booted. Thereafter, reseed once every five minutes, but only if 256 new "bits" have been accumulated since the last reseeding. 
After: Seed for the first time after 256 "bits" of estimated entropy have been accumulated since the system booted. Thereafter, reseed once every minute. Most of this patch is renaming and removing: POOL_MIN_BITS becomes POOL_INIT_BITS, credit_entropy_bits() becomes credit_init_bits(), crng_reseed() loses its "force" parameter since it's now always true, the drain_entropy() function no longer has any use so it's removed, entropy estimation is skipped if we've already init'd, the various notifiers for "low on entropy" are now only active prior to init, and finally, some documentation comments are cleaned up here and there. Link: https://lore.kernel.org/lkml/YmlMGx6+uigkGiZ0@zx2c4.com/ Cc: Theodore Ts'o Cc: Nadia Heninger Cc: Tom Ristenpart Reviewed-by: Eric Biggers Signed-off-by: Jason A. Donenfeld --- drivers/char/random.c | 186 ++++++++++++++++++-------------------------------- 1 file changed, 68 insertions(+), 118 deletions(-) (limited to 'drivers') diff --git a/drivers/char/random.c b/drivers/char/random.c index a2a032ceb108..9e971fb9e16b 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -15,14 +15,12 @@ * - Sysctl interface. * * The high level overview is that there is one input pool, into which - * various pieces of data are hashed. Some of that data is then "credited" as - * having a certain number of bits of entropy. When enough bits of entropy are - * available, the hash is finalized and handed as a key to a stream cipher that - * expands it indefinitely for various consumers. This key is periodically - * refreshed as the various entropy collectors, described below, add data to the - * input pool and credit it. There is currently no Fortuna-like scheduler - * involved, which can lead to malicious entropy sources causing a premature - * reseed, and the entropy estimates are, at best, conservative guesses. + * various pieces of data are hashed. Prior to initialization, some of that + * data is then "credited" as having a certain number of bits of entropy. + * When enough bits of entropy are available, the hash is finalized and + * handed as a key to a stream cipher that expands it indefinitely for + * various consumers. This key is periodically refreshed as the various + * entropy collectors, described below, add data to the input pool. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -232,7 +230,10 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller, void * *********************************************************************/ -enum { CRNG_RESEED_INTERVAL = 300 * HZ }; +enum { + CRNG_RESEED_START_INTERVAL = HZ, + CRNG_RESEED_INTERVAL = 60 * HZ +}; static struct { u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long)); @@ -254,26 +255,18 @@ static DEFINE_PER_CPU(struct crng, crngs) = { .lock = INIT_LOCAL_LOCK(crngs.lock), }; -/* Used by crng_reseed() to extract a new seed from the input pool. */ -static bool drain_entropy(void *buf, size_t nbytes, bool force); -/* Used by crng_make_state() to extract a new seed when crng_init==0. */ +/* Used by crng_reseed() and crng_make_state() to extract a new seed from the input pool. */ static void extract_entropy(void *buf, size_t nbytes); -/* - * This extracts a new crng key from the input pool, but only if there is a - * sufficient amount of entropy available or force is true, in order to - * mitigate bruteforcing of newly added bits. - */ -static void crng_reseed(bool force) +/* This extracts a new crng key from the input pool. 
*/ +static void crng_reseed(void) { unsigned long flags; unsigned long next_gen; u8 key[CHACHA_KEY_SIZE]; bool finalize_init = false; - /* Only reseed if we can, to prevent brute forcing a small amount of new bits. */ - if (!drain_entropy(key, sizeof(key), force)) - return; + extract_entropy(key, sizeof(key)); /* * We copy the new key into the base_crng, overwriting the old one, @@ -345,10 +338,10 @@ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE], } /* - * Return whether the crng seed is considered to be sufficiently - * old that a reseeding might be attempted. This happens if the last - * reseeding was CRNG_RESEED_INTERVAL ago, or during early boot, at - * an interval proportional to the uptime. + * Return whether the crng seed is considered to be sufficiently old + * that a reseeding is needed. This happens if the last reseeding + * was CRNG_RESEED_INTERVAL ago, or during early boot, at an interval + * proportional to the uptime. */ static bool crng_has_old_seed(void) { @@ -360,7 +353,7 @@ static bool crng_has_old_seed(void) if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2) WRITE_ONCE(early_boot, false); else - interval = max_t(unsigned int, 5 * HZ, + interval = max_t(unsigned int, CRNG_RESEED_START_INTERVAL, (unsigned int)uptime / 2 * HZ); } return time_after(jiffies, READ_ONCE(base_crng.birth) + interval); @@ -402,11 +395,11 @@ static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS], } /* - * If the base_crng is old enough, we try to reseed, which in turn - * bumps the generation counter that we check below. + * If the base_crng is old enough, we reseed, which in turn bumps the + * generation counter that we check below. */ if (unlikely(crng_has_old_seed())) - crng_reseed(false); + crng_reseed(); local_lock_irqsave(&crngs.lock, flags); crng = raw_cpu_ptr(&crngs); @@ -735,30 +728,24 @@ EXPORT_SYMBOL(get_random_bytes_arch); * * After which, if added entropy should be credited: * - * static void credit_entropy_bits(size_t nbits) + * static void credit_init_bits(size_t nbits) * - * Finally, extract entropy via these two, with the latter one - * setting the entropy count to zero and extracting only if there - * is POOL_MIN_BITS entropy credited prior or force is true: + * Finally, extract entropy via: * * static void extract_entropy(void *buf, size_t nbytes) - * static bool drain_entropy(void *buf, size_t nbytes, bool force) * **********************************************************************/ enum { POOL_BITS = BLAKE2S_HASH_SIZE * 8, - POOL_MIN_BITS = POOL_BITS, /* No point in settling for less. */ - POOL_FAST_INIT_BITS = POOL_MIN_BITS / 2 + POOL_INIT_BITS = POOL_BITS, /* No point in settling for less. */ + POOL_FAST_INIT_BITS = POOL_INIT_BITS / 2 }; -/* For notifying userspace should write into /dev/random. */ -static DECLARE_WAIT_QUEUE_HEAD(random_write_wait); - static struct { struct blake2s_state hash; spinlock_t lock; - unsigned int entropy_count; + unsigned int init_bits; } input_pool = { .hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE), BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4, @@ -773,9 +760,9 @@ static void _mix_pool_bytes(const void *in, size_t nbytes) } /* - * This function adds bytes into the entropy "pool". It does not - * update the entropy estimate. The caller should call - * credit_entropy_bits if this is appropriate. + * This function adds bytes into the input pool. It does not + * update the initialization bit counter; the caller should call + * credit_init_bits if this is appropriate. 
*/ static void mix_pool_bytes(const void *in, size_t nbytes) { @@ -832,43 +819,24 @@ static void extract_entropy(void *buf, size_t nbytes) memzero_explicit(&block, sizeof(block)); } -/* - * First we make sure we have POOL_MIN_BITS of entropy in the pool unless force - * is true, and then we set the entropy count to zero (but don't actually touch - * any data). Only then can we extract a new key with extract_entropy(). - */ -static bool drain_entropy(void *buf, size_t nbytes, bool force) -{ - unsigned int entropy_count; - do { - entropy_count = READ_ONCE(input_pool.entropy_count); - if (!force && entropy_count < POOL_MIN_BITS) - return false; - } while (cmpxchg(&input_pool.entropy_count, entropy_count, 0) != entropy_count); - extract_entropy(buf, nbytes); - wake_up_interruptible(&random_write_wait); - kill_fasync(&fasync, SIGIO, POLL_OUT); - return true; -} - -static void credit_entropy_bits(size_t nbits) +static void credit_init_bits(size_t nbits) { - unsigned int entropy_count, orig, add; + unsigned int init_bits, orig, add; unsigned long flags; - if (!nbits) + if (crng_ready() || !nbits) return; add = min_t(size_t, nbits, POOL_BITS); do { - orig = READ_ONCE(input_pool.entropy_count); - entropy_count = min_t(unsigned int, POOL_BITS, orig + add); - } while (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig); + orig = READ_ONCE(input_pool.init_bits); + init_bits = min_t(unsigned int, POOL_BITS, orig + add); + } while (cmpxchg(&input_pool.init_bits, orig, init_bits) != orig); - if (!crng_ready() && entropy_count >= POOL_MIN_BITS) - crng_reseed(false); - else if (unlikely(crng_init == 0 && entropy_count >= POOL_FAST_INIT_BITS)) { + if (!crng_ready() && init_bits >= POOL_INIT_BITS) + crng_reseed(); + else if (unlikely(crng_init == 0 && init_bits >= POOL_FAST_INIT_BITS)) { spin_lock_irqsave(&base_crng.lock, flags); if (crng_init == 0) { extract_entropy(base_crng.key, sizeof(base_crng.key)); @@ -968,7 +936,7 @@ static int random_pm_notification(struct notifier_block *nb, unsigned long actio if (crng_ready() && (action == PM_RESTORE_PREPARE || (action == PM_POST_SUSPEND && !IS_ENABLED(CONFIG_PM_AUTOSLEEP) && !IS_ENABLED(CONFIG_ANDROID)))) { - crng_reseed(true); + crng_reseed(); pr_notice("crng reseeded on system resumption\n"); } return 0; @@ -1006,13 +974,10 @@ int __init rand_initialize(void) _mix_pool_bytes(&now, sizeof(now)); _mix_pool_bytes(utsname(), sizeof(*(utsname()))); - extract_entropy(base_crng.key, sizeof(base_crng.key)); - ++base_crng.generation; - - if (arch_init && trust_cpu && !crng_ready()) { - crng_init = 2; - pr_notice("crng init done (trusting CPU's manufacturer)\n"); - } + if (crng_ready()) + crng_reseed(); + else if (arch_init && trust_cpu) + credit_init_bits(BLAKE2S_BLOCK_SIZE * 8); if (ratelimit_disable) { urandom_warning.interval = 0; @@ -1071,6 +1036,9 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned int nu _mix_pool_bytes(&num, sizeof(num)); spin_unlock_irqrestore(&input_pool.lock, flags); + if (crng_ready()) + return; + /* * Calculate number of bits of randomness we probably added. * We take into account the first, second and third-order deltas @@ -1101,7 +1069,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned int nu * Round down by 1 bit on general principles, * and limit entropy estimate to 12 bits. 
*/ - credit_entropy_bits(min_t(unsigned int, fls(delta >> 1), 11)); + credit_init_bits(min_t(unsigned int, fls(delta >> 1), 11)); } void add_input_randomness(unsigned int type, unsigned int code, @@ -1154,18 +1122,15 @@ void rand_initialize_disk(struct gendisk *disk) void add_hwgenerator_randomness(const void *buffer, size_t count, size_t entropy) { + mix_pool_bytes(buffer, count); + credit_init_bits(entropy); + /* - * Throttle writing if we're above the trickle threshold. - * We'll be woken up again once below POOL_MIN_BITS, when - * the calling thread is about to terminate, or once - * CRNG_RESEED_INTERVAL has elapsed. + * Throttle writing to once every CRNG_RESEED_INTERVAL, unless + * we're not yet initialized. */ - wait_event_interruptible_timeout(random_write_wait, - kthread_should_stop() || - input_pool.entropy_count < POOL_MIN_BITS, - CRNG_RESEED_INTERVAL); - mix_pool_bytes(buffer, count); - credit_entropy_bits(entropy); + if (!kthread_should_stop() && crng_ready()) + schedule_timeout_interruptible(CRNG_RESEED_INTERVAL); } EXPORT_SYMBOL_GPL(add_hwgenerator_randomness); @@ -1177,7 +1142,7 @@ void add_bootloader_randomness(const void *buf, size_t size) { mix_pool_bytes(buf, size); if (trust_bootloader) - credit_entropy_bits(size * 8); + credit_init_bits(size * 8); } EXPORT_SYMBOL_GPL(add_bootloader_randomness); @@ -1193,7 +1158,7 @@ void add_vmfork_randomness(const void *unique_vm_id, size_t size) { add_device_randomness(unique_vm_id, size); if (crng_ready()) { - crng_reseed(true); + crng_reseed(); pr_notice("crng reseeded due to virtual machine fork\n"); } blocking_notifier_call_chain(&vmfork_chain, 0, NULL); @@ -1312,7 +1277,7 @@ static void mix_interrupt_randomness(struct work_struct *work) local_irq_enable(); mix_pool_bytes(pool, sizeof(pool)); - credit_entropy_bits(1); + credit_init_bits(1); memzero_explicit(pool, sizeof(pool)); } @@ -1368,7 +1333,7 @@ static void entropy_timer(struct timer_list *timer) struct entropy_timer_state *state = container_of(timer, struct entropy_timer_state, timer); if (++state->samples == state->samples_per_bit) { - credit_entropy_bits(1); + credit_init_bits(1); state->samples = 0; } } @@ -1468,16 +1433,8 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int, static __poll_t random_poll(struct file *file, poll_table *wait) { - __poll_t mask; - poll_wait(file, &crng_init_wait, wait); - poll_wait(file, &random_write_wait, wait); - mask = 0; - if (crng_ready()) - mask |= EPOLLIN | EPOLLRDNORM; - if (input_pool.entropy_count < POOL_MIN_BITS) - mask |= EPOLLOUT | EPOLLWRNORM; - return mask; + return crng_ready() ? EPOLLIN | EPOLLRDNORM : EPOLLOUT | EPOLLWRNORM; } static int write_pool(const char __user *ubuf, size_t count) @@ -1557,7 +1514,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) switch (cmd) { case RNDGETENTCNT: /* Inherently racy, no point locking. 
*/ - if (put_user(input_pool.entropy_count, p)) + if (put_user(input_pool.init_bits, p)) return -EFAULT; return 0; case RNDADDTOENTCNT: @@ -1567,7 +1524,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) return -EFAULT; if (ent_count < 0) return -EINVAL; - credit_entropy_bits(ent_count); + credit_init_bits(ent_count); return 0; case RNDADDENTROPY: if (!capable(CAP_SYS_ADMIN)) @@ -1581,27 +1538,20 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) retval = write_pool((const char __user *)p, size); if (retval < 0) return retval; - credit_entropy_bits(ent_count); + credit_init_bits(ent_count); return 0; case RNDZAPENTCNT: case RNDCLEARPOOL: - /* - * Clear the entropy pool counters. We no longer clear - * the entropy pool, as that's silly. - */ + /* No longer has any effect. */ if (!capable(CAP_SYS_ADMIN)) return -EPERM; - if (xchg(&input_pool.entropy_count, 0) >= POOL_MIN_BITS) { - wake_up_interruptible(&random_write_wait); - kill_fasync(&fasync, SIGIO, POLL_OUT); - } return 0; case RNDRESEEDCRNG: if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!crng_ready()) return -ENODATA; - crng_reseed(false); + crng_reseed(); return 0; default: return -EINVAL; @@ -1653,7 +1603,7 @@ const struct file_operations urandom_fops = { * * - write_wakeup_threshold - the amount of entropy in the input pool * below which write polls to /dev/random will unblock, requesting - * more entropy, tied to the POOL_MIN_BITS constant. It is writable + * more entropy, tied to the POOL_INIT_BITS constant. It is writable * to avoid breaking old userspaces, but writing to it does not * change any behavior of the RNG. * @@ -1668,7 +1618,7 @@ const struct file_operations urandom_fops = { #include static int sysctl_random_min_urandom_seed = CRNG_RESEED_INTERVAL / HZ; -static int sysctl_random_write_wakeup_bits = POOL_MIN_BITS; +static int sysctl_random_write_wakeup_bits = POOL_INIT_BITS; static int sysctl_poolsize = POOL_BITS; static u8 sysctl_bootid[UUID_SIZE]; @@ -1724,7 +1674,7 @@ static struct ctl_table random_table[] = { }, { .procname = "entropy_avail", - .data = &input_pool.entropy_count, + .data = &input_pool.init_bits, .maxlen = sizeof(int), .mode = 0444, .proc_handler = proc_dointvec, -- cgit v1.2.3 From a4b5c26b79ffdfcfb816c198f2fc2b1e7b5b580f Mon Sep 17 00:00:00 2001 From: Jason A. Donenfeld Date: Fri, 6 May 2022 18:27:38 +0200 Subject: random: order timer entropy functions below interrupt functions There are no code changes here; this is just a reordering of functions, so that in subsequent commits, the timer entropy functions can call into the interrupt ones. Signed-off-by: Jason A. 
Donenfeld --- drivers/char/random.c | 238 +++++++++++++++++++++++++------------------------- 1 file changed, 119 insertions(+), 119 deletions(-) (limited to 'drivers') diff --git a/drivers/char/random.c b/drivers/char/random.c index 9e971fb9e16b..fcdd108c9d05 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -855,14 +855,14 @@ static void credit_init_bits(size_t nbits) * the above entropy accumulation routines: * * void add_device_randomness(const void *buf, size_t size); - * void add_input_randomness(unsigned int type, unsigned int code, - * unsigned int value); - * void add_disk_randomness(struct gendisk *disk); * void add_hwgenerator_randomness(const void *buffer, size_t count, * size_t entropy); * void add_bootloader_randomness(const void *buf, size_t size); * void add_vmfork_randomness(const void *unique_vm_id, size_t size); * void add_interrupt_randomness(int irq); + * void add_input_randomness(unsigned int type, unsigned int code, + * unsigned int value); + * void add_disk_randomness(struct gendisk *disk); * * add_device_randomness() adds data to the input pool that * is likely to differ between two devices (or possibly even per boot). @@ -872,19 +872,6 @@ static void credit_init_bits(size_t nbits) * that might otherwise be identical and have very little entropy * available to them (particularly common in the embedded world). * - * add_input_randomness() uses the input layer interrupt timing, as well - * as the event type information from the hardware. - * - * add_disk_randomness() uses what amounts to the seek time of block - * layer request events, on a per-disk_devt basis, as input to the - * entropy pool. Note that high-speed solid state drives with very low - * seek times do not make for good sources of entropy, as their seek - * times are usually fairly consistent. - * - * The above two routines try to estimate how many bits of entropy - * to credit. They do this by keeping track of the first and second - * order deltas of the event timings. - * * add_hwgenerator_randomness() is for true hardware RNGs, and will credit * entropy as specified by the caller. If the entropy pool is full it will * block until more entropy is needed. @@ -902,6 +889,19 @@ static void credit_init_bits(size_t nbits) * as inputs, it feeds the input pool roughly once a second or after 64 * interrupts, crediting 1 bit of entropy for whichever comes first. * + * add_input_randomness() uses the input layer interrupt timing, as well + * as the event type information from the hardware. + * + * add_disk_randomness() uses what amounts to the seek time of block + * layer request events, on a per-disk_devt basis, as input to the + * entropy pool. Note that high-speed solid state drives with very low + * seek times do not make for good sources of entropy, as their seek + * times are usually fairly consistent. + * + * The last two routines try to estimate how many bits of entropy + * to credit. They do this by keeping track of the first and second + * order deltas of the event timings. + * **********************************************************************/ static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU); @@ -1011,109 +1011,6 @@ void add_device_randomness(const void *buf, size_t size) } EXPORT_SYMBOL(add_device_randomness); -/* There is one of these per entropy source */ -struct timer_rand_state { - unsigned long last_time; - long last_delta, last_delta2; -}; - -/* - * This function adds entropy to the entropy "pool" by using timing - * delays. 
It uses the timer_rand_state structure to make an estimate - * of how many bits of entropy this call has added to the pool. - * - * The number "num" is also added to the pool - it should somehow describe - * the type of event which just happened. This is currently 0-255 for - * keyboard scan codes, and 256 upwards for interrupts. - */ -static void add_timer_randomness(struct timer_rand_state *state, unsigned int num) -{ - unsigned long entropy = random_get_entropy(), now = jiffies, flags; - long delta, delta2, delta3; - - spin_lock_irqsave(&input_pool.lock, flags); - _mix_pool_bytes(&entropy, sizeof(entropy)); - _mix_pool_bytes(&num, sizeof(num)); - spin_unlock_irqrestore(&input_pool.lock, flags); - - if (crng_ready()) - return; - - /* - * Calculate number of bits of randomness we probably added. - * We take into account the first, second and third-order deltas - * in order to make our estimate. - */ - delta = now - READ_ONCE(state->last_time); - WRITE_ONCE(state->last_time, now); - - delta2 = delta - READ_ONCE(state->last_delta); - WRITE_ONCE(state->last_delta, delta); - - delta3 = delta2 - READ_ONCE(state->last_delta2); - WRITE_ONCE(state->last_delta2, delta2); - - if (delta < 0) - delta = -delta; - if (delta2 < 0) - delta2 = -delta2; - if (delta3 < 0) - delta3 = -delta3; - if (delta > delta2) - delta = delta2; - if (delta > delta3) - delta = delta3; - - /* - * delta is now minimum absolute delta. - * Round down by 1 bit on general principles, - * and limit entropy estimate to 12 bits. - */ - credit_init_bits(min_t(unsigned int, fls(delta >> 1), 11)); -} - -void add_input_randomness(unsigned int type, unsigned int code, - unsigned int value) -{ - static unsigned char last_value; - static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES }; - - /* Ignore autorepeat and the like. */ - if (value == last_value) - return; - - last_value = value; - add_timer_randomness(&input_timer_state, - (type << 4) ^ code ^ (code >> 4) ^ value); -} -EXPORT_SYMBOL_GPL(add_input_randomness); - -#ifdef CONFIG_BLOCK -void add_disk_randomness(struct gendisk *disk) -{ - if (!disk || !disk->random) - return; - /* First major is 1, so we get >= 0x200 here. */ - add_timer_randomness(disk->random, 0x100 + disk_devt(disk)); -} -EXPORT_SYMBOL_GPL(add_disk_randomness); - -void rand_initialize_disk(struct gendisk *disk) -{ - struct timer_rand_state *state; - - /* - * If kzalloc returns null, we just won't use that entropy - * source. - */ - state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL); - if (state) { - state->last_time = INITIAL_JIFFIES; - disk->random = state; - } -} -#endif - /* * Interface for in-kernel drivers of true hardware RNGs. * Those devices may produce endless random bits and will be throttled @@ -1309,6 +1206,109 @@ void add_interrupt_randomness(int irq) } EXPORT_SYMBOL_GPL(add_interrupt_randomness); +/* There is one of these per entropy source */ +struct timer_rand_state { + unsigned long last_time; + long last_delta, last_delta2; +}; + +/* + * This function adds entropy to the entropy "pool" by using timing + * delays. It uses the timer_rand_state structure to make an estimate + * of how many bits of entropy this call has added to the pool. + * + * The number "num" is also added to the pool - it should somehow describe + * the type of event which just happened. This is currently 0-255 for + * keyboard scan codes, and 256 upwards for interrupts. 
+ */ +static void add_timer_randomness(struct timer_rand_state *state, unsigned int num) +{ + unsigned long entropy = random_get_entropy(), now = jiffies, flags; + long delta, delta2, delta3; + + spin_lock_irqsave(&input_pool.lock, flags); + _mix_pool_bytes(&entropy, sizeof(entropy)); + _mix_pool_bytes(&num, sizeof(num)); + spin_unlock_irqrestore(&input_pool.lock, flags); + + if (crng_ready()) + return; + + /* + * Calculate number of bits of randomness we probably added. + * We take into account the first, second and third-order deltas + * in order to make our estimate. + */ + delta = now - READ_ONCE(state->last_time); + WRITE_ONCE(state->last_time, now); + + delta2 = delta - READ_ONCE(state->last_delta); + WRITE_ONCE(state->last_delta, delta); + + delta3 = delta2 - READ_ONCE(state->last_delta2); + WRITE_ONCE(state->last_delta2, delta2); + + if (delta < 0) + delta = -delta; + if (delta2 < 0) + delta2 = -delta2; + if (delta3 < 0) + delta3 = -delta3; + if (delta > delta2) + delta = delta2; + if (delta > delta3) + delta = delta3; + + /* + * delta is now minimum absolute delta. + * Round down by 1 bit on general principles, + * and limit entropy estimate to 12 bits. + */ + credit_init_bits(min_t(unsigned int, fls(delta >> 1), 11)); +} + +void add_input_randomness(unsigned int type, unsigned int code, + unsigned int value) +{ + static unsigned char last_value; + static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES }; + + /* Ignore autorepeat and the like. */ + if (value == last_value) + return; + + last_value = value; + add_timer_randomness(&input_timer_state, + (type << 4) ^ code ^ (code >> 4) ^ value); +} +EXPORT_SYMBOL_GPL(add_input_randomness); + +#ifdef CONFIG_BLOCK +void add_disk_randomness(struct gendisk *disk) +{ + if (!disk || !disk->random) + return; + /* First major is 1, so we get >= 0x200 here. */ + add_timer_randomness(disk->random, 0x100 + disk_devt(disk)); +} +EXPORT_SYMBOL_GPL(add_disk_randomness); + +void rand_initialize_disk(struct gendisk *disk) +{ + struct timer_rand_state *state; + + /* + * If kzalloc returns null, we just won't use that entropy + * source. + */ + state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL); + if (state) { + state->last_time = INITIAL_JIFFIES; + disk->random = state; + } +} +#endif + struct entropy_timer_state { unsigned long entropy; struct timer_list timer; -- cgit v1.2.3 From e3e33fc2ea7fcefd0d761db9d6219f83b4248f5c Mon Sep 17 00:00:00 2001 From: Jason A. Donenfeld Date: Fri, 6 May 2022 18:30:51 +0200 Subject: random: do not use input pool from hard IRQs Years ago, a separate fast pool was added for interrupts, so that the cost associated with taking the input pool spinlocks and mixing into it would be avoided in places where latency is critical. However, one oversight was that add_input_randomness() and add_disk_randomness() still sometimes are called directly from the interrupt handler, rather than being deferred to a thread. This means that some unlucky interrupts will be caught doing a blake2s_compress() call and potentially spinning on input_pool.lock, which can also be taken by unprivileged users by writing into /dev/urandom. In order to fix this, add_timer_randomness() now checks whether it is being called from a hard IRQ and if so, just mixes into the per-cpu IRQ fast pool using fast_mix(), which is much faster and can be done lock-free. A nice consequence of this, as well, is that it means hard IRQ context FPU support is likely no longer useful. 
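To ground the comparison that follows, the add_timer_randomness() estimator — condensed from the code shown in the surrounding diffs into a hypothetical helper — is roughly:

static unsigned int timer_entropy_bits(struct timer_rand_state *state, unsigned long now)
{
	long delta = now - READ_ONCE(state->last_time);
	long delta2 = delta - READ_ONCE(state->last_delta);
	long delta3 = delta2 - READ_ONCE(state->last_delta2);

	WRITE_ONCE(state->last_time, now);
	WRITE_ONCE(state->last_delta, delta);
	WRITE_ONCE(state->last_delta2, delta2);

	/* Credit by the minimum absolute delta across three orders,
	 * rounded down by one bit and capped at 11 bits. */
	delta = min3(abs(delta), abs(delta2), abs(delta3));
	return min_t(unsigned int, fls(delta >> 1), 11);
}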
The entropy estimation algorithm used by add_timer_randomness() is also somewhat different from the one used for add_interrupt_randomness(). The former looks at deltas of deltas of deltas, while the latter just waits for 64 interrupts for one bit or for one second since the last bit. In order to bridge these, and since add_interrupt_randomness() runs after an add_timer_randomness() that's called from hard IRQ, we add the related amount to the fast pool's credit count, and then subtract one to account for add_interrupt_randomness()'s contribution. A downside of this, however, is that the num argument is potentially attacker controlled, which puts a bit more pressure on the fast_mix() sponge to do more than it's really intended to do. As a mitigating factor, the first 96 bits of input aren't attacker controlled (a cycle counter followed by zeros), which means it's essentially two rounds of siphash rather than one, which is somewhat better. It's also not that much different from add_interrupt_randomness()'s use of the irq stack instruction pointer register. Cc: Thomas Gleixner Cc: Filipe Manana Cc: Peter Zijlstra Cc: Borislav Petkov Cc: Theodore Ts'o Signed-off-by: Jason A. Donenfeld --- drivers/char/random.c | 51 ++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 36 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/char/random.c b/drivers/char/random.c index fcdd108c9d05..e5d9739b81f5 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1156,6 +1156,7 @@ static void mix_interrupt_randomness(struct work_struct *work) * we don't wind up "losing" some. */ unsigned long pool[2]; + unsigned int count; /* Check to see if we're running on the wrong CPU due to hotplug. */ local_irq_disable(); @@ -1169,12 +1170,13 @@ static void mix_interrupt_randomness(struct work_struct *work) * consistent view, before we reenable irqs again. */ memcpy(pool, fast_pool->pool, sizeof(pool)); + count = fast_pool->count; fast_pool->count = 0; fast_pool->last = jiffies; local_irq_enable(); mix_pool_bytes(pool, sizeof(pool)); - credit_init_bits(1); + credit_init_bits(max(1u, (count & U16_MAX) / 64)); memzero_explicit(pool, sizeof(pool)); } @@ -1214,22 +1216,30 @@ struct timer_rand_state { /* * This function adds entropy to the entropy "pool" by using timing - * delays. It uses the timer_rand_state structure to make an estimate - * of how many bits of entropy this call has added to the pool. - * - * The number "num" is also added to the pool - it should somehow describe - * the type of event which just happened. This is currently 0-255 for - * keyboard scan codes, and 256 upwards for interrupts. + * delays. It uses the timer_rand_state structure to make an estimate + * of how many bits of entropy this call has added to the pool. The + * value "num" is also added to the pool; it should somehow describe + * the type of event that just happened. */ static void add_timer_randomness(struct timer_rand_state *state, unsigned int num) { unsigned long entropy = random_get_entropy(), now = jiffies, flags; long delta, delta2, delta3; + unsigned int bits; - spin_lock_irqsave(&input_pool.lock, flags); - _mix_pool_bytes(&entropy, sizeof(entropy)); - _mix_pool_bytes(&num, sizeof(num)); - spin_unlock_irqrestore(&input_pool.lock, flags); + /* + * If we're in a hard IRQ, add_interrupt_randomness() will be called + * sometime after, so mix into the fast pool.
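+ * Deferring to the fast pool here also keeps blake2s_compress() and input_pool.lock entirely out of hard IRQ context, which is the latency problem this change addresses.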
+ */ + if (in_hardirq()) { + fast_mix(this_cpu_ptr(&irq_randomness)->pool, + (unsigned long[2]){ entropy, num }); + } else { + spin_lock_irqsave(&input_pool.lock, flags); + _mix_pool_bytes(&entropy, sizeof(entropy)); + _mix_pool_bytes(&num, sizeof(num)); + spin_unlock_irqrestore(&input_pool.lock, flags); + } if (crng_ready()) return; @@ -1260,11 +1270,22 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned int nu delta = delta3; /* - * delta is now minimum absolute delta. - * Round down by 1 bit on general principles, - * and limit entropy estimate to 12 bits. + * delta is now minimum absolute delta. Round down by 1 bit + * on general principles, and limit entropy estimate to 11 bits. + */ + bits = min(fls(delta >> 1), 11); + + /* + * As mentioned above, if we're in a hard IRQ, add_interrupt_randomness() + * will run after this, which uses a different crediting scheme of 1 bit + * per every 64 interrupts. In order to let that function do accounting + * close to the one in this function, we credit a full 64/64 bit per bit, + * and then subtract one to account for the extra one added. */ - credit_init_bits(min_t(unsigned int, fls(delta >> 1), 11)); + if (in_hardirq()) + this_cpu_ptr(&irq_randomness)->count += max(1u, bits * 64) - 1; + else + credit_init_bits(bits); } void add_input_randomness(unsigned int type, unsigned int code, -- cgit v1.2.3 From 791332b3cbb080510954a4c152ce02af8832eac9 Mon Sep 17 00:00:00 2001 From: Jason A. Donenfeld Date: Fri, 6 May 2022 23:19:43 +0200 Subject: random: help compiler out with fast_mix() by using simpler arguments Now that fast_mix() has more than one caller, gcc no longer inlines it. That's fine. But it also doesn't handle the compound literal argument we pass it very efficiently, nor does it handle the loop as well as it could. So just expand the code to spell out this function so that it generates the same code as it did before. Performance-wise, this now behaves as it did before the last commit. The difference in actual code size on x86 is 45 bytes, which is less than a cache line. Signed-off-by: Jason A. Donenfeld --- drivers/char/random.c | 44 +++++++++++++++++++++++--------------------- 1 file changed, 23 insertions(+), 21 deletions(-) (limited to 'drivers') diff --git a/drivers/char/random.c b/drivers/char/random.c index e5d9739b81f5..8be3efd65ef2 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1101,25 +1101,30 @@ static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = { * and therefore this has no security on its own. s represents the * four-word SipHash state, while v represents a two-word input. 
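* Each input word is injected into s[3], run through one round of the permutation, and then folded into s[0], i.e. two rounds per call.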
*/ -static void fast_mix(unsigned long s[4], const unsigned long v[2]) +static void fast_mix(unsigned long s[4], unsigned long v1, unsigned long v2) { - size_t i; - - for (i = 0; i < 2; ++i) { - s[3] ^= v[i]; #ifdef CONFIG_64BIT - s[0] += s[1]; s[1] = rol64(s[1], 13); s[1] ^= s[0]; s[0] = rol64(s[0], 32); - s[2] += s[3]; s[3] = rol64(s[3], 16); s[3] ^= s[2]; - s[0] += s[3]; s[3] = rol64(s[3], 21); s[3] ^= s[0]; - s[2] += s[1]; s[1] = rol64(s[1], 17); s[1] ^= s[2]; s[2] = rol64(s[2], 32); +#define PERM() do { \ + s[0] += s[1]; s[1] = rol64(s[1], 13); s[1] ^= s[0]; s[0] = rol64(s[0], 32); \ + s[2] += s[3]; s[3] = rol64(s[3], 16); s[3] ^= s[2]; \ + s[0] += s[3]; s[3] = rol64(s[3], 21); s[3] ^= s[0]; \ + s[2] += s[1]; s[1] = rol64(s[1], 17); s[1] ^= s[2]; s[2] = rol64(s[2], 32); \ +} while (0) #else - s[0] += s[1]; s[1] = rol32(s[1], 5); s[1] ^= s[0]; s[0] = rol32(s[0], 16); - s[2] += s[3]; s[3] = rol32(s[3], 8); s[3] ^= s[2]; - s[0] += s[3]; s[3] = rol32(s[3], 7); s[3] ^= s[0]; - s[2] += s[1]; s[1] = rol32(s[1], 13); s[1] ^= s[2]; s[2] = rol32(s[2], 16); +#define PERM() do { \ + s[0] += s[1]; s[1] = rol32(s[1], 5); s[1] ^= s[0]; s[0] = rol32(s[0], 16); \ + s[2] += s[3]; s[3] = rol32(s[3], 8); s[3] ^= s[2]; \ + s[0] += s[3]; s[3] = rol32(s[3], 7); s[3] ^= s[0]; \ + s[2] += s[1]; s[1] = rol32(s[1], 13); s[1] ^= s[2]; s[2] = rol32(s[2], 16); \ +} while (0) #endif - s[0] ^= v[i]; - } + + s[3] ^= v1; + PERM(); + s[0] ^= v1; + s[3] ^= v2; + PERM(); + s[0] ^= v2; } #ifdef CONFIG_SMP @@ -1189,10 +1194,8 @@ void add_interrupt_randomness(int irq) struct pt_regs *regs = get_irq_regs(); unsigned int new_count; - fast_mix(fast_pool->pool, (unsigned long[2]){ - entropy, - (regs ? instruction_pointer(regs) : _RET_IP_) ^ swab(irq) - }); + fast_mix(fast_pool->pool, entropy, + (regs ? instruction_pointer(regs) : _RET_IP_) ^ swab(irq)); new_count = ++fast_pool->count; if (new_count & MIX_INFLIGHT) @@ -1232,8 +1235,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned int nu * sometime after, so mix into the fast pool. */ if (in_hardirq()) { - fast_mix(this_cpu_ptr(&irq_randomness)->pool, - (unsigned long[2]){ entropy, num }); + fast_mix(this_cpu_ptr(&irq_randomness)->pool, entropy, num); } else { spin_lock_irqsave(&input_pool.lock, flags); _mix_pool_bytes(&entropy, sizeof(entropy)); -- cgit v1.2.3 From e73aaae2fa9024832e1f42e30c787c7baf61d014 Mon Sep 17 00:00:00 2001 From: Jason A. Donenfeld Date: Sat, 7 May 2022 14:03:46 +0200 Subject: siphash: use one source of truth for siphash permutations The SipHash family of permutations is currently used in three places: - siphash.c itself, used in the ordinary way it was intended. - random32.c, in a construction from an anonymous contributor. - random.c, as part of its fast_mix function. Each one of these places reinvents the wheel with the same C code, same rotation constants, and same symmetry-breaking constants. This commit tidies things up a bit by placing macros for the permutations and constants into siphash.h, where each of the three .c users can access them. It also leaves a note dissuading more users of them from emerging. Signed-off-by: Jason A. 
Donenfeld --- drivers/char/random.c | 30 +++++++----------------------- include/linux/prandom.h | 23 +++++++---------------- include/linux/siphash.h | 28 ++++++++++++++++++++++++++++ lib/siphash.c | 32 ++++++++++---------------------- 4 files changed, 52 insertions(+), 61 deletions(-) (limited to 'drivers') diff --git a/drivers/char/random.c b/drivers/char/random.c index 8be3efd65ef2..c1763bcbcaed 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -52,6 +52,7 @@ #include #include #include +#include #include #include #include @@ -1086,12 +1087,11 @@ struct fast_pool { static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = { #ifdef CONFIG_64BIT - /* SipHash constants */ - .pool = { 0x736f6d6570736575UL, 0x646f72616e646f6dUL, - 0x6c7967656e657261UL, 0x7465646279746573UL } +#define FASTMIX_PERM SIPHASH_PERMUTATION + .pool = { SIPHASH_CONST_0, SIPHASH_CONST_1, SIPHASH_CONST_2, SIPHASH_CONST_3 } #else - /* HalfSipHash constants */ - .pool = { 0, 0, 0x6c796765U, 0x74656462U } +#define FASTMIX_PERM HSIPHASH_PERMUTATION + .pool = { HSIPHASH_CONST_0, HSIPHASH_CONST_1, HSIPHASH_CONST_2, HSIPHASH_CONST_3 } #endif }; @@ -1103,27 +1103,11 @@ static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = { */ static void fast_mix(unsigned long s[4], unsigned long v1, unsigned long v2) { -#ifdef CONFIG_64BIT -#define PERM() do { \ - s[0] += s[1]; s[1] = rol64(s[1], 13); s[1] ^= s[0]; s[0] = rol64(s[0], 32); \ - s[2] += s[3]; s[3] = rol64(s[3], 16); s[3] ^= s[2]; \ - s[0] += s[3]; s[3] = rol64(s[3], 21); s[3] ^= s[0]; \ - s[2] += s[1]; s[1] = rol64(s[1], 17); s[1] ^= s[2]; s[2] = rol64(s[2], 32); \ -} while (0) -#else -#define PERM() do { \ - s[0] += s[1]; s[1] = rol32(s[1], 5); s[1] ^= s[0]; s[0] = rol32(s[0], 16); \ - s[2] += s[3]; s[3] = rol32(s[3], 8); s[3] ^= s[2]; \ - s[0] += s[3]; s[3] = rol32(s[3], 7); s[3] ^= s[0]; \ - s[2] += s[1]; s[1] = rol32(s[1], 13); s[1] ^= s[2]; s[2] = rol32(s[2], 16); \ -} while (0) -#endif - s[3] ^= v1; - PERM(); + FASTMIX_PERM(s[0], s[1], s[2], s[3]); s[0] ^= v1; s[3] ^= v2; - PERM(); + FASTMIX_PERM(s[0], s[1], s[2], s[3]); s[0] ^= v2; } diff --git a/include/linux/prandom.h b/include/linux/prandom.h index 056d31317e49..a4aadd2dc153 100644 --- a/include/linux/prandom.h +++ b/include/linux/prandom.h @@ -10,6 +10,7 @@ #include #include +#include u32 prandom_u32(void); void prandom_bytes(void *buf, size_t nbytes); @@ -27,15 +28,10 @@ DECLARE_PER_CPU(unsigned long, net_rand_noise); * The core SipHash round function. Each line can be executed in * parallel given enough CPU resources. */ -#define PRND_SIPROUND(v0, v1, v2, v3) ( \ - v0 += v1, v1 = rol64(v1, 13), v2 += v3, v3 = rol64(v3, 16), \ - v1 ^= v0, v0 = rol64(v0, 32), v3 ^= v2, \ - v0 += v3, v3 = rol64(v3, 21), v2 += v1, v1 = rol64(v1, 17), \ - v3 ^= v0, v1 ^= v2, v2 = rol64(v2, 32) \ -) +#define PRND_SIPROUND(v0, v1, v2, v3) SIPHASH_PERMUTATION(v0, v1, v2, v3) -#define PRND_K0 (0x736f6d6570736575 ^ 0x6c7967656e657261) -#define PRND_K1 (0x646f72616e646f6d ^ 0x7465646279746573) +#define PRND_K0 (SIPHASH_CONST_0 ^ SIPHASH_CONST_2) +#define PRND_K1 (SIPHASH_CONST_1 ^ SIPHASH_CONST_3) #elif BITS_PER_LONG == 32 /* @@ -43,14 +39,9 @@ DECLARE_PER_CPU(unsigned long, net_rand_noise); * This is weaker, but 32-bit machines are not used for high-traffic * applications, so there is less output for an attacker to analyze. 
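* (HalfSipHash operates on 32-bit words with its own rotation constants; see HSIPHASH_PERMUTATION in siphash.h.)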
*/ -#define PRND_SIPROUND(v0, v1, v2, v3) ( \ - v0 += v1, v1 = rol32(v1, 5), v2 += v3, v3 = rol32(v3, 8), \ - v1 ^= v0, v0 = rol32(v0, 16), v3 ^= v2, \ - v0 += v3, v3 = rol32(v3, 7), v2 += v1, v1 = rol32(v1, 13), \ - v3 ^= v0, v1 ^= v2, v2 = rol32(v2, 16) \ -) -#define PRND_K0 0x6c796765 -#define PRND_K1 0x74656462 +#define PRND_SIPROUND(v0, v1, v2, v3) HSIPHASH_PERMUTATION(v0, v1, v2, v3) +#define PRND_K0 (HSIPHASH_CONST_0 ^ HSIPHASH_CONST_2) +#define PRND_K1 (HSIPHASH_CONST_1 ^ HSIPHASH_CONST_3) #else #error Unsupported BITS_PER_LONG diff --git a/include/linux/siphash.h b/include/linux/siphash.h index cce8a9acc76c..3af1428da559 100644 --- a/include/linux/siphash.h +++ b/include/linux/siphash.h @@ -138,4 +138,32 @@ static inline u32 hsiphash(const void *data, size_t len, return ___hsiphash_aligned(data, len, key); } +/* + * These macros expose the raw SipHash and HalfSipHash permutations. + * Do not use them directly! If you think you have a use for them, + * be sure to CC the maintainer of this file explaining why. + */ + +#define SIPHASH_PERMUTATION(a, b, c, d) ( \ + (a) += (b), (b) = rol64((b), 13), (b) ^= (a), (a) = rol64((a), 32), \ + (c) += (d), (d) = rol64((d), 16), (d) ^= (c), \ + (a) += (d), (d) = rol64((d), 21), (d) ^= (a), \ + (c) += (b), (b) = rol64((b), 17), (b) ^= (c), (c) = rol64((c), 32)) + +#define SIPHASH_CONST_0 0x736f6d6570736575ULL +#define SIPHASH_CONST_1 0x646f72616e646f6dULL +#define SIPHASH_CONST_2 0x6c7967656e657261ULL +#define SIPHASH_CONST_3 0x7465646279746573ULL + +#define HSIPHASH_PERMUTATION(a, b, c, d) ( \ + (a) += (b), (b) = rol32((b), 5), (b) ^= (a), (a) = rol32((a), 16), \ + (c) += (d), (d) = rol32((d), 8), (d) ^= (c), \ + (a) += (d), (d) = rol32((d), 7), (d) ^= (a), \ + (c) += (b), (b) = rol32((b), 13), (b) ^= (c), (c) = rol32((c), 16)) + +#define HSIPHASH_CONST_0 0U +#define HSIPHASH_CONST_1 0U +#define HSIPHASH_CONST_2 0x6c796765U +#define HSIPHASH_CONST_3 0x74656462U + #endif /* _LINUX_SIPHASH_H */ diff --git a/lib/siphash.c b/lib/siphash.c index 72b9068ab57b..71d315a6ad62 100644 --- a/lib/siphash.c +++ b/lib/siphash.c @@ -18,19 +18,13 @@ #include #endif -#define SIPROUND \ - do { \ - v0 += v1; v1 = rol64(v1, 13); v1 ^= v0; v0 = rol64(v0, 32); \ - v2 += v3; v3 = rol64(v3, 16); v3 ^= v2; \ - v0 += v3; v3 = rol64(v3, 21); v3 ^= v0; \ - v2 += v1; v1 = rol64(v1, 17); v1 ^= v2; v2 = rol64(v2, 32); \ - } while (0) +#define SIPROUND SIPHASH_PERMUTATION(v0, v1, v2, v3) #define PREAMBLE(len) \ - u64 v0 = 0x736f6d6570736575ULL; \ - u64 v1 = 0x646f72616e646f6dULL; \ - u64 v2 = 0x6c7967656e657261ULL; \ - u64 v3 = 0x7465646279746573ULL; \ + u64 v0 = SIPHASH_CONST_0; \ + u64 v1 = SIPHASH_CONST_1; \ + u64 v2 = SIPHASH_CONST_2; \ + u64 v3 = SIPHASH_CONST_3; \ u64 b = ((u64)(len)) << 56; \ v3 ^= key->key[1]; \ v2 ^= key->key[0]; \ @@ -389,19 +383,13 @@ u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third, } EXPORT_SYMBOL(hsiphash_4u32); #else -#define HSIPROUND \ - do { \ - v0 += v1; v1 = rol32(v1, 5); v1 ^= v0; v0 = rol32(v0, 16); \ - v2 += v3; v3 = rol32(v3, 8); v3 ^= v2; \ - v0 += v3; v3 = rol32(v3, 7); v3 ^= v0; \ - v2 += v1; v1 = rol32(v1, 13); v1 ^= v2; v2 = rol32(v2, 16); \ - } while (0) +#define HSIPROUND HSIPHASH_PERMUTATION(v0, v1, v2, v3) #define HPREAMBLE(len) \ - u32 v0 = 0; \ - u32 v1 = 0; \ - u32 v2 = 0x6c796765U; \ - u32 v3 = 0x74656462U; \ + u32 v0 = HSIPHASH_CONST_0; \ + u32 v1 = HSIPHASH_CONST_1; \ + u32 v2 = HSIPHASH_CONST_2; \ + u32 v3 = HSIPHASH_CONST_3; \ u32 b = ((u32)(len)) << 24; \ v3 ^= key->key[1]; \ v2 ^= key->key[0]; \ -- 
cgit v1.2.3 From e3d2c5e79a999aa4e7d6f0127e16d3da5a4ff70d Mon Sep 17 00:00:00 2001 From: Jason A. Donenfeld Date: Sun, 8 May 2022 13:20:30 +0200 Subject: random: use symbolic constants for crng_init states crng_init represents a state machine, with three states, and various rules for transitions. For the longest time, we've been managing these with "0", "1", and "2", and expecting people to figure it out. To make the code more obvious, replace these with proper enum values representing the transition, and then redocument what each of these states mean. Reviewed-by: Dominik Brodowski Cc: Joe Perches Signed-off-by: Jason A. Donenfeld --- drivers/char/random.c | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/char/random.c b/drivers/char/random.c index c1763bcbcaed..0bb3d7cf33e5 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -71,16 +71,16 @@ *********************************************************************/ /* - * crng_init = 0 --> Uninitialized - * 1 --> Initialized - * 2 --> Initialized from input_pool - * * crng_init is protected by base_crng->lock, and only increases - * its value (from 0->1->2). + * its value (from empty->early->ready). */ -static int crng_init = 0; -#define crng_ready() (likely(crng_init > 1)) -/* Various types of waiters for crng_init->2 transition. */ +static enum { + CRNG_EMPTY = 0, /* Little to no entropy collected */ + CRNG_EARLY = 1, /* At least POOL_EARLY_BITS collected */ + CRNG_READY = 2 /* Fully initialized with POOL_READY_BITS collected */ +} crng_init = CRNG_EMPTY; +#define crng_ready() (likely(crng_init >= CRNG_READY)) +/* Various types of waiters for crng_init->CRNG_READY transition. */ static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait); static struct fasync_struct *fasync; static DEFINE_SPINLOCK(random_ready_chain_lock); @@ -283,7 +283,7 @@ static void crng_reseed(void) WRITE_ONCE(base_crng.generation, next_gen); WRITE_ONCE(base_crng.birth, jiffies); if (!crng_ready()) { - crng_init = 2; + crng_init = CRNG_READY; finalize_init = true; } spin_unlock_irqrestore(&base_crng.lock, flags); @@ -377,7 +377,7 @@ static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS], * For the fast path, we check whether we're ready, unlocked first, and * then re-check once locked later. In the case where we're really not * ready, we do fast key erasure with the base_crng directly, extracting - * when crng_init==0. + * when crng_init is CRNG_EMPTY. */ if (!crng_ready()) { bool ready; @@ -385,7 +385,7 @@ static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS], spin_lock_irqsave(&base_crng.lock, flags); ready = crng_ready(); if (!ready) { - if (crng_init == 0) + if (crng_init == CRNG_EMPTY) extract_entropy(base_crng.key, sizeof(base_crng.key)); crng_fast_key_erasure(base_crng.key, chacha_state, random_data, random_data_len); @@ -739,8 +739,8 @@ EXPORT_SYMBOL(get_random_bytes_arch); enum { POOL_BITS = BLAKE2S_HASH_SIZE * 8, - POOL_INIT_BITS = POOL_BITS, /* No point in settling for less. 
*/ - POOL_FAST_INIT_BITS = POOL_INIT_BITS / 2 + POOL_READY_BITS = POOL_BITS, /* When crng_init->CRNG_READY */ + POOL_EARLY_BITS = POOL_READY_BITS / 2 /* When crng_init->CRNG_EARLY */ }; static struct { @@ -835,13 +835,13 @@ static void credit_init_bits(size_t nbits) init_bits = min_t(unsigned int, POOL_BITS, orig + add); } while (cmpxchg(&input_pool.init_bits, orig, init_bits) != orig); - if (!crng_ready() && init_bits >= POOL_INIT_BITS) + if (!crng_ready() && init_bits >= POOL_READY_BITS) crng_reseed(); - else if (unlikely(crng_init == 0 && init_bits >= POOL_FAST_INIT_BITS)) { + else if (unlikely(crng_init == CRNG_EMPTY && init_bits >= POOL_EARLY_BITS)) { spin_lock_irqsave(&base_crng.lock, flags); - if (crng_init == 0) { + if (crng_init == CRNG_EMPTY) { extract_entropy(base_crng.key, sizeof(base_crng.key)); - crng_init = 1; + crng_init = CRNG_EARLY; } spin_unlock_irqrestore(&base_crng.lock, flags); } @@ -1610,7 +1610,7 @@ const struct file_operations urandom_fops = { * * - write_wakeup_threshold - the amount of entropy in the input pool * below which write polls to /dev/random will unblock, requesting - * more entropy, tied to the POOL_INIT_BITS constant. It is writable + * more entropy, tied to the POOL_READY_BITS constant. It is writable * to avoid breaking old userspaces, but writing to it does not * change any behavior of the RNG. * @@ -1625,7 +1625,7 @@ const struct file_operations urandom_fops = { #include static int sysctl_random_min_urandom_seed = CRNG_RESEED_INTERVAL / HZ; -static int sysctl_random_write_wakeup_bits = POOL_INIT_BITS; +static int sysctl_random_write_wakeup_bits = POOL_READY_BITS; static int sysctl_poolsize = POOL_BITS; static u8 sysctl_bootid[UUID_SIZE]; -- cgit v1.2.3 From fed7ef061686cc813b1f3d8d0edc6c35b4d3537b Mon Sep 17 00:00:00 2001 From: Jason A. Donenfeld Date: Mon, 9 May 2022 13:40:55 +0200 Subject: random: avoid initializing twice in credit race Since all changes of crng_init now go through credit_init_bits(), we can fix a long standing race in which two concurrent callers of credit_init_bits() have the new bit count >= some threshold, but are doing so with crng_init as a lower threshold, checked outside of a lock, resulting in crng_reseed() or similar being called twice. In order to fix this, we can use the original cmpxchg value of the bit count, and only change crng_init when the bit count transitions from below a threshold to meeting the threshold. Reviewed-by: Dominik Brodowski Signed-off-by: Jason A. 
Donenfeld --- drivers/char/random.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/char/random.c b/drivers/char/random.c index 0bb3d7cf33e5..8b451e8490b5 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -822,7 +822,7 @@ static void extract_entropy(void *buf, size_t nbytes) static void credit_init_bits(size_t nbits) { - unsigned int init_bits, orig, add; + unsigned int new, orig, add; unsigned long flags; if (crng_ready() || !nbits) @@ -832,12 +832,12 @@ static void credit_init_bits(size_t nbits) do { orig = READ_ONCE(input_pool.init_bits); - init_bits = min_t(unsigned int, POOL_BITS, orig + add); - } while (cmpxchg(&input_pool.init_bits, orig, init_bits) != orig); + new = min_t(unsigned int, POOL_BITS, orig + add); + } while (cmpxchg(&input_pool.init_bits, orig, new) != orig); - if (!crng_ready() && init_bits >= POOL_READY_BITS) + if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) crng_reseed(); - else if (unlikely(crng_init == CRNG_EMPTY && init_bits >= POOL_EARLY_BITS)) { + else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) { spin_lock_irqsave(&base_crng.lock, flags); if (crng_init == CRNG_EMPTY) { extract_entropy(base_crng.key, sizeof(base_crng.key)); -- cgit v1.2.3 From 68c9c8b192c6dae9be6278e98ee44029d5da2d31 Mon Sep 17 00:00:00 2001 From: Jason A. Donenfeld Date: Mon, 9 May 2022 13:53:24 +0200 Subject: random: move initialization out of reseeding hot path Initialization happens once -- by way of credit_init_bits() -- and then it never happens again. Therefore, it doesn't need to be in crng_reseed(), which is a hot path that is called multiple times. It also doesn't make sense to have it there, as initialization activity is better associated with initialization routines. After the prior commit, crng_reseed() now won't be called by multiple concurrent callers, which means that we can safely move the "finalize_init" logic into credit_init_bits() unconditionally. Reviewed-by: Dominik Brodowski Signed-off-by: Jason A.
Donenfeld --- drivers/char/random.c | 42 +++++++++++++++++++----------------------- 1 file changed, 19 insertions(+), 23 deletions(-) (limited to 'drivers') diff --git a/drivers/char/random.c b/drivers/char/random.c index 8b451e8490b5..07200b8f4591 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -265,7 +265,6 @@ static void crng_reseed(void) unsigned long flags; unsigned long next_gen; u8 key[CHACHA_KEY_SIZE]; - bool finalize_init = false; extract_entropy(key, sizeof(key)); @@ -282,28 +281,10 @@ static void crng_reseed(void) ++next_gen; WRITE_ONCE(base_crng.generation, next_gen); WRITE_ONCE(base_crng.birth, jiffies); - if (!crng_ready()) { + if (!crng_ready()) crng_init = CRNG_READY; - finalize_init = true; - } spin_unlock_irqrestore(&base_crng.lock, flags); memzero_explicit(key, sizeof(key)); - if (finalize_init) { - process_random_ready_list(); - wake_up_interruptible(&crng_init_wait); - kill_fasync(&fasync, SIGIO, POLL_IN); - pr_notice("crng init done\n"); - if (unseeded_warning.missed) { - pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n", - unseeded_warning.missed); - unseeded_warning.missed = 0; - } - if (urandom_warning.missed) { - pr_notice("%d urandom warning(s) missed due to ratelimiting\n", - urandom_warning.missed); - urandom_warning.missed = 0; - } - } } /* @@ -835,10 +816,25 @@ static void credit_init_bits(size_t nbits) new = min_t(unsigned int, POOL_BITS, orig + add); } while (cmpxchg(&input_pool.init_bits, orig, new) != orig); - if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) - crng_reseed(); - else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) { + if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) { + crng_reseed(); /* Sets crng_init to CRNG_READY under base_crng.lock. */ + process_random_ready_list(); + wake_up_interruptible(&crng_init_wait); + kill_fasync(&fasync, SIGIO, POLL_IN); + pr_notice("crng init done\n"); + if (unseeded_warning.missed) { + pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n", + unseeded_warning.missed); + unseeded_warning.missed = 0; + } + if (urandom_warning.missed) { + pr_notice("%d urandom warning(s) missed due to ratelimiting\n", + urandom_warning.missed); + urandom_warning.missed = 0; + } + } else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) { spin_lock_irqsave(&base_crng.lock, flags); + /* Check if crng_init is CRNG_EMPTY, to avoid race with crng_reseed(). */ if (crng_init == CRNG_EMPTY) { extract_entropy(base_crng.key, sizeof(base_crng.key)); crng_init = CRNG_EARLY; -- cgit v1.2.3 From cc1e127bfa95b5fb2f9307e7168bf8b2b45b4c5e Mon Sep 17 00:00:00 2001 From: Jason A. Donenfeld Date: Mon, 9 May 2022 16:13:18 +0200 Subject: random: remove ratelimiting for in-kernel unseeded randomness The CONFIG_WARN_ALL_UNSEEDED_RANDOM debug option controls whether the kernel warns about all unseeded randomness or just the first instance. There's some complicated rate limiting and comparison to the previous caller, such that even with CONFIG_WARN_ALL_UNSEEDED_RANDOM enabled, developers still don't see all the messages or even an accurate count of how many were missed. This is the result of basically parallel mechanisms aimed at accomplishing more or less the same thing, added at different points in random.c history, which sort of compete with the first-instance-only limiting we have now. It turns out, however, that nobody cares about the first unseeded randomness instance of in-kernel users. The same first user has been there for ages now, and nobody is doing anything about it. 
It isn't even clear that anybody _can_ do anything about it. Most places that can do something about it have switched over to using get_random_bytes_wait() or wait_for_random_bytes(), which is the right thing to do, but there is still much code that needs randomness sometimes during init, and as a general rule, if you're not using one of the _wait functions or the readiness notifier callback, you're bound to be doing it wrong just based on that fact alone. So warning about this same first user that can't easily change is simply not an effective mechanism for anything at all. Users can't do anything about it, as the Kconfig text points out -- the problem isn't in userspace code -- and kernel developers don't or more often can't react to it. Instead, show the warning for all instances when CONFIG_WARN_ALL_UNSEEDED_RANDOM is set, so that developers can debug things as need be, or if it isn't set, don't show a warning at all. At the same time, CONFIG_WARN_ALL_UNSEEDED_RANDOM now implies setting random.ratelimit_disable=1 by default, since if you care about one you probably care about the other too. And we can clean up usage around the related urandom_warning ratelimiter as well (whose behavior isn't changing), so that it properly counts missed messages after the 10 message threshold is reached. Cc: Theodore Ts'o Cc: Dominik Brodowski Signed-off-by: Jason A. Donenfeld --- drivers/char/random.c | 62 +++++++++++++++------------------------------------ lib/Kconfig.debug | 3 +-- 2 files changed, 19 insertions(+), 46 deletions(-) (limited to 'drivers') diff --git a/drivers/char/random.c b/drivers/char/random.c index 07200b8f4591..3860d534cf05 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -87,11 +87,10 @@ static DEFINE_SPINLOCK(random_ready_chain_lock); static RAW_NOTIFIER_HEAD(random_ready_chain); /* Control how we warn userspace.
*/ -static struct ratelimit_state unseeded_warning = - RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3); static struct ratelimit_state urandom_warning = RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3); -static int ratelimit_disable __read_mostly; +static int ratelimit_disable __read_mostly = + IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM); module_param_named(ratelimit_disable, ratelimit_disable, int, 0644); MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression"); @@ -182,27 +181,15 @@ static void process_random_ready_list(void) spin_unlock_irqrestore(&random_ready_chain_lock, flags); } -#define warn_unseeded_randomness(previous) \ - _warn_unseeded_randomness(__func__, (void *)_RET_IP_, (previous)) +#define warn_unseeded_randomness() \ + _warn_unseeded_randomness(__func__, (void *)_RET_IP_) -static void _warn_unseeded_randomness(const char *func_name, void *caller, void **previous) +static void _warn_unseeded_randomness(const char *func_name, void *caller) { -#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM - const bool print_once = false; -#else - static bool print_once __read_mostly; -#endif - - if (print_once || crng_ready() || - (previous && (caller == READ_ONCE(*previous)))) + if (!IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) || crng_ready()) return; - WRITE_ONCE(*previous, caller); -#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM - print_once = true; -#endif - if (__ratelimit(&unseeded_warning)) - printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", - func_name, caller, crng_init); + printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", + func_name, caller, crng_init); } @@ -455,9 +442,7 @@ static void _get_random_bytes(void *buf, size_t nbytes) */ void get_random_bytes(void *buf, size_t nbytes) { - static void *previous; - - warn_unseeded_randomness(&previous); + warn_unseeded_randomness(); _get_random_bytes(buf, nbytes); } EXPORT_SYMBOL(get_random_bytes); @@ -553,10 +538,9 @@ u64 get_random_u64(void) u64 ret; unsigned long flags; struct batched_entropy *batch; - static void *previous; unsigned long next_gen; - warn_unseeded_randomness(&previous); + warn_unseeded_randomness(); if (!crng_ready()) { _get_random_bytes(&ret, sizeof(ret)); @@ -592,10 +576,9 @@ u32 get_random_u32(void) u32 ret; unsigned long flags; struct batched_entropy *batch; - static void *previous; unsigned long next_gen; - warn_unseeded_randomness(&previous); + warn_unseeded_randomness(); if (!crng_ready()) { _get_random_bytes(&ret, sizeof(ret)); @@ -822,16 +805,9 @@ static void credit_init_bits(size_t nbits) wake_up_interruptible(&crng_init_wait); kill_fasync(&fasync, SIGIO, POLL_IN); pr_notice("crng init done\n"); - if (unseeded_warning.missed) { - pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n", - unseeded_warning.missed); - unseeded_warning.missed = 0; - } - if (urandom_warning.missed) { + if (urandom_warning.missed) pr_notice("%d urandom warning(s) missed due to ratelimiting\n", urandom_warning.missed); - urandom_warning.missed = 0; - } } else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) { spin_lock_irqsave(&base_crng.lock, flags); /* Check if crng_init is CRNG_EMPTY, to avoid race with crng_reseed(). 
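* A concurrent caller may already have crossed POOL_READY_BITS and had crng_reseed() set CRNG_READY between the cmpxchg above and taking base_crng.lock here.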
*/ @@ -976,11 +952,6 @@ int __init rand_initialize(void) else if (arch_init && trust_cpu) credit_init_bits(BLAKE2S_BLOCK_SIZE * 8); - if (ratelimit_disable) { - urandom_warning.interval = 0; - unseeded_warning.interval = 0; - } - WARN_ON(register_pm_notifier(&pm_notifier)); WARN(!random_get_entropy(), "Missing cycle counter and fallback timer; RNG " @@ -1487,11 +1458,14 @@ static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes, if (!crng_ready()) try_to_generate_entropy(); - if (!crng_ready() && maxwarn > 0) { - maxwarn--; - if (__ratelimit(&urandom_warning)) + if (!crng_ready()) { + if (!ratelimit_disable && maxwarn <= 0) + ++urandom_warning.missed; + else if (ratelimit_disable || __ratelimit(&urandom_warning)) { + --maxwarn; pr_notice("%s: uninitialized urandom read (%zd bytes read)\n", current->comm, nbytes); + } } return get_random_bytes_user(buf, nbytes); diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 075cd25363ac..7e282970177a 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1616,8 +1616,7 @@ config WARN_ALL_UNSEEDED_RANDOM so architecture maintainers really need to do what they can to get the CRNG seeded sooner after the system is booted. However, since users cannot do anything actionable to - address this, by default the kernel will issue only a single - warning for the first use of unseeded randomness. + address this, by default this option is disabled. Say Y here if you want to receive warnings for all uses of unseeded randomness. This will be of use primarily for -- cgit v1.2.3 From 8a5b8a4a4ceb353b4dd5bafd09e2b15751bcdb51 Mon Sep 17 00:00:00 2001 From: Jason A. Donenfeld Date: Tue, 10 May 2022 15:20:42 +0200 Subject: random: use proper jiffies comparison macro This expands to exactly the same code that it replaces, but makes things consistent by using the same macro for jiffy comparisons throughout. Signed-off-by: Jason A. Donenfeld --- drivers/char/random.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/char/random.c b/drivers/char/random.c index 3860d534cf05..9024aeba2d28 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -325,7 +325,7 @@ static bool crng_has_old_seed(void) interval = max_t(unsigned int, CRNG_RESEED_START_INTERVAL, (unsigned int)uptime / 2 * HZ); } - return time_after(jiffies, READ_ONCE(base_crng.birth) + interval); + return time_is_before_jiffies(READ_ONCE(base_crng.birth) + interval); } /* -- cgit v1.2.3 From 2f14062bb14b0fcfcc21e6dc7d5b5c0d25966164 Mon Sep 17 00:00:00 2001 From: Jason A. Donenfeld Date: Thu, 5 May 2022 02:20:22 +0200 Subject: random: handle latent entropy and command line from random_init() Currently, start_kernel() adds latent entropy and the command line to the entropy pool *after* the RNG has been initialized, deferring when it's actually used by things like stack canaries until the next time the pool is seeded. This surely is not intended. Rather than splitting up which entropy gets added where and when between start_kernel() and random_init(), just do everything in random_init(), which should eliminate these kinds of bugs in the future. While we're at it, rename the awkwardly titled "rand_initialize()" to the more standard "random_init()" nomenclature. Reviewed-by: Dominik Brodowski Signed-off-by: Jason A.
Donenfeld --- arch/openrisc/kernel/head.S | 2 +- drivers/char/random.c | 17 ++++++++++------- include/linux/random.h | 15 +++++++-------- init/main.c | 10 +++------- 4 files changed, 21 insertions(+), 23 deletions(-) (limited to 'drivers') diff --git a/arch/openrisc/kernel/head.S b/arch/openrisc/kernel/head.S index 871f4c858859..2fa6cefa62ca 100644 --- a/arch/openrisc/kernel/head.S +++ b/arch/openrisc/kernel/head.S @@ -525,7 +525,7 @@ _start: * Start the TTCR as early as possible, so that the RNG can make use of * measurements of boot time from the earliest opportunity. Especially * important is that the TTCR does not return zero by the time we reach - * rand_initialize(). + * random_init(). */ l.movhi r3,hi(SPR_TTMR_CR) l.mtspr r0,r3,SPR_TTMR diff --git a/drivers/char/random.c b/drivers/char/random.c index 9024aeba2d28..95982dc08669 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -919,12 +919,13 @@ static struct notifier_block pm_notifier = { .notifier_call = random_pm_notifica /* * The first collection of entropy occurs at system boot while interrupts - * are still turned off. Here we push in RDSEED, a timestamp, and utsname(). - * Depending on the above configuration knob, RDSEED may be considered - * sufficient for initialization. Note that much earlier setup may already - * have pushed entropy into the input pool by the time we get here. + * are still turned off. Here we push in latent entropy, RDSEED, a timestamp, + * utsname(), and the command line. Depending on the above configuration knob, + * RDSEED may be considered sufficient for initialization. Note that much + * earlier setup may already have pushed entropy into the input pool by the + * time we get here. */ -int __init rand_initialize(void) +int __init random_init(const char *command_line) { size_t i; ktime_t now = ktime_get_real(); @@ -946,6 +947,8 @@ int __init rand_initialize(void) } _mix_pool_bytes(&now, sizeof(now)); _mix_pool_bytes(utsname(), sizeof(*(utsname()))); + _mix_pool_bytes(command_line, strlen(command_line)); + add_latent_entropy(); if (crng_ready()) crng_reseed(); @@ -1685,8 +1688,8 @@ static struct ctl_table random_table[] = { }; /* - * rand_initialize() is called before sysctl_init(), - * so we cannot call register_sysctl_init() in rand_initialize() + * random_init() is called before sysctl_init(), + * so we cannot call register_sysctl_init() in random_init() */ static int __init random_sysctls_init(void) { diff --git a/include/linux/random.h b/include/linux/random.h index f673fbb838b3..b52963955a99 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -14,22 +14,21 @@ struct notifier_block; extern void add_device_randomness(const void *, size_t); extern void add_bootloader_randomness(const void *, size_t); +extern void add_input_randomness(unsigned int type, unsigned int code, + unsigned int value) __latent_entropy; +extern void add_interrupt_randomness(int irq) __latent_entropy; +extern void add_hwgenerator_randomness(const void *buffer, size_t count, + size_t entropy); #if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__) static inline void add_latent_entropy(void) { - add_device_randomness((const void *)&latent_entropy, - sizeof(latent_entropy)); + add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy)); } #else static inline void add_latent_entropy(void) {} #endif -extern void add_input_randomness(unsigned int type, unsigned int code, - unsigned int value) __latent_entropy; -extern void add_interrupt_randomness(int irq) __latent_entropy; 
-extern void add_hwgenerator_randomness(const void *buffer, size_t count, - size_t entropy); #if IS_ENABLED(CONFIG_VMGENID) extern void add_vmfork_randomness(const void *unique_vm_id, size_t size); extern int register_random_vmfork_notifier(struct notifier_block *nb); @@ -41,7 +40,7 @@ static inline int unregister_random_vmfork_notifier(struct notifier_block *nb) { extern void get_random_bytes(void *buf, size_t nbytes); extern int wait_for_random_bytes(void); -extern int __init rand_initialize(void); +extern int __init random_init(const char *command_line); extern bool rng_is_initialized(void); extern int register_random_ready_notifier(struct notifier_block *nb); extern int unregister_random_ready_notifier(struct notifier_block *nb); diff --git a/init/main.c b/init/main.c index 92783732a36f..f057c49f1d9d 100644 --- a/init/main.c +++ b/init/main.c @@ -1040,15 +1040,11 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void) /* * For best initial stack canary entropy, prepare it after: * - setup_arch() for any UEFI RNG entropy and boot cmdline access - * - timekeeping_init() for ktime entropy used in rand_initialize() + * - timekeeping_init() for ktime entropy used in random_init() * - time_init() for making random_get_entropy() work on some platforms - * - rand_initialize() to get any arch-specific entropy like RDRAND - * - add_latent_entropy() to get any latent entropy - * - adding command line entropy + * - random_init() to initialize the RNG from early entropy sources */ - rand_initialize(); - add_latent_entropy(); - add_device_randomness(command_line, strlen(command_line)); + random_init(command_line); boot_init_stack_canary(); perf_event_init(); -- cgit v1.2.3 From 12e45a2a6308105469968951e6d563e8f4fea187 Mon Sep 17 00:00:00 2001 From: Jason A. Donenfeld Date: Thu, 12 May 2022 15:32:26 +0200 Subject: random: credit architectural init the exact amount RDRAND and RDSEED can fail sometimes, which is fine. We currently initialize the RNG with 512 bits of RDRAND/RDSEED. We only need 256 bits of those to succeed in order to initialize the RNG. Instead of the current "all or nothing" approach, actually credit these contributions the amount that is actually contributed. Reviewed-by: Dominik Brodowski Signed-off-by: Jason A.
Donenfeld --- drivers/char/random.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/char/random.c b/drivers/char/random.c index 95982dc08669..29062595b610 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -927,9 +927,8 @@ static struct notifier_block pm_notifier = { .notifier_call = random_pm_notifica */ int __init random_init(const char *command_line) { - size_t i; ktime_t now = ktime_get_real(); - bool arch_init = true; + unsigned int i, arch_bytes; unsigned long rv; #if defined(LATENT_ENTROPY_PLUGIN) @@ -937,11 +936,12 @@ int __init random_init(const char *command_line) _mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed)); #endif - for (i = 0; i < BLAKE2S_BLOCK_SIZE; i += sizeof(rv)) { + for (i = 0, arch_bytes = BLAKE2S_BLOCK_SIZE; + i < BLAKE2S_BLOCK_SIZE; i += sizeof(rv)) { if (!arch_get_random_seed_long_early(&rv) && !arch_get_random_long_early(&rv)) { rv = random_get_entropy(); - arch_init = false; + arch_bytes -= sizeof(rv); } _mix_pool_bytes(&rv, sizeof(rv)); } @@ -952,8 +952,8 @@ int __init random_init(const char *command_line) if (crng_ready()) crng_reseed(); - else if (arch_init && trust_cpu) - credit_init_bits(BLAKE2S_BLOCK_SIZE * 8); + else if (trust_cpu) + credit_init_bits(arch_bytes * 8); WARN_ON(register_pm_notifier(&pm_notifier)); -- cgit v1.2.3 From f5bda35fba615ace70a656d4700423fa6c9bebee Mon Sep 17 00:00:00 2001 From: Jason A. Donenfeld Date: Tue, 3 May 2022 15:30:45 +0200 Subject: random: use static branch for crng_ready() Since crng_ready() is only false briefly during initialization and then forever after becomes true, we don't need to evaluate it after, making it a prime candidate for a static branch. One complication, however, is that it changes state in a particular call to credit_init_bits(), which might be made from atomic context, which means we must kick off a workqueue to change the static key. Further complicating things, credit_init_bits() may be called sufficiently early on in system initialization such that system_wq is NULL. Fortunately, there exists the nice function execute_in_process_context(), which will immediately execute the function if !in_interrupt(), and otherwise defer it to a workqueue. During early init, before workqueues are available, in_interrupt() is always false, because interrupts haven't even been enabled yet, which means the function in that case executes immediately. Later on, after workqueues are available, in_interrupt() might be true, but in that case, the work is queued in system_wq and all goes well. Cc: Theodore Ts'o Cc: Sultan Alsawaf Reviewed-by: Dominik Brodowski Signed-off-by: Jason A. Donenfeld --- drivers/char/random.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/char/random.c b/drivers/char/random.c index 29062595b610..b2cfd659bae5 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -78,8 +78,9 @@ static enum { CRNG_EMPTY = 0, /* Little to no entropy collected */ CRNG_EARLY = 1, /* At least POOL_EARLY_BITS collected */ CRNG_READY = 2 /* Fully initialized with POOL_READY_BITS collected */ -} crng_init = CRNG_EMPTY; -#define crng_ready() (likely(crng_init >= CRNG_READY)) +} crng_init __read_mostly = CRNG_EMPTY; +static DEFINE_STATIC_KEY_FALSE(crng_is_ready); +#define crng_ready() (static_branch_likely(&crng_is_ready) || crng_init >= CRNG_READY) /* Various types of waiters for crng_init->CRNG_READY transition. 
*/ static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait); static struct fasync_struct *fasync; @@ -109,6 +110,11 @@ bool rng_is_initialized(void) } EXPORT_SYMBOL(rng_is_initialized); +static void crng_set_ready(struct work_struct *work) +{ + static_branch_enable(&crng_is_ready); +} + /* Used by wait_for_random_bytes(), and considered an entropy collector, below. */ static void try_to_generate_entropy(void); @@ -268,7 +274,7 @@ static void crng_reseed(void) ++next_gen; WRITE_ONCE(base_crng.generation, next_gen); WRITE_ONCE(base_crng.birth, jiffies); - if (!crng_ready()) + if (!static_branch_likely(&crng_is_ready)) crng_init = CRNG_READY; spin_unlock_irqrestore(&base_crng.lock, flags); memzero_explicit(key, sizeof(key)); @@ -786,6 +792,7 @@ static void extract_entropy(void *buf, size_t nbytes) static void credit_init_bits(size_t nbits) { + static struct execute_work set_ready; unsigned int new, orig, add; unsigned long flags; @@ -801,6 +808,7 @@ static void credit_init_bits(size_t nbits) if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) { crng_reseed(); /* Sets crng_init to CRNG_READY under base_crng.lock. */ + execute_in_process_context(crng_set_ready, &set_ready); process_random_ready_list(); wake_up_interruptible(&crng_init_wait); kill_fasync(&fasync, SIGIO, POLL_IN); @@ -1396,7 +1404,7 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int, if (count > INT_MAX) count = INT_MAX; - if (!(flags & GRND_INSECURE) && !crng_ready()) { + if (!crng_ready() && !(flags & GRND_INSECURE)) { int ret; if (flags & GRND_NONBLOCK) -- cgit v1.2.3 From a19402634c435a4eae226df53c141cdbb9922e7b Mon Sep 17 00:00:00 2001 From: Jason A. Donenfeld Date: Fri, 13 May 2022 13:18:46 +0200 Subject: random: make consistent use of buf and len The current code was a mix of "nbytes", "count", "size", "buffer", "in", and so forth. Instead, let's clean this up by naming input parameters "buf" (or "ubuf") and "len", so that you always understand that you're reading this variety of function argument. Signed-off-by: Jason A. Donenfeld --- drivers/char/random.c | 199 ++++++++++++++++++++++++------------------------- include/linux/random.h | 12 +-- 2 files changed, 103 insertions(+), 108 deletions(-) (limited to 'drivers') diff --git a/drivers/char/random.c b/drivers/char/random.c index b2cfd659bae5..0475ed69aa34 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -209,7 +209,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller) * * There are a few exported interfaces for use by other drivers: * - * void get_random_bytes(void *buf, size_t nbytes) + * void get_random_bytes(void *buf, size_t len) * u32 get_random_u32() * u64 get_random_u64() * unsigned int get_random_int() @@ -250,7 +250,7 @@ static DEFINE_PER_CPU(struct crng, crngs) = { }; /* Used by crng_reseed() and crng_make_state() to extract a new seed from the input pool. */ -static void extract_entropy(void *buf, size_t nbytes); +static void extract_entropy(void *buf, size_t len); /* This extracts a new crng key from the input pool. 
*/ static void crng_reseed(void) @@ -404,24 +404,24 @@ static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS], local_unlock_irqrestore(&crngs.lock, flags); } -static void _get_random_bytes(void *buf, size_t nbytes) +static void _get_random_bytes(void *buf, size_t len) { u32 chacha_state[CHACHA_STATE_WORDS]; u8 tmp[CHACHA_BLOCK_SIZE]; - size_t len; + size_t first_block_len; - if (!nbytes) + if (!len) return; - len = min_t(size_t, 32, nbytes); - crng_make_state(chacha_state, buf, len); - nbytes -= len; - buf += len; + first_block_len = min_t(size_t, 32, len); + crng_make_state(chacha_state, buf, first_block_len); + len -= first_block_len; + buf += first_block_len; - while (nbytes) { - if (nbytes < CHACHA_BLOCK_SIZE) { + while (len) { + if (len < CHACHA_BLOCK_SIZE) { chacha20_block(chacha_state, tmp); - memcpy(buf, tmp, nbytes); + memcpy(buf, tmp, len); memzero_explicit(tmp, sizeof(tmp)); break; } @@ -429,7 +429,7 @@ static void _get_random_bytes(void *buf, size_t nbytes) chacha20_block(chacha_state, buf); if (unlikely(chacha_state[12] == 0)) ++chacha_state[13]; - nbytes -= CHACHA_BLOCK_SIZE; + len -= CHACHA_BLOCK_SIZE; buf += CHACHA_BLOCK_SIZE; } @@ -446,20 +446,20 @@ static void _get_random_bytes(void *buf, size_t nbytes) * wait_for_random_bytes() should be called and return 0 at least once * at any point prior. */ -void get_random_bytes(void *buf, size_t nbytes) +void get_random_bytes(void *buf, size_t len) { warn_unseeded_randomness(); - _get_random_bytes(buf, nbytes); + _get_random_bytes(buf, len); } EXPORT_SYMBOL(get_random_bytes); -static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes) +static ssize_t get_random_bytes_user(void __user *ubuf, size_t len) { - size_t len, left, ret = 0; + size_t block_len, left, ret = 0; u32 chacha_state[CHACHA_STATE_WORDS]; u8 output[CHACHA_BLOCK_SIZE]; - if (!nbytes) + if (!len) return 0; /* @@ -473,8 +473,8 @@ static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes) * use chacha_state after, so we can simply return those bytes to * the user directly. */ - if (nbytes <= CHACHA_KEY_SIZE) { - ret = nbytes - copy_to_user(buf, &chacha_state[4], nbytes); + if (len <= CHACHA_KEY_SIZE) { + ret = len - copy_to_user(ubuf, &chacha_state[4], len); goto out_zero_chacha; } @@ -483,17 +483,17 @@ static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes) if (unlikely(chacha_state[12] == 0)) ++chacha_state[13]; - len = min_t(size_t, nbytes, CHACHA_BLOCK_SIZE); - left = copy_to_user(buf, output, len); + block_len = min_t(size_t, len, CHACHA_BLOCK_SIZE); + left = copy_to_user(ubuf, output, block_len); if (left) { - ret += len - left; + ret += block_len - left; break; } - buf += len; - ret += len; - nbytes -= len; - if (!nbytes) + ubuf += block_len; + ret += block_len; + len -= block_len; + if (!len) break; BUILD_BUG_ON(PAGE_SIZE % CHACHA_BLOCK_SIZE != 0); @@ -667,24 +667,24 @@ unsigned long randomize_page(unsigned long start, unsigned long range) * use. Use get_random_bytes() instead. It returns the number of * bytes filled in. 
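* The return value may be less than len if arch_get_random_long() fails partway through.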
*/ -size_t __must_check get_random_bytes_arch(void *buf, size_t nbytes) +size_t __must_check get_random_bytes_arch(void *buf, size_t len) { - size_t left = nbytes; + size_t left = len; u8 *p = buf; while (left) { unsigned long v; - size_t chunk = min_t(size_t, left, sizeof(unsigned long)); + size_t block_len = min_t(size_t, left, sizeof(unsigned long)); if (!arch_get_random_long(&v)) break; - memcpy(p, &v, chunk); - p += chunk; - left -= chunk; + memcpy(p, &v, block_len); + p += block_len; + left -= block_len; } - return nbytes - left; + return len - left; } EXPORT_SYMBOL(get_random_bytes_arch); @@ -695,15 +695,15 @@ EXPORT_SYMBOL(get_random_bytes_arch); * * Callers may add entropy via: * - * static void mix_pool_bytes(const void *in, size_t nbytes) + * static void mix_pool_bytes(const void *buf, size_t len) * * After which, if added entropy should be credited: * - * static void credit_init_bits(size_t nbits) + * static void credit_init_bits(size_t bits) * * Finally, extract entropy via: * - * static void extract_entropy(void *buf, size_t nbytes) + * static void extract_entropy(void *buf, size_t len) * **********************************************************************/ @@ -725,9 +725,9 @@ static struct { .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock), }; -static void _mix_pool_bytes(const void *in, size_t nbytes) +static void _mix_pool_bytes(const void *buf, size_t len) { - blake2s_update(&input_pool.hash, in, nbytes); + blake2s_update(&input_pool.hash, buf, len); } /* @@ -735,12 +735,12 @@ static void _mix_pool_bytes(const void *in, size_t nbytes) * update the initialization bit counter; the caller should call * credit_init_bits if this is appropriate. */ -static void mix_pool_bytes(const void *in, size_t nbytes) +static void mix_pool_bytes(const void *buf, size_t len) { unsigned long flags; spin_lock_irqsave(&input_pool.lock, flags); - _mix_pool_bytes(in, nbytes); + _mix_pool_bytes(buf, len); spin_unlock_irqrestore(&input_pool.lock, flags); } @@ -748,7 +748,7 @@ static void mix_pool_bytes(const void *in, size_t nbytes) * This is an HKDF-like construction for using the hashed collected entropy * as a PRF key, that's then expanded block-by-block. 
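* A fresh next_key is also derived on each call and re-keys the pool hash, so the pool state ratchets forward with every extraction.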
*/ -static void extract_entropy(void *buf, size_t nbytes) +static void extract_entropy(void *buf, size_t len) { unsigned long flags; u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE]; @@ -777,12 +777,12 @@ static void extract_entropy(void *buf, size_t nbytes) spin_unlock_irqrestore(&input_pool.lock, flags); memzero_explicit(next_key, sizeof(next_key)); - while (nbytes) { - i = min_t(size_t, nbytes, BLAKE2S_HASH_SIZE); + while (len) { + i = min_t(size_t, len, BLAKE2S_HASH_SIZE); /* output = HASHPRF(seed, RDSEED || ++counter) */ ++block.counter; blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed)); - nbytes -= i; + len -= i; buf += i; } @@ -790,16 +790,16 @@ static void extract_entropy(void *buf, size_t nbytes) memzero_explicit(&block, sizeof(block)); } -static void credit_init_bits(size_t nbits) +static void credit_init_bits(size_t bits) { static struct execute_work set_ready; unsigned int new, orig, add; unsigned long flags; - if (crng_ready() || !nbits) + if (crng_ready() || !bits) return; - add = min_t(size_t, nbits, POOL_BITS); + add = min_t(size_t, bits, POOL_BITS); do { orig = READ_ONCE(input_pool.init_bits); @@ -835,14 +835,12 @@ static void credit_init_bits(size_t nbits) * The following exported functions are used for pushing entropy into * the above entropy accumulation routines: * - * void add_device_randomness(const void *buf, size_t size); - * void add_hwgenerator_randomness(const void *buffer, size_t count, - * size_t entropy); - * void add_bootloader_randomness(const void *buf, size_t size); - * void add_vmfork_randomness(const void *unique_vm_id, size_t size); + * void add_device_randomness(const void *buf, size_t len); + * void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy); + * void add_bootloader_randomness(const void *buf, size_t len); + * void add_vmfork_randomness(const void *unique_vm_id, size_t len); * void add_interrupt_randomness(int irq); - * void add_input_randomness(unsigned int type, unsigned int code, - * unsigned int value); + * void add_input_randomness(unsigned int type, unsigned int code, unsigned int value); * void add_disk_randomness(struct gendisk *disk); * * add_device_randomness() adds data to the input pool that @@ -937,7 +935,7 @@ int __init random_init(const char *command_line) { ktime_t now = ktime_get_real(); unsigned int i, arch_bytes; - unsigned long rv; + unsigned long entropy; #if defined(LATENT_ENTROPY_PLUGIN) static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy; @@ -945,13 +943,13 @@ int __init random_init(const char *command_line) #endif for (i = 0, arch_bytes = BLAKE2S_BLOCK_SIZE; - i < BLAKE2S_BLOCK_SIZE; i += sizeof(rv)) { - if (!arch_get_random_seed_long_early(&rv) && - !arch_get_random_long_early(&rv)) { - rv = random_get_entropy(); - arch_bytes -= sizeof(rv); + i < BLAKE2S_BLOCK_SIZE; i += sizeof(entropy)) { + if (!arch_get_random_seed_long_early(&entropy) && + !arch_get_random_long_early(&entropy)) { + entropy = random_get_entropy(); + arch_bytes -= sizeof(entropy); } - _mix_pool_bytes(&rv, sizeof(rv)); + _mix_pool_bytes(&entropy, sizeof(entropy)); } _mix_pool_bytes(&now, sizeof(now)); _mix_pool_bytes(utsname(), sizeof(*(utsname()))); @@ -978,14 +976,14 @@ int __init random_init(const char *command_line) * the entropy pool having similar initial state across largely * identical devices. 
*/ -void add_device_randomness(const void *buf, size_t size) +void add_device_randomness(const void *buf, size_t len) { unsigned long entropy = random_get_entropy(); unsigned long flags; spin_lock_irqsave(&input_pool.lock, flags); _mix_pool_bytes(&entropy, sizeof(entropy)); - _mix_pool_bytes(buf, size); + _mix_pool_bytes(buf, len); spin_unlock_irqrestore(&input_pool.lock, flags); } EXPORT_SYMBOL(add_device_randomness); @@ -995,10 +993,9 @@ EXPORT_SYMBOL(add_device_randomness); * Those devices may produce endless random bits and will be throttled * when our pool is full. */ -void add_hwgenerator_randomness(const void *buffer, size_t count, - size_t entropy) +void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy) { - mix_pool_bytes(buffer, count); + mix_pool_bytes(buf, len); credit_init_bits(entropy); /* @@ -1014,11 +1011,11 @@ EXPORT_SYMBOL_GPL(add_hwgenerator_randomness); * Handle random seed passed by bootloader, and credit it if * CONFIG_RANDOM_TRUST_BOOTLOADER is set. */ -void add_bootloader_randomness(const void *buf, size_t size) +void add_bootloader_randomness(const void *buf, size_t len) { - mix_pool_bytes(buf, size); + mix_pool_bytes(buf, len); if (trust_bootloader) - credit_init_bits(size * 8); + credit_init_bits(len * 8); } EXPORT_SYMBOL_GPL(add_bootloader_randomness); @@ -1030,9 +1027,9 @@ static BLOCKING_NOTIFIER_HEAD(vmfork_chain); * don't credit it, but we do immediately force a reseed after so * that it's used by the crng posthaste. */ -void add_vmfork_randomness(const void *unique_vm_id, size_t size) +void add_vmfork_randomness(const void *unique_vm_id, size_t len) { - add_device_randomness(unique_vm_id, size); + add_device_randomness(unique_vm_id, len); if (crng_ready()) { crng_reseed(); pr_notice("crng reseeded due to virtual machine fork\n"); @@ -1252,8 +1249,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned int nu credit_init_bits(bits); } -void add_input_randomness(unsigned int type, unsigned int code, - unsigned int value) +void add_input_randomness(unsigned int type, unsigned int code, unsigned int value) { static unsigned char last_value; static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES }; @@ -1388,8 +1384,7 @@ static void try_to_generate_entropy(void) * **********************************************************************/ -SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int, - flags) +SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags) { if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE)) return -EINVAL; @@ -1401,8 +1396,8 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int, if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM)) return -EINVAL; - if (count > INT_MAX) - count = INT_MAX; + if (len > INT_MAX) + len = INT_MAX; if (!crng_ready() && !(flags & GRND_INSECURE)) { int ret; @@ -1413,7 +1408,7 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int, if (unlikely(ret)) return ret; } - return get_random_bytes_user(buf, count); + return get_random_bytes_user(ubuf, len); } static __poll_t random_poll(struct file *file, poll_table *wait) @@ -1422,21 +1417,21 @@ static __poll_t random_poll(struct file *file, poll_table *wait) return crng_ready() ? 
EPOLLIN | EPOLLRDNORM : EPOLLOUT | EPOLLWRNORM; } -static int write_pool(const char __user *ubuf, size_t count) +static int write_pool(const char __user *ubuf, size_t len) { - size_t len; + size_t block_len; int ret = 0; u8 block[BLAKE2S_BLOCK_SIZE]; - while (count) { - len = min(count, sizeof(block)); - if (copy_from_user(block, ubuf, len)) { + while (len) { + block_len = min(len, sizeof(block)); + if (copy_from_user(block, ubuf, block_len)) { ret = -EFAULT; goto out; } - count -= len; - ubuf += len; - mix_pool_bytes(block, len); + len -= block_len; + ubuf += block_len; + mix_pool_bytes(block, block_len); cond_resched(); } @@ -1445,20 +1440,20 @@ out: return ret; } -static ssize_t random_write(struct file *file, const char __user *buffer, - size_t count, loff_t *ppos) +static ssize_t random_write(struct file *file, const char __user *ubuf, + size_t len, loff_t *ppos) { int ret; - ret = write_pool(buffer, count); + ret = write_pool(ubuf, len); if (ret) return ret; - return (ssize_t)count; + return (ssize_t)len; } -static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes, - loff_t *ppos) +static ssize_t urandom_read(struct file *file, char __user *ubuf, + size_t len, loff_t *ppos) { static int maxwarn = 10; @@ -1475,22 +1470,22 @@ static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes, else if (ratelimit_disable || __ratelimit(&urandom_warning)) { --maxwarn; pr_notice("%s: uninitialized urandom read (%zd bytes read)\n", - current->comm, nbytes); + current->comm, len); } } - return get_random_bytes_user(buf, nbytes); + return get_random_bytes_user(ubuf, len); } -static ssize_t random_read(struct file *file, char __user *buf, size_t nbytes, - loff_t *ppos) +static ssize_t random_read(struct file *file, char __user *ubuf, + size_t len, loff_t *ppos) { int ret; ret = wait_for_random_bytes(); if (ret != 0) return ret; - return get_random_bytes_user(buf, nbytes); + return get_random_bytes_user(ubuf, len); } static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) @@ -1615,7 +1610,7 @@ static u8 sysctl_bootid[UUID_SIZE]; * UUID. The difference is in whether table->data is NULL; if it is, * then a new UUID is generated and returned to the user. */ -static int proc_do_uuid(struct ctl_table *table, int write, void *buffer, +static int proc_do_uuid(struct ctl_table *table, int write, void *buf, size_t *lenp, loff_t *ppos) { u8 tmp_uuid[UUID_SIZE], *uuid; @@ -1642,14 +1637,14 @@ static int proc_do_uuid(struct ctl_table *table, int write, void *buffer, } snprintf(uuid_string, sizeof(uuid_string), "%pU", uuid); - return proc_dostring(&fake_table, 0, buffer, lenp, ppos); + return proc_dostring(&fake_table, 0, buf, lenp, ppos); } /* The same as proc_dointvec, but writes don't change anything. */ -static int proc_do_rointvec(struct ctl_table *table, int write, void *buffer, +static int proc_do_rointvec(struct ctl_table *table, int write, void *buf, size_t *lenp, loff_t *ppos) { - return write ? 0 : proc_dointvec(table, 0, buffer, lenp, ppos); + return write ? 
0 : proc_dointvec(table, 0, buf, lenp, ppos); } static struct ctl_table random_table[] = { diff --git a/include/linux/random.h b/include/linux/random.h index 883bf9a23d84..fc82f1dc36f1 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -12,12 +12,12 @@ struct notifier_block; -void add_device_randomness(const void *, size_t); -void add_bootloader_randomness(const void *, size_t); +void add_device_randomness(const void *buf, size_t len); +void add_bootloader_randomness(const void *buf, size_t len); void add_input_randomness(unsigned int type, unsigned int code, unsigned int value) __latent_entropy; void add_interrupt_randomness(int irq) __latent_entropy; -void add_hwgenerator_randomness(const void *buffer, size_t count, size_t entropy); +void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy); #if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__) static inline void add_latent_entropy(void) @@ -29,7 +29,7 @@ static inline void add_latent_entropy(void) { } #endif #if IS_ENABLED(CONFIG_VMGENID) -void add_vmfork_randomness(const void *unique_vm_id, size_t size); +void add_vmfork_randomness(const void *unique_vm_id, size_t len); int register_random_vmfork_notifier(struct notifier_block *nb); int unregister_random_vmfork_notifier(struct notifier_block *nb); #else @@ -37,8 +37,8 @@ static inline int register_random_vmfork_notifier(struct notifier_block *nb) { r static inline int unregister_random_vmfork_notifier(struct notifier_block *nb) { return 0; } #endif -void get_random_bytes(void *buf, size_t nbytes); -size_t __must_check get_random_bytes_arch(void *buf, size_t nbytes); +void get_random_bytes(void *buf, size_t len); +size_t __must_check get_random_bytes_arch(void *buf, size_t len); u32 get_random_u32(void); u64 get_random_u64(void); static inline unsigned int get_random_int(void) -- cgit v1.2.3 From 560181c27b582557d633ecb608110075433383af Mon Sep 17 00:00:00 2001 From: Jason A. Donenfeld Date: Fri, 13 May 2022 16:17:12 +0200 Subject: random: move initialization functions out of hot pages Much of random.c is devoted to initializing the rng and accounting for when a sufficient amount of entropy has been added. In a perfect world, this would all happen during init, and so we could mark these functions as __init. But in reality, this isn't the case: sometimes the rng only finishes initializing some seconds after system init is finished. For this reason, at the moment, a whole host of functions that are only used relatively close to system init and then never again are intermixed with functions that are used in hot code all the time. This creates more cache misses than necessary. In order to pack the hot code closer together, this commit moves the initialization functions that can't be marked as __init into .text.unlikely by way of the __cold attribute. Of particular note is moving credit_init_bits() into a macro wrapper that inlines the crng_ready() static branch check. This avoids a function call to a nop+ret, and most notably prevents extra entropy arithmetic from being computed in mix_interrupt_randomness(). Reviewed-by: Dominik Brodowski Signed-off-by: Jason A. 
Donenfeld --- drivers/char/random.c | 46 +++++++++++++++++++++------------------------- 1 file changed, 21 insertions(+), 25 deletions(-) (limited to 'drivers') diff --git a/drivers/char/random.c b/drivers/char/random.c index 0475ed69aa34..7ec700683e42 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -110,7 +110,7 @@ bool rng_is_initialized(void) } EXPORT_SYMBOL(rng_is_initialized); -static void crng_set_ready(struct work_struct *work) +static void __cold crng_set_ready(struct work_struct *work) { static_branch_enable(&crng_is_ready); } @@ -149,7 +149,7 @@ EXPORT_SYMBOL(wait_for_random_bytes); * returns: 0 if callback is successfully added * -EALREADY if pool is already initialised (callback not called) */ -int register_random_ready_notifier(struct notifier_block *nb) +int __cold register_random_ready_notifier(struct notifier_block *nb) { unsigned long flags; int ret = -EALREADY; @@ -167,7 +167,7 @@ int register_random_ready_notifier(struct notifier_block *nb) /* * Delete a previously registered readiness callback function. */ -int unregister_random_ready_notifier(struct notifier_block *nb) +int __cold unregister_random_ready_notifier(struct notifier_block *nb) { unsigned long flags; int ret; @@ -178,7 +178,7 @@ int unregister_random_ready_notifier(struct notifier_block *nb) return ret; } -static void process_random_ready_list(void) +static void __cold process_random_ready_list(void) { unsigned long flags; @@ -188,15 +188,9 @@ static void process_random_ready_list(void) } #define warn_unseeded_randomness() \ - _warn_unseeded_randomness(__func__, (void *)_RET_IP_) - -static void _warn_unseeded_randomness(const char *func_name, void *caller) -{ - if (!IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) || crng_ready()) - return; - printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", - func_name, caller, crng_init); -} + if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \ + printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \ + __func__, (void *)_RET_IP_, crng_init) /********************************************************************* @@ -615,7 +609,7 @@ EXPORT_SYMBOL(get_random_u32); * This function is called when the CPU is coming up, with entry * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP. */ -int random_prepare_cpu(unsigned int cpu) +int __cold random_prepare_cpu(unsigned int cpu) { /* * When the cpu comes back online, immediately invalidate both @@ -790,13 +784,15 @@ static void extract_entropy(void *buf, size_t len) memzero_explicit(&block, sizeof(block)); } -static void credit_init_bits(size_t bits) +#define credit_init_bits(bits) if (!crng_ready()) _credit_init_bits(bits) + +static void __cold _credit_init_bits(size_t bits) { static struct execute_work set_ready; unsigned int new, orig, add; unsigned long flags; - if (crng_ready() || !bits) + if (!bits) return; add = min_t(size_t, bits, POOL_BITS); @@ -1011,7 +1007,7 @@ EXPORT_SYMBOL_GPL(add_hwgenerator_randomness); * Handle random seed passed by bootloader, and credit it if * CONFIG_RANDOM_TRUST_BOOTLOADER is set. */ -void add_bootloader_randomness(const void *buf, size_t len) +void __cold add_bootloader_randomness(const void *buf, size_t len) { mix_pool_bytes(buf, len); if (trust_bootloader) @@ -1027,7 +1023,7 @@ static BLOCKING_NOTIFIER_HEAD(vmfork_chain); * don't credit it, but we do immediately force a reseed after so * that it's used by the crng posthaste. 
*/ -void add_vmfork_randomness(const void *unique_vm_id, size_t len) +void __cold add_vmfork_randomness(const void *unique_vm_id, size_t len) { add_device_randomness(unique_vm_id, len); if (crng_ready()) { @@ -1040,13 +1036,13 @@ void add_vmfork_randomness(const void *unique_vm_id, size_t len) EXPORT_SYMBOL_GPL(add_vmfork_randomness); #endif -int register_random_vmfork_notifier(struct notifier_block *nb) +int __cold register_random_vmfork_notifier(struct notifier_block *nb) { return blocking_notifier_chain_register(&vmfork_chain, nb); } EXPORT_SYMBOL_GPL(register_random_vmfork_notifier); -int unregister_random_vmfork_notifier(struct notifier_block *nb) +int __cold unregister_random_vmfork_notifier(struct notifier_block *nb) { return blocking_notifier_chain_unregister(&vmfork_chain, nb); } @@ -1091,7 +1087,7 @@ static void fast_mix(unsigned long s[4], unsigned long v1, unsigned long v2) * This function is called when the CPU has just come online, with * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE. */ -int random_online_cpu(unsigned int cpu) +int __cold random_online_cpu(unsigned int cpu) { /* * During CPU shutdown and before CPU onlining, add_interrupt_ @@ -1246,7 +1242,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned int nu if (in_hardirq()) this_cpu_ptr(&irq_randomness)->count += max(1u, bits * 64) - 1; else - credit_init_bits(bits); + _credit_init_bits(bits); } void add_input_randomness(unsigned int type, unsigned int code, unsigned int value) @@ -1274,7 +1270,7 @@ void add_disk_randomness(struct gendisk *disk) } EXPORT_SYMBOL_GPL(add_disk_randomness); -void rand_initialize_disk(struct gendisk *disk) +void __cold rand_initialize_disk(struct gendisk *disk) { struct timer_rand_state *state; @@ -1309,7 +1305,7 @@ struct entropy_timer_state { * * So the re-arming always happens in the entropy loop itself. */ -static void entropy_timer(struct timer_list *timer) +static void __cold entropy_timer(struct timer_list *timer) { struct entropy_timer_state *state = container_of(timer, struct entropy_timer_state, timer); @@ -1323,7 +1319,7 @@ static void entropy_timer(struct timer_list *timer) * If we have an actual cycle counter, see if we can * generate enough entropy with timing noise */ -static void try_to_generate_entropy(void) +static void __cold try_to_generate_entropy(void) { enum { NUM_TRIAL_SAMPLES = 8192, MAX_SAMPLES_PER_BIT = 32 }; struct entropy_timer_state stack; -- cgit v1.2.3 From 248561ad25a8ba4ecbc7df42f9a5a82fd5fbb4f6 Mon Sep 17 00:00:00 2001 From: Jason A. Donenfeld Date: Sat, 14 May 2022 13:09:17 +0200 Subject: random: remove get_random_bytes_arch() and add rng_has_arch_random() The RNG incorporates RDRAND into its state at boot and every time it reseeds, so there's no reason for callers to use it directly. The hashing that the RNG does on it is preferable to using the bytes raw. The only current use case of get_random_bytes_arch() is vsprintf's siphash key for pointer hashing, which uses it to initialize the pointer secret earlier than usual if RDRAND is available. In order to replace this narrow use case, just expose whether RDRAND is mixed into the RNG, with a new function called rng_has_arch_random(). With that taken care of, there are no users of get_random_bytes_arch() left, so it can be removed. Later, if trust_cpu gets turned on by default (as most distros are doing), this one use of rng_has_arch_random() can probably go away as well. 
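For illustration only, the calling pattern this enables looks roughly like the sketch below; it is not code from this patch, and early_secret/init_early_secret() are hypothetical names standing in for a real early consumer:

	#include <linux/random.h>
	#include <linux/siphash.h>

	/*
	 * Hypothetical early consumer: rather than pulling raw words out of
	 * RDRAND with get_random_bytes_arch(), it checks whether arch
	 * randomness was mixed in and then takes hashed RNG output, which
	 * is the preferable construction described above.
	 */
	static siphash_key_t early_secret;

	static int init_early_secret(void)
	{
		if (rng_is_initialized() || rng_has_arch_random()) {
			get_random_bytes(&early_secret, sizeof(early_secret));
			return 0;
		}
		return -EAGAIN;	/* try again once the RNG has more to offer */
	}
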
Cc: Steven Rostedt Cc: Sergey Senozhatsky Acked-by: Petr Mladek # for vsprintf.c Signed-off-by: Jason A. Donenfeld --- drivers/char/random.c | 49 ++++++++++++++++--------------------------------- include/linux/random.h | 2 +- lib/vsprintf.c | 7 +++---- 3 files changed, 20 insertions(+), 38 deletions(-) (limited to 'drivers') diff --git a/drivers/char/random.c b/drivers/char/random.c index 7ec700683e42..6b8c89378954 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -433,12 +433,9 @@ static void _get_random_bytes(void *buf, size_t len) /* * This function is the exported kernel interface. It returns some * number of good random numbers, suitable for key generation, seeding - * TCP sequence numbers, etc. It does not rely on the hardware random - * number generator. For random bytes direct from the hardware RNG - * (when available), use get_random_bytes_arch(). In order to ensure - * that the randomness provided by this function is okay, the function - * wait_for_random_bytes() should be called and return 0 at least once - * at any point prior. + * TCP sequence numbers, etc. In order to ensure that the randomness + * provided by this function is okay, the function wait_for_random_bytes() + * should be called and return 0 at least once at any point prior. */ void get_random_bytes(void *buf, size_t len) { @@ -655,33 +652,6 @@ unsigned long randomize_page(unsigned long start, unsigned long range) return start + (get_random_long() % range << PAGE_SHIFT); } -/* - * This function will use the architecture-specific hardware random - * number generator if it is available. It is not recommended for - * use. Use get_random_bytes() instead. It returns the number of - * bytes filled in. - */ -size_t __must_check get_random_bytes_arch(void *buf, size_t len) -{ - size_t left = len; - u8 *p = buf; - - while (left) { - unsigned long v; - size_t block_len = min_t(size_t, left, sizeof(unsigned long)); - - if (!arch_get_random_long(&v)) - break; - - memcpy(p, &v, block_len); - p += block_len; - left -= block_len; - } - - return len - left; -} -EXPORT_SYMBOL(get_random_bytes_arch); - /********************************************************************** * @@ -879,6 +849,7 @@ static void __cold _credit_init_bits(size_t bits) * **********************************************************************/ +static bool used_arch_random; static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU); static bool trust_bootloader __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER); static int __init parse_trust_cpu(char *arg) @@ -956,6 +927,7 @@ int __init random_init(const char *command_line) crng_reseed(); else if (trust_cpu) credit_init_bits(arch_bytes * 8); + used_arch_random = arch_bytes * 8 >= POOL_READY_BITS; WARN_ON(register_pm_notifier(&pm_notifier)); @@ -964,6 +936,17 @@ int __init random_init(const char *command_line) return 0; } +/* + * Returns whether arch randomness has been mixed into the initial + * state of the RNG, regardless of whether or not that randomness + * was credited. Knowing this is only good for a very limited set + * of uses, such as early init printk pointer obfuscation. + */ +bool rng_has_arch_random(void) +{ + return used_arch_random; +} + /* * Add device- or boot-specific data to the input pool to help * initialize it.
diff --git a/include/linux/random.h b/include/linux/random.h index fc82f1dc36f1..6af130c6edb9 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -38,7 +38,6 @@ static inline int unregister_random_vmfork_notifier(struct notifier_block *nb) { #endif void get_random_bytes(void *buf, size_t len); -size_t __must_check get_random_bytes_arch(void *buf, size_t len); u32 get_random_u32(void); u64 get_random_u64(void); static inline unsigned int get_random_int(void) @@ -77,6 +76,7 @@ unsigned long randomize_page(unsigned long start, unsigned long range); int __init random_init(const char *command_line); bool rng_is_initialized(void); +bool rng_has_arch_random(void); int wait_for_random_bytes(void); int register_random_ready_notifier(struct notifier_block *nb); int unregister_random_ready_notifier(struct notifier_block *nb); diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 40d26a07a133..20e9887faaaa 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -776,12 +776,11 @@ static struct notifier_block random_ready = { static int __init initialize_ptr_random(void) { - int key_size = sizeof(ptr_key); int ret; - /* Use hw RNG if available. */ - if (get_random_bytes_arch(&ptr_key, key_size) == key_size) { - static_branch_disable(&not_filled_random_ptr_key); + /* Don't bother waiting for RNG to be ready if RDRAND is mixed in already. */ + if (rng_has_arch_random()) { + enable_ptr_key_workfn(&enable_ptr_key_work); return 0; } -- cgit v1.2.3 From 6701de6c51c172b5de5633374479503c81fefc0b Mon Sep 17 00:00:00 2001 From: Jason A. Donenfeld Date: Sun, 15 May 2022 15:06:18 +0200 Subject: random: remove mostly unused async readiness notifier The register_random_ready_notifier() notifier is somewhat complicated, and was already recently rewritten to use notifier blocks. It is only used now by one consumer in the kernel, vsprintf.c, for which the async mechanism is really overly complex for what it actually needs. This commit removes register_random_ready_notifier() and unregister_random_ready_notifier(), because it just adds complication with little utility, and changes vsprintf.c to just check on `!rng_is_initialized() && !rng_has_arch_random()`, which will eventually be true. Performance-wise, that code was already using a static branch, so there's basically no overhead at all to this change. Cc: Steven Rostedt Cc: Sergey Senozhatsky Acked-by: Petr Mladek # for vsprintf.c Reviewed-by: Petr Mladek Signed-off-by: Jason A. Donenfeld --- drivers/char/random.c | 48 ------------------------------------ include/linux/random.h | 2 -- lib/vsprintf.c | 66 +++++++++++++++++--------------------------------- 3 files changed, 22 insertions(+), 94 deletions(-) (limited to 'drivers') diff --git a/drivers/char/random.c b/drivers/char/random.c index 6b8c89378954..16b39d2dead7 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -84,8 +84,6 @@ static DEFINE_STATIC_KEY_FALSE(crng_is_ready); /* Various types of waiters for crng_init->CRNG_READY transition. */ static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait); static struct fasync_struct *fasync; -static DEFINE_SPINLOCK(random_ready_chain_lock); -static RAW_NOTIFIER_HEAD(random_ready_chain); /* Control how we warn userspace. */ static struct ratelimit_state urandom_warning = @@ -142,51 +140,6 @@ int wait_for_random_bytes(void) } EXPORT_SYMBOL(wait_for_random_bytes); -/* - * Add a callback function that will be invoked when the input - * pool is initialised.
- * - * returns: 0 if callback is successfully added - * -EALREADY if pool is already initialised (callback not called) - */ -int __cold register_random_ready_notifier(struct notifier_block *nb) -{ - unsigned long flags; - int ret = -EALREADY; - - if (crng_ready()) - return ret; - - spin_lock_irqsave(&random_ready_chain_lock, flags); - if (!crng_ready()) - ret = raw_notifier_chain_register(&random_ready_chain, nb); - spin_unlock_irqrestore(&random_ready_chain_lock, flags); - return ret; -} - -/* - * Delete a previously registered readiness callback function. - */ -int __cold unregister_random_ready_notifier(struct notifier_block *nb) -{ - unsigned long flags; - int ret; - - spin_lock_irqsave(&random_ready_chain_lock, flags); - ret = raw_notifier_chain_unregister(&random_ready_chain, nb); - spin_unlock_irqrestore(&random_ready_chain_lock, flags); - return ret; -} - -static void __cold process_random_ready_list(void) -{ - unsigned long flags; - - spin_lock_irqsave(&random_ready_chain_lock, flags); - raw_notifier_call_chain(&random_ready_chain, 0, NULL); - spin_unlock_irqrestore(&random_ready_chain_lock, flags); -} - #define warn_unseeded_randomness() \ if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \ printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \ @@ -775,7 +728,6 @@ static void __cold _credit_init_bits(size_t bits) if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) { crng_reseed(); /* Sets crng_init to CRNG_READY under base_crng.lock. */ execute_in_process_context(crng_set_ready, &set_ready); - process_random_ready_list(); wake_up_interruptible(&crng_init_wait); kill_fasync(&fasync, SIGIO, POLL_IN); pr_notice("crng init done\n"); diff --git a/include/linux/random.h b/include/linux/random.h index 6af130c6edb9..d2360b2825b6 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -78,8 +78,6 @@ int __init random_init(const char *command_line); bool rng_is_initialized(void); bool rng_has_arch_random(void); int wait_for_random_bytes(void); -int register_random_ready_notifier(struct notifier_block *nb); -int unregister_random_ready_notifier(struct notifier_block *nb); /* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes). * Returns the result of the call to wait_for_random_bytes. */ diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 20e9887faaaa..fb77f7bfd126 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -750,60 +750,38 @@ static int __init debug_boot_weak_hash_enable(char *str) } early_param("debug_boot_weak_hash", debug_boot_weak_hash_enable); -static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key); -static siphash_key_t ptr_key __read_mostly; +static DEFINE_STATIC_KEY_FALSE(filled_random_ptr_key); static void enable_ptr_key_workfn(struct work_struct *work) { - get_random_bytes(&ptr_key, sizeof(ptr_key)); - /* Needs to run from preemptible context */ - static_branch_disable(&not_filled_random_ptr_key); + static_branch_enable(&filled_random_ptr_key); } -static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn); - -static int fill_random_ptr_key(struct notifier_block *nb, - unsigned long action, void *data) -{ - /* This may be in an interrupt handler. */ - queue_work(system_unbound_wq, &enable_ptr_key_work); - return 0; -} - -static struct notifier_block random_ready = { - .notifier_call = fill_random_ptr_key -}; - -static int __init initialize_ptr_random(void) -{ - int ret; - - /* Don't bother waiting for RNG to be ready if RDRAND is mixed in already.
*/ - if (rng_has_arch_random()) { - enable_ptr_key_workfn(&enable_ptr_key_work); - return 0; - } - - ret = register_random_ready_notifier(&random_ready); - if (!ret) { - return 0; - } else if (ret == -EALREADY) { - /* This is in preemptible context */ - enable_ptr_key_workfn(&enable_ptr_key_work); - return 0; - } - - return ret; -} -early_initcall(initialize_ptr_random); - /* Maps a pointer to a 32 bit unique identifier. */ static inline int __ptr_to_hashval(const void *ptr, unsigned long *hashval_out) { + static siphash_key_t ptr_key __read_mostly; unsigned long hashval; - if (static_branch_unlikely(&not_filled_random_ptr_key)) - return -EAGAIN; + if (!static_branch_likely(&filled_random_ptr_key)) { + static bool filled = false; + static DEFINE_SPINLOCK(filling); + static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn); + unsigned long flags; + + if (!system_unbound_wq || + (!rng_is_initialized() && !rng_has_arch_random()) || + !spin_trylock_irqsave(&filling, flags)) + return -EAGAIN; + + if (!filled) { + get_random_bytes(&ptr_key, sizeof(ptr_key)); + queue_work(system_unbound_wq, &enable_ptr_key_work); + filled = true; + } + spin_unlock_irqrestore(&filling, flags); + } + #ifdef CONFIG_64BIT hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key); -- cgit v1.2.3 From 5ad7dd882e45d7fe432c32e896e2aaa0b21746ea Mon Sep 17 00:00:00 2001 From: Jason A. Donenfeld Date: Sat, 14 May 2022 13:59:30 +0200 Subject: random: move randomize_page() into mm where it belongs randomize_page is an mm function. It is documented like one. It contains the history of one. It has the naming convention of one. It looks just like another very similar function in mm, randomize_stack_top(). And it has always been maintained and updated by mm people. There is no need for it to be in random.c. In the "which shape does not look like the other ones" test, pointing to randomize_page() is correct. So move randomize_page() into mm/util.c, right next to the similar randomize_stack_top() function. This commit contains no actual code changes. Cc: Andrew Morton Signed-off-by: Jason A. Donenfeld --- drivers/char/random.c | 32 -------------------------------- include/linux/mm.h | 1 + include/linux/random.h | 2 -- mm/util.c | 32 ++++++++++++++++++++++++++++++++ 4 files changed, 33 insertions(+), 34 deletions(-) (limited to 'drivers') diff --git a/drivers/char/random.c b/drivers/char/random.c index 16b39d2dead7..36183b0cf923 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -573,38 +573,6 @@ int __cold random_prepare_cpu(unsigned int cpu) } #endif -/** - * randomize_page - Generate a random, page aligned address - * @start: The smallest acceptable address the caller will take. - * @range: The size of the area, starting at @start, within which the - * random address must fall. - * - * If @start + @range would overflow, @range is capped. - * - * NOTE: Historical use of randomize_range, which this replaces, presumed that - * @start was already page aligned. We now align it regardless. - * - * Return: A page aligned address within [start, start + range). On error, - * @start is returned.
- */ -unsigned long randomize_page(unsigned long start, unsigned long range) -{ - if (!PAGE_ALIGNED(start)) { - range -= PAGE_ALIGN(start) - start; - start = PAGE_ALIGN(start); - } - - if (start > ULONG_MAX - range) - range = ULONG_MAX - start; - - range >>= PAGE_SHIFT; - - if (range == 0) - return start; - - return start + (get_random_long() % range << PAGE_SHIFT); -} - /********************************************************************** * diff --git a/include/linux/mm.h b/include/linux/mm.h index 9f44254af8ce..b0183450e484 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2677,6 +2677,7 @@ extern int install_special_mapping(struct mm_struct *mm, unsigned long flags, struct page **pages); unsigned long randomize_stack_top(unsigned long stack_top); +unsigned long randomize_page(unsigned long start, unsigned long range); extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); diff --git a/include/linux/random.h b/include/linux/random.h index d2360b2825b6..fae0c84027fd 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -72,8 +72,6 @@ static inline unsigned long get_random_canary(void) return get_random_long() & CANARY_MASK; } -unsigned long randomize_page(unsigned long start, unsigned long range); - int __init random_init(const char *command_line); bool rng_is_initialized(void); bool rng_has_arch_random(void); diff --git a/mm/util.c b/mm/util.c index 3492a9e81aa3..ac63e5ca8b21 100644 --- a/mm/util.c +++ b/mm/util.c @@ -343,6 +343,38 @@ unsigned long randomize_stack_top(unsigned long stack_top) #endif } +/** + * randomize_page - Generate a random, page aligned address + * @start: The smallest acceptable address the caller will take. + * @range: The size of the area, starting at @start, within which the + * random address must fall. + * + * If @start + @range would overflow, @range is capped. + * + * NOTE: Historical use of randomize_range, which this replaces, presumed that + * @start was already page aligned. We now align it regardless. + * + * Return: A page aligned address within [start, start + range). On error, + * @start is returned. + */ +unsigned long randomize_page(unsigned long start, unsigned long range) +{ + if (!PAGE_ALIGNED(start)) { + range -= PAGE_ALIGN(start) - start; + start = PAGE_ALIGN(start); + } + + if (start > ULONG_MAX - range) + range = ULONG_MAX - start; + + range >>= PAGE_SHIFT; + + if (range == 0) + return start; + + return start + (get_random_long() % range << PAGE_SHIFT); +} + #ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT unsigned long arch_randomize_brk(struct mm_struct *mm) { -- cgit v1.2.3 From 3092adcef3ffd2ef59634998297ca8358461ebce Mon Sep 17 00:00:00 2001 From: Jason A. Donenfeld Date: Sun, 15 May 2022 00:22:05 +0200 Subject: random: unify batched entropy implementations There are currently two separate batched entropy implementations, for u32 and u64, with nearly identical code, with the goal of avoiding unaligned memory accesses and letting the buffers be used more efficiently. Having to maintain these two functions independently is a bit of a hassle though, considering that they always need to be kept in sync. This commit factors them out into a type-generic macro, so that the expansion produces the same code as before, such that diffing the assembly shows no differences. This will also make it easier in the future to add u16 and u8 batches. 
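Stripped of locking and the !crng_ready() fallback, the per-type logic that the macro stamps out reduces to the following standalone model; this is an illustrative sketch only, with refill() standing in for _get_random_bytes() and base_crng_generation for base_crng.generation:

	#include <stddef.h>
	#include <stdint.h>
	#include <limits.h>

	/* Stand-ins for kernel primitives (assumptions, not the real API). */
	extern void refill(void *buf, size_t len);   /* ~ _get_random_bytes() */
	extern unsigned long base_crng_generation;   /* bumped on every reseed */

	enum { CHACHA_BLOCK_SIZE = 64 };

	/* 1.5x a ChaCha block: one detached block plus the fast-key-erasure tail. */
	static uint64_t batch[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(uint64_t))];
	static unsigned int position = UINT_MAX;     /* UINT_MAX forces the first refill */
	static unsigned long generation;

	uint64_t get_random_u64_model(void)
	{
		unsigned long next_gen = base_crng_generation;
		uint64_t ret;

		/* Refill when the batch is drained, or a reseed invalidated it. */
		if (position >= sizeof(batch) / sizeof(batch[0]) || next_gen != generation) {
			refill(batch, sizeof(batch));
			position = 0;
			generation = next_gen;
		}
		ret = batch[position];
		batch[position++] = 0;               /* erase each word as it is handed out */
		return ret;
	}
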
This was initially tested using an always_inline function and letting gcc constant fold the type size in, but the code gen was less efficient, and in general it was more verbose and harder to follow. So this patch goes with the boring macro solution, similar to what's already done for the _wait functions in random.h. Cc: Dominik Brodowski Signed-off-by: Jason A. Donenfeld --- drivers/char/random.c | 147 +++++++++++++++++++------------------------------- 1 file changed, 55 insertions(+), 92 deletions(-) (limited to 'drivers') diff --git a/drivers/char/random.c b/drivers/char/random.c index 36183b0cf923..0958fa91a964 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -460,99 +460,62 @@ out_zero_chacha: * provided by this function is okay, the function wait_for_random_bytes() * should be called and return 0 at least once at any point prior. */ -struct batched_entropy { - union { - /* - * We make this 1.5x a ChaCha block, so that we get the - * remaining 32 bytes from fast key erasure, plus one full - * block from the detached ChaCha state. We can increase - * the size of this later if needed so long as we keep the - * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE. - */ - u64 entropy_u64[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u64))]; - u32 entropy_u32[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u32))]; - }; - local_lock_t lock; - unsigned long generation; - unsigned int position; -}; - - -static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = { - .lock = INIT_LOCAL_LOCK(batched_entropy_u64.lock), - .position = UINT_MAX -}; - -u64 get_random_u64(void) -{ - u64 ret; - unsigned long flags; - struct batched_entropy *batch; - unsigned long next_gen; - - warn_unseeded_randomness(); - - if (!crng_ready()) { - _get_random_bytes(&ret, sizeof(ret)); - return ret; - } - - local_lock_irqsave(&batched_entropy_u64.lock, flags); - batch = raw_cpu_ptr(&batched_entropy_u64); - - next_gen = READ_ONCE(base_crng.generation); - if (batch->position >= ARRAY_SIZE(batch->entropy_u64) || - next_gen != batch->generation) { - _get_random_bytes(batch->entropy_u64, sizeof(batch->entropy_u64)); - batch->position = 0; - batch->generation = next_gen; - } - ret = batch->entropy_u64[batch->position]; - batch->entropy_u64[batch->position] = 0; - ++batch->position; - local_unlock_irqrestore(&batched_entropy_u64.lock, flags); - return ret; -} -EXPORT_SYMBOL(get_random_u64); - -static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = { - .lock = INIT_LOCAL_LOCK(batched_entropy_u32.lock), - .position = UINT_MAX -}; - -u32 get_random_u32(void) -{ - u32 ret; - unsigned long flags; - struct batched_entropy *batch; - unsigned long next_gen; - - warn_unseeded_randomness(); - - if (!crng_ready()) { - _get_random_bytes(&ret, sizeof(ret)); - return ret; - } - - local_lock_irqsave(&batched_entropy_u32.lock, flags); - batch = raw_cpu_ptr(&batched_entropy_u32); - - next_gen = READ_ONCE(base_crng.generation); - if (batch->position >= ARRAY_SIZE(batch->entropy_u32) || - next_gen != batch->generation) { - _get_random_bytes(batch->entropy_u32, sizeof(batch->entropy_u32)); - batch->position = 0; - batch->generation = next_gen; - } - - ret = batch->entropy_u32[batch->position]; - batch->entropy_u32[batch->position] = 0; - ++batch->position; - local_unlock_irqrestore(&batched_entropy_u32.lock, flags); - return ret; -} -EXPORT_SYMBOL(get_random_u32); +#define DEFINE_BATCHED_ENTROPY(type) \ +struct batch_ ##type { \ + /* \ + * We make this 1.5x a ChaCha block, so that we get the \ + * remaining 32 bytes from fast 
key erasure, plus one full \ + * block from the detached ChaCha state. We can increase \ + * the size of this later if needed so long as we keep the \ + * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE. \ + */ \ + type entropy[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(type))]; \ + local_lock_t lock; \ + unsigned long generation; \ + unsigned int position; \ +}; \ + \ +static DEFINE_PER_CPU(struct batch_ ##type, batched_entropy_ ##type) = { \ + .lock = INIT_LOCAL_LOCK(batched_entropy_ ##type.lock), \ + .position = UINT_MAX \ +}; \ + \ +type get_random_ ##type(void) \ +{ \ + type ret; \ + unsigned long flags; \ + struct batch_ ##type *batch; \ + unsigned long next_gen; \ + \ + warn_unseeded_randomness(); \ + \ + if (!crng_ready()) { \ + _get_random_bytes(&ret, sizeof(ret)); \ + return ret; \ + } \ + \ + local_lock_irqsave(&batched_entropy_ ##type.lock, flags); \ + batch = raw_cpu_ptr(&batched_entropy_##type); \ + \ + next_gen = READ_ONCE(base_crng.generation); \ + if (batch->position >= ARRAY_SIZE(batch->entropy) || \ + next_gen != batch->generation) { \ + _get_random_bytes(batch->entropy, sizeof(batch->entropy)); \ + batch->position = 0; \ + batch->generation = next_gen; \ + } \ + \ + ret = batch->entropy[batch->position]; \ + batch->entropy[batch->position] = 0; \ + ++batch->position; \ + local_unlock_irqrestore(&batched_entropy_ ##type.lock, flags); \ + return ret; \ +} \ +EXPORT_SYMBOL(get_random_ ##type); + +DEFINE_BATCHED_ENTROPY(u64) +DEFINE_BATCHED_ENTROPY(u32) #ifdef CONFIG_SMP /* -- cgit v1.2.3 From 1b388e7765f2eaa137cf5d92b47ef5925ad83ced Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 19 May 2022 17:31:36 -0600 Subject: random: convert to using fops->read_iter() This is a pre-requisite to wiring up splice() again for the random and urandom drivers. It also allows us to remove the INT_MAX check in getrandom(), because import_single_range() applies capping internally. Signed-off-by: Jens Axboe [Jason: rewrote get_random_bytes_user() to simplify and also incorporate additional suggestions from Al.] Cc: Al Viro Signed-off-by: Jason A. Donenfeld --- drivers/char/random.c | 65 +++++++++++++++++++++++---------------------------- 1 file changed, 29 insertions(+), 36 deletions(-) (limited to 'drivers') diff --git a/drivers/char/random.c b/drivers/char/random.c index 0958fa91a964..34b7737a707e 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -397,13 +397,13 @@ void get_random_bytes(void *buf, size_t len) } EXPORT_SYMBOL(get_random_bytes); -static ssize_t get_random_bytes_user(void __user *ubuf, size_t len) +static ssize_t get_random_bytes_user(struct iov_iter *iter) { - size_t block_len, left, ret = 0; u32 chacha_state[CHACHA_STATE_WORDS]; - u8 output[CHACHA_BLOCK_SIZE]; + u8 block[CHACHA_BLOCK_SIZE]; + size_t ret = 0, copied; - if (!len) + if (unlikely(!iov_iter_count(iter))) return 0; /* @@ -417,30 +417,22 @@ static ssize_t get_random_bytes_user(void __user *ubuf, size_t len) * use chacha_state after, so we can simply return those bytes to * the user directly. 
*/ - if (len <= CHACHA_KEY_SIZE) { - ret = len - copy_to_user(ubuf, &chacha_state[4], len); + if (iov_iter_count(iter) <= CHACHA_KEY_SIZE) { + ret = copy_to_iter(&chacha_state[4], CHACHA_KEY_SIZE, iter); goto out_zero_chacha; } for (;;) { - chacha20_block(chacha_state, output); + chacha20_block(chacha_state, block); if (unlikely(chacha_state[12] == 0)) ++chacha_state[13]; - block_len = min_t(size_t, len, CHACHA_BLOCK_SIZE); - left = copy_to_user(ubuf, output, block_len); - if (left) { - ret += block_len - left; - break; - } - - ubuf += block_len; - ret += block_len; - len -= block_len; - if (!len) + copied = copy_to_iter(block, sizeof(block), iter); + ret += copied; + if (!iov_iter_count(iter) || copied != sizeof(block)) break; - BUILD_BUG_ON(PAGE_SIZE % CHACHA_BLOCK_SIZE != 0); + BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0); if (ret % PAGE_SIZE == 0) { if (signal_pending(current)) break; @@ -448,7 +440,7 @@ static ssize_t get_random_bytes_user(void __user *ubuf, size_t len) } } - memzero_explicit(output, sizeof(output)); + memzero_explicit(block, sizeof(block)); out_zero_chacha: memzero_explicit(chacha_state, sizeof(chacha_state)); return ret ? ret : -EFAULT; @@ -1248,6 +1240,10 @@ static void __cold try_to_generate_entropy(void) SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags) { + struct iov_iter iter; + struct iovec iov; + int ret; + if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE)) return -EINVAL; @@ -1258,19 +1254,18 @@ SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM)) return -EINVAL; - if (len > INT_MAX) - len = INT_MAX; - if (!crng_ready() && !(flags & GRND_INSECURE)) { - int ret; - if (flags & GRND_NONBLOCK) return -EAGAIN; ret = wait_for_random_bytes(); if (unlikely(ret)) return ret; } - return get_random_bytes_user(ubuf, len); + + ret = import_single_range(READ, ubuf, len, &iov, &iter); + if (unlikely(ret)) + return ret; + return get_random_bytes_user(&iter); } static __poll_t random_poll(struct file *file, poll_table *wait) @@ -1314,8 +1309,7 @@ static ssize_t random_write(struct file *file, const char __user *ubuf, return (ssize_t)len; } -static ssize_t urandom_read(struct file *file, char __user *ubuf, - size_t len, loff_t *ppos) +static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter) { static int maxwarn = 10; @@ -1331,23 +1325,22 @@ static ssize_t urandom_read(struct file *file, char __user *ubuf, ++urandom_warning.missed; else if (ratelimit_disable || __ratelimit(&urandom_warning)) { --maxwarn; - pr_notice("%s: uninitialized urandom read (%zd bytes read)\n", - current->comm, len); + pr_notice("%s: uninitialized urandom read (%zu bytes read)\n", + current->comm, iov_iter_count(iter)); } } - return get_random_bytes_user(ubuf, len); + return get_random_bytes_user(iter); } -static ssize_t random_read(struct file *file, char __user *ubuf, - size_t len, loff_t *ppos) +static ssize_t random_read_iter(struct kiocb *kiocb, struct iov_iter *iter) { int ret; ret = wait_for_random_bytes(); if (ret != 0) return ret; - return get_random_bytes_user(ubuf, len); + return get_random_bytes_user(iter); } static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) @@ -1409,7 +1402,7 @@ static int random_fasync(int fd, struct file *filp, int on) } const struct file_operations random_fops = { - .read = random_read, + .read_iter = random_read_iter, .write = random_write, .poll = random_poll, .unlocked_ioctl = 
random_ioctl, @@ -1419,7 +1412,7 @@ const struct file_operations random_fops = { }; const struct file_operations urandom_fops = { - .read = urandom_read, + .read_iter = urandom_read_iter, .write = random_write, .unlocked_ioctl = random_ioctl, .compat_ioctl = compat_ptr_ioctl, -- cgit v1.2.3 From 22b0a222af4df8ee9bb8e07013ab44da9511b047 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 19 May 2022 17:43:15 -0600 Subject: random: convert to using fops->write_iter() Now that the read side has been converted to fix a regression with splice, convert the write side as well to have some symmetry in the interface used (and help deprecate ->write()). Signed-off-by: Jens Axboe [Jason: cleaned up random_ioctl a bit, require full writes in RNDADDENTROPY since it's crediting entropy, simplify control flow of write_pool(), and incorporate suggestions from Al.] Cc: Al Viro Signed-off-by: Jason A. Donenfeld --- drivers/char/random.c | 67 +++++++++++++++++++++++++++------------------------ 1 file changed, 35 insertions(+), 32 deletions(-) (limited to 'drivers') diff --git a/drivers/char/random.c b/drivers/char/random.c index 34b7737a707e..599a95214531 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1274,39 +1274,31 @@ static __poll_t random_poll(struct file *file, poll_table *wait) return crng_ready() ? EPOLLIN | EPOLLRDNORM : EPOLLOUT | EPOLLWRNORM; } -static int write_pool(const char __user *ubuf, size_t len) +static ssize_t write_pool(struct iov_iter *iter) { - size_t block_len; - int ret = 0; u8 block[BLAKE2S_BLOCK_SIZE]; + ssize_t ret = 0; + size_t copied; - while (len) { - block_len = min(len, sizeof(block)); - if (copy_from_user(block, ubuf, block_len)) { - ret = -EFAULT; - goto out; - } - len -= block_len; - ubuf += block_len; - mix_pool_bytes(block, block_len); + if (unlikely(!iov_iter_count(iter))) + return 0; + + for (;;) { + copied = copy_from_iter(block, sizeof(block), iter); + ret += copied; + mix_pool_bytes(block, copied); + if (!iov_iter_count(iter) || copied != sizeof(block)) + break; cond_resched(); } -out: memzero_explicit(block, sizeof(block)); - return ret; + return ret ? ret : -EFAULT; } -static ssize_t random_write(struct file *file, const char __user *ubuf, - size_t len, loff_t *ppos) +static ssize_t random_write_iter(struct kiocb *kiocb, struct iov_iter *iter) { - int ret; - - ret = write_pool(ubuf, len); - if (ret) - return ret; - - return (ssize_t)len; + return write_pool(iter); } static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter) @@ -1345,9 +1337,8 @@ static ssize_t random_read_iter(struct kiocb *kiocb, struct iov_iter *iter) static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) { - int size, ent_count; int __user *p = (int __user *)arg; - int retval; + int ent_count; switch (cmd) { case RNDGETENTCNT: @@ -1364,20 +1355,32 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) return -EINVAL; credit_init_bits(ent_count); return 0; - case RNDADDENTROPY: + case RNDADDENTROPY: { + struct iov_iter iter; + struct iovec iov; + ssize_t ret; + int len; + if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (get_user(ent_count, p++)) return -EFAULT; if (ent_count < 0) return -EINVAL; - if (get_user(size, p++)) + if (get_user(len, p++)) + return -EFAULT; + ret = import_single_range(WRITE, p, len, &iov, &iter); + if (unlikely(ret)) + return ret; + ret = write_pool(&iter); + if (unlikely(ret < 0)) + return ret; + /* Since we're crediting, enforce that it was all written into the pool. 
*/ + if (unlikely(ret != len)) return -EFAULT; - retval = write_pool((const char __user *)p, size); - if (retval < 0) - return retval; credit_init_bits(ent_count); return 0; + } case RNDZAPENTCNT: case RNDCLEARPOOL: /* No longer has any effect. */ @@ -1403,7 +1406,7 @@ static int random_fasync(int fd, struct file *filp, int on) const struct file_operations random_fops = { .read_iter = random_read_iter, - .write = random_write, + .write_iter = random_write_iter, .poll = random_poll, .unlocked_ioctl = random_ioctl, .compat_ioctl = compat_ptr_ioctl, @@ -1413,7 +1416,7 @@ const struct file_operations random_fops = { const struct file_operations urandom_fops = { .read_iter = urandom_read_iter, - .write = random_write, + .write_iter = random_write_iter, .unlocked_ioctl = random_ioctl, .compat_ioctl = compat_ptr_ioctl, .fasync = random_fasync, -- cgit v1.2.3 From 79025e727a846be6fd215ae9cdb654368ac3f9a6 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 19 May 2022 17:31:37 -0600 Subject: random: wire up fops->splice_{read,write}_iter() Now that random/urandom is using {read,write}_iter, we can wire it up to using the generic splice handlers. Fixes: 36e2c7421f02 ("fs: don't allow splice read/write without explicit ops") Signed-off-by: Jens Axboe [Jason: added the splice_write path. Note that sendfile() and such still does not work for read, though it does for write, because of a file type restriction in splice_direct_to_actor(), which I'll address separately.] Cc: Al Viro Signed-off-by: Jason A. Donenfeld --- drivers/char/random.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/char/random.c b/drivers/char/random.c index 599a95214531..dc2f2c24c6ec 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1412,6 +1412,8 @@ const struct file_operations random_fops = { .compat_ioctl = compat_ptr_ioctl, .fasync = random_fasync, .llseek = noop_llseek, + .splice_read = generic_file_splice_read, + .splice_write = iter_file_splice_write, }; const struct file_operations urandom_fops = { @@ -1421,6 +1423,8 @@ const struct file_operations urandom_fops = { .compat_ioctl = compat_ptr_ioctl, .fasync = random_fasync, .llseek = noop_llseek, + .splice_read = generic_file_splice_read, + .splice_write = iter_file_splice_write, }; -- cgit v1.2.3 From 1ce6c8d68f8ac587f54d0a271ac594d3d51f3efb Mon Sep 17 00:00:00 2001 From: Jason A. Donenfeld Date: Sun, 22 May 2022 22:25:41 +0200 Subject: random: check for signals after page of pool writes get_random_bytes_user() checks for signals after producing a PAGE_SIZE worth of output, just like /dev/zero does. write_pool() is doing basically the same work (actually, slightly more expensive), and so should stop to check for signals in the same way. Let's also name it write_pool_user() to match get_random_bytes_user(), so this won't be misused in the future. Before this patch, massive writes to /dev/urandom would tie up the process for an extremely long time and make it unterminatable. After, it can be successfully interrupted. 
The following test program can be used to see this works as intended:

	#include <unistd.h>
	#include <signal.h>
	#include <stdio.h>
	#include <fcntl.h>

	static unsigned char x[~0U];

	static void handle(int) { }

	int main(int argc, char *argv[])
	{
		pid_t pid = getpid(), child;
		int fd;

		signal(SIGUSR1, handle);
		if (!(child = fork())) {
			for (;;)
				kill(pid, SIGUSR1);
		}
		fd = open("/dev/urandom", O_WRONLY);
		pause();
		printf("interrupted after writing %zd bytes\n", write(fd, x, sizeof(x)));
		close(fd);
		kill(child, SIGTERM);
		return 0;
	}

Result before: "interrupted after writing 2147479552 bytes" Result after: "interrupted after writing 4096 bytes" Cc: Dominik Brodowski Signed-off-by: Jason A. Donenfeld --- drivers/char/random.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/char/random.c b/drivers/char/random.c index dc2f2c24c6ec..b691b9d59503 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1274,7 +1274,7 @@ static __poll_t random_poll(struct file *file, poll_table *wait) return crng_ready() ? EPOLLIN | EPOLLRDNORM : EPOLLOUT | EPOLLWRNORM; } -static ssize_t write_pool(struct iov_iter *iter) +static ssize_t write_pool_user(struct iov_iter *iter) { u8 block[BLAKE2S_BLOCK_SIZE]; ssize_t ret = 0; @@ -1289,7 +1289,13 @@ static ssize_t write_pool(struct iov_iter *iter) mix_pool_bytes(block, copied); if (!iov_iter_count(iter) || copied != sizeof(block)) break; - cond_resched(); + + BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0); + if (ret % PAGE_SIZE == 0) { + if (signal_pending(current)) + break; + cond_resched(); + } } memzero_explicit(block, sizeof(block)); @@ -1298,7 +1304,7 @@ static ssize_t write_pool(struct iov_iter *iter) static ssize_t random_write_iter(struct kiocb *kiocb, struct iov_iter *iter) { - return write_pool(iter); + return write_pool_user(iter); } static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter) @@ -1372,7 +1378,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) ret = import_single_range(WRITE, p, len, &iov, &iter); if (unlikely(ret)) return ret; - ret = write_pool(&iter); + ret = write_pool_user(&iter); if (unlikely(ret < 0)) return ret; /* Since we're crediting, enforce that it was all written into the pool. */ -- cgit v1.2.3
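With ->splice_read and ->splice_write wired up in the patches above, splicing directly between /dev/urandom and a pipe works again; on kernels with 36e2c7421f02 but without this series, the splice() call below fails with EINVAL. A minimal demonstration, with error handling elided and the 4096-byte length chosen arbitrarily:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[4096];
		int fds[2], rnd = open("/dev/urandom", O_RDONLY);
		ssize_t n;

		pipe(fds);
		/* Only succeeds if the file provides ->splice_read. */
		n = splice(rnd, NULL, fds[1], NULL, sizeof(buf), 0);
		printf("spliced %zd bytes\n", n);
		n = read(fds[0], buf, sizeof(buf));
		printf("drained %zd bytes from the pipe\n", n);
		close(rnd);
		return 0;
	}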