author	Linus Torvalds	2022-04-07 06:02:55 -1000
committer	Linus Torvalds	2022-04-07 06:02:55 -1000
commit	3638bd90df9930511fa85f9a811d02feee4e0b97 (patch)
tree	9aaf6f642560dddfe33b058878b34e0d8b5f7d69 /drivers
parent	640b5037da8edfaa98d102183ccfe3af5898f94b (diff)
parent	e3c1c4fd9e6d14059ed93ebfe15e1c57793b1a05 (diff)
Merge tag 'random-5.18-rc2-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/crng/random
Pull random number generator fixes from Jason Donenfeld:

- Another fixup to the fast_init/crng_init split, this time in how much entropy is being credited, from Jan Varho.

- As discussed, we now opportunistically call try_to_generate_entropy() in /dev/urandom reads, as a replacement for the reverted commit. I opted to not do the more invasive wait_for_random_bytes() change at least for now, preferring to do something smaller and more obvious for the time being, but maybe that can be revisited as things evolve later.

- Userspace can use FUSE or userfaultfd, or simply move a process to idle priority, in order to make a read from the random device never complete, which breaks forward secrecy; fixed by overwriting sensitive bytes early on in the function.

- Jann Horn noticed that /dev/urandom reads were only checking for pending signals if need_resched() was true, a bug going back to the genesis commit, now fixed by always checking for signal_pending() and calling cond_resched(). This explains various noticeable signal delivery delays I've seen in programs over the years that do long reads from /dev/urandom.

- In order to be more like other devices (e.g. /dev/zero) and to mitigate the impact of fixing the above bug, which has been around forever (users have never really needed to check the return value of read() for medium-sized reads and so perhaps many didn't), we now move signal checking to the bottom part of the loop, and do so every PAGE_SIZE bytes.

* tag 'random-5.18-rc2-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/crng/random:
  random: check for signals every PAGE_SIZE chunk of /dev/[u]random
  random: check for signal_pending() outside of need_resched() check
  random: do not allow user to keep crng key around on stack
  random: opportunistically initialize on /dev/urandom reads
  random: do not split fast init input in add_hwgenerator_randomness()
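Because signal checks now happen at PAGE_SIZE granularity, a large read() from /dev/[u]random can legitimately return a short count when a signal arrives. A minimal userspace sketch of the kind of retry loop callers should use is below; the helper name read_urandom() and its error handling are illustrative and not part of this change:

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

/* Illustrative only: fill the whole buffer, retrying short reads
 * (which can now happen at PAGE_SIZE boundaries when a signal is
 * pending) and reads interrupted before any bytes were copied. */
static int read_urandom(void *buf, size_t len)
{
    unsigned char *p = buf;
    size_t done = 0;
    int fd = open("/dev/urandom", O_RDONLY);

    if (fd < 0)
        return -1;
    while (done < len) {
        ssize_t n = read(fd, p + done, len - done);

        if (n < 0) {
            if (errno == EINTR)
                continue; /* interrupted before any data: retry */
            close(fd);
            return -1;
        }
        done += (size_t)n; /* short read: keep looping */
    }
    close(fd);
    return 0;
}

Small reads (up to 32 bytes) are still satisfied in a single pass before any signal check, so existing callers that pull only a few bytes at a time are unaffected.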
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/char/random.c	74
1 file changed, 39 insertions, 35 deletions
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 1d8242969751..e15063d61460 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -437,11 +437,8 @@ static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
* This shouldn't be set by functions like add_device_randomness(),
* where we can't trust the buffer passed to it is guaranteed to be
* unpredictable (so it might not have any entropy at all).
- *
- * Returns the number of bytes processed from input, which is bounded
- * by CRNG_INIT_CNT_THRESH if account is true.
*/
-static size_t crng_pre_init_inject(const void *input, size_t len, bool account)
+static void crng_pre_init_inject(const void *input, size_t len, bool account)
{
static int crng_init_cnt = 0;
struct blake2s_state hash;
@@ -452,18 +449,15 @@ static size_t crng_pre_init_inject(const void *input, size_t len, bool account)
spin_lock_irqsave(&base_crng.lock, flags);
if (crng_init != 0) {
spin_unlock_irqrestore(&base_crng.lock, flags);
- return 0;
+ return;
}
- if (account)
- len = min_t(size_t, len, CRNG_INIT_CNT_THRESH - crng_init_cnt);
-
blake2s_update(&hash, base_crng.key, sizeof(base_crng.key));
blake2s_update(&hash, input, len);
blake2s_final(&hash, base_crng.key);
if (account) {
- crng_init_cnt += len;
+ crng_init_cnt += min_t(size_t, len, CRNG_INIT_CNT_THRESH - crng_init_cnt);
if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
++base_crng.generation;
crng_init = 1;
@@ -474,8 +468,6 @@ static size_t crng_pre_init_inject(const void *input, size_t len, bool account)
if (crng_init == 1)
pr_notice("fast init done\n");
-
- return len;
}
static void _get_random_bytes(void *buf, size_t nbytes)
@@ -531,7 +523,6 @@ EXPORT_SYMBOL(get_random_bytes);
static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
{
- bool large_request = nbytes > 256;
ssize_t ret = 0;
size_t len;
u32 chacha_state[CHACHA_STATE_WORDS];
@@ -540,22 +531,23 @@ static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
if (!nbytes)
return 0;
- len = min_t(size_t, 32, nbytes);
- crng_make_state(chacha_state, output, len);
-
- if (copy_to_user(buf, output, len))
- return -EFAULT;
- nbytes -= len;
- buf += len;
- ret += len;
-
- while (nbytes) {
- if (large_request && need_resched()) {
- if (signal_pending(current))
- break;
- schedule();
- }
+ /*
+ * Immediately overwrite the ChaCha key at index 4 with random
+ * bytes, in case userspace causes copy_to_user() below to sleep
+ * forever, so that we still retain forward secrecy in that case.
+ */
+ crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
+ /*
+ * However, if we're doing a read of len <= 32, we don't need to
+ * use chacha_state after, so we can simply return those bytes to
+ * the user directly.
+ */
+ if (nbytes <= CHACHA_KEY_SIZE) {
+ ret = copy_to_user(buf, &chacha_state[4], nbytes) ? -EFAULT : nbytes;
+ goto out_zero_chacha;
+ }
+	do {
chacha20_block(chacha_state, output);
if (unlikely(chacha_state[12] == 0))
++chacha_state[13];
@@ -569,10 +561,18 @@ static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
nbytes -= len;
buf += len;
ret += len;
- }
- memzero_explicit(chacha_state, sizeof(chacha_state));
+ BUILD_BUG_ON(PAGE_SIZE % CHACHA_BLOCK_SIZE != 0);
+ if (!(ret % PAGE_SIZE) && nbytes) {
+ if (signal_pending(current))
+ break;
+ cond_resched();
+ }
+	} while (nbytes);
+
memzero_explicit(output, sizeof(output));
+out_zero_chacha:
+ memzero_explicit(chacha_state, sizeof(chacha_state));
return ret;
}
@@ -1141,12 +1141,9 @@ void add_hwgenerator_randomness(const void *buffer, size_t count,
size_t entropy)
{
if (unlikely(crng_init == 0 && entropy < POOL_MIN_BITS)) {
- size_t ret = crng_pre_init_inject(buffer, count, true);
- mix_pool_bytes(buffer, ret);
- count -= ret;
- buffer += ret;
- if (!count || crng_init == 0)
- return;
+ crng_pre_init_inject(buffer, count, true);
+ mix_pool_bytes(buffer, count);
+ return;
}
/*
@@ -1545,6 +1542,13 @@ static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes,
{
static int maxwarn = 10;
+ /*
+ * Opportunistically attempt to initialize the RNG on platforms that
+ * have fast cycle counters, but don't (for now) require it to succeed.
+ */
+ if (!crng_ready())
+ try_to_generate_entropy();
+
if (!crng_ready() && maxwarn > 0) {
maxwarn--;
if (__ratelimit(&urandom_warning))