Diffstat (limited to 'drivers/char')
-rw-r--r--  drivers/char/Kconfig  |  2
-rw-r--r--  drivers/char/random.c | 84
2 files changed, 51 insertions, 35 deletions
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index fea084e0909b..d4665fe9ccd2 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -474,7 +474,7 @@ endmenu
 
 config RANDOM_TRUST_CPU
 	bool "Trust the CPU manufacturer to initialize Linux's CRNG"
-	depends on X86 || S390 || PPC
+	depends on ARCH_RANDOM
 	default n
 	help
 	  Assume that CPU manufacturer (e.g., Intel or AMD for RDSEED or
diff --git a/drivers/char/random.c b/drivers/char/random.c
index c7f9584de2c8..0d10e31fd342 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -781,27 +781,55 @@ static int __init parse_trust_cpu(char *arg)
 }
 early_param("random.trust_cpu", parse_trust_cpu);
 
-static void crng_initialize(struct crng_state *crng)
+static bool crng_init_try_arch(struct crng_state *crng)
 {
 	int		i;
-	int		arch_init = 1;
+	bool		arch_init = true;
 	unsigned long	rv;
 
-	memcpy(&crng->state[0], "expand 32-byte k", 16);
-	if (crng == &primary_crng)
-		_extract_entropy(&input_pool, &crng->state[4],
-				 sizeof(__u32) * 12, 0);
-	else
-		_get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
 	for (i = 4; i < 16; i++) {
 		if (!arch_get_random_seed_long(&rv) &&
 		    !arch_get_random_long(&rv)) {
 			rv = random_get_entropy();
-			arch_init = 0;
+			arch_init = false;
+		}
+		crng->state[i] ^= rv;
+	}
+
+	return arch_init;
+}
+
+static bool __init crng_init_try_arch_early(struct crng_state *crng)
+{
+	int		i;
+	bool		arch_init = true;
+	unsigned long	rv;
+
+	for (i = 4; i < 16; i++) {
+		if (!arch_get_random_seed_long_early(&rv) &&
+		    !arch_get_random_long_early(&rv)) {
+			rv = random_get_entropy();
+			arch_init = false;
 		}
 		crng->state[i] ^= rv;
 	}
-	if (trust_cpu && arch_init && crng == &primary_crng) {
+
+	return arch_init;
+}
+
+static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
+{
+	memcpy(&crng->state[0], "expand 32-byte k", 16);
+	_get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
+	crng_init_try_arch(crng);
+	crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
+}
+
+static void __init crng_initialize_primary(struct crng_state *crng)
+{
+	memcpy(&crng->state[0], "expand 32-byte k", 16);
+	_extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0);
+	if (crng_init_try_arch_early(crng) && trust_cpu) {
 		invalidate_batched_entropy();
 		numa_crng_init();
 		crng_init = 2;
@@ -822,7 +850,7 @@ static void do_numa_crng_init(struct work_struct *work)
 		crng = kmalloc_node(sizeof(struct crng_state),
 				    GFP_KERNEL | __GFP_NOFAIL, i);
 		spin_lock_init(&crng->lock);
-		crng_initialize(crng);
+		crng_initialize_secondary(crng);
 		pool[i] = crng;
 	}
 	mb();
@@ -1142,14 +1170,14 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
 	 * We take into account the first, second and third-order deltas
 	 * in order to make our estimate.
 	 */
-	delta = sample.jiffies - state->last_time;
-	state->last_time = sample.jiffies;
+	delta = sample.jiffies - READ_ONCE(state->last_time);
+	WRITE_ONCE(state->last_time, sample.jiffies);
 
-	delta2 = delta - state->last_delta;
-	state->last_delta = delta;
+	delta2 = delta - READ_ONCE(state->last_delta);
+	WRITE_ONCE(state->last_delta, delta);
 
-	delta3 = delta2 - state->last_delta2;
-	state->last_delta2 = delta2;
+	delta3 = delta2 - READ_ONCE(state->last_delta2);
+	WRITE_ONCE(state->last_delta2, delta2);
 
 	if (delta < 0)
 		delta = -delta;
@@ -1771,7 +1799,7 @@ static void __init init_std_data(struct entropy_store *r)
 int __init rand_initialize(void)
 {
 	init_std_data(&input_pool);
-	crng_initialize(&primary_crng);
+	crng_initialize_primary(&primary_crng);
 	crng_global_init_time = jiffies;
 	if (ratelimit_disable) {
 		urandom_warning.interval = 0;
@@ -2149,11 +2177,11 @@ struct batched_entropy {
 
 /*
  * Get a random word for internal kernel use only. The quality of the random
- * number is either as good as RDRAND or as good as /dev/urandom, with the
- * goal of being quite fast and not depleting entropy. In order to ensure
+ * number is good as /dev/urandom, but there is no backtrack protection, with
+ * the goal of being quite fast and not depleting entropy. In order to ensure
  * that the randomness provided by this function is okay, the function
- * wait_for_random_bytes() should be called and return 0 at least once
- * at any point prior.
+ * wait_for_random_bytes() should be called and return 0 at least once at any
+ * point prior.
  */
 static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
 	.batch_lock	= __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
@@ -2166,15 +2194,6 @@ u64 get_random_u64(void)
 	struct batched_entropy *batch;
 	static void *previous;
 
-#if BITS_PER_LONG == 64
-	if (arch_get_random_long((unsigned long *)&ret))
-		return ret;
-#else
-	if (arch_get_random_long((unsigned long *)&ret) &&
-	    arch_get_random_long((unsigned long *)&ret + 1))
-		return ret;
-#endif
-
 	warn_unseeded_randomness(&previous);
 
 	batch = raw_cpu_ptr(&batched_entropy_u64);
@@ -2199,9 +2218,6 @@ u32 get_random_u32(void)
 	struct batched_entropy *batch;
 	static void *previous;
 
-	if (arch_get_random_int(&ret))
-		return ret;
-
 	warn_unseeded_randomness(&previous);
 
 	batch = raw_cpu_ptr(&batched_entropy_u32);
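
Note on the _early helpers: crng_initialize_primary() runs from rand_initialize(), before the runtime arch RNG hooks are guaranteed to be usable on every architecture, which is why it goes through arch_get_random_seed_long_early()/arch_get_random_long_early() rather than the plain variants. A minimal sketch of what the generic fallbacks look like (added in include/linux/random.h alongside this series; architectures that need special early-boot behaviour override them -- shown for illustration, not part of this diff):

#ifndef arch_get_random_seed_long_early
static inline bool __init arch_get_random_seed_long_early(unsigned long *v)
{
	/* only meaningful during early boot */
	WARN_ON(system_state != SYSTEM_BOOTING);
	/* default: fall back to the runtime hook */
	return arch_get_random_seed_long(v);
}
#endif

#ifndef arch_get_random_long_early
static inline bool __init arch_get_random_long_early(unsigned long *v)
{
	WARN_ON(system_state != SYSTEM_BOOTING);
	return arch_get_random_long(v);
}
#endif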
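
The READ_ONCE()/WRITE_ONCE() hunk in add_timer_randomness() is a data-race annotation rather than a behavioural change: the timer_rand_state words can be read and written concurrently without a lock, and the marked accesses prevent the compiler from tearing, fusing, or re-reading the loads and stores. A minimal standalone sketch of the pattern (hypothetical names, not code from this diff):

#include <linux/compiler.h>

static long shared_last;	/* hypothetical word accessed with no lock held */

static long update_delta(long sample)
{
	/* one untorn load of the previous value ... */
	long delta = sample - READ_ONCE(shared_last);

	/* ... and one untorn store of the new one */
	WRITE_ONCE(shared_last, sample);
	return delta;
}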