author		Linus Torvalds	2024-09-16 06:28:28 +0200
committer	Linus Torvalds	2024-09-16 06:28:28 +0200
commit		85ffc6e4ed3712f8b3fedb3fbe42afae644a699c (patch)
tree		294e5220ea434ce796e0e02a4da89edf034e072d /arch/arm
parent		9410645520e9b820069761f3450ef6661418e279 (diff)
parent		ce212d2afca47acd366a2e74c76fe82c31f785ab (diff)
Merge tag 'v6.12-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu"
"API:
- Make self-test asynchronous
Algorithms:
- Remove MPI functions added for SM3
- Add allocation error checks to remaining MPI functions (introduced
for SM3)
- Set default Jitter RNG OSR to 3
Drivers:
- Add hwrng driver for Rockchip RK3568 SoC
- Allow disabling SR-IOV VFs through sysfs in qat
- Fix device reset bugs in hisilicon
- Fix authenc key parsing by using generic helper in octeontx*
Others:
- Fix xor benchmarking on parisc"
* tag 'v6.12-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (96 commits)
crypto: n2 - Set err to EINVAL if snprintf fails for hmac
crypto: caam/qi - Use ERR_CAST() to return error-valued pointer
crypto: mips/crc32 - Clean up useless assignment operations
crypto: qcom-rng - rename *_of_data to *_match_data
crypto: qcom-rng - fix support for ACPI-based systems
dt-bindings: crypto: qcom,prng: document support for SA8255p
crypto: aegis128 - Fix indentation issue in crypto_aegis128_process_crypt()
crypto: octeontx* - Select CRYPTO_AUTHENC
crypto: testmgr - Hide ENOENT errors
crypto: qat - Remove trailing space after \n newline
crypto: hisilicon/sec - Remove trailing space after \n newline
crypto: algboss - Pass instance creation error up
crypto: api - Fix generic algorithm self-test races
crypto: hisilicon/qm - inject error before stopping queue
crypto: hisilicon/hpre - mask cluster timeout error
crypto: hisilicon/qm - reset device before enabling it
crypto: hisilicon/trng - modifying the order of header files
crypto: hisilicon - add a lock for the qp send operation
crypto: hisilicon - fix missed error branch
crypto: ccp - do not request interrupt on cmd completion when irqs disabled
...
Diffstat (limited to 'arch/arm')
-rw-r--r--	arch/arm/crypto/Kconfig	14
-rw-r--r--	arch/arm/crypto/aes-ce-glue.c	2
-rw-r--r--	arch/arm/crypto/aes-cipher-glue.c	5
-rw-r--r--	arch/arm/crypto/aes-cipher.h	13
-rw-r--r--	arch/arm/crypto/aes-neonbs-glue.c	133
5 files changed, 69 insertions, 98 deletions
diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
index 847b7a003356..5ff49a5e9afc 100644
--- a/arch/arm/crypto/Kconfig
+++ b/arch/arm/crypto/Kconfig
@@ -166,10 +166,9 @@ config CRYPTO_AES_ARM
 config CRYPTO_AES_ARM_BS
 	tristate "Ciphers: AES, modes: ECB/CBC/CTR/XTS (bit-sliced NEON)"
 	depends on KERNEL_MODE_NEON
+	select CRYPTO_AES_ARM
 	select CRYPTO_SKCIPHER
 	select CRYPTO_LIB_AES
-	select CRYPTO_AES
-	select CRYPTO_CBC
 	select CRYPTO_SIMD
 	help
@@ -183,8 +182,15 @@
 	  Bit sliced AES gives around 45% speedup on Cortex-A15 for
 	  CTR mode and for XTS mode encryption, CBC and XTS mode
 	  decryption speedup is around 25%. (CBC encryption speed is
 	  not affected by this driver.)
-	  This implementation does not rely on any lookup tables so it is
-	  believed to be invulnerable to cache timing attacks.
+
+	  The bit sliced AES code does not use lookup tables, so it is believed
+	  to be invulnerable to cache timing attacks. However, since the bit
+	  sliced AES code cannot process single blocks efficiently, in certain
+	  cases table-based code with some countermeasures against cache timing
+	  attacks will still be used as a fallback method; specifically CBC
+	  encryption (not CBC decryption), the encryption of XTS tweaks, XTS
+	  ciphertext stealing when the message isn't a multiple of 16 bytes, and
+	  CTR when invoked in a context in which NEON instructions are unusable.
 
 config CRYPTO_AES_ARM_CE
 	tristate "Ciphers: AES, modes: ECB/CBC/CTS/CTR/XTS (ARMv8 Crypto Extensions)"
diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c
index b668c97663ec..f5b66f4cf45d 100644
--- a/arch/arm/crypto/aes-ce-glue.c
+++ b/arch/arm/crypto/aes-ce-glue.c
@@ -711,7 +711,7 @@ static int __init aes_init(void)
 		algname = aes_algs[i].base.cra_name + 2;
 		drvname = aes_algs[i].base.cra_driver_name + 2;
 		basename = aes_algs[i].base.cra_driver_name;
-		simd = simd_skcipher_create_compat(algname, drvname, basename);
+		simd = simd_skcipher_create_compat(aes_algs + i, algname, drvname, basename);
 		err = PTR_ERR(simd);
 		if (IS_ERR(simd))
 			goto unregister_simds;
diff --git a/arch/arm/crypto/aes-cipher-glue.c b/arch/arm/crypto/aes-cipher-glue.c
index 6dfaef2d8f91..29efb7833960 100644
--- a/arch/arm/crypto/aes-cipher-glue.c
+++ b/arch/arm/crypto/aes-cipher-glue.c
@@ -9,9 +9,10 @@
 #include <crypto/aes.h>
 #include <crypto/algapi.h>
 #include <linux/module.h>
+#include "aes-cipher.h"
 
-asmlinkage void __aes_arm_encrypt(u32 *rk, int rounds, const u8 *in, u8 *out);
-asmlinkage void __aes_arm_decrypt(u32 *rk, int rounds, const u8 *in, u8 *out);
+EXPORT_SYMBOL_GPL(__aes_arm_encrypt);
+EXPORT_SYMBOL_GPL(__aes_arm_decrypt);
 
 static void aes_arm_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
diff --git a/arch/arm/crypto/aes-cipher.h b/arch/arm/crypto/aes-cipher.h
new file mode 100644
index 000000000000..d5db2b87eb69
--- /dev/null
+++ b/arch/arm/crypto/aes-cipher.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef ARM_CRYPTO_AES_CIPHER_H
+#define ARM_CRYPTO_AES_CIPHER_H
+
+#include <linux/linkage.h>
+#include <linux/types.h>
+
+asmlinkage void __aes_arm_encrypt(const u32 rk[], int rounds,
+				  const u8 *in, u8 *out);
+asmlinkage void __aes_arm_decrypt(const u32 rk[], int rounds,
+				  const u8 *in, u8 *out);
+
+#endif /* ARM_CRYPTO_AES_CIPHER_H */
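The new aes-cipher.h header makes the scalar, table-based AES routines callable from other arm crypto modules. A minimal sketch of the calling pattern, assuming kernel context — example_encrypt_block() is hypothetical and not part of this patch; the key expansion and the 6 + key_len / 4 round count mirror aesbs_cbc_setkey() in the glue code below:

#include <crypto/aes.h>
#include <linux/string.h>
#include "aes-cipher.h"

/* Hypothetical caller: encrypt one 16-byte block with the scalar helper. */
static int example_encrypt_block(const u8 *key, unsigned int key_len,
				 const u8 in[AES_BLOCK_SIZE],
				 u8 out[AES_BLOCK_SIZE])
{
	struct crypto_aes_ctx ctx;
	int err;

	/* aes_expandkey() fills ctx.key_enc with the round keys. */
	err = aes_expandkey(&ctx, key, key_len);
	if (err)
		return err;

	/* AES-128/192/256 use 10/12/14 rounds, i.e. 6 + key_len / 4. */
	__aes_arm_encrypt(ctx.key_enc, 6 + key_len / 4, in, out);

	/* Wipe the expanded key material, as the setkey paths do. */
	memzero_explicit(&ctx, sizeof(ctx));
	return 0;
}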
diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c
index 201eb35dde37..f6be80b5938b 100644
--- a/arch/arm/crypto/aes-neonbs-glue.c
+++ b/arch/arm/crypto/aes-neonbs-glue.c
@@ -9,24 +9,22 @@
 #include <asm/simd.h>
 #include <crypto/aes.h>
 #include <crypto/ctr.h>
-#include <crypto/internal/cipher.h>
 #include <crypto/internal/simd.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/xts.h>
 #include <linux/module.h>
+#include "aes-cipher.h"
 
 MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
 MODULE_DESCRIPTION("Bit sliced AES using NEON instructions");
 MODULE_LICENSE("GPL v2");
 
 MODULE_ALIAS_CRYPTO("ecb(aes)");
-MODULE_ALIAS_CRYPTO("cbc(aes)-all");
+MODULE_ALIAS_CRYPTO("cbc(aes)");
 MODULE_ALIAS_CRYPTO("ctr(aes)");
 MODULE_ALIAS_CRYPTO("xts(aes)");
 
-MODULE_IMPORT_NS(CRYPTO_INTERNAL);
-
 asmlinkage void aesbs_convert_key(u8 out[], u32 const rk[], int rounds);
 
 asmlinkage void aesbs_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
@@ -52,13 +50,13 @@ struct aesbs_ctx {
 
 struct aesbs_cbc_ctx {
 	struct aesbs_ctx	key;
-	struct crypto_skcipher	*enc_tfm;
+	struct crypto_aes_ctx	fallback;
 };
 
 struct aesbs_xts_ctx {
 	struct aesbs_ctx	key;
-	struct crypto_cipher	*cts_tfm;
-	struct crypto_cipher	*tweak_tfm;
+	struct crypto_aes_ctx	fallback;
+	struct crypto_aes_ctx	tweak_key;
 };
 
 struct aesbs_ctr_ctx {
@@ -129,37 +127,49 @@ static int aesbs_cbc_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
 			    unsigned int key_len)
 {
 	struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
-	struct crypto_aes_ctx rk;
 	int err;
 
-	err = aes_expandkey(&rk, in_key, key_len);
+	err = aes_expandkey(&ctx->fallback, in_key, key_len);
 	if (err)
 		return err;
 
 	ctx->key.rounds = 6 + key_len / 4;
 
 	kernel_neon_begin();
-	aesbs_convert_key(ctx->key.rk, rk.key_enc, ctx->key.rounds);
+	aesbs_convert_key(ctx->key.rk, ctx->fallback.key_enc, ctx->key.rounds);
 	kernel_neon_end();
-	memzero_explicit(&rk, sizeof(rk));
 
-	return crypto_skcipher_setkey(ctx->enc_tfm, in_key, key_len);
+	return 0;
 }
 
 static int cbc_encrypt(struct skcipher_request *req)
 {
-	struct skcipher_request *subreq = skcipher_request_ctx(req);
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-	struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+	const struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
+	unsigned int nbytes;
+	int err;
 
-	skcipher_request_set_tfm(subreq, ctx->enc_tfm);
-	skcipher_request_set_callback(subreq,
-				      skcipher_request_flags(req),
-				      NULL, NULL);
-	skcipher_request_set_crypt(subreq, req->src, req->dst,
-				   req->cryptlen, req->iv);
+	err = skcipher_walk_virt(&walk, req, false);
 
-	return crypto_skcipher_encrypt(subreq);
+	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
+		const u8 *src = walk.src.virt.addr;
+		u8 *dst = walk.dst.virt.addr;
+		u8 *prev = walk.iv;
+
+		do {
+			crypto_xor_cpy(dst, src, prev, AES_BLOCK_SIZE);
+			__aes_arm_encrypt(ctx->fallback.key_enc,
+					  ctx->key.rounds, dst, dst);
+			prev = dst;
+			src += AES_BLOCK_SIZE;
+			dst += AES_BLOCK_SIZE;
+			nbytes -= AES_BLOCK_SIZE;
+		} while (nbytes >= AES_BLOCK_SIZE);
+		memcpy(walk.iv, prev, AES_BLOCK_SIZE);
+		err = skcipher_walk_done(&walk, nbytes);
+	}
+	return err;
 }
 
 static int cbc_decrypt(struct skcipher_request *req)
@@ -190,30 +200,6 @@ static int cbc_decrypt(struct skcipher_request *req)
 	return err;
 }
 
-static int cbc_init(struct crypto_skcipher *tfm)
-{
-	struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
-	unsigned int reqsize;
-
-	ctx->enc_tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC |
-					     CRYPTO_ALG_NEED_FALLBACK);
-	if (IS_ERR(ctx->enc_tfm))
-		return PTR_ERR(ctx->enc_tfm);
-
-	reqsize = sizeof(struct skcipher_request);
-	reqsize += crypto_skcipher_reqsize(ctx->enc_tfm);
-	crypto_skcipher_set_reqsize(tfm, reqsize);
-
-	return 0;
-}
-
-static void cbc_exit(struct crypto_skcipher *tfm)
-{
-	struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
-
-	crypto_free_skcipher(ctx->enc_tfm);
-}
-
 static int aesbs_ctr_setkey_sync(struct crypto_skcipher *tfm, const u8 *in_key,
 				 unsigned int key_len)
 {
@@ -271,16 +257,8 @@ static int ctr_encrypt(struct skcipher_request *req)
 static void ctr_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
 {
 	struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
-	unsigned long flags;
-
-	/*
-	 * Temporarily disable interrupts to avoid races where
-	 * cachelines are evicted when the CPU is interrupted
-	 * to do something else.
-	 */
-	local_irq_save(flags);
-	aes_encrypt(&ctx->fallback, dst, src);
-	local_irq_restore(flags);
+
+	__aes_arm_encrypt(ctx->fallback.key_enc, ctx->key.rounds, src, dst);
 }
 
 static int ctr_encrypt_sync(struct skcipher_request *req)
@@ -302,45 +280,23 @@ static int aesbs_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
 		return err;
 
 	key_len /= 2;
-	err = crypto_cipher_setkey(ctx->cts_tfm, in_key, key_len);
+	err = aes_expandkey(&ctx->fallback, in_key, key_len);
 	if (err)
 		return err;
-	err = crypto_cipher_setkey(ctx->tweak_tfm, in_key + key_len, key_len);
+	err = aes_expandkey(&ctx->tweak_key, in_key + key_len, key_len);
 	if (err)
 		return err;
 
 	return aesbs_setkey(tfm, in_key, key_len);
 }
 
-static int xts_init(struct crypto_skcipher *tfm)
-{
-	struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
-
-	ctx->cts_tfm = crypto_alloc_cipher("aes", 0, 0);
-	if (IS_ERR(ctx->cts_tfm))
-		return PTR_ERR(ctx->cts_tfm);
-
-	ctx->tweak_tfm = crypto_alloc_cipher("aes", 0, 0);
-	if (IS_ERR(ctx->tweak_tfm))
-		crypto_free_cipher(ctx->cts_tfm);
-
-	return PTR_ERR_OR_ZERO(ctx->tweak_tfm);
-}
-
-static void xts_exit(struct crypto_skcipher *tfm)
-{
-	struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
-
-	crypto_free_cipher(ctx->tweak_tfm);
-	crypto_free_cipher(ctx->cts_tfm);
-}
-
 static int __xts_crypt(struct skcipher_request *req, bool encrypt,
 		       void (*fn)(u8 out[], u8 const in[], u8 const rk[],
 				  int rounds, int blocks, u8 iv[], int))
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+	const int rounds = ctx->key.rounds;
 	int tail = req->cryptlen % AES_BLOCK_SIZE;
 	struct skcipher_request subreq;
 	u8 buf[2 * AES_BLOCK_SIZE];
@@ -364,7 +320,7 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt,
 	if (err)
 		return err;
 
-	crypto_cipher_encrypt_one(ctx->tweak_tfm, walk.iv, walk.iv);
+	__aes_arm_encrypt(ctx->tweak_key.key_enc, rounds, walk.iv, walk.iv);
 
 	while (walk.nbytes >= AES_BLOCK_SIZE) {
 		unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
@@ -378,7 +334,7 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt,
 
 		kernel_neon_begin();
 		fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->key.rk,
-		   ctx->key.rounds, blocks, walk.iv, reorder_last_tweak);
+		   rounds, blocks, walk.iv, reorder_last_tweak);
 		kernel_neon_end();
 		err = skcipher_walk_done(&walk,
 					 walk.nbytes - blocks * AES_BLOCK_SIZE);
@@ -396,9 +352,9 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt,
 	crypto_xor(buf, req->iv, AES_BLOCK_SIZE);
 
 	if (encrypt)
-		crypto_cipher_encrypt_one(ctx->cts_tfm, buf, buf);
+		__aes_arm_encrypt(ctx->fallback.key_enc, rounds, buf, buf);
 	else
-		crypto_cipher_decrypt_one(ctx->cts_tfm, buf, buf);
+		__aes_arm_decrypt(ctx->fallback.key_dec, rounds, buf, buf);
 
 	crypto_xor(buf, req->iv, AES_BLOCK_SIZE);
@@ -439,8 +395,7 @@ static struct skcipher_alg aes_algs[] = { {
 	.base.cra_blocksize	= AES_BLOCK_SIZE,
 	.base.cra_ctxsize	= sizeof(struct aesbs_cbc_ctx),
 	.base.cra_module	= THIS_MODULE,
-	.base.cra_flags		= CRYPTO_ALG_INTERNAL |
-				  CRYPTO_ALG_NEED_FALLBACK,
+	.base.cra_flags		= CRYPTO_ALG_INTERNAL,
 
 	.min_keysize		= AES_MIN_KEY_SIZE,
 	.max_keysize		= AES_MAX_KEY_SIZE,
@@ -449,8 +404,6 @@ static struct skcipher_alg aes_algs[] = { {
 	.setkey			= aesbs_cbc_setkey,
 	.encrypt		= cbc_encrypt,
 	.decrypt		= cbc_decrypt,
-	.init			= cbc_init,
-	.exit			= cbc_exit,
 }, {
 	.base.cra_name		= "__ctr(aes)",
 	.base.cra_driver_name	= "__ctr-aes-neonbs",
@@ -500,8 +453,6 @@ static struct skcipher_alg aes_algs[] = { {
 	.setkey			= aesbs_xts_setkey,
 	.encrypt		= xts_encrypt,
 	.decrypt		= xts_decrypt,
-	.init			= xts_init,
-	.exit			= xts_exit,
 } };
 
 static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)];
@@ -540,7 +491,7 @@ static int __init aes_init(void)
 		algname = aes_algs[i].base.cra_name + 2;
 		drvname = aes_algs[i].base.cra_driver_name + 2;
 		basename = aes_algs[i].base.cra_driver_name;
-		simd = simd_skcipher_create_compat(algname, drvname, basename);
+		simd = simd_skcipher_create_compat(aes_algs + i, algname, drvname, basename);
 		err = PTR_ERR(simd);
 		if (IS_ERR(simd))
 			goto unregister_simds;
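The rewritten cbc_encrypt() above also shows why CBC encryption needs the scalar fallback at all: each ciphertext block is the cipher input for the next block, so blocks can never be batched for the bit-sliced NEON code. A standalone sketch of that serial dependency (plain C; encrypt_block() is a stand-in for any single-block AES primitive, not a kernel API):

#include <stddef.h>
#include <stdint.h>

#define BLOCK_SIZE 16

/* Placeholder for a single-block AES encryption primitive. */
void encrypt_block(const void *key, const uint8_t in[BLOCK_SIZE],
		   uint8_t out[BLOCK_SIZE]);

/* CBC encryption: C[i] = E(P[i] XOR C[i-1]), with C[-1] = IV.
 * The data dependency on C[i-1] forces one block at a time. */
void cbc_encrypt_sketch(const void *key, const uint8_t iv[BLOCK_SIZE],
			const uint8_t *src, uint8_t *dst, size_t nblocks)
{
	const uint8_t *prev = iv;	/* previous ciphertext block (or IV) */

	for (size_t i = 0; i < nblocks; i++) {
		for (int j = 0; j < BLOCK_SIZE; j++)
			dst[j] = src[j] ^ prev[j];	/* XOR-in the chain value */
		encrypt_block(key, dst, dst);		/* in-place block encryption */
		prev = dst;				/* feeds the next iteration */
		src += BLOCK_SIZE;
		dst += BLOCK_SIZE;
	}
}

This is the same chaining loop the new cbc_encrypt() performs with crypto_xor_cpy() and __aes_arm_encrypt(); CBC decryption has no such dependency, which is why it stays on the bit-sliced NEON path.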