path: root/arch/arm64/crypto/ghash-ce-glue.c
author    Ard Biesheuvel  2019-07-02 21:41:28 +0200
committer Herbert Xu      2019-07-26 14:56:04 +1000
commit    fe3b99b64909e8994f2606120e4703c9a1e8c080 (patch)
tree      d87362a60295f6ff7e647979c2ad57875caf892c /arch/arm64/crypto/ghash-ce-glue.c
parent    363a90c2d517e69776dcf71cc3d6fcaee9fef868 (diff)
crypto: arm64/ghash - switch to AES library
The GHASH code uses the generic AES key expansion routines and, on its fallback path, calls directly into the scalar, table-based AES cipher for arm64. Since that implementation is known to be non-time-invariant, calling it from a time-invariant SIMD cipher is a bit nasty.

So let's switch to the AES library instead - this makes the code more robust, and it drops the dependency on the generic AES cipher, allowing us to omit it entirely in the future.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
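For context, the pattern the patch adopts in gcm_setkey() looks roughly like the sketch below: the GHASH hash subkey H is derived by encrypting the all-zero block with the freshly expanded AES key, using the library calls aes_expandkey() and aes_encrypt() from <crypto/aes.h> instead of crypto_aes_expand_key() plus a direct call into the scalar cipher. The helper name example_derive_ghash_key() is made up for illustration, and the precomputation of powers of H that the real driver performs afterwards is omitted.

#include <crypto/aes.h>

/*
 * Sketch only (not the verbatim patched function): derive the GHASH
 * hash subkey H = AES-K(0^128) using the AES library API this patch
 * switches to. Error handling is reduced for brevity.
 */
static int example_derive_ghash_key(struct crypto_aes_ctx *aes_key,
				    const u8 *inkey, unsigned int keylen,
				    u8 h[AES_BLOCK_SIZE])
{
	/* Library key schedule replaces crypto_aes_expand_key(). */
	int ret = aes_expandkey(aes_key, inkey, keylen);

	if (ret)
		return -EINVAL;

	/*
	 * Encrypt the all-zero block to obtain H; this replaces the
	 * direct call into the scalar __aes_arm64_encrypt() routine.
	 */
	aes_encrypt(aes_key, h, (u8[AES_BLOCK_SIZE]){});
	return 0;
}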
Diffstat (limited to 'arch/arm64/crypto/ghash-ce-glue.c')
-rw-r--r--  arch/arm64/crypto/ghash-ce-glue.c  30
1 file changed, 10 insertions(+), 20 deletions(-)
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
index 16c5da9be9fb..70b1469783f9 100644
--- a/arch/arm64/crypto/ghash-ce-glue.c
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -70,8 +70,6 @@ asmlinkage void pmull_gcm_decrypt(int blocks, u64 dg[], u8 dst[],
asmlinkage void pmull_gcm_encrypt_block(u8 dst[], u8 const src[],
u32 const rk[], int rounds);
-asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
-
static int ghash_init(struct shash_desc *desc)
{
struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
@@ -309,14 +307,13 @@ static int gcm_setkey(struct crypto_aead *tfm, const u8 *inkey,
u8 key[GHASH_BLOCK_SIZE];
int ret;
- ret = crypto_aes_expand_key(&ctx->aes_key, inkey, keylen);
+ ret = aes_expandkey(&ctx->aes_key, inkey, keylen);
if (ret) {
tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
- __aes_arm64_encrypt(ctx->aes_key.key_enc, key, (u8[AES_BLOCK_SIZE]){},
- num_rounds(&ctx->aes_key));
+ aes_encrypt(&ctx->aes_key, key, (u8[AES_BLOCK_SIZE]){});
return __ghash_setkey(&ctx->ghash_key, key, sizeof(be128));
}
@@ -467,7 +464,7 @@ static int gcm_encrypt(struct aead_request *req)
rk = ctx->aes_key.key_enc;
} while (walk.nbytes >= 2 * AES_BLOCK_SIZE);
} else {
- __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
+ aes_encrypt(&ctx->aes_key, tag, iv);
put_unaligned_be32(2, iv + GCM_IV_SIZE);
while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
@@ -478,8 +475,7 @@ static int gcm_encrypt(struct aead_request *req)
int remaining = blocks;
do {
- __aes_arm64_encrypt(ctx->aes_key.key_enc,
- ks, iv, nrounds);
+ aes_encrypt(&ctx->aes_key, ks, iv);
crypto_xor_cpy(dst, src, ks, AES_BLOCK_SIZE);
crypto_inc(iv, AES_BLOCK_SIZE);
@@ -495,13 +491,10 @@ static int gcm_encrypt(struct aead_request *req)
walk.nbytes % (2 * AES_BLOCK_SIZE));
}
if (walk.nbytes) {
- __aes_arm64_encrypt(ctx->aes_key.key_enc, ks, iv,
- nrounds);
+ aes_encrypt(&ctx->aes_key, ks, iv);
if (walk.nbytes > AES_BLOCK_SIZE) {
crypto_inc(iv, AES_BLOCK_SIZE);
- __aes_arm64_encrypt(ctx->aes_key.key_enc,
- ks + AES_BLOCK_SIZE, iv,
- nrounds);
+ aes_encrypt(&ctx->aes_key, ks + AES_BLOCK_SIZE, iv);
}
}
}
@@ -605,7 +598,7 @@ static int gcm_decrypt(struct aead_request *req)
rk = ctx->aes_key.key_enc;
} while (walk.nbytes >= 2 * AES_BLOCK_SIZE);
} else {
- __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
+ aes_encrypt(&ctx->aes_key, tag, iv);
put_unaligned_be32(2, iv + GCM_IV_SIZE);
while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
@@ -618,8 +611,7 @@ static int gcm_decrypt(struct aead_request *req)
pmull_ghash_update_p64);
do {
- __aes_arm64_encrypt(ctx->aes_key.key_enc,
- buf, iv, nrounds);
+ aes_encrypt(&ctx->aes_key, buf, iv);
crypto_xor_cpy(dst, src, buf, AES_BLOCK_SIZE);
crypto_inc(iv, AES_BLOCK_SIZE);
@@ -637,11 +629,9 @@ static int gcm_decrypt(struct aead_request *req)
memcpy(iv2, iv, AES_BLOCK_SIZE);
crypto_inc(iv2, AES_BLOCK_SIZE);
- __aes_arm64_encrypt(ctx->aes_key.key_enc, iv2,
- iv2, nrounds);
+ aes_encrypt(&ctx->aes_key, iv2, iv2);
}
- __aes_arm64_encrypt(ctx->aes_key.key_enc, iv, iv,
- nrounds);
+ aes_encrypt(&ctx->aes_key, iv, iv);
}
}
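For orientation when reading the hunks above: the AES library entry points the driver now calls are declared in include/crypto/aes.h and, to the best of my reading of that header around this series, have the following shapes. The round count and expanded key live inside the library context, which is why the nrounds and key_enc arguments disappear from the call sites.

/* Declared in include/crypto/aes.h (paraphrased for reference). */
int aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
		  unsigned int key_len);
void aes_encrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);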