path: root/drivers/crypto
author      Linus Torvalds    2016-08-01 14:28:42 -0400
committer   Linus Torvalds    2016-08-01 14:28:42 -0400
commit      f38d2e5313f0af9d9b66c02a5d49c71deb994b85 (patch)
tree        eb4749c402e929b74572ded777fbe5bede3b59e1 /drivers/crypto
parent      aeb35d6b74174ed08daab84e232b456bbd89d1d9 (diff)
parent      8cf740ae85df69fb6376f31b42eb2ac7a138721f (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto fixes from Herbert Xu:
 "This fixes a number of regressions in the marvell cesa driver caused
  by the chaining work, and a regression in lib/mpi that leads to a
  GFP_KERNEL allocation with preemption disabled"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: marvell - Don't copy IV vectors from the _process op for ciphers
  lib/mpi: Fix SG miter leak
  crypto: marvell - Update cache with input sg only when it is unmapped
  crypto: marvell - Don't chain at DMA level when backlog is disabled
  crypto: marvell - Fix memory leaks in TDMA chain for cipher requests
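A note on the lib/mpi item in the pull text (the lib/mpi change itself is outside this drivers/crypto diff): GFP_KERNEL allocations may sleep, so they are not allowed while preemption is disabled, and such paths must fall back to GFP_ATOMIC or let the caller choose. The sketch below only illustrates that flag-selection pattern, which the cesa driver itself uses via CRYPTO_TFM_REQ_MAY_SLEEP in the cipher.c hunks further down; it is not code from the lib/mpi fix, and alloc_ctx is a hypothetical name.

/*
 * Illustrative sketch only, not the lib/mpi patch: GFP_KERNEL may
 * sleep, so callers that can run with preemption disabled must be
 * able to request GFP_ATOMIC instead.
 */
#include <linux/crypto.h>
#include <linux/slab.h>

static void *alloc_ctx(size_t len, u32 req_flags)
{
	/* Same test the cesa driver uses to pick its allocation flags. */
	gfp_t gfp = (req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		    GFP_KERNEL : GFP_ATOMIC;

	return kzalloc(len, gfp);
}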
Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/marvell/cesa.c    |  7
-rw-r--r--  drivers/crypto/marvell/cipher.c  | 25
-rw-r--r--  drivers/crypto/marvell/hash.c    | 12
3 files changed, 17 insertions(+), 27 deletions(-)
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index e373cc6557c6..d64af8625d7e 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -180,10 +180,11 @@ int mv_cesa_queue_req(struct crypto_async_request *req,
struct mv_cesa_engine *engine = creq->engine;
spin_lock_bh(&engine->lock);
- if (mv_cesa_req_get_type(creq) == CESA_DMA_REQ)
- mv_cesa_tdma_chain(engine, creq);
-
ret = crypto_enqueue_request(&engine->queue, req);
+ if ((mv_cesa_req_get_type(creq) == CESA_DMA_REQ) &&
+ (ret == -EINPROGRESS ||
+ (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ mv_cesa_tdma_chain(engine, creq);
spin_unlock_bh(&engine->lock);
if (ret != -EINPROGRESS)
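The cesa.c hunk above is the "Don't chain at DMA level when backlog is disabled" fix: the TDMA chaining now happens after crypto_enqueue_request(), and only when the request was actually taken by the queue, i.e. the enqueue returned -EINPROGRESS, or -EBUSY for a request that set CRYPTO_TFM_REQ_MAY_BACKLOG and therefore sits on the backlog. A minimal stand-alone sketch of that acceptance test follows; the names and locally defined constants are illustrative stand-ins, not the driver's.

#include <stdbool.h>

#define EINPROGRESS 115
#define EBUSY        16
#define MAY_BACKLOG  0x1u	/* stand-in for CRYPTO_TFM_REQ_MAY_BACKLOG */

/* Chain at the DMA level only when the queue really took the request:
 * it is either in flight, or it was backlogged and the submitter
 * allowed backlogging. */
static bool should_chain(int enqueue_ret, unsigned int req_flags)
{
	return enqueue_ret == -EINPROGRESS ||
	       (enqueue_ret == -EBUSY && (req_flags & MAY_BACKLOG));
}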
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index 48df03a06066..d19dc9614e6e 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -139,20 +139,11 @@ static int mv_cesa_ablkcipher_process(struct crypto_async_request *req,
struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
struct mv_cesa_req *basereq = &creq->base;
- unsigned int ivsize;
- int ret;
if (mv_cesa_req_get_type(basereq) == CESA_STD_REQ)
return mv_cesa_ablkcipher_std_process(ablkreq, status);
- ret = mv_cesa_dma_process(basereq, status);
- if (ret)
- return ret;
-
- ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq));
- memcpy_fromio(ablkreq->info, basereq->chain.last->data, ivsize);
-
- return 0;
+ return mv_cesa_dma_process(basereq, status);
}
static void mv_cesa_ablkcipher_step(struct crypto_async_request *req)
@@ -320,7 +311,6 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
GFP_KERNEL : GFP_ATOMIC;
struct mv_cesa_req *basereq = &creq->base;
struct mv_cesa_ablkcipher_dma_iter iter;
- struct mv_cesa_tdma_chain chain;
bool skip_ctx = false;
int ret;
unsigned int ivsize;
@@ -347,13 +337,13 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
return -ENOMEM;
}
- mv_cesa_tdma_desc_iter_init(&chain);
+ mv_cesa_tdma_desc_iter_init(&basereq->chain);
mv_cesa_ablkcipher_req_iter_init(&iter, req);
do {
struct mv_cesa_op_ctx *op;
- op = mv_cesa_dma_add_op(&chain, op_templ, skip_ctx, flags);
+ op = mv_cesa_dma_add_op(&basereq->chain, op_templ, skip_ctx, flags);
if (IS_ERR(op)) {
ret = PTR_ERR(op);
goto err_free_tdma;
@@ -363,18 +353,18 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
mv_cesa_set_crypt_op_len(op, iter.base.op_len);
/* Add input transfers */
- ret = mv_cesa_dma_add_op_transfers(&chain, &iter.base,
+ ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
&iter.src, flags);
if (ret)
goto err_free_tdma;
/* Add dummy desc to launch the crypto operation */
- ret = mv_cesa_dma_add_dummy_launch(&chain, flags);
+ ret = mv_cesa_dma_add_dummy_launch(&basereq->chain, flags);
if (ret)
goto err_free_tdma;
/* Add output transfers */
- ret = mv_cesa_dma_add_op_transfers(&chain, &iter.base,
+ ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
&iter.dst, flags);
if (ret)
goto err_free_tdma;
@@ -383,13 +373,12 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
/* Add output data for IV */
ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
- ret = mv_cesa_dma_add_iv_op(&chain, CESA_SA_CRYPT_IV_SRAM_OFFSET,
+ ret = mv_cesa_dma_add_iv_op(&basereq->chain, CESA_SA_CRYPT_IV_SRAM_OFFSET,
ivsize, CESA_TDMA_SRC_IN_SRAM, flags);
if (ret)
goto err_free_tdma;
- basereq->chain = chain;
basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;
return 0;
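The cipher.c hunks cover two of the listed fixes: mv_cesa_ablkcipher_process() no longer copies the IV back itself and simply delegates to mv_cesa_dma_process() (first hunk), and the TDMA descriptors are now built directly into basereq->chain rather than into a local chain that was only assigned to the request on success (remaining hunks). Building into the request's own chain means a failure partway through can release the descriptors already allocated via the err_free_tdma path, which is the "Fix memory leaks in TDMA chain for cipher requests" item. The generic sketch below illustrates that ownership pattern with hypothetical names; it is not the driver's API.

/* Descriptors are linked into the request's own chain as they are
 * created, so a mid-build failure can free everything through the
 * request.  All names here are hypothetical. */
#include <stdlib.h>

struct desc { struct desc *next; };
struct chain { struct desc *first, *last; };
struct request { struct chain chain; };

static int add_desc(struct chain *c)
{
	struct desc *d = calloc(1, sizeof(*d));

	if (!d)
		return -1;
	if (c->last)
		c->last->next = d;
	else
		c->first = d;
	c->last = d;
	return 0;
}

static void free_chain(struct chain *c)
{
	struct desc *d = c->first;

	while (d) {
		struct desc *next = d->next;

		free(d);
		d = next;
	}
	c->first = c->last = NULL;
}

static int build(struct request *req, int nsteps)
{
	int i;

	/* Build straight into req->chain, as the patch does with
	 * basereq->chain, so the error path can see what exists. */
	for (i = 0; i < nsteps; i++) {
		if (add_desc(&req->chain)) {
			free_chain(&req->chain);	/* err_free_tdma analogue */
			return -1;
		}
	}
	return 0;
}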
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index c35912b4fffb..82e0f4e6eb1c 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -315,12 +315,6 @@ static void mv_cesa_ahash_complete(struct crypto_async_request *req)
for (i = 0; i < digsize / 4; i++)
creq->state[i] = readl_relaxed(engine->regs + CESA_IVDIG(i));
- if (creq->cache_ptr)
- sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
- creq->cache,
- creq->cache_ptr,
- ahashreq->nbytes - creq->cache_ptr);
-
if (creq->last_req) {
/*
* Hardware's MD5 digest is in little endian format, but
@@ -365,6 +359,12 @@ static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
mv_cesa_ahash_last_cleanup(ahashreq);
mv_cesa_ahash_cleanup(ahashreq);
+
+ if (creq->cache_ptr)
+ sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
+ creq->cache,
+ creq->cache_ptr,
+ ahashreq->nbytes - creq->cache_ptr);
}
static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
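The hash.c hunks move the partial-block cache update out of mv_cesa_ahash_complete() and into mv_cesa_ahash_req_cleanup(), after mv_cesa_ahash_cleanup() has run, matching the "Update cache with input sg only when it is unmapped" fix: the CPU should only read the source scatterlist once it is no longer DMA-mapped. The fragment below is a generic ordering sketch with hypothetical names, not the driver's functions.

/* Generic ordering sketch: unmap the scatterlist before copying from
 * it with the CPU, mirroring the move of sg_pcopy_to_buffer() into the
 * cleanup path above. */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static void finish_request(struct device *dev, struct scatterlist *sg,
			   unsigned int nents, void *cache,
			   size_t len, size_t skip)
{
	/* Hand the buffer back to the CPU first... */
	dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);

	/* ...then it is safe to read the input pages from the CPU side. */
	sg_pcopy_to_buffer(sg, nents, cache, len, skip);
}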