author		Horia Geantă	2018-03-28 15:39:17 +0300
committer	Herbert Xu	2018-04-21 00:58:24 +0800
commit		a38acd236cac914aafffd80af79b9556fc2c3934 (patch)
tree		591dea48cbd3c4bc6abf9451d67bd40ed9e082aa /drivers/crypto/caam
parent		eea0d3ea7546961f69f55b26714ac8fd71c7c020 (diff)
crypto: caam - fix DMA mapping dir for generated IV
In case of GIVCIPHER, the IV is generated by the device and written back
to memory, so mapping it DMA_TO_DEVICE is wrong.
Fix the DMA mapping direction to DMA_FROM_DEVICE.
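For context: the direction argument of the streaming DMA API says which side
produces the data. A caller-supplied IV is written by the CPU and read by the
device (DMA_TO_DEVICE); a givencrypt IV travels the other way, so it must be
mapped DMA_FROM_DEVICE, otherwise the unmap never invalidates the CPU cache
and a stale line can shadow the value the hardware wrote. A minimal sketch of
the two cases follows; map_iv() and its "device_generated" parameter are
hypothetical names for illustration, not part of the driver:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/*
 * Minimal sketch, not driver code: map_iv() and "device_generated"
 * are hypothetical names used only to contrast the two directions.
 */
static dma_addr_t map_iv(struct device *dev, void *iv, int ivsize,
			 bool device_generated)
{
	/*
	 * Caller-supplied IV: CPU writes it, device reads it, so the
	 * streaming mapping is DMA_TO_DEVICE.
	 *
	 * Device-generated IV (givencrypt): the device writes it and
	 * the CPU reads it back after the job completes, so it must
	 * be DMA_FROM_DEVICE.
	 */
	return dma_map_single(dev, iv, ivsize,
			      device_generated ? DMA_FROM_DEVICE
					       : DMA_TO_DEVICE);
}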
Cc: <stable@vger.kernel.org> # 3.19+
Fixes: 7222d1a34103 ("crypto: caam - add support for givencrypt cbc(aes) and rfc3686(ctr(aes))")
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'drivers/crypto/caam')
-rw-r--r--	drivers/crypto/caam/caamalg.c	29
1 file changed, 17 insertions(+), 12 deletions(-)
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 7207a535942d..3e18b9266027 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -769,6 +769,7 @@ struct aead_edesc {
  * @src_nents: number of segments in input s/w scatterlist
  * @dst_nents: number of segments in output s/w scatterlist
  * @iv_dma: dma address of iv for checking continuity and link table
+ * @iv_dir: DMA mapping direction for IV
  * @sec4_sg_bytes: length of dma mapped sec4_sg space
  * @sec4_sg_dma: bus physical mapped address of h/w link table
  * @sec4_sg: pointer to h/w link table
@@ -778,6 +779,7 @@ struct ablkcipher_edesc {
 	int src_nents;
 	int dst_nents;
 	dma_addr_t iv_dma;
+	enum dma_data_direction iv_dir;
 	int sec4_sg_bytes;
 	dma_addr_t sec4_sg_dma;
 	struct sec4_sg_entry *sec4_sg;
@@ -787,7 +789,8 @@ struct ablkcipher_edesc {
 static void caam_unmap(struct device *dev, struct scatterlist *src,
 		       struct scatterlist *dst, int src_nents,
 		       int dst_nents,
-		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
+		       dma_addr_t iv_dma, int ivsize,
+		       enum dma_data_direction iv_dir, dma_addr_t sec4_sg_dma,
 		       int sec4_sg_bytes)
 {
 	if (dst != src) {
@@ -799,7 +802,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
 	}
 
 	if (iv_dma)
-		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
 	if (sec4_sg_bytes)
 		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
 				 DMA_TO_DEVICE);
@@ -810,7 +813,7 @@ static void aead_unmap(struct device *dev,
 		       struct aead_request *req)
 {
 	caam_unmap(dev, req->src, req->dst,
-		   edesc->src_nents, edesc->dst_nents, 0, 0,
+		   edesc->src_nents, edesc->dst_nents, 0, 0, DMA_NONE,
 		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
 }
 
@@ -823,7 +826,7 @@ static void ablkcipher_unmap(struct device *dev,
 
 	caam_unmap(dev, req->src, req->dst,
 		   edesc->src_nents, edesc->dst_nents,
-		   edesc->iv_dma, ivsize,
+		   edesc->iv_dma, ivsize, edesc->iv_dir,
 		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
 }
 
@@ -1287,7 +1290,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 			GFP_DMA | flags);
 	if (!edesc) {
 		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
-			   0, 0, 0);
+			   0, DMA_NONE, 0, 0);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -1550,7 +1553,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 	if (dma_mapping_error(jrdev, iv_dma)) {
 		dev_err(jrdev, "unable to map IV\n");
 		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
-			   0, 0, 0);
+			   0, DMA_NONE, 0, 0);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -1572,7 +1575,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 	if (!edesc) {
 		dev_err(jrdev, "could not allocate extended descriptor\n");
 		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
-			   iv_dma, ivsize, 0, 0);
+			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -1581,6 +1584,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
 	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
 			 desc_bytes;
+	edesc->iv_dir = DMA_TO_DEVICE;
 
 	if (!in_contig) {
 		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
@@ -1598,7 +1602,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
 		dev_err(jrdev, "unable to map S/G table\n");
 		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
-			   iv_dma, ivsize, 0, 0);
+			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
 		kfree(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1756,11 +1760,11 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
 	 * Check if iv can be contiguous with source and destination.
 	 * If so, include it. If not, create scatterlist.
 	 */
-	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
+	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_FROM_DEVICE);
 	if (dma_mapping_error(jrdev, iv_dma)) {
 		dev_err(jrdev, "unable to map IV\n");
 		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
-			   0, 0, 0);
+			   0, DMA_NONE, 0, 0);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -1781,7 +1785,7 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
 	if (!edesc) {
 		dev_err(jrdev, "could not allocate extended descriptor\n");
 		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
-			   iv_dma, ivsize, 0, 0);
+			   iv_dma, ivsize, DMA_FROM_DEVICE, 0, 0);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -1790,6 +1794,7 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
 	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
 			 desc_bytes;
+	edesc->iv_dir = DMA_FROM_DEVICE;
 
 	if (mapped_src_nents > 1)
 		sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg,
@@ -1807,7 +1812,7 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
 	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
 		dev_err(jrdev, "unable to map S/G table\n");
 		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
-			   iv_dma, ivsize, 0, 0);
+			   iv_dma, ivsize, DMA_FROM_DEVICE, 0, 0);
 		kfree(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
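As the caam_unmap() hunks above show, the direction passed to
dma_unmap_single() must match the one used when the buffer was mapped, which
is why the patch records it in the extended descriptor (iv_dir) and threads
it through to the unmap paths instead of hard-coding DMA_TO_DEVICE. A hedged
sketch of that record-and-replay pattern; struct iv_mapping and the helpers
below are illustrative names, not driver API:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Illustrative only: these names are not part of the caam driver. */
struct iv_mapping {
	dma_addr_t dma;
	enum dma_data_direction dir;	/* remembered for the unmap */
};

static int iv_map(struct device *dev, struct iv_mapping *m,
		  void *iv, int ivsize, enum dma_data_direction dir)
{
	m->dma = dma_map_single(dev, iv, ivsize, dir);
	if (dma_mapping_error(dev, m->dma))
		return -ENOMEM;
	m->dir = dir;	/* the same direction must reach iv_unmap() */
	return 0;
}

static void iv_unmap(struct device *dev, struct iv_mapping *m, int ivsize)
{
	/*
	 * Matches the map call; for a device-generated IV this is
	 * DMA_FROM_DEVICE, which is what makes the value written by
	 * the hardware visible to the CPU.
	 */
	dma_unmap_single(dev, m->dma, ivsize, m->dir);
}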