author    Linus Torvalds  2013-11-14 07:55:21 +0900
committer Linus Torvalds  2013-11-14 07:55:21 +0900
commit    8ceafbfa91ffbdbb2afaea5c24ccb519ffb8b587 (patch)
tree      98c9ea93362536f1ddd73175b13b7847583350df /drivers/crypto
parent    42a2d923cc349583ebf6fdd52a7d35e1c2f7e6bd (diff)
parent    26ba47b18318abe7dadbe9294a611c0e932651d8 (diff)
Merge branch 'for-linus-dma-masks' of git://git.linaro.org/people/rmk/linux-arm
Pull DMA mask updates from Russell King:

 "This series cleans up the handling of DMA masks in a lot of drivers,
 fixing some bugs as we go.

 Some of the more serious errors include:

  - drivers which only set their coherent DMA mask if the attempt to
    set the streaming mask fails.

  - drivers which test for a NULL dma mask pointer, and then set the
    dma mask pointer to a location in their module .data section -
    which will cause problems if the module is reloaded.

 To counter these, I have introduced two helper functions:

  - dma_set_mask_and_coherent() takes care of setting both the
    streaming and coherent masks at the same time, with the correct
    error handling as specified by the API.

  - dma_coerce_mask_and_coherent() which resolves the problem of
    drivers forcefully setting DMA masks. This is more a marker for
    future work to further clean these locations up - the code which
    creates the devices really should be initialising these, but to
    fix that in one go along with this change could potentially be
    very disruptive.

 The last thing this series does is prise away some of Linux's
 addiction to "DMA addresses are physical addresses and RAM always
 starts at zero". We have ARM LPAE systems where all system memory is
 above 4GB physical, hence having DMA masks interpreted by (eg) the
 block layers as describing physical addresses in the range
 0..DMAMASK fails on these platforms. Santosh Shilimkar addresses
 this in this series; the patches were copied to the appropriate
 people multiple times but were ignored.

 Fixing this also gets rid of some ARM weirdness in the setup of the
 max*pfn variables, and brings ARM into line with every other Linux
 architecture as far as those go"

* 'for-linus-dma-masks' of git://git.linaro.org/people/rmk/linux-arm: (52 commits)
  ARM: 7805/1: mm: change max*pfn to include the physical offset of memory
  ARM: 7797/1: mmc: Use dma_max_pfn(dev) helper for bounce_limit calculations
  ARM: 7796/1: scsi: Use dma_max_pfn(dev) helper for bounce_limit calculations
  ARM: 7795/1: mm: dma-mapping: Add dma_max_pfn(dev) helper function
  ARM: 7794/1: block: Rename parameter dma_mask to max_addr for blk_queue_bounce_limit()
  ARM: DMA-API: better handing of DMA masks for coherent allocations
  ARM: 7857/1: dma: imx-sdma: setup dma mask
  DMA-API: firmware/google/gsmi.c: avoid direct access to DMA masks
  DMA-API: dcdbas: update DMA mask handing
  DMA-API: dma: edma.c: no need to explicitly initialize DMA masks
  DMA-API: usb: musb: use platform_device_register_full() to avoid directly messing with dma masks
  DMA-API: crypto: remove last references to 'static struct device *dev'
  DMA-API: crypto: fix ixp4xx crypto platform device support
  DMA-API: others: use dma_set_coherent_mask()
  DMA-API: staging: use dma_set_coherent_mask()
  DMA-API: usb: use new dma_coerce_mask_and_coherent()
  DMA-API: usb: use dma_set_coherent_mask()
  DMA-API: parport: parport_pc.c: use dma_coerce_mask_and_coherent()
  DMA-API: net: octeon: use dma_coerce_mask_and_coherent()
  DMA-API: net: nxp/lpc_eth: use dma_coerce_mask_and_coherent()
  ...
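As a quick illustration of the two helpers named above - a sketch only; the foo_* names and the 32-bit mask are invented for this example and do not come from the series:

    #include <linux/dma-mapping.h>
    #include <linux/platform_device.h>

    /* Typical probe path: set the streaming and coherent masks
     * together, with the error check the DMA API requires. */
    static int foo_probe(struct platform_device *pdev)
    {
    	int ret;

    	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
    	if (ret)
    		return ret;

    	/* ... device setup continues ... */
    	return 0;
    }

    /* For code that previously wrote dev->dma_mask and
     * dev->coherent_dma_mask by hand, the coercing variant marks the
     * forceful assignment as deliberate until the device creators
     * are fixed to initialise the masks themselves. */
    static int foo_legacy_attach(struct device *dev)
    {
    	return dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
    }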
Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c  48
1 file changed, 24 insertions, 24 deletions
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 21180d6cad6e..214357e12dc0 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -218,23 +218,9 @@ static dma_addr_t crypt_phys;
static int support_aes = 1;
-static void dev_release(struct device *dev)
-{
- return;
-}
-
#define DRIVER_NAME "ixp4xx_crypto"
-static struct platform_device pseudo_dev = {
- .name = DRIVER_NAME,
- .id = 0,
- .num_resources = 0,
- .dev = {
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .release = dev_release,
- }
-};
-static struct device *dev = &pseudo_dev.dev;
+static struct platform_device *pdev;
static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
{
@@ -263,6 +249,7 @@ static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
static int setup_crypt_desc(void)
{
+ struct device *dev = &pdev->dev;
BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
crypt_virt = dma_alloc_coherent(dev,
NPE_QLEN * sizeof(struct crypt_ctl),
@@ -363,6 +350,7 @@ static void finish_scattered_hmac(struct crypt_ctl *crypt)
static void one_packet(dma_addr_t phys)
{
+ struct device *dev = &pdev->dev;
struct crypt_ctl *crypt;
struct ixp_ctx *ctx;
int failed;
@@ -432,7 +420,7 @@ static void crypto_done_action(unsigned long arg)
tasklet_schedule(&crypto_done_tasklet);
}
-static int init_ixp_crypto(void)
+static int init_ixp_crypto(struct device *dev)
{
int ret = -ENODEV;
u32 msg[2] = { 0, 0 };
@@ -519,7 +507,7 @@ err:
return ret;
}
-static void release_ixp_crypto(void)
+static void release_ixp_crypto(struct device *dev)
{
qmgr_disable_irq(RECV_QID);
tasklet_kill(&crypto_done_tasklet);
@@ -886,6 +874,7 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
struct buffer_desc src_hook;
+ struct device *dev = &pdev->dev;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
GFP_KERNEL : GFP_ATOMIC;
@@ -1010,6 +999,7 @@ static int aead_perform(struct aead_request *req, int encrypt,
unsigned int cryptlen;
struct buffer_desc *buf, src_hook;
struct aead_ctx *req_ctx = aead_request_ctx(req);
+ struct device *dev = &pdev->dev;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
GFP_KERNEL : GFP_ATOMIC;
@@ -1418,20 +1408,30 @@ static struct ixp_alg ixp4xx_algos[] = {
} };
#define IXP_POSTFIX "-ixp4xx"
+
+static const struct platform_device_info ixp_dev_info __initdata = {
+ .name = DRIVER_NAME,
+ .id = 0,
+ .dma_mask = DMA_BIT_MASK(32),
+};
+
static int __init ixp_module_init(void)
{
int num = ARRAY_SIZE(ixp4xx_algos);
- int i,err ;
+ int i, err;
- if (platform_device_register(&pseudo_dev))
- return -ENODEV;
+ pdev = platform_device_register_full(&ixp_dev_info);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
spin_lock_init(&desc_lock);
spin_lock_init(&emerg_lock);
- err = init_ixp_crypto();
+ err = init_ixp_crypto(&pdev->dev);
if (err) {
- platform_device_unregister(&pseudo_dev);
+ platform_device_unregister(pdev);
return err;
}
for (i=0; i< num; i++) {
@@ -1495,8 +1495,8 @@ static void __exit ixp_module_exit(void)
if (ixp4xx_algos[i].registered)
crypto_unregister_alg(&ixp4xx_algos[i].crypto);
}
- release_ixp_crypto();
- platform_device_unregister(&pseudo_dev);
+ release_ixp_crypto(&pdev->dev);
+ platform_device_unregister(pdev);
}
module_init(ixp_module_init);
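For context on the pattern the hunks above adopt: platform_device_register_full() has the platform core allocate the storage behind dev->dma_mask and set the coherent mask from .dma_mask, so nothing points into the module's .data section across a reload, which is what the old static pseudo_dev did. A condensed sketch with hypothetical "example" naming; only the registration calls mirror the patch:

    #include <linux/dma-mapping.h>
    #include <linux/err.h>
    #include <linux/module.h>
    #include <linux/platform_device.h>

    static struct platform_device *example_pdev;

    static const struct platform_device_info example_dev_info __initdata = {
    	.name		= "example",
    	.id		= 0,
    	/* Copied by the core into dynamically allocated mask
    	 * storage; coherent_dma_mask is set to the same value. */
    	.dma_mask	= DMA_BIT_MASK(32),
    };

    static int __init example_init(void)
    {
    	example_pdev = platform_device_register_full(&example_dev_info);
    	if (IS_ERR(example_pdev))
    		return PTR_ERR(example_pdev);
    	return 0;
    }

    static void __exit example_exit(void)
    {
    	platform_device_unregister(example_pdev);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");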