Diffstat (limited to 'drivers')
 drivers/char/hw_random/Kconfig                                |  27
 drivers/char/hw_random/Makefile                               |   1
 drivers/char/hw_random/cn10k-rng.c                            |  63
 drivers/char/hw_random/histb-rng.c (renamed from drivers/crypto/hisilicon/trng/trng-stb.c) | 83
 drivers/char/hw_random/imx-rngc.c                             |  53
 drivers/char/hw_random/st-rng.c                               |  21
 drivers/char/hw_random/virtio-rng.c                           |  10
 drivers/crypto/Kconfig                                        |   1
 drivers/crypto/Makefile                                       |   1
 drivers/crypto/atmel-ecc.c                                    |   2
 drivers/crypto/atmel-sha204a.c                                |   2
 drivers/crypto/caam/Kconfig                                   |   9
 drivers/crypto/caam/caamrng.c                                 |  48
 drivers/crypto/caam/ctrl.c                                    | 274
 drivers/crypto/caam/intern.h                                  |   1
 drivers/crypto/caam/regs.h                                    |  14
 drivers/crypto/ccp/platform-access.c                          |   5
 drivers/crypto/ccp/sp-pci.c                                   |  43
 drivers/crypto/hisilicon/Kconfig                              |   7
 drivers/crypto/hisilicon/Makefile                             |   2
 drivers/crypto/hisilicon/trng/Makefile                        |   3
 drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c                   |   2
 drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c          | 229
 drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h          |   2
 drivers/crypto/intel/qat/qat_4xxx/adf_drv.c                   |  45
 drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c                  |  12
 drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c                |  12
 drivers/crypto/intel/qat/qat_c62x/adf_drv.c                   |  12
 drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c                 |  12
 drivers/crypto/intel/qat/qat_common/Makefile                  |   4
 drivers/crypto/intel/qat/qat_common/adf_accel_devices.h       |   2
 drivers/crypto/intel/qat/qat_common/adf_accel_engine.c        |   2
 drivers/crypto/intel/qat/qat_common/adf_admin.c               |   1
 drivers/crypto/intel/qat/qat_common/adf_cfg.c                 |  24
 drivers/crypto/intel/qat/qat_common/adf_cfg.h                 |   2
 drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h         |   8
 drivers/crypto/intel/qat/qat_common/adf_common_drv.h          |   2
 drivers/crypto/intel/qat/qat_common/adf_dbgfs.c               |  69
 drivers/crypto/intel/qat/qat_common/adf_dbgfs.h               |  29
 drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c             |  12
 drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h             |   1
 drivers/crypto/intel/qat/qat_common/adf_init.c                |   6
 drivers/crypto/intel/qat/qat_common/adf_sysfs.c               |  60
 drivers/crypto/intel/qat/qat_common/icp_qat_hw.h              |   3
 drivers/crypto/intel/qat/qat_common/qat_algs.c                |   1
 drivers/crypto/intel/qat/qat_common/qat_asym_algs.c           |  14
 drivers/crypto/intel/qat/qat_common/qat_uclo.c                |   8
 drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c               |  12
 drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c             |  12
 drivers/crypto/marvell/cesa/cipher.c                          |   2
 drivers/crypto/marvell/octeontx2/otx2_cpt_common.h            |  15
 drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c       |   3
 drivers/crypto/marvell/octeontx2/otx2_cptlf.c                 |  34
 drivers/crypto/marvell/octeontx2/otx2_cptlf.h                 |  33
 drivers/crypto/marvell/octeontx2/otx2_cptpf.h                 |   7
 drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c            |  41
 drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c            | 247
 drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c           |  10
 drivers/crypto/marvell/octeontx2/otx2_cptvf.h                 |   1
 drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c            |   8
 drivers/crypto/n2_core.c                                      |   9
 drivers/crypto/nx/Makefile                                    |   2
 drivers/crypto/nx/nx.h                                        |   4
 drivers/crypto/sa2ul.h                                        |   2
 drivers/crypto/starfive/Kconfig                               |  20
 drivers/crypto/starfive/Makefile                              |   4
 drivers/crypto/starfive/jh7110-cryp.c                         | 258
 drivers/crypto/starfive/jh7110-cryp.h                         | 172
 drivers/crypto/starfive/jh7110-hash.c                         | 899
 drivers/crypto/starfive/jh7110-rsa.c                          | 617
 drivers/md/dm-crypt.c                                         |  15
71 files changed, 3231 insertions, 440 deletions
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 4fdf07ae3c54..e0b3786ca51b 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -335,9 +335,20 @@ config HW_RANDOM_HISI
If unsure, say Y.
+config HW_RANDOM_HISTB
+ tristate "Hisilicon STB Random Number Generator support"
+ depends on ARCH_HISI || COMPILE_TEST
+ default ARCH_HISI
+ help
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on Hisilicon Hi37xx SoC.
+
+ To compile this driver as a module, choose M here: the
+ module will be called histb-rng.
+
config HW_RANDOM_ST
tristate "ST Microelectronics HW Random Number Generator support"
- depends on HW_RANDOM && ARCH_STI
+ depends on HW_RANDOM && (ARCH_STI || COMPILE_TEST)
help
This driver provides kernel-side support for the Random Number
Generator hardware found on STi series of SoCs.
@@ -400,9 +411,9 @@ config HW_RANDOM_POLARFIRE_SOC
config HW_RANDOM_MESON
tristate "Amlogic Meson Random Number Generator support"
- depends on HW_RANDOM
depends on ARCH_MESON || COMPILE_TEST
- default y
+ depends on HAS_IOMEM && OF
+ default HW_RANDOM if ARCH_MESON
help
This driver provides kernel-side support for the Random Number
Generator hardware found on Amlogic Meson SoCs.
@@ -427,9 +438,9 @@ config HW_RANDOM_CAVIUM
config HW_RANDOM_MTK
tristate "Mediatek Random Number Generator support"
- depends on HW_RANDOM
depends on ARCH_MEDIATEK || COMPILE_TEST
- default y
+ depends on HAS_IOMEM && OF
+ default HW_RANDOM if ARCH_MEDIATEK
help
This driver provides kernel-side support for the Random Number
Generator hardware found on Mediatek SoCs.
@@ -456,7 +467,8 @@ config HW_RANDOM_S390
config HW_RANDOM_EXYNOS
tristate "Samsung Exynos True Random Number Generator support"
depends on ARCH_EXYNOS || COMPILE_TEST
- default HW_RANDOM
+ depends on HAS_IOMEM
+ default HW_RANDOM if ARCH_EXYNOS
help
This driver provides support for the True Random Number
Generator available in Exynos SoCs.
@@ -483,7 +495,8 @@ config HW_RANDOM_OPTEE
config HW_RANDOM_NPCM
tristate "NPCM Random Number Generator support"
depends on ARCH_NPCM || COMPILE_TEST
- default HW_RANDOM
+ depends on HAS_IOMEM
+ default HW_RANDOM if ARCH_NPCM
help
This driver provides support for the Random Number
Generator hardware available in Nuvoton NPCM SoCs.
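
All of the hw_random drivers touched above register a backend with the hw_random core, which exposes the selected source as /dev/hwrng. A minimal userspace sketch for sanity-checking any of them, using only standard POSIX calls (it assumes CONFIG_HW_RANDOM is enabled and a backend has been bound):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Read 16 bytes from the current hw_random backend and hex-dump them. */
    int main(void)
    {
        unsigned char buf[16];
        int fd = open("/dev/hwrng", O_RDONLY);

        if (fd < 0) {
            perror("open /dev/hwrng");
            return 1;
        }
        ssize_t n = read(fd, buf, sizeof(buf));
        if (n < 0) {
            perror("read");
            close(fd);
            return 1;
        }
        for (ssize_t i = 0; i < n; i++)
            printf("%02x", buf[i]);
        printf("\n");
        close(fd);
        return 0;
    }

The active backend can be inspected or switched through /sys/class/misc/hw_random/rng_current.
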
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 09bde4a0f971..32549a1186dc 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o
obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
obj-$(CONFIG_HW_RANDOM_HISI) += hisi-rng.o
+obj-$(CONFIG_HW_RANDOM_HISTB) += histb-rng.o
obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o
obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o
diff --git a/drivers/char/hw_random/cn10k-rng.c b/drivers/char/hw_random/cn10k-rng.c
index c1193f85982c..0cd7e1a8e499 100644
--- a/drivers/char/hw_random/cn10k-rng.c
+++ b/drivers/char/hw_random/cn10k-rng.c
@@ -23,14 +23,49 @@
#define RNM_PF_RANDOM 0x400
#define RNM_TRNG_RESULT 0x408
+/* Extended TRNG Read and Status Registers */
+#define RNM_PF_TRNG_DAT 0x1000
+#define RNM_PF_TRNG_RES 0x1008
+
struct cn10k_rng {
void __iomem *reg_base;
struct hwrng ops;
struct pci_dev *pdev;
+ /* Octeon CN10K-A A0/A1, CNF10K-A A0/A1 and CNF10K-B A0/B0
+ * do not support extended TRNG registers
+ */
+ bool extended_trng_regs;
};
#define PLAT_OCTEONTX_RESET_RNG_EBG_HEALTH_STATE 0xc2000b0f
+#define PCI_SUBSYS_DEVID_CN10K_A_RNG 0xB900
+#define PCI_SUBSYS_DEVID_CNF10K_A_RNG 0xBA00
+#define PCI_SUBSYS_DEVID_CNF10K_B_RNG 0xBC00
+
+static bool cn10k_is_extended_trng_regs_supported(struct pci_dev *pdev)
+{
+ /* CN10K-A A0/A1 */
+ if ((pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_RNG) &&
+ (!pdev->revision || (pdev->revision & 0xff) == 0x50 ||
+ (pdev->revision & 0xff) == 0x51))
+ return false;
+
+ /* CNF10K-A A0 */
+ if ((pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_RNG) &&
+ (!pdev->revision || (pdev->revision & 0xff) == 0x60 ||
+ (pdev->revision & 0xff) == 0x61))
+ return false;
+
+ /* CNF10K-B A0/B0 */
+ if ((pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_B_RNG) &&
+ (!pdev->revision || (pdev->revision & 0xff) == 0x70 ||
+ (pdev->revision & 0xff) == 0x74))
+ return false;
+
+ return true;
+}
+
static unsigned long reset_rng_health_state(struct cn10k_rng *rng)
{
struct arm_smccc_res res;
@@ -63,9 +98,23 @@ static int check_rng_health(struct cn10k_rng *rng)
return 0;
}
-static void cn10k_read_trng(struct cn10k_rng *rng, u64 *value)
+/* Returns true when valid data is available, otherwise returns false */
+static bool cn10k_read_trng(struct cn10k_rng *rng, u64 *value)
{
+ u16 retry_count = 0;
u64 upper, lower;
+ u64 status;
+
+ if (rng->extended_trng_regs) {
+ do {
+ *value = readq(rng->reg_base + RNM_PF_TRNG_DAT);
+ if (*value)
+ return true;
+ status = readq(rng->reg_base + RNM_PF_TRNG_RES);
+ if (!status && (retry_count++ > 0x1000))
+ return false;
+ } while (!status);
+ }
*value = readq(rng->reg_base + RNM_PF_RANDOM);
@@ -82,6 +131,7 @@ static void cn10k_read_trng(struct cn10k_rng *rng, u64 *value)
*value = (upper & 0xFFFFFFFF00000000) | (lower & 0xFFFFFFFF);
}
+ return true;
}
static int cn10k_rng_read(struct hwrng *hwrng, void *data,
@@ -100,7 +150,8 @@ static int cn10k_rng_read(struct hwrng *hwrng, void *data,
size = max;
while (size >= 8) {
- cn10k_read_trng(rng, &value);
+ if (!cn10k_read_trng(rng, &value))
+ goto out;
*((u64 *)pos) = value;
size -= 8;
@@ -108,7 +159,8 @@ static int cn10k_rng_read(struct hwrng *hwrng, void *data,
}
if (size > 0) {
- cn10k_read_trng(rng, &value);
+ if (!cn10k_read_trng(rng, &value))
+ goto out;
while (size > 0) {
*pos = (u8)value;
@@ -118,6 +170,7 @@ static int cn10k_rng_read(struct hwrng *hwrng, void *data,
}
}
+out:
return max - size;
}
@@ -144,9 +197,11 @@ static int cn10k_rng_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!rng->ops.name)
return -ENOMEM;
- rng->ops.read = cn10k_rng_read;
+ rng->ops.read = cn10k_rng_read;
rng->ops.priv = (unsigned long)rng;
+ rng->extended_trng_regs = cn10k_is_extended_trng_regs_supported(pdev);
+
reset_rng_health_state(rng);
err = devm_hwrng_register(&pdev->dev, &rng->ops);
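
The extended-register path added above polls RNM_PF_TRNG_DAT until it yields a non-zero word and gives up once a retry budget is exhausted with no status indication. A simplified, standalone sketch of that bounded-poll pattern; the two register accessors are simulated stand-ins, not the driver's MMIO reads:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simulated registers: data becomes available after a few polls. */
    static uint64_t sim_trng_dat(void)
    {
        static int polls;
        return ++polls > 3 ? 0x1234567890abcdefULL : 0;
    }

    static uint64_t sim_trng_res(void) { return 0; } /* 0 = nothing flagged */

    /* Return true once a non-zero word arrives, false when the status
     * register stays clear past the retry budget (no entropy coming). */
    static bool read_trng(uint64_t *value)
    {
        uint16_t retry_count = 0;
        uint64_t status;

        do {
            *value = sim_trng_dat();
            if (*value)
                return true;
            status = sim_trng_res();
            if (!status && retry_count++ > 0x1000)
                return false;
        } while (!status);
        return false;
    }

    int main(void)
    {
        uint64_t v;

        if (read_trng(&v))
            printf("random word: %016llx\n", (unsigned long long)v);
        else
            printf("timed out waiting for entropy\n");
        return 0;
    }
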
diff --git a/drivers/crypto/hisilicon/trng/trng-stb.c b/drivers/char/hw_random/histb-rng.c
index 29200a7d3d81..f652e1135e4b 100644
--- a/drivers/crypto/hisilicon/trng/trng-stb.c
+++ b/drivers/char/hw_random/histb-rng.c
@@ -1,31 +1,27 @@
// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
/*
- * Device driver for True RNG in HiSTB SoCs
- *
* Copyright (c) 2023 David Yang
*/
-#include <crypto/internal/rng.h>
-#include <linux/device.h>
#include <linux/err.h>
#include <linux/hw_random.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
-#define HISTB_TRNG_CTRL 0x0
+#define RNG_CTRL 0x0
#define RNG_SOURCE GENMASK(1, 0)
#define DROP_ENABLE BIT(5)
#define POST_PROCESS_ENABLE BIT(7)
#define POST_PROCESS_DEPTH GENMASK(15, 8)
-#define HISTB_TRNG_NUMBER 0x4
-#define HISTB_TRNG_STAT 0x8
+#define RNG_NUMBER 0x4
+#define RNG_STAT 0x8
#define DATA_COUNT GENMASK(2, 0) /* max 4 */
-struct histb_trng_priv {
+struct histb_rng_priv {
struct hwrng rng;
void __iomem *base;
};
@@ -35,19 +31,19 @@ struct histb_trng_priv {
* depth = 1 -> ~1ms
* depth = 255 -> ~16ms
*/
-static int histb_trng_wait(void __iomem *base)
+static int histb_rng_wait(void __iomem *base)
{
u32 val;
- return readl_relaxed_poll_timeout(base + HISTB_TRNG_STAT, val,
+ return readl_relaxed_poll_timeout(base + RNG_STAT, val,
val & DATA_COUNT, 1000, 30 * 1000);
}
-static void histb_trng_init(void __iomem *base, unsigned int depth)
+static void histb_rng_init(void __iomem *base, unsigned int depth)
{
u32 val;
- val = readl_relaxed(base + HISTB_TRNG_CTRL);
+ val = readl_relaxed(base + RNG_CTRL);
val &= ~RNG_SOURCE;
val |= 2;
@@ -58,72 +54,72 @@ static void histb_trng_init(void __iomem *base, unsigned int depth)
val |= POST_PROCESS_ENABLE;
val |= DROP_ENABLE;
- writel_relaxed(val, base + HISTB_TRNG_CTRL);
+ writel_relaxed(val, base + RNG_CTRL);
}
-static int histb_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+static int histb_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
- struct histb_trng_priv *priv = container_of(rng, typeof(*priv), rng);
+ struct histb_rng_priv *priv = container_of(rng, typeof(*priv), rng);
void __iomem *base = priv->base;
for (int i = 0; i < max; i += sizeof(u32)) {
- if (!(readl_relaxed(base + HISTB_TRNG_STAT) & DATA_COUNT)) {
+ if (!(readl_relaxed(base + RNG_STAT) & DATA_COUNT)) {
if (!wait)
return i;
- if (histb_trng_wait(base)) {
+ if (histb_rng_wait(base)) {
pr_err("failed to generate random number, generated %d\n",
i);
return i ? i : -ETIMEDOUT;
}
}
- *(u32 *) (data + i) = readl_relaxed(base + HISTB_TRNG_NUMBER);
+ *(u32 *) (data + i) = readl_relaxed(base + RNG_NUMBER);
}
return max;
}
-static unsigned int histb_trng_get_depth(void __iomem *base)
+static unsigned int histb_rng_get_depth(void __iomem *base)
{
- return (readl_relaxed(base + HISTB_TRNG_CTRL) & POST_PROCESS_DEPTH) >> 8;
+ return (readl_relaxed(base + RNG_CTRL) & POST_PROCESS_DEPTH) >> 8;
}
static ssize_t
depth_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct histb_trng_priv *priv = dev_get_drvdata(dev);
+ struct histb_rng_priv *priv = dev_get_drvdata(dev);
void __iomem *base = priv->base;
- return sprintf(buf, "%d\n", histb_trng_get_depth(base));
+ return sprintf(buf, "%d\n", histb_rng_get_depth(base));
}
static ssize_t
depth_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct histb_trng_priv *priv = dev_get_drvdata(dev);
+ struct histb_rng_priv *priv = dev_get_drvdata(dev);
void __iomem *base = priv->base;
unsigned int depth;
if (kstrtouint(buf, 0, &depth))
return -ERANGE;
- histb_trng_init(base, depth);
+ histb_rng_init(base, depth);
return count;
}
static DEVICE_ATTR_RW(depth);
-static struct attribute *histb_trng_attrs[] = {
+static struct attribute *histb_rng_attrs[] = {
&dev_attr_depth.attr,
NULL,
};
-ATTRIBUTE_GROUPS(histb_trng);
+ATTRIBUTE_GROUPS(histb_rng);
-static int histb_trng_probe(struct platform_device *pdev)
+static int histb_rng_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct histb_trng_priv *priv;
+ struct histb_rng_priv *priv;
void __iomem *base;
int ret;
@@ -133,17 +129,17 @@ static int histb_trng_probe(struct platform_device *pdev)
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
- return -ENOMEM;
+ return PTR_ERR(base);
- histb_trng_init(base, 144);
- if (histb_trng_wait(base)) {
+ histb_rng_init(base, 144);
+ if (histb_rng_wait(base)) {
dev_err(dev, "cannot bring up device\n");
return -ENODEV;
}
priv->base = base;
priv->rng.name = pdev->name;
- priv->rng.read = histb_trng_read;
+ priv->rng.read = histb_rng_read;
ret = devm_hwrng_register(dev, &priv->rng);
if (ret) {
dev_err(dev, "failed to register hwrng: %d\n", ret);
@@ -155,22 +151,23 @@ static int histb_trng_probe(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id histb_trng_of_match[] = {
- { .compatible = "hisilicon,histb-trng", },
+static const struct of_device_id histb_rng_of_match[] = {
+ { .compatible = "hisilicon,histb-rng", },
{ }
};
+MODULE_DEVICE_TABLE(of, histb_rng_of_match);
-static struct platform_driver histb_trng_driver = {
- .probe = histb_trng_probe,
+static struct platform_driver histb_rng_driver = {
+ .probe = histb_rng_probe,
.driver = {
- .name = "histb-trng",
- .of_match_table = histb_trng_of_match,
- .dev_groups = histb_trng_groups,
+ .name = "histb-rng",
+ .of_match_table = histb_rng_of_match,
+ .dev_groups = histb_rng_groups,
},
};
-module_platform_driver(histb_trng_driver);
+module_platform_driver(histb_rng_driver);
-MODULE_DESCRIPTION("HiSTB True RNG");
+MODULE_DESCRIPTION("Hisilicon STB random number generator driver");
MODULE_LICENSE("Dual MIT/GPL");
MODULE_AUTHOR("David Yang <mmyangfl@gmail.com>");
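
The depth attribute exported via dev_groups above tunes the post-processing depth at runtime. A small userspace sketch that reads and then updates it; the sysfs path is hypothetical and depends on the DT node name under /sys/bus/platform/devices/:

    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical path; substitute the real histb-rng device node. */
        const char *path = "/sys/bus/platform/devices/8b00000.rng/depth";
        char buf[16] = "";
        FILE *f = fopen(path, "r");

        if (!f) {
            perror("open depth");
            return 1;
        }
        if (fgets(buf, sizeof(buf), f))
            printf("current depth: %s", buf);
        fclose(f);

        /* Deeper post-processing is slower: ~1 ms at depth 1, ~16 ms at 255,
         * per the timing comment in the driver above. */
        f = fopen(path, "w");
        if (!f || fputs("144", f) < 0) {
            perror("write depth");
            return 1;
        }
        fclose(f);
        return 0;
    }
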
diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c
index a1c24148ed31..bf07f17f78c8 100644
--- a/drivers/char/hw_random/imx-rngc.c
+++ b/drivers/char/hw_random/imx-rngc.c
@@ -17,6 +17,7 @@
#include <linux/hw_random.h>
#include <linux/completion.h>
#include <linux/io.h>
+#include <linux/bitfield.h>
#define RNGC_VER_ID 0x0000
#define RNGC_COMMAND 0x0004
@@ -26,7 +27,7 @@
#define RNGC_FIFO 0x0014
/* the fields in the ver id register */
-#define RNGC_TYPE_SHIFT 28
+#define RNG_TYPE GENMASK(31, 28)
#define RNGC_VER_MAJ_SHIFT 8
/* the rng_type field */
@@ -34,20 +35,19 @@
#define RNGC_TYPE_RNGC 0x2
-#define RNGC_CMD_CLR_ERR 0x00000020
-#define RNGC_CMD_CLR_INT 0x00000010
-#define RNGC_CMD_SEED 0x00000002
-#define RNGC_CMD_SELF_TEST 0x00000001
+#define RNGC_CMD_CLR_ERR BIT(5)
+#define RNGC_CMD_CLR_INT BIT(4)
+#define RNGC_CMD_SEED BIT(1)
+#define RNGC_CMD_SELF_TEST BIT(0)
-#define RNGC_CTRL_MASK_ERROR 0x00000040
-#define RNGC_CTRL_MASK_DONE 0x00000020
-#define RNGC_CTRL_AUTO_SEED 0x00000010
+#define RNGC_CTRL_MASK_ERROR BIT(6)
+#define RNGC_CTRL_MASK_DONE BIT(5)
+#define RNGC_CTRL_AUTO_SEED BIT(4)
-#define RNGC_STATUS_ERROR 0x00010000
-#define RNGC_STATUS_FIFO_LEVEL_MASK 0x00000f00
-#define RNGC_STATUS_FIFO_LEVEL_SHIFT 8
-#define RNGC_STATUS_SEED_DONE 0x00000020
-#define RNGC_STATUS_ST_DONE 0x00000010
+#define RNGC_STATUS_ERROR BIT(16)
+#define RNGC_STATUS_FIFO_LEVEL_MASK GENMASK(11, 8)
+#define RNGC_STATUS_SEED_DONE BIT(5)
+#define RNGC_STATUS_ST_DONE BIT(4)
#define RNGC_ERROR_STATUS_STAT_ERR 0x00000008
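
The conversion above replaces open-coded masks and shifts with BIT()/GENMASK()/FIELD_GET(). A runnable userspace sketch with simplified stand-ins for those kernel macros (assuming a 64-bit unsigned long and a GCC/Clang builtin), showing that FIELD_GET(RNG_TYPE, ver_id) matches the old ver_id >> 28; the sample register value is made up:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified userspace stand-ins for the kernel's bitfield helpers. */
    #define BIT(n)          (1UL << (n))
    #define GENMASK(h, l)   (((~0UL) << (l)) & (~0UL >> (63 - (h))))
    #define FIELD_GET(mask, reg) \
            (((reg) & (mask)) >> __builtin_ctzl(mask))

    #define RNG_TYPE        GENMASK(31, 28)

    int main(void)
    {
        uint64_t ver_id = 0x20000104;   /* made-up RNGC_VER_ID value */

        /* Both forms extract the rng_type field (here: 0x2 = RNGC). */
        printf("FIELD_GET: %#llx, shift: %#llx\n",
               (unsigned long long)FIELD_GET(RNG_TYPE, ver_id),
               (unsigned long long)(ver_id >> 28));
        /* BIT(5) == the old open-coded 0x00000020 for RNGC_CMD_CLR_ERR. */
        printf("BIT(5) = %#lx\n", BIT(5));
        return 0;
    }
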
@@ -110,7 +110,7 @@ static int imx_rngc_self_test(struct imx_rngc *rngc)
cmd = readl(rngc->base + RNGC_COMMAND);
writel(cmd | RNGC_CMD_SELF_TEST, rngc->base + RNGC_COMMAND);
- ret = wait_for_completion_timeout(&rngc->rng_op_done, RNGC_TIMEOUT);
+ ret = wait_for_completion_timeout(&rngc->rng_op_done, msecs_to_jiffies(RNGC_TIMEOUT));
imx_rngc_irq_mask_clear(rngc);
if (!ret)
return -ETIMEDOUT;
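
The msecs_to_jiffies() fix above matters because wait_for_completion_timeout() takes its timeout in jiffies, not milliseconds; a raw millisecond constant is only accidentally correct on HZ=1000 kernels. A quick arithmetic sketch, assuming RNGC_TIMEOUT is 3000 ms (its definition is elsewhere in the file and not shown in this hunk):

    #include <stdio.h>

    int main(void)
    {
        const unsigned int timeout_ms = 3000;   /* assumed RNGC_TIMEOUT */
        const unsigned int hz[] = { 100, 250, 1000 };

        for (int i = 0; i < 3; i++) {
            /* raw value treated as jiffies vs. properly converted */
            unsigned int raw_s = timeout_ms / hz[i];
            unsigned int conv = timeout_ms * hz[i] / 1000; /* msecs_to_jiffies() */
            printf("HZ=%-4u raw 3000 jiffies = %2u s; msecs_to_jiffies(3000) = %4u jiffies = 3 s\n",
                   hz[i], raw_s, conv);
        }
        return 0;
    }
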
@@ -122,7 +122,6 @@ static int imx_rngc_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng);
unsigned int status;
- unsigned int level;
int retval = 0;
while (max >= sizeof(u32)) {
@@ -132,11 +131,7 @@ static int imx_rngc_read(struct hwrng *rng, void *data, size_t max, bool wait)
if (status & RNGC_STATUS_ERROR)
break;
- /* how many random numbers are in FIFO? [0-16] */
- level = (status & RNGC_STATUS_FIFO_LEVEL_MASK) >>
- RNGC_STATUS_FIFO_LEVEL_SHIFT;
-
- if (level) {
+ if (status & RNGC_STATUS_FIFO_LEVEL_MASK) {
/* retrieve a random number from FIFO */
*(u32 *)data = readl(rngc->base + RNGC_FIFO);
@@ -187,9 +182,7 @@ static int imx_rngc_init(struct hwrng *rng)
cmd = readl(rngc->base + RNGC_COMMAND);
writel(cmd | RNGC_CMD_SEED, rngc->base + RNGC_COMMAND);
- ret = wait_for_completion_timeout(&rngc->rng_op_done,
- RNGC_TIMEOUT);
-
+ ret = wait_for_completion_timeout(&rngc->rng_op_done, msecs_to_jiffies(RNGC_TIMEOUT));
if (!ret) {
ret = -ETIMEDOUT;
goto err;
@@ -229,7 +222,7 @@ static void imx_rngc_cleanup(struct hwrng *rng)
imx_rngc_irq_mask_clear(rngc);
}
-static int imx_rngc_probe(struct platform_device *pdev)
+static int __init imx_rngc_probe(struct platform_device *pdev)
{
struct imx_rngc *rngc;
int ret;
@@ -256,7 +249,7 @@ static int imx_rngc_probe(struct platform_device *pdev)
return irq;
ver_id = readl(rngc->base + RNGC_VER_ID);
- rng_type = ver_id >> RNGC_TYPE_SHIFT;
+ rng_type = FIELD_GET(RNG_TYPE, ver_id);
/*
* This driver supports only RNGC and RNGB. (There's a different
* driver for RNGA.)
@@ -305,7 +298,7 @@ static int imx_rngc_probe(struct platform_device *pdev)
return 0;
}
-static int __maybe_unused imx_rngc_suspend(struct device *dev)
+static int imx_rngc_suspend(struct device *dev)
{
struct imx_rngc *rngc = dev_get_drvdata(dev);
@@ -314,7 +307,7 @@ static int __maybe_unused imx_rngc_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused imx_rngc_resume(struct device *dev)
+static int imx_rngc_resume(struct device *dev)
{
struct imx_rngc *rngc = dev_get_drvdata(dev);
@@ -323,10 +316,10 @@ static int __maybe_unused imx_rngc_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(imx_rngc_pm_ops, imx_rngc_suspend, imx_rngc_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(imx_rngc_pm_ops, imx_rngc_suspend, imx_rngc_resume);
static const struct of_device_id imx_rngc_dt_ids[] = {
- { .compatible = "fsl,imx25-rngb", .data = NULL, },
+ { .compatible = "fsl,imx25-rngb" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_rngc_dt_ids);
@@ -334,7 +327,7 @@ MODULE_DEVICE_TABLE(of, imx_rngc_dt_ids);
static struct platform_driver imx_rngc_driver = {
.driver = {
.name = KBUILD_MODNAME,
- .pm = &imx_rngc_pm_ops,
+ .pm = pm_sleep_ptr(&imx_rngc_pm_ops),
.of_match_table = imx_rngc_dt_ids,
},
};
diff --git a/drivers/char/hw_random/st-rng.c b/drivers/char/hw_random/st-rng.c
index 15ba1e6fae4d..6e9dfac9fc9f 100644
--- a/drivers/char/hw_random/st-rng.c
+++ b/drivers/char/hw_random/st-rng.c
@@ -42,7 +42,6 @@
struct st_rng_data {
void __iomem *base;
- struct clk *clk;
struct hwrng ops;
};
@@ -85,26 +84,18 @@ static int st_rng_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- clk = devm_clk_get(&pdev->dev, NULL);
+ clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(clk))
return PTR_ERR(clk);
- ret = clk_prepare_enable(clk);
- if (ret)
- return ret;
-
ddata->ops.priv = (unsigned long)ddata;
ddata->ops.read = st_rng_read;
ddata->ops.name = pdev->name;
ddata->base = base;
- ddata->clk = clk;
-
- dev_set_drvdata(&pdev->dev, ddata);
ret = devm_hwrng_register(&pdev->dev, &ddata->ops);
if (ret) {
dev_err(&pdev->dev, "Failed to register HW RNG\n");
- clk_disable_unprepare(clk);
return ret;
}
@@ -113,15 +104,6 @@ static int st_rng_probe(struct platform_device *pdev)
return 0;
}
-static int st_rng_remove(struct platform_device *pdev)
-{
- struct st_rng_data *ddata = dev_get_drvdata(&pdev->dev);
-
- clk_disable_unprepare(ddata->clk);
-
- return 0;
-}
-
static const struct of_device_id st_rng_match[] __maybe_unused = {
{ .compatible = "st,rng" },
{},
@@ -134,7 +116,6 @@ static struct platform_driver st_rng_driver = {
.of_match_table = of_match_ptr(st_rng_match),
},
.probe = st_rng_probe,
- .remove = st_rng_remove
};
module_platform_driver(st_rng_driver);
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index f7690e0f92ed..e41a84e6b4b5 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -4,6 +4,7 @@
* Copyright (C) 2007, 2008 Rusty Russell IBM Corporation
*/
+#include <asm/barrier.h>
#include <linux/err.h>
#include <linux/hw_random.h>
#include <linux/scatterlist.h>
@@ -37,13 +38,13 @@ struct virtrng_info {
static void random_recv_done(struct virtqueue *vq)
{
struct virtrng_info *vi = vq->vdev->priv;
+ unsigned int len;
/* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */
- if (!virtqueue_get_buf(vi->vq, &vi->data_avail))
+ if (!virtqueue_get_buf(vi->vq, &len))
return;
- vi->data_idx = 0;
-
+ smp_store_release(&vi->data_avail, len);
complete(&vi->have_data);
}
@@ -52,7 +53,6 @@ static void request_entropy(struct virtrng_info *vi)
struct scatterlist sg;
reinit_completion(&vi->have_data);
- vi->data_avail = 0;
vi->data_idx = 0;
sg_init_one(&sg, vi->data, sizeof(vi->data));
@@ -88,7 +88,7 @@ static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
read = 0;
/* copy available data */
- if (vi->data_avail) {
+ if (smp_load_acquire(&vi->data_avail)) {
chunk = copy_data(vi, buf, size);
size -= chunk;
read += chunk;
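
The virtio-rng change above pairs a release store of data_avail in the completion callback with an acquire load in the reader, so the reader that observes a non-zero length is guaranteed to also observe the buffer contents the device filled in. A minimal standalone sketch of that publication pattern in C11 atomics (build with cc -pthread):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static char data[16];
    static atomic_uint data_avail;

    static void *producer(void *arg)
    {
        (void)arg;
        for (int i = 0; i < 16; i++)
            data[i] = 'a' + i;               /* fill the buffer first */
        /* Publish the length; everything written before this store is
         * visible to any thread that acquire-loads a non-zero length. */
        atomic_store_explicit(&data_avail, 16, memory_order_release);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        unsigned int len;

        pthread_create(&t, NULL, producer, NULL);
        while (!(len = atomic_load_explicit(&data_avail,
                                            memory_order_acquire)))
            ;                                /* spin until published */
        printf("%u bytes visible, first byte %c\n", len, data[0]);
        pthread_join(t, NULL);
        return 0;
    }
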
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 9c440cd0fed0..9f5b2d28bff5 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -807,5 +807,6 @@ config CRYPTO_DEV_SA2UL
acceleration for cryptographic algorithms on these devices.
source "drivers/crypto/aspeed/Kconfig"
+source "drivers/crypto/starfive/Kconfig"
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 51d36701e785..d859d6a5f3a4 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -50,3 +50,4 @@ obj-y += xilinx/
obj-y += hisilicon/
obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/
obj-y += intel/
+obj-y += starfive/
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c
index aac64b555204..432beabd79e6 100644
--- a/drivers/crypto/atmel-ecc.c
+++ b/drivers/crypto/atmel-ecc.c
@@ -389,7 +389,7 @@ static struct i2c_driver atmel_ecc_driver = {
.name = "atmel-ecc",
.of_match_table = of_match_ptr(atmel_ecc_dt_ids),
},
- .probe_new = atmel_ecc_probe,
+ .probe = atmel_ecc_probe,
.remove = atmel_ecc_remove,
.id_table = atmel_ecc_id,
};
diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c
index 44a185a84760..c77f482d2a97 100644
--- a/drivers/crypto/atmel-sha204a.c
+++ b/drivers/crypto/atmel-sha204a.c
@@ -141,7 +141,7 @@ static const struct i2c_device_id atmel_sha204a_id[] = {
MODULE_DEVICE_TABLE(i2c, atmel_sha204a_id);
static struct i2c_driver atmel_sha204a_driver = {
- .probe_new = atmel_sha204a_probe,
+ .probe = atmel_sha204a_probe,
.remove = atmel_sha204a_remove,
.id_table = atmel_sha204a_id,
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index ec6a9e6ad4d2..c631f99e415f 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -162,6 +162,15 @@ config CRYPTO_DEV_FSL_CAAM_PRNG_API
config CRYPTO_DEV_FSL_CAAM_BLOB_GEN
bool
+config CRYPTO_DEV_FSL_CAAM_RNG_TEST
+ bool "Test caam rng"
+ select CRYPTO_DEV_FSL_CAAM_RNG_API
+ help
+ Selecting this will enable a self-test to run for the
+ caam RNG.
+ This test is several minutes long and executes
+ just before the RNG is registered with the hw_random API.
+
endif # CRYPTO_DEV_FSL_CAAM_JR
endif # CRYPTO_DEV_FSL_CAAM
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index 50eb55da45c2..b3d14a7f4dd1 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -172,6 +172,50 @@ static void caam_cleanup(struct hwrng *rng)
kfifo_free(&ctx->fifo);
}
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_TEST
+static inline void test_len(struct hwrng *rng, size_t len, bool wait)
+{
+ u8 *buf;
+ int read_len;
+ struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);
+ struct device *dev = ctx->ctrldev;
+
+ buf = kcalloc(CAAM_RNG_MAX_FIFO_STORE_SIZE, sizeof(u8), GFP_KERNEL);
+
+ while (len > 0) {
+ read_len = rng->read(rng, buf, len, wait);
+
+ if (read_len < 0 || (read_len == 0 && wait)) {
+ dev_err(dev, "RNG Read FAILED received %d bytes\n",
+ read_len);
+ kfree(buf);
+ return;
+ }
+
+ print_hex_dump_debug("random bytes@: ",
+ DUMP_PREFIX_ADDRESS, 16, 4,
+ buf, read_len, 1);
+
+ len = len - read_len;
+ }
+
+ kfree(buf);
+}
+
+static inline void test_mode_once(struct hwrng *rng, bool wait)
+{
+ test_len(rng, 32, wait);
+ test_len(rng, 64, wait);
+ test_len(rng, 128, wait);
+}
+
+static void self_test(struct hwrng *rng)
+{
+ pr_info("Executing RNG SELF-TEST with wait\n");
+ test_mode_once(rng, true);
+}
+#endif
+
static int caam_init(struct hwrng *rng)
{
struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);
@@ -258,6 +302,10 @@ int caam_rng_init(struct device *ctrldev)
return ret;
}
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_TEST
+ self_test(&ctx->rng);
+#endif
+
devres_close_group(ctrldev, caam_rng_init);
return 0;
}
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index bedcc2ab3a00..ff9ddbbca377 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -79,6 +79,15 @@ static void build_deinstantiation_desc(u32 *desc, int handle)
append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
}
+static const struct of_device_id imx8m_machine_match[] = {
+ { .compatible = "fsl,imx8mm", },
+ { .compatible = "fsl,imx8mn", },
+ { .compatible = "fsl,imx8mp", },
+ { .compatible = "fsl,imx8mq", },
+ { .compatible = "fsl,imx8ulp", },
+ { }
+};
+
/*
* run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of
* the software (no JR/QI used).
@@ -105,10 +114,7 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
* Apparently on i.MX8M{Q,M,N,P} it doesn't matter if virt_en == 1
* and the following steps should be performed regardless
*/
- of_machine_is_compatible("fsl,imx8mq") ||
- of_machine_is_compatible("fsl,imx8mm") ||
- of_machine_is_compatible("fsl,imx8mn") ||
- of_machine_is_compatible("fsl,imx8mp")) {
+ of_match_node(imx8m_machine_match, of_root)) {
clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0);
while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) &&
@@ -344,16 +350,15 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
/*
* kick_trng - sets the various parameters for enabling the initialization
* of the RNG4 block in CAAM
- * @pdev - pointer to the platform device
+ * @dev - pointer to the controller device
* @ent_delay - Defines the length (in system clocks) of each entropy sample.
*/
-static void kick_trng(struct platform_device *pdev, int ent_delay)
+static void kick_trng(struct device *dev, int ent_delay)
{
- struct device *ctrldev = &pdev->dev;
- struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
struct caam_ctrl __iomem *ctrl;
struct rng4tst __iomem *r4tst;
- u32 val;
+ u32 val, rtsdctl;
ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
r4tst = &ctrl->r4tst[0];
@@ -369,26 +374,38 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
* Performance-wise, it does not make sense to
* set the delay to a value that is lower
* than the last one that worked (i.e. the state handles
- * were instantiated properly. Thus, instead of wasting
- * time trying to set the values controlling the sample
- * frequency, the function simply returns.
+ * were instantiated properly).
+ */
+ rtsdctl = rd_reg32(&r4tst->rtsdctl);
+ val = (rtsdctl & RTSDCTL_ENT_DLY_MASK) >> RTSDCTL_ENT_DLY_SHIFT;
+ if (ent_delay > val) {
+ val = ent_delay;
+ /* min. freq. count, equal to 1/4 of the entropy sample length */
+ wr_reg32(&r4tst->rtfrqmin, val >> 2);
+ /* max. freq. count, equal to 16 times the entropy sample length */
+ wr_reg32(&r4tst->rtfrqmax, val << 4);
+ }
+
+ wr_reg32(&r4tst->rtsdctl, (val << RTSDCTL_ENT_DLY_SHIFT) |
+ RTSDCTL_SAMP_SIZE_VAL);
+
+ /*
+ * To avoid reprogramming the self-test parameters over and over again,
+ * use RTSDCTL[SAMP_SIZE] as an indicator.
*/
- val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
- >> RTSDCTL_ENT_DLY_SHIFT;
- if (ent_delay <= val)
- goto start_rng;
-
- val = rd_reg32(&r4tst->rtsdctl);
- val = (val & ~RTSDCTL_ENT_DLY_MASK) |
- (ent_delay << RTSDCTL_ENT_DLY_SHIFT);
- wr_reg32(&r4tst->rtsdctl, val);
- /* min. freq. count, equal to 1/4 of the entropy sample length */
- wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2);
- /* disable maximum frequency count */
- wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
- /* read the control register */
- val = rd_reg32(&r4tst->rtmctl);
-start_rng:
+ if ((rtsdctl & RTSDCTL_SAMP_SIZE_MASK) != RTSDCTL_SAMP_SIZE_VAL) {
+ wr_reg32(&r4tst->rtscmisc, (2 << 16) | 32);
+ wr_reg32(&r4tst->rtpkrrng, 570);
+ wr_reg32(&r4tst->rtpkrmax, 1600);
+ wr_reg32(&r4tst->rtscml, (122 << 16) | 317);
+ wr_reg32(&r4tst->rtscrl[0], (80 << 16) | 107);
+ wr_reg32(&r4tst->rtscrl[1], (57 << 16) | 62);
+ wr_reg32(&r4tst->rtscrl[2], (39 << 16) | 39);
+ wr_reg32(&r4tst->rtscrl[3], (27 << 16) | 26);
+ wr_reg32(&r4tst->rtscrl[4], (19 << 16) | 18);
+ wr_reg32(&r4tst->rtscrl[5], (18 << 16) | 17);
+ }
+
/*
* select raw sampling in both entropy shifter
* and statistical checker; put RNG4 into run mode
*/
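
The shifts above derive the TRNG frequency-count window from the entropy delay: minimum = delay/4 (>> 2), maximum = delay*16 (<< 4). A worked sketch over the driver's RTSDCTL_ENT_DLY_MIN..MAX range (3200 to 12800 system clocks, per regs.h below):

    #include <stdio.h>

    int main(void)
    {
        for (unsigned int ent_delay = 3200; ent_delay <= 12800;
             ent_delay += 3200)
            printf("ent_delay=%5u -> rtfrqmin=%4u rtfrqmax=%6u\n",
                   ent_delay, ent_delay >> 2, ent_delay << 4);
        return 0;
    }
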
@@ -618,10 +635,115 @@ static bool needs_entropy_delay_adjustment(void)
return false;
}
+static int caam_ctrl_rng_init(struct device *dev)
+{
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
+ struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
+ int ret, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
+ u8 rng_vid;
+
+ if (ctrlpriv->era < 10) {
+ struct caam_perfmon __iomem *perfmon;
+
+ perfmon = ctrlpriv->total_jobrs ?
+ (struct caam_perfmon __iomem *)&ctrlpriv->jr[0]->perfmon :
+ (struct caam_perfmon __iomem *)&ctrl->perfmon;
+
+ rng_vid = (rd_reg32(&perfmon->cha_id_ls) &
+ CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
+ } else {
+ struct version_regs __iomem *vreg;
+
+ vreg = ctrlpriv->total_jobrs ?
+ (struct version_regs __iomem *)&ctrlpriv->jr[0]->vreg :
+ (struct version_regs __iomem *)&ctrl->vreg;
+
+ rng_vid = (rd_reg32(&vreg->rng) & CHA_VER_VID_MASK) >>
+ CHA_VER_VID_SHIFT;
+ }
+
+ /*
+ * If SEC has RNG version >= 4 and RNG state handle has not been
+ * already instantiated, do RNG instantiation
+ * In case of SoCs with Management Complex, RNG is managed by MC f/w.
+ */
+ if (!(ctrlpriv->mc_en && ctrlpriv->pr_support) && rng_vid >= 4) {
+ ctrlpriv->rng4_sh_init =
+ rd_reg32(&ctrl->r4tst[0].rdsta);
+ /*
+ * If the secure keys (TDKEK, JDKEK, TDSK), were already
+ * generated, signal this to the function that is instantiating
+ * the state handles. An error would occur if RNG4 attempts
+ * to regenerate these keys before the next POR.
+ */
+ gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1;
+ ctrlpriv->rng4_sh_init &= RDSTA_MASK;
+ do {
+ int inst_handles =
+ rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_MASK;
+ /*
+ * If either SH were instantiated by somebody else
+ * (e.g. u-boot) then it is assumed that the entropy
+ * parameters are properly set and thus the function
+ * setting these (kick_trng(...)) is skipped.
+ * Also, if a handle was instantiated, do not change
+ * the TRNG parameters.
+ */
+ if (needs_entropy_delay_adjustment())
+ ent_delay = 12000;
+ if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
+ dev_info(dev,
+ "Entropy delay = %u\n",
+ ent_delay);
+ kick_trng(dev, ent_delay);
+ ent_delay += 400;
+ }
+ /*
+ * if instantiate_rng(...) fails, the loop will rerun
+ * and the kick_trng(...) function will modify the
+ * upper and lower limits of the entropy sampling
+ * interval, leading to a successful initialization of
+ * the RNG.
+ */
+ ret = instantiate_rng(dev, inst_handles,
+ gen_sk);
+ /*
+ * Entropy delay is determined via TRNG characterization.
+ * TRNG characterization is run across different voltages
+ * and temperatures.
+ * If worst case value for ent_dly is identified,
+ * the loop can be skipped for that platform.
+ */
+ if (needs_entropy_delay_adjustment())
+ break;
+ if (ret == -EAGAIN)
+ /*
+ * if here, the loop will rerun,
+ * so don't hog the CPU
+ */
+ cpu_relax();
+ } while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
+ if (ret) {
+ dev_err(dev, "failed to instantiate RNG");
+ return ret;
+ }
+ /*
+ * Set handles initialized by this module as the complement of
+ * the already initialized ones
+ */
+ ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_MASK;
+
+ /* Enable RDB bit so that RNG works faster */
+ clrsetbits_32(&ctrl->scfgr, 0, SCFGR_RDBENABLE);
+ }
+
+ return 0;
+}
+
/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
- int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
+ int ret, ring;
u64 caam_id;
const struct soc_device_attribute *imx_soc_match;
struct device *dev;
@@ -631,10 +753,8 @@ static int caam_probe(struct platform_device *pdev)
struct caam_perfmon __iomem *perfmon;
struct dentry *dfs_root;
u32 scfgr, comp_params;
- u8 rng_vid;
int pg_size;
int BLOCK_OFFSET = 0;
- bool pr_support = false;
bool reg_access = true;
ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(*ctrlpriv), GFP_KERNEL);
@@ -646,6 +766,9 @@ static int caam_probe(struct platform_device *pdev)
nprop = pdev->dev.of_node;
imx_soc_match = soc_device_match(caam_imx_soc_table);
+ if (!imx_soc_match && of_match_node(imx8m_machine_match, of_root))
+ return -EPROBE_DEFER;
+
caam_imx = (bool)imx_soc_match;
if (imx_soc_match) {
@@ -770,7 +893,8 @@ static int caam_probe(struct platform_device *pdev)
mc_version = fsl_mc_get_version();
if (mc_version)
- pr_support = check_version(mc_version, 10, 20, 0);
+ ctrlpriv->pr_support = check_version(mc_version, 10, 20,
+ 0);
else
return -EPROBE_DEFER;
}
@@ -861,9 +985,6 @@ set_dma_mask:
return -ENOMEM;
}
- if (!reg_access)
- goto report_live;
-
comp_params = rd_reg32(&perfmon->comp_parms_ls);
ctrlpriv->blob_present = !!(comp_params & CTPR_LS_BLOB);
@@ -873,8 +994,6 @@ set_dma_mask:
* check both here.
*/
if (ctrlpriv->era < 10) {
- rng_vid = (rd_reg32(&perfmon->cha_id_ls) &
- CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
ctrlpriv->blob_present = ctrlpriv->blob_present &&
(rd_reg32(&perfmon->cha_num_ls) & CHA_ID_LS_AES_MASK);
} else {
@@ -884,91 +1003,16 @@ set_dma_mask:
(struct version_regs __iomem *)&ctrlpriv->jr[0]->vreg :
(struct version_regs __iomem *)&ctrl->vreg;
- rng_vid = (rd_reg32(&vreg->rng) & CHA_VER_VID_MASK) >>
- CHA_VER_VID_SHIFT;
ctrlpriv->blob_present = ctrlpriv->blob_present &&
(rd_reg32(&vreg->aesa) & CHA_VER_MISC_AES_NUM_MASK);
}
- /*
- * If SEC has RNG version >= 4 and RNG state handle has not been
- * already instantiated, do RNG instantiation
- * In case of SoCs with Management Complex, RNG is managed by MC f/w.
- */
- if (!(ctrlpriv->mc_en && pr_support) && rng_vid >= 4) {
- ctrlpriv->rng4_sh_init =
- rd_reg32(&ctrl->r4tst[0].rdsta);
- /*
- * If the secure keys (TDKEK, JDKEK, TDSK), were already
- * generated, signal this to the function that is instantiating
- * the state handles. An error would occur if RNG4 attempts
- * to regenerate these keys before the next POR.
- */
- gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1;
- ctrlpriv->rng4_sh_init &= RDSTA_MASK;
- do {
- int inst_handles =
- rd_reg32(&ctrl->r4tst[0].rdsta) &
- RDSTA_MASK;
- /*
- * If either SH were instantiated by somebody else
- * (e.g. u-boot) then it is assumed that the entropy
- * parameters are properly set and thus the function
- * setting these (kick_trng(...)) is skipped.
- * Also, if a handle was instantiated, do not change
- * the TRNG parameters.
- */
- if (needs_entropy_delay_adjustment())
- ent_delay = 12000;
- if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
- dev_info(dev,
- "Entropy delay = %u\n",
- ent_delay);
- kick_trng(pdev, ent_delay);
- ent_delay += 400;
- }
- /*
- * if instantiate_rng(...) fails, the loop will rerun
- * and the kick_trng(...) function will modify the
- * upper and lower limits of the entropy sampling
- * interval, leading to a successful initialization of
- * the RNG.
- */
- ret = instantiate_rng(dev, inst_handles,
- gen_sk);
- /*
- * Entropy delay is determined via TRNG characterization.
- * TRNG characterization is run across different voltages
- * and temperatures.
- * If worst case value for ent_dly is identified,
- * the loop can be skipped for that platform.
- */
- if (needs_entropy_delay_adjustment())
- break;
- if (ret == -EAGAIN)
- /*
- * if here, the loop will rerun,
- * so don't hog the CPU
- */
- cpu_relax();
- } while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
- if (ret) {
- dev_err(dev, "failed to instantiate RNG");
+ if (reg_access) {
+ ret = caam_ctrl_rng_init(dev);
+ if (ret)
return ret;
- }
- /*
- * Set handles initialized by this module as the complement of
- * the already initialized ones
- */
- ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_MASK;
-
- /* Enable RDB bit so that RNG works faster */
- clrsetbits_32(&ctrl->scfgr, 0, SCFGR_RDBENABLE);
}
-report_live:
- /* NOTE: RTIC detection ought to go here, around Si time */
-
caam_id = (u64)rd_reg32(&perfmon->caam_id_ms) << 32 |
(u64)rd_reg32(&perfmon->caam_id_ls);
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 86ed1b91c22d..b4f7bf77f487 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -95,6 +95,7 @@ struct caam_drv_private {
u8 blob_present; /* Nonzero if BLOB support present in device */
u8 mc_en; /* Nonzero if MC f/w is active */
u8 optee_en; /* Nonzero if OP-TEE f/w is active */
+ bool pr_support; /* RNG prediction resistance available */
int secvio_irq; /* Security violation interrupt number */
int virt_en; /* Virtualization enabled in CAAM */
int era; /* CAAM Era (internal HW revision) */
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 66928f8a0c4b..189e74c21f0c 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -3,7 +3,7 @@
* CAAM hardware register-level view
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
- * Copyright 2018 NXP
+ * Copyright 2018, 2023 NXP
*/
#ifndef REGS_H
@@ -523,6 +523,8 @@ struct rng4tst {
#define RTSDCTL_ENT_DLY_MASK (0xffff << RTSDCTL_ENT_DLY_SHIFT)
#define RTSDCTL_ENT_DLY_MIN 3200
#define RTSDCTL_ENT_DLY_MAX 12800
+#define RTSDCTL_SAMP_SIZE_MASK 0xffff
+#define RTSDCTL_SAMP_SIZE_VAL 512
u32 rtsdctl; /* seed control register */
union {
u32 rtsblim; /* PRGM=1: sparse bit limit register */
@@ -534,7 +536,15 @@ struct rng4tst {
u32 rtfrqmax; /* PRGM=1: freq. count max. limit register */
u32 rtfrqcnt; /* PRGM=0: freq. count register */
};
- u32 rsvd1[40];
+ union {
+ u32 rtscmc; /* statistical check run monobit count */
+ u32 rtscml; /* statistical check run monobit limit */
+ };
+ union {
+ u32 rtscrc[6]; /* statistical check run length count */
+ u32 rtscrl[6]; /* statistical check run length limit */
+ };
+ u32 rsvd1[33];
#define RDSTA_SKVT 0x80000000
#define RDSTA_SKVN 0x40000000
#define RDSTA_PR0 BIT(4)
diff --git a/drivers/crypto/ccp/platform-access.c b/drivers/crypto/ccp/platform-access.c
index 939c924fc383..94367bc49e35 100644
--- a/drivers/crypto/ccp/platform-access.c
+++ b/drivers/crypto/ccp/platform-access.c
@@ -67,6 +67,11 @@ int psp_send_platform_access_msg(enum psp_platform_access_msg msg,
return -ENODEV;
pa_dev = psp->platform_access_data;
+
+ if (!pa_dev->vdata->cmdresp_reg || !pa_dev->vdata->cmdbuff_addr_lo_reg ||
+ !pa_dev->vdata->cmdbuff_addr_hi_reg)
+ return -ENODEV;
+
cmd = psp->io_regs + pa_dev->vdata->cmdresp_reg;
lo = psp->io_regs + pa_dev->vdata->cmdbuff_addr_lo_reg;
hi = psp->io_regs + pa_dev->vdata->cmdbuff_addr_hi_reg;
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index aa15bc4cac2b..b603ad9b8341 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -361,6 +361,14 @@ static const struct tee_vdata teev1 = {
.ring_rptr_reg = 0x10554, /* C2PMSG_21 */
};
+static const struct tee_vdata teev2 = {
+ .cmdresp_reg = 0x10944, /* C2PMSG_17 */
+ .cmdbuff_addr_lo_reg = 0x10948, /* C2PMSG_18 */
+ .cmdbuff_addr_hi_reg = 0x1094c, /* C2PMSG_19 */
+ .ring_wptr_reg = 0x10950, /* C2PMSG_20 */
+ .ring_rptr_reg = 0x10954, /* C2PMSG_21 */
+};
+
static const struct platform_access_vdata pa_v1 = {
.cmdresp_reg = 0x10570, /* C2PMSG_28 */
.cmdbuff_addr_lo_reg = 0x10574, /* C2PMSG_29 */
@@ -369,6 +377,11 @@ static const struct platform_access_vdata pa_v1 = {
.doorbell_cmd_reg = 0x10a40, /* C2PMSG_80 */
};
+static const struct platform_access_vdata pa_v2 = {
+ .doorbell_button_reg = 0x10a24, /* C2PMSG_73 */
+ .doorbell_cmd_reg = 0x10a40, /* C2PMSG_80 */
+};
+
static const struct psp_vdata pspv1 = {
.sev = &sevv1,
.feature_reg = 0x105fc, /* C2PMSG_63 */
@@ -399,6 +412,22 @@ static const struct psp_vdata pspv4 = {
.intsts_reg = 0x10694, /* P2CMSG_INTSTS */
};
+static const struct psp_vdata pspv5 = {
+ .tee = &teev2,
+ .platform_access = &pa_v2,
+ .feature_reg = 0x109fc, /* C2PMSG_63 */
+ .inten_reg = 0x10510, /* P2CMSG_INTEN */
+ .intsts_reg = 0x10514, /* P2CMSG_INTSTS */
+};
+
+static const struct psp_vdata pspv6 = {
+ .sev = &sevv2,
+ .tee = &teev2,
+ .feature_reg = 0x109fc, /* C2PMSG_63 */
+ .inten_reg = 0x10510, /* P2CMSG_INTEN */
+ .intsts_reg = 0x10514, /* P2CMSG_INTSTS */
+};
+
#endif
static const struct sp_dev_vdata dev_vdata[] = {
@@ -453,6 +482,18 @@ static const struct sp_dev_vdata dev_vdata[] = {
.psp_vdata = &pspv3,
#endif
},
+ { /* 7 */
+ .bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+ .psp_vdata = &pspv5,
+#endif
+ },
+ { /* 8 */
+ .bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+ .psp_vdata = &pspv6,
+#endif
+ },
};
static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&dev_vdata[0] },
@@ -463,6 +504,8 @@ static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x14CA), (kernel_ulong_t)&dev_vdata[5] },
{ PCI_VDEVICE(AMD, 0x15C7), (kernel_ulong_t)&dev_vdata[6] },
{ PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[6] },
+ { PCI_VDEVICE(AMD, 0x17E0), (kernel_ulong_t)&dev_vdata[7] },
+ { PCI_VDEVICE(AMD, 0x156E), (kernel_ulong_t)&dev_vdata[8] },
/* Last entry must be zero */
{ 0, }
};
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
index e8690c223584..4137a8bf131f 100644
--- a/drivers/crypto/hisilicon/Kconfig
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -82,10 +82,3 @@ config CRYPTO_DEV_HISI_TRNG
select CRYPTO_RNG
help
Support for HiSilicon TRNG Driver.
-
-config CRYPTO_DEV_HISTB_TRNG
- tristate "Support for HiSTB TRNG Driver"
- depends on ARCH_HISI || COMPILE_TEST
- select HW_RANDOM
- help
- Support for HiSTB TRNG Driver.
diff --git a/drivers/crypto/hisilicon/Makefile b/drivers/crypto/hisilicon/Makefile
index fc51e0edec69..8595a5a5d228 100644
--- a/drivers/crypto/hisilicon/Makefile
+++ b/drivers/crypto/hisilicon/Makefile
@@ -5,4 +5,4 @@ obj-$(CONFIG_CRYPTO_DEV_HISI_SEC2) += sec2/
obj-$(CONFIG_CRYPTO_DEV_HISI_QM) += hisi_qm.o
hisi_qm-objs = qm.o sgl.o debugfs.o
obj-$(CONFIG_CRYPTO_DEV_HISI_ZIP) += zip/
-obj-y += trng/
+obj-$(CONFIG_CRYPTO_DEV_HISI_TRNG) += trng/
diff --git a/drivers/crypto/hisilicon/trng/Makefile b/drivers/crypto/hisilicon/trng/Makefile
index cf20b057c66b..d909079f351c 100644
--- a/drivers/crypto/hisilicon/trng/Makefile
+++ b/drivers/crypto/hisilicon/trng/Makefile
@@ -1,5 +1,2 @@
obj-$(CONFIG_CRYPTO_DEV_HISI_TRNG) += hisi-trng-v2.o
hisi-trng-v2-objs = trng.o
-
-obj-$(CONFIG_CRYPTO_DEV_HISTB_TRNG) += histb-trng.o
-histb-trng-objs += trng-stb.o
diff --git a/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c b/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
index ed15379a9818..4a18095ae5d8 100644
--- a/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
+++ b/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
@@ -1175,9 +1175,9 @@ static int aead_perform(struct aead_request *req, int encrypt,
/* The 12 hmac bytes are scattered,
* we need to copy them into a safe buffer */
req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags, &dma);
- crypt->icv_rev_aes = dma;
if (unlikely(!req_ctx->hmac_virt))
goto free_buf_dst;
+ crypt->icv_rev_aes = dma;
if (!encrypt) {
scatterwalk_map_and_copy(req_ctx->hmac_virt,
req->src, cryptlen, authsize, 0);
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
index 7324b86a4f40..e543a9e24a06 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -11,37 +11,76 @@
#include "adf_4xxx_hw_data.h"
#include "icp_qat_hw.h"
+enum adf_fw_objs {
+ ADF_FW_SYM_OBJ,
+ ADF_FW_ASYM_OBJ,
+ ADF_FW_DC_OBJ,
+ ADF_FW_ADMIN_OBJ,
+};
+
+static const char * const adf_4xxx_fw_objs[] = {
+ [ADF_FW_SYM_OBJ] = ADF_4XXX_SYM_OBJ,
+ [ADF_FW_ASYM_OBJ] = ADF_4XXX_ASYM_OBJ,
+ [ADF_FW_DC_OBJ] = ADF_4XXX_DC_OBJ,
+ [ADF_FW_ADMIN_OBJ] = ADF_4XXX_ADMIN_OBJ,
+};
+
+static const char * const adf_402xx_fw_objs[] = {
+ [ADF_FW_SYM_OBJ] = ADF_402XX_SYM_OBJ,
+ [ADF_FW_ASYM_OBJ] = ADF_402XX_ASYM_OBJ,
+ [ADF_FW_DC_OBJ] = ADF_402XX_DC_OBJ,
+ [ADF_FW_ADMIN_OBJ] = ADF_402XX_ADMIN_OBJ,
+};
+
struct adf_fw_config {
u32 ae_mask;
- char *obj_name;
+ enum adf_fw_objs obj;
};
-static struct adf_fw_config adf_4xxx_fw_cy_config[] = {
- {0xF0, ADF_4XXX_SYM_OBJ},
- {0xF, ADF_4XXX_ASYM_OBJ},
- {0x100, ADF_4XXX_ADMIN_OBJ},
+static const struct adf_fw_config adf_fw_cy_config[] = {
+ {0xF0, ADF_FW_SYM_OBJ},
+ {0xF, ADF_FW_ASYM_OBJ},
+ {0x100, ADF_FW_ADMIN_OBJ},
};
-static struct adf_fw_config adf_4xxx_fw_dc_config[] = {
- {0xF0, ADF_4XXX_DC_OBJ},
- {0xF, ADF_4XXX_DC_OBJ},
- {0x100, ADF_4XXX_ADMIN_OBJ},
+static const struct adf_fw_config adf_fw_dc_config[] = {
+ {0xF0, ADF_FW_DC_OBJ},
+ {0xF, ADF_FW_DC_OBJ},
+ {0x100, ADF_FW_ADMIN_OBJ},
};
-static struct adf_fw_config adf_402xx_fw_cy_config[] = {
- {0xF0, ADF_402XX_SYM_OBJ},
- {0xF, ADF_402XX_ASYM_OBJ},
- {0x100, ADF_402XX_ADMIN_OBJ},
+static const struct adf_fw_config adf_fw_sym_config[] = {
+ {0xF0, ADF_FW_SYM_OBJ},
+ {0xF, ADF_FW_SYM_OBJ},
+ {0x100, ADF_FW_ADMIN_OBJ},
};
-static struct adf_fw_config adf_402xx_fw_dc_config[] = {
- {0xF0, ADF_402XX_DC_OBJ},
- {0xF, ADF_402XX_DC_OBJ},
- {0x100, ADF_402XX_ADMIN_OBJ},
+static const struct adf_fw_config adf_fw_asym_config[] = {
+ {0xF0, ADF_FW_ASYM_OBJ},
+ {0xF, ADF_FW_ASYM_OBJ},
+ {0x100, ADF_FW_ADMIN_OBJ},
};
+static const struct adf_fw_config adf_fw_asym_dc_config[] = {
+ {0xF0, ADF_FW_ASYM_OBJ},
+ {0xF, ADF_FW_DC_OBJ},
+ {0x100, ADF_FW_ADMIN_OBJ},
+};
+
+static const struct adf_fw_config adf_fw_sym_dc_config[] = {
+ {0xF0, ADF_FW_SYM_OBJ},
+ {0xF, ADF_FW_DC_OBJ},
+ {0x100, ADF_FW_ADMIN_OBJ},
+};
+
+static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_dc_config));
+static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_sym_config));
+static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_asym_config));
+static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_asym_dc_config));
+static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_sym_dc_config));
+
/* Worker thread to service arbiter mappings */
-static const u32 thrd_to_arb_map_cy[ADF_4XXX_MAX_ACCELENGINES] = {
+static const u32 default_thrd_to_arb_map[ADF_4XXX_MAX_ACCELENGINES] = {
0x5555555, 0x5555555, 0x5555555, 0x5555555,
0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA,
0x0
@@ -61,12 +100,26 @@ static struct adf_hw_device_class adf_4xxx_class = {
enum dev_services {
SVC_CY = 0,
+ SVC_CY2,
SVC_DC,
+ SVC_SYM,
+ SVC_ASYM,
+ SVC_DC_ASYM,
+ SVC_ASYM_DC,
+ SVC_DC_SYM,
+ SVC_SYM_DC,
};
static const char *const dev_cfg_services[] = {
[SVC_CY] = ADF_CFG_CY,
+ [SVC_CY2] = ADF_CFG_ASYM_SYM,
[SVC_DC] = ADF_CFG_DC,
+ [SVC_SYM] = ADF_CFG_SYM,
+ [SVC_ASYM] = ADF_CFG_ASYM,
+ [SVC_DC_ASYM] = ADF_CFG_DC_ASYM,
+ [SVC_ASYM_DC] = ADF_CFG_ASYM_DC,
+ [SVC_DC_SYM] = ADF_CFG_DC_SYM,
+ [SVC_SYM_DC] = ADF_CFG_SYM_DC,
};
static int get_service_enabled(struct adf_accel_dev *accel_dev)
@@ -156,45 +209,50 @@ static void set_msix_default_rttable(struct adf_accel_dev *accel_dev)
static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
{
struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
- u32 capabilities_cy, capabilities_dc;
+ u32 capabilities_sym, capabilities_asym, capabilities_dc;
u32 fusectl1;
/* Read accelerator capabilities mask */
pci_read_config_dword(pdev, ADF_4XXX_FUSECTL1_OFFSET, &fusectl1);
- capabilities_cy = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
- ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
+ capabilities_sym = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
ICP_ACCEL_CAPABILITIES_CIPHER |
ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
ICP_ACCEL_CAPABILITIES_SHA3 |
ICP_ACCEL_CAPABILITIES_SHA3_EXT |
ICP_ACCEL_CAPABILITIES_HKDF |
- ICP_ACCEL_CAPABILITIES_ECEDMONT |
ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
ICP_ACCEL_CAPABILITIES_AES_V2;
/* A set bit in fusectl1 means the feature is OFF in this SKU */
if (fusectl1 & ICP_ACCEL_4XXX_MASK_CIPHER_SLICE) {
- capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
- capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_HKDF;
- capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_HKDF;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
}
+
if (fusectl1 & ICP_ACCEL_4XXX_MASK_UCS_SLICE) {
- capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
- capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
- capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
- capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
}
+
if (fusectl1 & ICP_ACCEL_4XXX_MASK_AUTH_SLICE) {
- capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
- capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_SHA3;
- capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT;
- capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
}
+
+ capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
+ ICP_ACCEL_CAPABILITIES_CIPHER |
+ ICP_ACCEL_CAPABILITIES_ECEDMONT;
+
if (fusectl1 & ICP_ACCEL_4XXX_MASK_PKE_SLICE) {
- capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
- capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
+ capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
+ capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
}
capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION |
@@ -211,12 +269,23 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
switch (get_service_enabled(accel_dev)) {
case SVC_CY:
- return capabilities_cy;
+ case SVC_CY2:
+ return capabilities_sym | capabilities_asym;
case SVC_DC:
return capabilities_dc;
+ case SVC_SYM:
+ return capabilities_sym;
+ case SVC_ASYM:
+ return capabilities_asym;
+ case SVC_ASYM_DC:
+ case SVC_DC_ASYM:
+ return capabilities_asym | capabilities_dc;
+ case SVC_SYM_DC:
+ case SVC_DC_SYM:
+ return capabilities_sym | capabilities_dc;
+ default:
+ return 0;
}
-
- return 0;
}
static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
@@ -227,13 +296,11 @@ static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
{
switch (get_service_enabled(accel_dev)) {
- case SVC_CY:
- return thrd_to_arb_map_cy;
case SVC_DC:
return thrd_to_arb_map_dc;
+ default:
+ return default_thrd_to_arb_map;
}
-
- return NULL;
}
static void get_arb_info(struct arb_info *arb_info)
@@ -304,47 +371,83 @@ static int adf_init_device(struct adf_accel_dev *accel_dev)
static u32 uof_get_num_objs(void)
{
- BUILD_BUG_ON_MSG(ARRAY_SIZE(adf_4xxx_fw_cy_config) !=
- ARRAY_SIZE(adf_4xxx_fw_dc_config),
- "Size mismatch between adf_4xxx_fw_*_config arrays");
-
- return ARRAY_SIZE(adf_4xxx_fw_cy_config);
+ return ARRAY_SIZE(adf_fw_cy_config);
}
-static char *uof_get_name_4xxx(struct adf_accel_dev *accel_dev, u32 obj_num)
+static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num,
+ const char * const fw_objs[], int num_objs)
{
+ int id;
+
switch (get_service_enabled(accel_dev)) {
case SVC_CY:
- return adf_4xxx_fw_cy_config[obj_num].obj_name;
+ case SVC_CY2:
+ id = adf_fw_cy_config[obj_num].obj;
+ break;
case SVC_DC:
- return adf_4xxx_fw_dc_config[obj_num].obj_name;
+ id = adf_fw_dc_config[obj_num].obj;
+ break;
+ case SVC_SYM:
+ id = adf_fw_sym_config[obj_num].obj;
+ break;
+ case SVC_ASYM:
+ id = adf_fw_asym_config[obj_num].obj;
+ break;
+ case SVC_ASYM_DC:
+ case SVC_DC_ASYM:
+ id = adf_fw_asym_dc_config[obj_num].obj;
+ break;
+ case SVC_SYM_DC:
+ case SVC_DC_SYM:
+ id = adf_fw_sym_dc_config[obj_num].obj;
+ break;
+ default:
+ id = -EINVAL;
+ break;
}
- return NULL;
+ if (id < 0 || id > num_objs)
+ return NULL;
+
+ return fw_objs[id];
}
-static char *uof_get_name_402xx(struct adf_accel_dev *accel_dev, u32 obj_num)
+static const char *uof_get_name_4xxx(struct adf_accel_dev *accel_dev, u32 obj_num)
{
- switch (get_service_enabled(accel_dev)) {
- case SVC_CY:
- return adf_402xx_fw_cy_config[obj_num].obj_name;
- case SVC_DC:
- return adf_402xx_fw_dc_config[obj_num].obj_name;
- }
+ int num_fw_objs = ARRAY_SIZE(adf_4xxx_fw_objs);
- return NULL;
+ return uof_get_name(accel_dev, obj_num, adf_4xxx_fw_objs, num_fw_objs);
+}
+
+static const char *uof_get_name_402xx(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ int num_fw_objs = ARRAY_SIZE(adf_402xx_fw_objs);
+
+ return uof_get_name(accel_dev, obj_num, adf_402xx_fw_objs, num_fw_objs);
}
static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
{
switch (get_service_enabled(accel_dev)) {
case SVC_CY:
- return adf_4xxx_fw_cy_config[obj_num].ae_mask;
+ return adf_fw_cy_config[obj_num].ae_mask;
case SVC_DC:
- return adf_4xxx_fw_dc_config[obj_num].ae_mask;
+ return adf_fw_dc_config[obj_num].ae_mask;
+ case SVC_CY2:
+ return adf_fw_cy_config[obj_num].ae_mask;
+ case SVC_SYM:
+ return adf_fw_sym_config[obj_num].ae_mask;
+ case SVC_ASYM:
+ return adf_fw_asym_config[obj_num].ae_mask;
+ case SVC_ASYM_DC:
+ case SVC_DC_ASYM:
+ return adf_fw_asym_dc_config[obj_num].ae_mask;
+ case SVC_SYM_DC:
+ case SVC_DC_SYM:
+ return adf_fw_sym_dc_config[obj_num].ae_mask;
+ default:
+ return 0;
}
-
- return 0;
}
void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
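
The expanded service list above corresponds to the strings accepted through the device's qat/cfg_services sysfs attribute (see Documentation/ABI/testing/sysfs-driver-qat). A sketch of switching a 4xxx device to sym-only service from userspace; the BDF is a placeholder, and the device must be brought down before reconfiguring:

    #include <stdio.h>

    /* Write a string to a sysfs attribute; returns 0 on success. */
    static int sysfs_write(const char *path, const char *val)
    {
        FILE *f = fopen(path, "w");
        int ret;

        if (!f)
            return -1;
        ret = fputs(val, f) < 0 ? -1 : 0;
        fclose(f);
        return ret;
    }

    int main(void)
    {
        /* Placeholder BDF; substitute the actual QAT device address. */
        const char *base = "/sys/bus/pci/devices/0000:6b:00.0/qat";
        char path[128];

        snprintf(path, sizeof(path), "%s/state", base);
        if (sysfs_write(path, "down"))       /* device must be down first */
            return 1;

        snprintf(path, sizeof(path), "%s/cfg_services", base);
        if (sysfs_write(path, "sym"))        /* or "asym", "sym;dc", ... */
            return 1;

        snprintf(path, sizeof(path), "%s/state", base);
        return sysfs_write(path, "up") ? 1 : 0;
    }
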
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h
index 085e259c245a..e5b314d2b60e 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h
@@ -72,7 +72,7 @@ enum icp_qat_4xxx_slice_mask {
ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE = BIT(3),
ICP_ACCEL_4XXX_MASK_UCS_SLICE = BIT(4),
ICP_ACCEL_4XXX_MASK_EIA3_SLICE = BIT(5),
- ICP_ACCEL_4XXX_MASK_SMX_SLICE = BIT(6),
+ ICP_ACCEL_4XXX_MASK_SMX_SLICE = BIT(7),
};
void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id);
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
index ceb87327a5fe..1a15600361d0 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
@@ -7,6 +7,7 @@
#include <adf_accel_devices.h>
#include <adf_cfg.h>
#include <adf_common_drv.h>
+#include <adf_dbgfs.h>
#include "adf_4xxx_hw_data.h"
#include "qat_compression.h"
@@ -24,11 +25,25 @@ MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
enum configs {
DEV_CFG_CY = 0,
DEV_CFG_DC,
+ DEV_CFG_SYM,
+ DEV_CFG_ASYM,
+ DEV_CFG_ASYM_SYM,
+ DEV_CFG_ASYM_DC,
+ DEV_CFG_DC_ASYM,
+ DEV_CFG_SYM_DC,
+ DEV_CFG_DC_SYM,
};
static const char * const services_operations[] = {
ADF_CFG_CY,
ADF_CFG_DC,
+ ADF_CFG_SYM,
+ ADF_CFG_ASYM,
+ ADF_CFG_ASYM_SYM,
+ ADF_CFG_ASYM_DC,
+ ADF_CFG_DC_ASYM,
+ ADF_CFG_SYM_DC,
+ ADF_CFG_DC_SYM,
};
static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
@@ -37,8 +52,8 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
adf_clean_hw_data_4xxx(accel_dev->hw_device);
accel_dev->hw_device = NULL;
}
+ adf_dbgfs_exit(accel_dev);
adf_cfg_dev_remove(accel_dev);
- debugfs_remove(accel_dev->debugfs_dir);
adf_devmgr_rm_dev(accel_dev, NULL);
}
@@ -241,6 +256,21 @@ err:
return ret;
}
+static int adf_no_dev_config(struct adf_accel_dev *accel_dev)
+{
+ unsigned long val;
+ int ret;
+
+ val = 0;
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
+ &val, ADF_DEC);
+ if (ret)
+ return ret;
+
+ return adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
+ &val, ADF_DEC);
+}
+
int adf_gen4_dev_config(struct adf_accel_dev *accel_dev)
{
char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
@@ -265,11 +295,15 @@ int adf_gen4_dev_config(struct adf_accel_dev *accel_dev)
switch (ret) {
case DEV_CFG_CY:
+ case DEV_CFG_ASYM_SYM:
ret = adf_crypto_dev_config(accel_dev);
break;
case DEV_CFG_DC:
ret = adf_comp_dev_config(accel_dev);
break;
+ default:
+ ret = adf_no_dev_config(accel_dev);
+ break;
}
if (ret)
@@ -289,7 +323,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct adf_accel_dev *accel_dev;
struct adf_accel_pci *accel_pci_dev;
struct adf_hw_device_data *hw_data;
- char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
unsigned long bar_mask;
struct adf_bar *bar;
@@ -348,12 +381,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_err;
}
- /* Create dev top level debugfs entry */
- snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
- hw_data->dev_class->name, pci_name(pdev));
-
- accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
-
/* Create device configuration table */
ret = adf_cfg_dev_add(accel_dev);
if (ret)
@@ -410,6 +437,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_err;
}
+ adf_dbgfs_init(accel_dev);
+
ret = adf_dev_up(accel_dev, true);
if (ret)
goto out_err_dev_stop;
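
The widened services_operations[] table drives adf_gen4_dev_config(): the stored ServicesEnabled string is matched against the table and the resulting index picks a configuration path (crypto, compression, or the new no-op config). A userspace sketch of that string-to-index step, with the table mirroring the ADF_CFG_* strings added in adf_cfg_strings.h:

#include <stdio.h>
#include <string.h>

static const char * const services[] = {
	"sym;asym", "dc", "sym", "asym", "asym;sym",
	"asym;dc", "dc;asym", "sym;dc", "dc;sym",
};

static int match_service(const char *val)
{
	for (size_t i = 0; i < sizeof(services) / sizeof(services[0]); i++)
		if (!strcmp(services[i], val))
			return (int)i;
	return -1;	/* unknown string: reject the configuration */
}

int main(void)
{
	/* "dc;sym" -> 8, which lands in the adf_no_dev_config() branch */
	printf("%d\n", match_service("dc;sym"));
	return 0;
}
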
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
index bb4dca735ab5..468c9102093f 100644
--- a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
@@ -16,6 +16,7 @@
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_cfg.h>
+#include <adf_dbgfs.h>
#include "adf_c3xxx_hw_data.h"
static const struct pci_device_id adf_pci_tbl[] = {
@@ -65,8 +66,8 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
kfree(accel_dev->hw_device);
accel_dev->hw_device = NULL;
}
+ adf_dbgfs_exit(accel_dev);
adf_cfg_dev_remove(accel_dev);
- debugfs_remove(accel_dev->debugfs_dir);
adf_devmgr_rm_dev(accel_dev, NULL);
}
@@ -75,7 +76,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct adf_accel_dev *accel_dev;
struct adf_accel_pci *accel_pci_dev;
struct adf_hw_device_data *hw_data;
- char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
unsigned long bar_mask;
int ret;
@@ -142,12 +142,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_err;
}
- /* Create dev top level debugfs entry */
- snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
- hw_data->dev_class->name, pci_name(pdev));
-
- accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
-
/* Create device configuration table */
ret = adf_cfg_dev_add(accel_dev);
if (ret)
@@ -199,6 +193,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_err_free_reg;
}
+ adf_dbgfs_init(accel_dev);
+
ret = adf_dev_up(accel_dev, true);
if (ret)
goto out_err_dev_stop;
diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c
index e8cc10f64134..d5a0ecca9d0b 100644
--- a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c
@@ -16,6 +16,7 @@
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_cfg.h>
+#include <adf_dbgfs.h>
#include "adf_c3xxxvf_hw_data.h"
static const struct pci_device_id adf_pci_tbl[] = {
@@ -64,8 +65,8 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
kfree(accel_dev->hw_device);
accel_dev->hw_device = NULL;
}
+ adf_dbgfs_exit(accel_dev);
adf_cfg_dev_remove(accel_dev);
- debugfs_remove(accel_dev->debugfs_dir);
pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
adf_devmgr_rm_dev(accel_dev, pf);
}
@@ -76,7 +77,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct adf_accel_dev *pf;
struct adf_accel_pci *accel_pci_dev;
struct adf_hw_device_data *hw_data;
- char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
unsigned long bar_mask;
int ret;
@@ -123,12 +123,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
accel_pci_dev->sku = hw_data->get_sku(hw_data);
- /* Create dev top level debugfs entry */
- snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
- hw_data->dev_class->name, pci_name(pdev));
-
- accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
-
/* Create device configuration table */
ret = adf_cfg_dev_add(accel_dev);
if (ret)
@@ -173,6 +167,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Completion for VF2PF request/response message exchange */
init_completion(&accel_dev->vf.msg_received);
+ adf_dbgfs_init(accel_dev);
+
ret = adf_dev_up(accel_dev, false);
if (ret)
goto out_err_dev_stop;
diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
index ca18ae14c099..0186921be936 100644
--- a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
@@ -16,6 +16,7 @@
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_cfg.h>
+#include <adf_dbgfs.h>
#include "adf_c62x_hw_data.h"
static const struct pci_device_id adf_pci_tbl[] = {
@@ -65,8 +66,8 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
kfree(accel_dev->hw_device);
accel_dev->hw_device = NULL;
}
+ adf_dbgfs_exit(accel_dev);
adf_cfg_dev_remove(accel_dev);
- debugfs_remove(accel_dev->debugfs_dir);
adf_devmgr_rm_dev(accel_dev, NULL);
}
@@ -75,7 +76,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct adf_accel_dev *accel_dev;
struct adf_accel_pci *accel_pci_dev;
struct adf_hw_device_data *hw_data;
- char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
unsigned long bar_mask;
int ret;
@@ -142,12 +142,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_err;
}
- /* Create dev top level debugfs entry */
- snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
- hw_data->dev_class->name, pci_name(pdev));
-
- accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
-
/* Create device configuration table */
ret = adf_cfg_dev_add(accel_dev);
if (ret)
@@ -199,6 +193,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_err_free_reg;
}
+ adf_dbgfs_init(accel_dev);
+
ret = adf_dev_up(accel_dev, true);
if (ret)
goto out_err_dev_stop;
diff --git a/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c
index 37566309df94..c9ae6c0d0dca 100644
--- a/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c
@@ -16,6 +16,7 @@
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_cfg.h>
+#include <adf_dbgfs.h>
#include "adf_c62xvf_hw_data.h"
static const struct pci_device_id adf_pci_tbl[] = {
@@ -64,8 +65,8 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
kfree(accel_dev->hw_device);
accel_dev->hw_device = NULL;
}
+ adf_dbgfs_exit(accel_dev);
adf_cfg_dev_remove(accel_dev);
- debugfs_remove(accel_dev->debugfs_dir);
pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
adf_devmgr_rm_dev(accel_dev, pf);
}
@@ -76,7 +77,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct adf_accel_dev *pf;
struct adf_accel_pci *accel_pci_dev;
struct adf_hw_device_data *hw_data;
- char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
unsigned long bar_mask;
int ret;
@@ -123,12 +123,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
accel_pci_dev->sku = hw_data->get_sku(hw_data);
- /* Create dev top level debugfs entry */
- snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
- hw_data->dev_class->name, pci_name(pdev));
-
- accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
-
/* Create device configuration table */
ret = adf_cfg_dev_add(accel_dev);
if (ret)
@@ -173,6 +167,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Completion for VF2PF request/response message exchange */
init_completion(&accel_dev->vf.msg_received);
+ adf_dbgfs_init(accel_dev);
+
ret = adf_dev_up(accel_dev, false);
if (ret)
goto out_err_dev_stop;
diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile
index 1fb8d50f509f..38de3aba6e8c 100644
--- a/drivers/crypto/intel/qat/qat_common/Makefile
+++ b/drivers/crypto/intel/qat/qat_common/Makefile
@@ -27,7 +27,9 @@ intel_qat-objs := adf_cfg.o \
qat_hal.o \
qat_bl.o
-intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o
+intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \
+ adf_dbgfs.o
+
intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \
adf_pfvf_pf_msg.o adf_pfvf_pf_proto.o \
adf_pfvf_vf_msg.o adf_pfvf_vf_proto.o \
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
index bd19e6460899..0399417b91fc 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
@@ -202,7 +202,7 @@ struct adf_hw_device_data {
int (*ring_pair_reset)(struct adf_accel_dev *accel_dev, u32 bank_nr);
void (*reset_device)(struct adf_accel_dev *accel_dev);
void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
- char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num);
+ const char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num);
u32 (*uof_get_num_objs)(void);
u32 (*uof_get_ae_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
int (*dev_config)(struct adf_accel_dev *accel_dev);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c b/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c
index 4ce2b666929e..6be064dc64c8 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c
@@ -13,7 +13,7 @@ static int adf_ae_fw_load_images(struct adf_accel_dev *accel_dev, void *fw_addr,
struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
struct adf_hw_device_data *hw_device = accel_dev->hw_device;
struct icp_qat_fw_loader_handle *loader;
- char *obj_name;
+ const char *obj_name;
u32 num_objs;
u32 ae_mask;
int i;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c
index 3b6184c35081..118775ee02f2 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_admin.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c
@@ -286,7 +286,6 @@ int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay)
return adf_send_admin(accel_dev, &req, &resp, ae_mask);
}
-EXPORT_SYMBOL_GPL(adf_init_admin_pm);
int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
{
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg.c b/drivers/crypto/intel/qat/qat_common/adf_cfg.c
index 1931e5b37f2b..8836f015c39c 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg.c
@@ -74,15 +74,30 @@ int adf_cfg_dev_add(struct adf_accel_dev *accel_dev)
INIT_LIST_HEAD(&dev_cfg_data->sec_list);
init_rwsem(&dev_cfg_data->lock);
accel_dev->cfg = dev_cfg_data;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_dev_add);
- /* accel_dev->debugfs_dir should always be non-NULL here */
- dev_cfg_data->debug = debugfs_create_file("dev_cfg", S_IRUSR,
+void adf_cfg_dev_dbgfs_add(struct adf_accel_dev *accel_dev)
+{
+ struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
+
+ dev_cfg_data->debug = debugfs_create_file("dev_cfg", 0400,
accel_dev->debugfs_dir,
dev_cfg_data,
&qat_dev_cfg_fops);
- return 0;
}
-EXPORT_SYMBOL_GPL(adf_cfg_dev_add);
+
+void adf_cfg_dev_dbgfs_rm(struct adf_accel_dev *accel_dev)
+{
+ struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
+
+ if (!dev_cfg_data)
+ return;
+
+ debugfs_remove(dev_cfg_data->debug);
+ dev_cfg_data->debug = NULL;
+}
static void adf_cfg_section_del_all(struct list_head *head);
@@ -116,7 +131,6 @@ void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev)
down_write(&dev_cfg_data->lock);
adf_cfg_section_del_all(&dev_cfg_data->sec_list);
up_write(&dev_cfg_data->lock);
- debugfs_remove(dev_cfg_data->debug);
kfree(dev_cfg_data);
accel_dev->cfg = NULL;
}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg.h b/drivers/crypto/intel/qat/qat_common/adf_cfg.h
index 376cde61a60e..c0c9052b2213 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg.h
@@ -31,6 +31,8 @@ struct adf_cfg_device_data {
int adf_cfg_dev_add(struct adf_accel_dev *accel_dev);
void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev);
+void adf_cfg_dev_dbgfs_add(struct adf_accel_dev *accel_dev);
+void adf_cfg_dev_dbgfs_rm(struct adf_accel_dev *accel_dev);
int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name);
void adf_cfg_del_all(struct adf_accel_dev *accel_dev);
int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
index 5d8c3bdb258c..3ae1e5caee0e 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
@@ -25,7 +25,15 @@
#define ADF_DC "Dc"
#define ADF_CFG_DC "dc"
#define ADF_CFG_CY "sym;asym"
+#define ADF_CFG_SYM "sym"
+#define ADF_CFG_ASYM "asym"
+#define ADF_CFG_ASYM_SYM "asym;sym"
+#define ADF_CFG_ASYM_DC "asym;dc"
+#define ADF_CFG_DC_ASYM "dc;asym"
+#define ADF_CFG_SYM_DC "sym;dc"
+#define ADF_CFG_DC_SYM "dc;sym"
#define ADF_SERVICES_ENABLED "ServicesEnabled"
+#define ADF_PM_IDLE_SUPPORT "PmIdleSupport"
#define ADF_ETRMGR_COALESCING_ENABLED "InterruptCoalescingEnabled"
#define ADF_ETRMGR_COALESCING_ENABLED_FORMAT \
ADF_ETRMGR_BANK "%d" ADF_ETRMGR_COALESCING_ENABLED
diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
index db79759bee61..b8132eb9bc2a 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
@@ -187,7 +187,7 @@ void qat_uclo_del_obj(struct icp_qat_fw_loader_handle *handle);
int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, void *addr_ptr,
int mem_size);
int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
- void *addr_ptr, u32 mem_size, char *obj_name);
+ void *addr_ptr, u32 mem_size, const char *obj_name);
int qat_uclo_set_cfg_ae_mask(struct icp_qat_fw_loader_handle *handle,
unsigned int cfg_ae_mask);
int adf_init_misc_wq(void);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c
new file mode 100644
index 000000000000..d0a2f892e6eb
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+
+#include <linux/debugfs.h>
+#include "adf_accel_devices.h"
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+#include "adf_dbgfs.h"
+
+/**
+ * adf_dbgfs_init() - add persistent debugfs entries
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * This function creates debugfs entries that are persistent through a device
+ * state change (from up to down or vice versa).
+ */
+void adf_dbgfs_init(struct adf_accel_dev *accel_dev)
+{
+ char name[ADF_DEVICE_NAME_LENGTH];
+ void *ret;
+
+ /* Create dev top level debugfs entry */
+ snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
+ accel_dev->hw_device->dev_class->name,
+ pci_name(accel_dev->accel_pci_dev.pci_dev));
+
+ ret = debugfs_create_dir(name, NULL);
+ if (IS_ERR_OR_NULL(ret))
+ return;
+
+ accel_dev->debugfs_dir = ret;
+
+ adf_cfg_dev_dbgfs_add(accel_dev);
+}
+EXPORT_SYMBOL_GPL(adf_dbgfs_init);
+
+/**
+ * adf_dbgfs_exit() - remove persistent debugfs entries
+ * @accel_dev: Pointer to acceleration device.
+ */
+void adf_dbgfs_exit(struct adf_accel_dev *accel_dev)
+{
+ adf_cfg_dev_dbgfs_rm(accel_dev);
+ debugfs_remove(accel_dev->debugfs_dir);
+}
+EXPORT_SYMBOL_GPL(adf_dbgfs_exit);
+
+/**
+ * adf_dbgfs_add() - add non-persistent debugfs entries
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * This function creates debugfs entries that are not persistent through
+ * a device state change (from up to down or vice versa).
+ */
+void adf_dbgfs_add(struct adf_accel_dev *accel_dev)
+{
+ if (!accel_dev->debugfs_dir)
+ return;
+}
+
+/**
+ * adf_dbgfs_rm() - remove non-persistent debugfs entries
+ * @accel_dev: Pointer to acceleration device.
+ */
+void adf_dbgfs_rm(struct adf_accel_dev *accel_dev)
+{
+ if (!accel_dev->debugfs_dir)
+ return;
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.h b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.h
new file mode 100644
index 000000000000..e0cb2c2a2ed0
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+
+#ifndef ADF_DBGFS_H
+#define ADF_DBGFS_H
+
+#ifdef CONFIG_DEBUG_FS
+void adf_dbgfs_init(struct adf_accel_dev *accel_dev);
+void adf_dbgfs_add(struct adf_accel_dev *accel_dev);
+void adf_dbgfs_rm(struct adf_accel_dev *accel_dev);
+void adf_dbgfs_exit(struct adf_accel_dev *accel_dev);
+#else
+static inline void adf_dbgfs_init(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline void adf_dbgfs_add(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline void adf_dbgfs_rm(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline void adf_dbgfs_exit(struct adf_accel_dev *accel_dev)
+{
+}
+#endif
+#endif
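
A toy module illustrating the split these helpers encode: the top-level directory is persistent (created at probe, removed at remove) while per-state entries are transient (created on device start, torn down on stop). This is not QAT code; the "demo" names are invented and only the debugfs calls are real:

#include <linux/module.h>
#include <linux/debugfs.h>

static struct dentry *demo_dir;	/* persistent, like adf_dbgfs_init() */
static struct dentry *demo_run;	/* transient, like adf_dbgfs_add() */
static u32 demo_value;

static int __init demo_init(void)
{
	/* "probe": top-level dir outlives device state changes */
	demo_dir = debugfs_create_dir("dbgfs_demo", NULL);

	/* "start": transient subtree, removed again on "stop" */
	demo_run = debugfs_create_dir("run", demo_dir);
	debugfs_create_u32("value", 0400, demo_run, &demo_value);
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove(demo_run);	/* "stop": adf_dbgfs_rm()-like */
	debugfs_remove(demo_dir);	/* "remove": adf_dbgfs_exit()-like */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
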
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c
index 7037c0892a8a..34c6cd8e27c0 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c
@@ -23,15 +23,25 @@ struct adf_gen4_pm_data {
static int send_host_msg(struct adf_accel_dev *accel_dev)
{
+ char pm_idle_support_cfg[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {};
void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
+ bool pm_idle_support;
u32 msg;
+ int ret;
msg = ADF_CSR_RD(pmisc, ADF_GEN4_PM_HOST_MSG);
if (msg & ADF_GEN4_PM_MSG_PENDING)
return -EBUSY;
+ adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+ ADF_PM_IDLE_SUPPORT, pm_idle_support_cfg);
+ ret = kstrtobool(pm_idle_support_cfg, &pm_idle_support);
+ if (ret)
+ pm_idle_support = true;
+
/* Send HOST_MSG */
- msg = FIELD_PREP(ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK, PM_SET_MIN);
+ msg = FIELD_PREP(ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK,
+ pm_idle_support ? PM_SET_MIN : PM_NO_CHANGE);
msg |= ADF_GEN4_PM_MSG_PENDING;
ADF_CSR_WR(pmisc, ADF_GEN4_PM_HOST_MSG, msg);
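
The lookup above is deliberately forgiving: a missing or unparsable PmIdleSupport value leaves idle support enabled. A minimal userspace sketch of that default-to-true fallback, with parse_bool() standing in for kstrtobool():

#include <stdio.h>
#include <stdbool.h>
#include <string.h>

static int parse_bool(const char *s, bool *res)
{
	if (!strcmp(s, "1") || !strcmp(s, "y")) { *res = true;  return 0; }
	if (!strcmp(s, "0") || !strcmp(s, "n")) { *res = false; return 0; }
	return -1;	/* unparsable, like kstrtobool() returning -EINVAL */
}

static bool pm_idle_support(const char *cfg)
{
	bool val;

	if (parse_bool(cfg, &val))
		return true;	/* parse failure: keep idle support on */
	return val;
}

int main(void)
{
	printf("%d %d %d\n", pm_idle_support("0"), pm_idle_support("1"),
	       pm_idle_support(""));	/* prints: 0 1 1 */
	return 0;
}
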
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h
index f8f8a9ee29e5..dd112923e006 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h
@@ -37,6 +37,7 @@
#define ADF_GEN4_PM_DEFAULT_IDLE_FILTER (0x0)
#define ADF_GEN4_PM_MAX_IDLE_FILTER (0x7)
+#define ADF_GEN4_PM_DEFAULT_IDLE_SUPPORT (0x1)
int adf_gen4_enable_pm(struct adf_accel_dev *accel_dev);
bool adf_gen4_handle_pm_interrupt(struct adf_accel_dev *accel_dev);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c
index 0985f64ab11a..826179c98524 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_init.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_init.c
@@ -7,6 +7,7 @@
#include "adf_accel_devices.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
+#include "adf_dbgfs.h"
static LIST_HEAD(service_table);
static DEFINE_MUTEX(service_lock);
@@ -216,6 +217,9 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
return -EFAULT;
}
+
+ adf_dbgfs_add(accel_dev);
+
return 0;
}
@@ -240,6 +244,8 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev)
!test_bit(ADF_STATUS_STARTING, &accel_dev->status))
return;
+ adf_dbgfs_rm(accel_dev);
+
clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
index 3eb6611ab1b1..a74d2f930367 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
@@ -78,6 +78,13 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
static const char * const services_operations[] = {
ADF_CFG_CY,
ADF_CFG_DC,
+ ADF_CFG_SYM,
+ ADF_CFG_ASYM,
+ ADF_CFG_ASYM_SYM,
+ ADF_CFG_ASYM_DC,
+ ADF_CFG_DC_ASYM,
+ ADF_CFG_SYM_DC,
+ ADF_CFG_DC_SYM,
};
static ssize_t cfg_services_show(struct device *dev, struct device_attribute *attr,
@@ -145,12 +152,65 @@ static ssize_t cfg_services_store(struct device *dev, struct device_attribute *a
return count;
}
+static ssize_t pm_idle_enabled_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ char pm_idle_enabled[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {};
+ struct adf_accel_dev *accel_dev;
+ int ret;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+ ADF_PM_IDLE_SUPPORT, pm_idle_enabled);
+ if (ret)
+ return sysfs_emit(buf, "1\n");
+
+ return sysfs_emit(buf, "%s\n", pm_idle_enabled);
+}
+
+static ssize_t pm_idle_enabled_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long pm_idle_enabled_cfg_val;
+ struct adf_accel_dev *accel_dev;
+ bool pm_idle_enabled;
+ int ret;
+
+ ret = kstrtobool(buf, &pm_idle_enabled);
+ if (ret)
+ return ret;
+
+ pm_idle_enabled_cfg_val = pm_idle_enabled;
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ if (adf_dev_started(accel_dev)) {
+ dev_info(dev, "Device qat_dev%d must be down to set pm_idle_enabled.\n",
+ accel_dev->accel_id);
+ return -EINVAL;
+ }
+
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
+ ADF_PM_IDLE_SUPPORT, &pm_idle_enabled_cfg_val,
+ ADF_DEC);
+ if (ret)
+ return ret;
+
+ return count;
+}
+static DEVICE_ATTR_RW(pm_idle_enabled);
+
static DEVICE_ATTR_RW(state);
static DEVICE_ATTR_RW(cfg_services);
static struct attribute *qat_attrs[] = {
&dev_attr_state.attr,
&dev_attr_cfg_services.attr,
+ &dev_attr_pm_idle_enabled.attr,
NULL,
};
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
index 4042739bb6fa..a65059e56248 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
@@ -87,8 +87,7 @@ enum icp_qat_capabilities_mask {
ICP_ACCEL_CAPABILITIES_AUTHENTICATION = BIT(3),
ICP_ACCEL_CAPABILITIES_RESERVED_1 = BIT(4),
ICP_ACCEL_CAPABILITIES_COMPRESSION = BIT(5),
- ICP_ACCEL_CAPABILITIES_LZS_COMPRESSION = BIT(6),
- ICP_ACCEL_CAPABILITIES_RAND = BIT(7),
+ /* Bits 6-7 are currently reserved */
ICP_ACCEL_CAPABILITIES_ZUC = BIT(8),
ICP_ACCEL_CAPABILITIES_SHA3 = BIT(9),
/* Bits 10-11 are currently reserved */
diff --git a/drivers/crypto/intel/qat/qat_common/qat_algs.c b/drivers/crypto/intel/qat/qat_common/qat_algs.c
index 538dcbfbcd26..3c4bba4a8779 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_algs.c
@@ -106,7 +106,6 @@ static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
default:
return -EFAULT;
}
- return -EFAULT;
}
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
diff --git a/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c b/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
index 935a7e012946..4128200a9032 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
@@ -170,15 +170,14 @@ static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
}
areq->dst_len = req->ctx.dh->p_size;
+ dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
+ DMA_FROM_DEVICE);
if (req->dst_align) {
scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
areq->dst_len, 1);
kfree_sensitive(req->dst_align);
}
- dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
- DMA_FROM_DEVICE);
-
dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
DMA_TO_DEVICE);
dma_unmap_single(dev, req->phy_out,
@@ -521,12 +520,14 @@ static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
- kfree_sensitive(req->src_align);
-
dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
DMA_TO_DEVICE);
+ kfree_sensitive(req->src_align);
+
areq->dst_len = req->ctx.rsa->key_sz;
+ dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
+ DMA_FROM_DEVICE);
if (req->dst_align) {
scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
areq->dst_len, 1);
@@ -534,9 +535,6 @@ static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
kfree_sensitive(req->dst_align);
}
- dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
- DMA_FROM_DEVICE);
-
dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
DMA_TO_DEVICE);
dma_unmap_single(dev, req->phy_out,
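
The reordering in these two callbacks matters: the output buffer must be unmapped (handed back to the CPU) before its contents are read, and only then copied to the caller's scatterlist. A kernel-style sketch of the corrected ordering; struct demo_req and its fields are invented for illustration:

#include <linux/dma-mapping.h>
#include <crypto/scatterwalk.h>

struct demo_req {
	dma_addr_t out_dma;
	unsigned int out_len;
	void *dst_align;		/* bounce buffer, may be NULL */
	struct scatterlist *dst;
};

static void demo_pke_done(struct device *dev, struct demo_req *req)
{
	/* Give the buffer back to the CPU first... */
	dma_unmap_single(dev, req->out_dma, req->out_len, DMA_FROM_DEVICE);

	/* ...then it is safe to read the device's output. */
	if (req->dst_align)
		scatterwalk_map_and_copy(req->dst_align, req->dst, 0,
					 req->out_len, 1);
}
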
diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
index 3ba8ca20b3d7..ce837bcc1cab 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
@@ -1685,7 +1685,7 @@ static void qat_uclo_del_mof(struct icp_qat_fw_loader_handle *handle)
}
static int qat_uclo_seek_obj_inside_mof(struct icp_qat_mof_handle *mobj_handle,
- char *obj_name, char **obj_ptr,
+ const char *obj_name, char **obj_ptr,
unsigned int *obj_size)
{
struct icp_qat_mof_objhdr *obj_hdr = mobj_handle->obj_table.obj_hdr;
@@ -1837,8 +1837,8 @@ static int qat_uclo_check_mof_format(struct icp_qat_mof_file_hdr *mof_hdr)
static int qat_uclo_map_mof_obj(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_mof_file_hdr *mof_ptr,
- u32 mof_size, char *obj_name, char **obj_ptr,
- unsigned int *obj_size)
+ u32 mof_size, const char *obj_name,
+ char **obj_ptr, unsigned int *obj_size)
{
struct icp_qat_mof_chunkhdr *mof_chunkhdr;
unsigned int file_id = mof_ptr->file_id;
@@ -1888,7 +1888,7 @@ static int qat_uclo_map_mof_obj(struct icp_qat_fw_loader_handle *handle,
}
int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
- void *addr_ptr, u32 mem_size, char *obj_name)
+ void *addr_ptr, u32 mem_size, const char *obj_name)
{
char *obj_addr;
u32 obj_size;
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
index e18860ab5c8e..1e748e8ce12d 100644
--- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
@@ -16,6 +16,7 @@
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_cfg.h>
+#include <adf_dbgfs.h>
#include "adf_dh895xcc_hw_data.h"
static const struct pci_device_id adf_pci_tbl[] = {
@@ -65,8 +66,8 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
kfree(accel_dev->hw_device);
accel_dev->hw_device = NULL;
}
+ adf_dbgfs_exit(accel_dev);
adf_cfg_dev_remove(accel_dev);
- debugfs_remove(accel_dev->debugfs_dir);
adf_devmgr_rm_dev(accel_dev, NULL);
}
@@ -75,7 +76,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct adf_accel_dev *accel_dev;
struct adf_accel_pci *accel_pci_dev;
struct adf_hw_device_data *hw_data;
- char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
unsigned long bar_mask;
int ret;
@@ -140,12 +140,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_err;
}
- /* Create dev top level debugfs entry */
- snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
- hw_data->dev_class->name, pci_name(pdev));
-
- accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
-
/* Create device configuration table */
ret = adf_cfg_dev_add(accel_dev);
if (ret)
@@ -199,6 +193,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_err_free_reg;
}
+ adf_dbgfs_init(accel_dev);
+
ret = adf_dev_up(accel_dev, true);
if (ret)
goto out_err_dev_stop;
diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c
index 96854a1cd87e..fefb85ceaeb9 100644
--- a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c
@@ -16,6 +16,7 @@
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_cfg.h>
+#include <adf_dbgfs.h>
#include "adf_dh895xccvf_hw_data.h"
static const struct pci_device_id adf_pci_tbl[] = {
@@ -64,8 +65,8 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
kfree(accel_dev->hw_device);
accel_dev->hw_device = NULL;
}
+ adf_dbgfs_exit(accel_dev);
adf_cfg_dev_remove(accel_dev);
- debugfs_remove(accel_dev->debugfs_dir);
pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
adf_devmgr_rm_dev(accel_dev, pf);
}
@@ -76,7 +77,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct adf_accel_dev *pf;
struct adf_accel_pci *accel_pci_dev;
struct adf_hw_device_data *hw_data;
- char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
unsigned long bar_mask;
int ret;
@@ -123,12 +123,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
accel_pci_dev->sku = hw_data->get_sku(hw_data);
- /* Create dev top level debugfs entry */
- snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
- hw_data->dev_class->name, pci_name(pdev));
-
- accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
-
/* Create device configuration table */
ret = adf_cfg_dev_add(accel_dev);
if (ret)
@@ -173,6 +167,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Completion for VF2PF request/response message exchange */
init_completion(&accel_dev->vf.msg_received);
+ adf_dbgfs_init(accel_dev);
+
ret = adf_dev_up(accel_dev, false);
if (ret)
goto out_err_dev_stop;
diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c
index c6f2fa753b7c..0f37dfd42d85 100644
--- a/drivers/crypto/marvell/cesa/cipher.c
+++ b/drivers/crypto/marvell/cesa/cipher.c
@@ -297,7 +297,7 @@ static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int len)
{
- struct mv_cesa_des_ctx *ctx = crypto_skcipher_ctx(cipher);
+ struct mv_cesa_des3_ctx *ctx = crypto_skcipher_ctx(cipher);
int err;
err = verify_skcipher_des3_key(cipher, key);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
index 6019066a6451..46b778bbbee4 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
@@ -40,11 +40,26 @@ enum otx2_cpt_eng_type {
};
/* Take mbox id from end of CPT mbox range in AF (range 0xA00 - 0xBFF) */
+#define MBOX_MSG_RX_INLINE_IPSEC_LF_CFG 0xBFE
#define MBOX_MSG_GET_ENG_GRP_NUM 0xBFF
#define MBOX_MSG_GET_CAPS 0xBFD
#define MBOX_MSG_GET_KVF_LIMITS 0xBFC
/*
+ * Message request to configure the CPT LF for inline inbound IPsec.
+ * This message is only used between CPT PF <-> CPT VF.
+ */
+struct otx2_cpt_rx_inline_lf_cfg {
+ struct mbox_msghdr hdr;
+ u16 sso_pf_func;
+ u16 param1;
+ u16 param2;
+ u16 opcode;
+ u32 credit;
+ u32 reserved;
+};
+
+/*
* Message request and response to get engine group number
* which has attached a given type of engines (SE, AE, IE)
 * These messages are only used between CPT PF <=> CPT VF
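
This patch only adds the PF-side handler for the new message; no VF-side sender is included. As a hypothetical sketch, a VF could issue it with the mailbox helpers used throughout this driver, leaving opcode and credit zeroed so the PF falls back to its built-in defaults:

static int demo_send_rx_inline_lf_cfg(struct otx2_cptvf_dev *cptvf,
				      u16 sso_pf_func)
{
	struct otx2_cpt_rx_inline_lf_cfg *req;

	req = (struct otx2_cpt_rx_inline_lf_cfg *)
	      otx2_mbox_alloc_msg_rsp(&cptvf->pfvf_mbox, 0, sizeof(*req),
				      sizeof(struct msg_rsp));
	if (!req)
		return -EFAULT;

	memset(req, 0, sizeof(*req));
	req->hdr.id = MBOX_MSG_RX_INLINE_IPSEC_LF_CFG;
	req->hdr.sig = OTX2_MBOX_REQ_SIG;
	req->sso_pf_func = sso_pf_func;	/* SSO PF_FUNC to be notified */

	return otx2_cpt_send_mbox_msg(&cptvf->pfvf_mbox, cptvf->pdev);
}
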
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
index 115997475beb..273ee5352a50 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
@@ -141,6 +141,8 @@ int otx2_cpt_attach_rscrs_msg(struct otx2_cptlfs_info *lfs)
req->hdr.sig = OTX2_MBOX_REQ_SIG;
req->hdr.pcifunc = 0;
req->cptlfs = lfs->lfs_num;
+ req->cpt_blkaddr = lfs->blkaddr;
+ req->modify = 1;
ret = otx2_cpt_send_mbox_msg(mbox, lfs->pdev);
if (ret)
return ret;
@@ -168,6 +170,7 @@ int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs)
req->hdr.id = MBOX_MSG_DETACH_RESOURCES;
req->hdr.sig = OTX2_MBOX_REQ_SIG;
req->hdr.pcifunc = 0;
+ req->cptlfs = 1;
ret = otx2_cpt_send_mbox_msg(mbox, lfs->pdev);
if (ret)
return ret;
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
index 71e5f79431af..6edd27ff8c4e 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
@@ -13,10 +13,10 @@ static void cptlf_do_set_done_time_wait(struct otx2_cptlf_info *lf,
{
union otx2_cptx_lf_done_wait done_wait;
- done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
- OTX2_CPT_LF_DONE_WAIT);
+ done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr,
+ lf->slot, OTX2_CPT_LF_DONE_WAIT);
done_wait.s.time_wait = time_wait;
- otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
OTX2_CPT_LF_DONE_WAIT, done_wait.u);
}
@@ -24,10 +24,10 @@ static void cptlf_do_set_done_num_wait(struct otx2_cptlf_info *lf, int num_wait)
{
union otx2_cptx_lf_done_wait done_wait;
- done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
- OTX2_CPT_LF_DONE_WAIT);
+ done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr,
+ lf->slot, OTX2_CPT_LF_DONE_WAIT);
done_wait.s.num_wait = num_wait;
- otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
OTX2_CPT_LF_DONE_WAIT, done_wait.u);
}
@@ -147,7 +147,7 @@ static void cptlf_set_misc_intrs(struct otx2_cptlfs_info *lfs, u8 enable)
irq_misc.s.nwrp = 0x1;
for (slot = 0; slot < lfs->lfs_num; slot++)
- otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot, reg,
+ otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot, reg,
irq_misc.u);
}
@@ -157,7 +157,7 @@ static void cptlf_enable_intrs(struct otx2_cptlfs_info *lfs)
/* Enable done interrupts */
for (slot = 0; slot < lfs->lfs_num; slot++)
- otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot,
+ otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot,
OTX2_CPT_LF_DONE_INT_ENA_W1S, 0x1);
/* Enable Misc interrupts */
cptlf_set_misc_intrs(lfs, true);
@@ -168,7 +168,7 @@ static void cptlf_disable_intrs(struct otx2_cptlfs_info *lfs)
int slot;
for (slot = 0; slot < lfs->lfs_num; slot++)
- otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot,
+ otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot,
OTX2_CPT_LF_DONE_INT_ENA_W1C, 0x1);
cptlf_set_misc_intrs(lfs, false);
}
@@ -177,7 +177,7 @@ static inline int cptlf_read_done_cnt(struct otx2_cptlf_info *lf)
{
union otx2_cptx_lf_done irq_cnt;
- irq_cnt.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ irq_cnt.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
OTX2_CPT_LF_DONE);
return irq_cnt.s.done;
}
@@ -189,8 +189,8 @@ static irqreturn_t cptlf_misc_intr_handler(int __always_unused irq, void *arg)
struct device *dev;
dev = &lf->lfs->pdev->dev;
- irq_misc.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
- OTX2_CPT_LF_MISC_INT);
+ irq_misc.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr,
+ lf->slot, OTX2_CPT_LF_MISC_INT);
irq_misc_ack.u = 0x0;
if (irq_misc.s.fault) {
@@ -222,7 +222,7 @@ static irqreturn_t cptlf_misc_intr_handler(int __always_unused irq, void *arg)
}
/* Acknowledge interrupts */
- otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
OTX2_CPT_LF_MISC_INT, irq_misc_ack.u);
return IRQ_HANDLED;
@@ -237,13 +237,13 @@ static irqreturn_t cptlf_done_intr_handler(int irq, void *arg)
/* Read the number of completed requests */
irq_cnt = cptlf_read_done_cnt(lf);
if (irq_cnt) {
- done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0,
+ done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr,
lf->slot, OTX2_CPT_LF_DONE_WAIT);
/* Acknowledge the number of completed requests */
- otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
OTX2_CPT_LF_DONE_ACK, irq_cnt);
- otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
OTX2_CPT_LF_DONE_WAIT, done_wait.u);
if (unlikely(!lf->wqe)) {
dev_err(&lf->lfs->pdev->dev, "No work for LF %d\n",
@@ -393,7 +393,7 @@ int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask, int pri,
OTX2_CPT_LMT_LF_LMTLINEX(0));
lfs->lf[slot].ioreg = lfs->reg_base +
- OTX2_CPT_RVU_FUNC_ADDR_S(BLKADDR_CPT0, slot,
+ OTX2_CPT_RVU_FUNC_ADDR_S(lfs->blkaddr, slot,
OTX2_CPT_LF_NQX(0));
}
/* Send request to attach LFs */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
index 4fcaf61a70e3..5302fe3d0e6f 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
@@ -180,7 +180,7 @@ static inline void otx2_cptlf_set_iqueues_base_addr(
for (slot = 0; slot < lfs->lfs_num; slot++) {
lf_q_base.u = lfs->lf[slot].iqueue.dma_addr;
- otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot,
+ otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot,
OTX2_CPT_LF_Q_BASE, lf_q_base.u);
}
}
@@ -191,7 +191,7 @@ static inline void otx2_cptlf_do_set_iqueue_size(struct otx2_cptlf_info *lf)
lf_q_size.s.size_div40 = OTX2_CPT_SIZE_DIV40 +
OTX2_CPT_EXTRA_SIZE_DIV40;
- otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
OTX2_CPT_LF_Q_SIZE, lf_q_size.u);
}
@@ -207,15 +207,16 @@ static inline void otx2_cptlf_do_disable_iqueue(struct otx2_cptlf_info *lf)
{
union otx2_cptx_lf_ctl lf_ctl = { .u = 0x0 };
union otx2_cptx_lf_inprog lf_inprog;
+ u8 blkaddr = lf->lfs->blkaddr;
int timeout = 20;
/* Disable instructions enqueuing */
- otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ otx2_cpt_write64(lf->lfs->reg_base, blkaddr, lf->slot,
OTX2_CPT_LF_CTL, lf_ctl.u);
/* Wait for instruction queue to become empty */
do {
- lf_inprog.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0,
+ lf_inprog.u = otx2_cpt_read64(lf->lfs->reg_base, blkaddr,
lf->slot, OTX2_CPT_LF_INPROG);
if (!lf_inprog.s.inflight)
break;
@@ -234,7 +235,7 @@ static inline void otx2_cptlf_do_disable_iqueue(struct otx2_cptlf_info *lf)
* the queue should be empty at this point
*/
lf_inprog.s.eena = 0x0;
- otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ otx2_cpt_write64(lf->lfs->reg_base, blkaddr, lf->slot,
OTX2_CPT_LF_INPROG, lf_inprog.u);
}
@@ -249,14 +250,15 @@ static inline void otx2_cptlf_disable_iqueues(struct otx2_cptlfs_info *lfs)
static inline void otx2_cptlf_set_iqueue_enq(struct otx2_cptlf_info *lf,
bool enable)
{
+ u8 blkaddr = lf->lfs->blkaddr;
union otx2_cptx_lf_ctl lf_ctl;
- lf_ctl.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ lf_ctl.u = otx2_cpt_read64(lf->lfs->reg_base, blkaddr, lf->slot,
OTX2_CPT_LF_CTL);
/* Set iqueue's enqueuing */
lf_ctl.s.ena = enable ? 0x1 : 0x0;
- otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ otx2_cpt_write64(lf->lfs->reg_base, blkaddr, lf->slot,
OTX2_CPT_LF_CTL, lf_ctl.u);
}
@@ -269,13 +271,14 @@ static inline void otx2_cptlf_set_iqueue_exec(struct otx2_cptlf_info *lf,
bool enable)
{
union otx2_cptx_lf_inprog lf_inprog;
+ u8 blkaddr = lf->lfs->blkaddr;
- lf_inprog.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ lf_inprog.u = otx2_cpt_read64(lf->lfs->reg_base, blkaddr, lf->slot,
OTX2_CPT_LF_INPROG);
/* Set iqueue's execution */
lf_inprog.s.eena = enable ? 0x1 : 0x0;
- otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ otx2_cpt_write64(lf->lfs->reg_base, blkaddr, lf->slot,
OTX2_CPT_LF_INPROG, lf_inprog.u);
}
@@ -364,6 +367,18 @@ static inline bool otx2_cptlf_started(struct otx2_cptlfs_info *lfs)
return atomic_read(&lfs->state) == OTX2_CPTLF_STARTED;
}
+static inline void otx2_cptlf_set_dev_info(struct otx2_cptlfs_info *lfs,
+ struct pci_dev *pdev,
+ void __iomem *reg_base,
+ struct otx2_mbox *mbox,
+ int blkaddr)
+{
+ lfs->pdev = pdev;
+ lfs->reg_base = reg_base;
+ lfs->mbox = mbox;
+ lfs->blkaddr = blkaddr;
+}
+
int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_msk, int pri,
int lfs_num);
void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
index 936174b012e8..a209ec5af381 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
@@ -31,6 +31,7 @@ struct otx2_cptpf_dev {
struct otx2_cptvf_info vf[OTX2_CPT_MAX_VFS_NUM];
struct otx2_cpt_eng_grps eng_grps;/* Engine groups information */
struct otx2_cptlfs_info lfs; /* CPT LFs attached to this PF */
+ struct otx2_cptlfs_info cpt1_lfs; /* CPT1 LFs attached to this PF */
/* HW capabilities for each engine type */
union otx2_cpt_eng_caps eng_caps[OTX2_CPT_MAX_ENG_TYPES];
bool is_eng_caps_discovered;
@@ -40,6 +41,9 @@ struct otx2_cptpf_dev {
struct work_struct afpf_mbox_work;
struct workqueue_struct *afpf_mbox_wq;
+ struct otx2_mbox afpf_mbox_up;
+ struct work_struct afpf_mbox_up_work;
+
/* VF <=> PF mbox */
struct otx2_mbox vfpf_mbox;
struct workqueue_struct *vfpf_mbox_wq;
@@ -52,8 +56,10 @@ struct otx2_cptpf_dev {
u8 pf_id; /* RVU PF number */
u8 max_vfs; /* Maximum number of VFs supported by CPT */
u8 enabled_vfs; /* Number of enabled VFs */
+ u8 sso_pf_func_ovrd; /* SSO PF_FUNC override bit */
u8 kvf_limits; /* Kernel crypto limits */
bool has_cpt1;
+ u8 rsrc_req_blkaddr;
/* Devlink */
struct devlink *dl;
@@ -61,6 +67,7 @@ struct otx2_cptpf_dev {
irqreturn_t otx2_cptpf_afpf_mbox_intr(int irq, void *arg);
void otx2_cptpf_afpf_mbox_handler(struct work_struct *work);
+void otx2_cptpf_afpf_mbox_up_handler(struct work_struct *work);
irqreturn_t otx2_cptpf_vfpf_mbox_intr(int irq, void *arg);
void otx2_cptpf_vfpf_mbox_handler(struct work_struct *work);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
index 30e6acfc93d9..e34223daa327 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
@@ -13,6 +13,8 @@
#define OTX2_CPT_DRV_NAME "rvu_cptpf"
#define OTX2_CPT_DRV_STRING "Marvell RVU CPT Physical Function Driver"
+#define CPT_UC_RID_CN9K_B0 1
+
static void cptpf_enable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
int num_vfs)
{
@@ -473,10 +475,19 @@ static int cptpf_afpf_mbox_init(struct otx2_cptpf_dev *cptpf)
if (err)
goto error;
+ err = otx2_mbox_init(&cptpf->afpf_mbox_up, cptpf->afpf_mbox_base,
+ pdev, cptpf->reg_base, MBOX_DIR_PFAF_UP, 1);
+ if (err)
+ goto mbox_cleanup;
+
INIT_WORK(&cptpf->afpf_mbox_work, otx2_cptpf_afpf_mbox_handler);
+ INIT_WORK(&cptpf->afpf_mbox_up_work, otx2_cptpf_afpf_mbox_up_handler);
mutex_init(&cptpf->lock);
+
return 0;
+mbox_cleanup:
+ otx2_mbox_destroy(&cptpf->afpf_mbox);
error:
destroy_workqueue(cptpf->afpf_mbox_wq);
return err;
@@ -486,6 +497,33 @@ static void cptpf_afpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
{
destroy_workqueue(cptpf->afpf_mbox_wq);
otx2_mbox_destroy(&cptpf->afpf_mbox);
+ otx2_mbox_destroy(&cptpf->afpf_mbox_up);
+}
+
+static ssize_t sso_pf_func_ovrd_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
+
+	return sysfs_emit(buf, "%d\n", cptpf->sso_pf_func_ovrd);
+}
+
+static ssize_t sso_pf_func_ovrd_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
+ u8 sso_pf_func_ovrd;
+
+	if (cptpf->pdev->revision != CPT_UC_RID_CN9K_B0)
+ return count;
+
+ if (kstrtou8(buf, 0, &sso_pf_func_ovrd))
+ return -EINVAL;
+
+ cptpf->sso_pf_func_ovrd = sso_pf_func_ovrd;
+
+ return count;
}
static ssize_t kvf_limits_show(struct device *dev,
@@ -518,8 +556,11 @@ static ssize_t kvf_limits_store(struct device *dev,
}
static DEVICE_ATTR_RW(kvf_limits);
+static DEVICE_ATTR_RW(sso_pf_func_ovrd);
+
static struct attribute *cptpf_attrs[] = {
&dev_attr_kvf_limits.attr,
+ &dev_attr_sso_pf_func_ovrd.attr,
NULL
};
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
index dee0aa60b698..480b3720f15a 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
@@ -5,6 +5,20 @@
#include "otx2_cptpf.h"
#include "rvu_reg.h"
+/* Fast-path IPsec opcode with in-place processing */
+#define CPT_INLINE_RX_OPCODE (0x26 | (1 << 6))
+#define CN10K_CPT_INLINE_RX_OPCODE (0x29 | (1 << 6))
+
+#define cpt_inline_rx_opcode(pdev) \
+({ \
+ u8 opcode; \
+ if (is_dev_otx2(pdev)) \
+ opcode = CPT_INLINE_RX_OPCODE; \
+ else \
+ opcode = CN10K_CPT_INLINE_RX_OPCODE; \
+ (opcode); \
+})
+
/*
 * CPT PF driver version; it will be incremented by 1 for every feature
* addition in CPT mailbox messages.
@@ -112,6 +126,139 @@ static int handle_msg_kvf_limits(struct otx2_cptpf_dev *cptpf,
return 0;
}
+static int send_inline_ipsec_inbound_msg(struct otx2_cptpf_dev *cptpf,
+ int sso_pf_func, u8 slot)
+{
+ struct cpt_inline_ipsec_cfg_msg *req;
+ struct pci_dev *pdev = cptpf->pdev;
+
+ req = (struct cpt_inline_ipsec_cfg_msg *)
+ otx2_mbox_alloc_msg_rsp(&cptpf->afpf_mbox, 0,
+ sizeof(*req), sizeof(struct msg_rsp));
+	if (!req) {
+		dev_err(&pdev->dev, "Failed to allocate RVU mbox message.\n");
+ return -EFAULT;
+ }
+ memset(req, 0, sizeof(*req));
+ req->hdr.id = MBOX_MSG_CPT_INLINE_IPSEC_CFG;
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptpf->pf_id, 0);
+ req->dir = CPT_INLINE_INBOUND;
+ req->slot = slot;
+ req->sso_pf_func_ovrd = cptpf->sso_pf_func_ovrd;
+ req->sso_pf_func = sso_pf_func;
+ req->enable = 1;
+
+ return otx2_cpt_send_mbox_msg(&cptpf->afpf_mbox, pdev);
+}
+
+static int rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf, u8 egrp,
+ struct otx2_cpt_rx_inline_lf_cfg *req)
+{
+ struct nix_inline_ipsec_cfg *nix_req;
+ struct pci_dev *pdev = cptpf->pdev;
+ int ret;
+
+ nix_req = (struct nix_inline_ipsec_cfg *)
+ otx2_mbox_alloc_msg_rsp(&cptpf->afpf_mbox, 0,
+ sizeof(*nix_req),
+ sizeof(struct msg_rsp));
+	if (!nix_req) {
+		dev_err(&pdev->dev, "Failed to allocate RVU mbox message.\n");
+ return -EFAULT;
+ }
+ memset(nix_req, 0, sizeof(*nix_req));
+ nix_req->hdr.id = MBOX_MSG_NIX_INLINE_IPSEC_CFG;
+ nix_req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ nix_req->enable = 1;
+ if (!req->credit || req->credit > OTX2_CPT_INST_QLEN_MSGS)
+ nix_req->cpt_credit = OTX2_CPT_INST_QLEN_MSGS - 1;
+ else
+ nix_req->cpt_credit = req->credit - 1;
+ nix_req->gen_cfg.egrp = egrp;
+ if (req->opcode)
+ nix_req->gen_cfg.opcode = req->opcode;
+ else
+ nix_req->gen_cfg.opcode = cpt_inline_rx_opcode(pdev);
+ nix_req->gen_cfg.param1 = req->param1;
+ nix_req->gen_cfg.param2 = req->param2;
+ nix_req->inst_qsel.cpt_pf_func = OTX2_CPT_RVU_PFFUNC(cptpf->pf_id, 0);
+ nix_req->inst_qsel.cpt_slot = 0;
+ ret = otx2_cpt_send_mbox_msg(&cptpf->afpf_mbox, pdev);
+ if (ret)
+ return ret;
+
+ if (cptpf->has_cpt1) {
+ ret = send_inline_ipsec_inbound_msg(cptpf, req->sso_pf_func, 1);
+ if (ret)
+ return ret;
+ }
+
+ return send_inline_ipsec_inbound_msg(cptpf, req->sso_pf_func, 0);
+}
+
+static int handle_msg_rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf,
+ struct mbox_msghdr *req)
+{
+ struct otx2_cpt_rx_inline_lf_cfg *cfg_req;
+ u8 egrp;
+ int ret;
+
+ cfg_req = (struct otx2_cpt_rx_inline_lf_cfg *)req;
+ if (cptpf->lfs.lfs_num) {
+ dev_err(&cptpf->pdev->dev,
+ "LF is already configured for RX inline ipsec.\n");
+ return -EEXIST;
+ }
+ /*
+	 * Allow LFs to execute requests destined only for the IE_TYPES
+	 * engine group, and set each LF's queue priority to high.
+ */
+ egrp = otx2_cpt_get_eng_grp(&cptpf->eng_grps, OTX2_CPT_IE_TYPES);
+ if (egrp == OTX2_CPT_INVALID_CRYPTO_ENG_GRP) {
+ dev_err(&cptpf->pdev->dev,
+ "Engine group for inline ipsec is not available\n");
+ return -ENOENT;
+ }
+
+ otx2_cptlf_set_dev_info(&cptpf->lfs, cptpf->pdev, cptpf->reg_base,
+ &cptpf->afpf_mbox, BLKADDR_CPT0);
+ ret = otx2_cptlf_init(&cptpf->lfs, 1 << egrp, OTX2_CPT_QUEUE_HI_PRIO,
+ 1);
+ if (ret) {
+ dev_err(&cptpf->pdev->dev,
+ "LF configuration failed for RX inline ipsec.\n");
+ return ret;
+ }
+
+ if (cptpf->has_cpt1) {
+ cptpf->rsrc_req_blkaddr = BLKADDR_CPT1;
+ otx2_cptlf_set_dev_info(&cptpf->cpt1_lfs, cptpf->pdev,
+ cptpf->reg_base, &cptpf->afpf_mbox,
+ BLKADDR_CPT1);
+ ret = otx2_cptlf_init(&cptpf->cpt1_lfs, 1 << egrp,
+ OTX2_CPT_QUEUE_HI_PRIO, 1);
+ if (ret) {
+ dev_err(&cptpf->pdev->dev,
+ "LF configuration failed for RX inline ipsec.\n");
+ goto lf_cleanup;
+ }
+ cptpf->rsrc_req_blkaddr = 0;
+ }
+
+ ret = rx_inline_ipsec_lf_cfg(cptpf, egrp, cfg_req);
+ if (ret)
+ goto lf1_cleanup;
+
+ return 0;
+
+lf1_cleanup:
+ otx2_cptlf_shutdown(&cptpf->cpt1_lfs);
+lf_cleanup:
+ otx2_cptlf_shutdown(&cptpf->lfs);
+ return ret;
+}
+
static int cptpf_handle_vf_req(struct otx2_cptpf_dev *cptpf,
struct otx2_cptvf_info *vf,
struct mbox_msghdr *req, int size)
@@ -132,6 +279,10 @@ static int cptpf_handle_vf_req(struct otx2_cptpf_dev *cptpf,
case MBOX_MSG_GET_KVF_LIMITS:
err = handle_msg_kvf_limits(cptpf, vf, req);
break;
+ case MBOX_MSG_RX_INLINE_IPSEC_LF_CFG:
+ err = handle_msg_rx_inline_ipsec_lf_cfg(cptpf, req);
+ break;
+
default:
err = forward_to_af(cptpf, vf, req, size);
break;
@@ -224,14 +375,28 @@ void otx2_cptpf_vfpf_mbox_handler(struct work_struct *work)
irqreturn_t otx2_cptpf_afpf_mbox_intr(int __always_unused irq, void *arg)
{
struct otx2_cptpf_dev *cptpf = arg;
+ struct otx2_mbox_dev *mdev;
+ struct otx2_mbox *mbox;
+ struct mbox_hdr *hdr;
u64 intr;
/* Read the interrupt bits */
intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT);
if (intr & 0x1ULL) {
- /* Schedule work queue function to process the MBOX request */
- queue_work(cptpf->afpf_mbox_wq, &cptpf->afpf_mbox_work);
+ mbox = &cptpf->afpf_mbox;
+ mdev = &mbox->dev[0];
+ hdr = mdev->mbase + mbox->rx_start;
+ if (hdr->num_msgs)
+ /* Schedule work queue function to process the MBOX request */
+ queue_work(cptpf->afpf_mbox_wq, &cptpf->afpf_mbox_work);
+
+ mbox = &cptpf->afpf_mbox_up;
+ mdev = &mbox->dev[0];
+ hdr = mdev->mbase + mbox->rx_start;
+ if (hdr->num_msgs)
+			/* Schedule work queue function to process the MBOX UP request */
+ queue_work(cptpf->afpf_mbox_wq, &cptpf->afpf_mbox_up_work);
/* Clear and ack the interrupt */
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT,
0x1ULL);
@@ -242,6 +407,7 @@ irqreturn_t otx2_cptpf_afpf_mbox_intr(int __always_unused irq, void *arg)
static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
struct mbox_msghdr *msg)
{
+ struct otx2_cptlfs_info *lfs = &cptpf->lfs;
struct device *dev = &cptpf->pdev->dev;
struct cpt_rd_wr_reg_msg *rsp_rd_wr;
@@ -254,6 +420,8 @@ static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
msg->sig, msg->id);
return;
}
+ if (cptpf->rsrc_req_blkaddr == BLKADDR_CPT1)
+ lfs = &cptpf->cpt1_lfs;
switch (msg->id) {
case MBOX_MSG_READY:
@@ -273,11 +441,14 @@ static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
break;
case MBOX_MSG_ATTACH_RESOURCES:
if (!msg->rc)
- cptpf->lfs.are_lfs_attached = 1;
+ lfs->are_lfs_attached = 1;
break;
case MBOX_MSG_DETACH_RESOURCES:
if (!msg->rc)
- cptpf->lfs.are_lfs_attached = 0;
+ lfs->are_lfs_attached = 0;
+ break;
+ case MBOX_MSG_CPT_INLINE_IPSEC_CFG:
+ case MBOX_MSG_NIX_INLINE_IPSEC_CFG:
break;
default:
@@ -367,3 +538,71 @@ void otx2_cptpf_afpf_mbox_handler(struct work_struct *work)
}
otx2_mbox_reset(afpf_mbox, 0);
}
+
+static void handle_msg_cpt_inst_lmtst(struct otx2_cptpf_dev *cptpf,
+ struct mbox_msghdr *msg)
+{
+ struct cpt_inst_lmtst_req *req = (struct cpt_inst_lmtst_req *)msg;
+ struct otx2_cptlfs_info *lfs = &cptpf->lfs;
+ struct msg_rsp *rsp;
+
+ if (cptpf->lfs.lfs_num)
+ lfs->ops->send_cmd((union otx2_cpt_inst_s *)req->inst, 1,
+ &lfs->lf[0]);
+
+ rsp = (struct msg_rsp *)otx2_mbox_alloc_msg(&cptpf->afpf_mbox_up, 0,
+ sizeof(*rsp));
+ if (!rsp)
+ return;
+
+ rsp->hdr.id = msg->id;
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
+ rsp->hdr.pcifunc = 0;
+ rsp->hdr.rc = 0;
+}
+
+static void process_afpf_mbox_up_msg(struct otx2_cptpf_dev *cptpf,
+ struct mbox_msghdr *msg)
+{
+ if (msg->id >= MBOX_MSG_MAX) {
+ dev_err(&cptpf->pdev->dev,
+ "MBOX msg with unknown ID %d\n", msg->id);
+ return;
+ }
+
+ switch (msg->id) {
+ case MBOX_MSG_CPT_INST_LMTST:
+ handle_msg_cpt_inst_lmtst(cptpf, msg);
+ break;
+ default:
+ otx2_reply_invalid_msg(&cptpf->afpf_mbox_up, 0, 0, msg->id);
+ }
+}
+
+void otx2_cptpf_afpf_mbox_up_handler(struct work_struct *work)
+{
+ struct otx2_cptpf_dev *cptpf;
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg;
+ struct otx2_mbox *mbox;
+ int offset, i;
+
+ cptpf = container_of(work, struct otx2_cptpf_dev, afpf_mbox_up_work);
+ mbox = &cptpf->afpf_mbox_up;
+ mdev = &mbox->dev[0];
+ /* Sync mbox data into memory */
+ smp_wmb();
+
+ rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+
+ for (i = 0; i < rsp_hdr->num_msgs; i++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + offset);
+
+ process_afpf_mbox_up_msg(cptpf, msg);
+
+ offset = mbox->rx_start + msg->next_msgoff;
+ }
+ otx2_mbox_msg_send(mbox, 0);
+}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
index 1577986677f6..1958b797a421 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -1504,11 +1504,9 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
if (ret)
goto delete_grps;
- lfs->pdev = pdev;
- lfs->reg_base = cptpf->reg_base;
- lfs->mbox = &cptpf->afpf_mbox;
- lfs->blkaddr = BLKADDR_CPT0;
- ret = otx2_cptlf_init(&cptpf->lfs, OTX2_CPT_ALL_ENG_GRPS_MASK,
+ otx2_cptlf_set_dev_info(lfs, cptpf->pdev, cptpf->reg_base,
+ &cptpf->afpf_mbox, BLKADDR_CPT0);
+ ret = otx2_cptlf_init(lfs, OTX2_CPT_ALL_ENG_GRPS_MASK,
OTX2_CPT_QUEUE_HI_PRIO, 1);
if (ret)
goto delete_grps;
@@ -1562,7 +1560,7 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
free_result:
kfree(result);
lf_cleanup:
- otx2_cptlf_shutdown(&cptpf->lfs);
+ otx2_cptlf_shutdown(lfs);
delete_grps:
delete_engine_grps(pdev, &cptpf->eng_grps);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf.h b/drivers/crypto/marvell/octeontx2/otx2_cptvf.h
index 4207e2236903..994291e90da1 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf.h
@@ -19,6 +19,7 @@ struct otx2_cptvf_dev {
struct otx2_mbox pfvf_mbox;
struct work_struct pfvf_mbox_work;
struct workqueue_struct *pfvf_mbox_wq;
+ int blkaddr;
void *bbuf_base;
unsigned long cap_flag;
};
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
index 6023a7adb70c..bac729c885f9 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
@@ -277,12 +277,11 @@ static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf)
if (ret)
return ret;
- lfs->reg_base = cptvf->reg_base;
- lfs->pdev = cptvf->pdev;
- lfs->mbox = &cptvf->pfvf_mbox;
-
lfs_num = cptvf->lfs.kvf_limits ? cptvf->lfs.kvf_limits :
num_online_cpus();
+
+ otx2_cptlf_set_dev_info(lfs, cptvf->pdev, cptvf->reg_base,
+ &cptvf->pfvf_mbox, cptvf->blkaddr);
ret = otx2_cptlf_init(lfs, eng_grp_msk, OTX2_CPT_QUEUE_HI_PRIO,
lfs_num);
if (ret)
@@ -380,6 +379,7 @@ static int otx2_cptvf_probe(struct pci_dev *pdev,
if (ret)
goto destroy_pfvf_mbox;
+ cptvf->blkaddr = BLKADDR_CPT0;
/* Initialize CPT LFs */
ret = cptvf_lf_init(cptvf);
if (ret)
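
Both the PF and VF paths above now funnel their LF bookkeeping through otx2_cptlf_set_dev_info(), added to otx2_cptlf.h in this series (see the diffstat). A minimal sketch of such a helper, assuming it does nothing beyond recording the handles that the removed open-coded assignments used to set:

    /* Sketch only; the real helper lives in otx2_cptlf.h. Field names are
     * taken from the assignments removed above. */
    static inline void otx2_cptlf_set_dev_info(struct otx2_cptlfs_info *lfs,
                                               struct pci_dev *pdev,
                                               void __iomem *reg_base,
                                               struct otx2_mbox *mbox,
                                               int blkaddr)
    {
            lfs->pdev = pdev;
            lfs->reg_base = reg_base;
            lfs->mbox = mbox;
            lfs->blkaddr = blkaddr;
    }

Centralizing this also makes the blkaddr parameter explicit, which is what lets the VF pin BLKADDR_CPT0 while the PF mailbox code above can direct requests at CPT1 for inline-IPsec traffic.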
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 20d0dcd50344..4f6ca229ee5e 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
@@ -1795,11 +1796,9 @@ static int grab_mdesc_irq_props(struct mdesc_handle *mdesc,
struct spu_mdesc_info *ip,
const char *node_name)
{
- const unsigned int *reg;
- u64 node;
+ u64 node, reg;
- reg = of_get_property(dev->dev.of_node, "reg", NULL);
- if (!reg)
+ if (of_property_read_reg(dev->dev.of_node, 0, &reg, NULL) < 0)
return -ENODEV;
mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
@@ -1810,7 +1809,7 @@ static int grab_mdesc_irq_props(struct mdesc_handle *mdesc,
if (!name || strcmp(name, node_name))
continue;
chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
- if (!chdl || (*chdl != *reg))
+ if (!chdl || (*chdl != reg))
continue;
ip->cfg_handle = *chdl;
return get_irq_props(mdesc, node, ip);
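
The n2_core change swaps a raw of_get_property("reg") lookup for of_property_read_reg() from <linux/of_address.h> (hence the new include), which decodes the address cells of the node instead of dereferencing the first be32 directly. A minimal usage sketch, assuming a node whose first "reg" entry carries the handle being matched:

    #include <linux/of_address.h>

    /* Read the first "reg" tuple of @np as a 64-bit address; passing NULL
     * for the size pointer is fine when only the address is needed. */
    static int example_read_cfg_handle(struct device_node *np, u64 *handle)
    {
            return of_property_read_reg(np, 0, handle, NULL);
    }

This is also why the comparison becomes `*chdl != reg`: the value is now a plain u64 rather than a pointer into the property blob.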
diff --git a/drivers/crypto/nx/Makefile b/drivers/crypto/nx/Makefile
index d00181a26dd6..483cef62acee 100644
--- a/drivers/crypto/nx/Makefile
+++ b/drivers/crypto/nx/Makefile
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CRYPTO_DEV_NX_ENCRYPT) += nx-crypto.o
nx-crypto-objs := nx.o \
- nx_debugfs.o \
nx-aes-cbc.o \
nx-aes-ecb.o \
nx-aes-gcm.o \
@@ -11,6 +10,7 @@ nx-crypto-objs := nx.o \
nx-sha256.o \
nx-sha512.o
+nx-crypto-$(CONFIG_DEBUG_FS) += nx_debugfs.o
obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES) += nx-compress-pseries.o nx-compress.o
obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_POWERNV) += nx-compress-powernv.o nx-compress.o
nx-compress-objs := nx-842.o
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index c6233173c612..2697baebb6a3 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -170,8 +170,8 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int,
void nx_debugfs_init(struct nx_crypto_driver *);
void nx_debugfs_fini(struct nx_crypto_driver *);
#else
-#define NX_DEBUGFS_INIT(drv) (0)
-#define NX_DEBUGFS_FINI(drv) (0)
+#define NX_DEBUGFS_INIT(drv) do {} while (0)
+#define NX_DEBUGFS_FINI(drv) do {} while (0)
#endif
#define NX_PAGE_NUM(x) ((u64)(x) & 0xfffffffffffff000ULL)
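
The `do {} while (0)` form is the canonical no-op statement macro in C: unlike `(0)`, it cannot be used as a value, and it consumes the trailing semicolon cleanly inside if/else chains. A small illustration of what the old definition allowed:

    #define STUB_OLD(drv) (0)             /* an expression: misuse compiles  */
    #define STUB_NEW(drv) do {} while (0) /* a statement: misuse is rejected */

    void example(int x)
    {
            int y = STUB_OLD(x);    /* silently compiles with the old form */
            (void)y;                /* the do-while form would reject this */
            if (x)
                    STUB_NEW(x);    /* behaves like any other statement */
            else
                    STUB_NEW(x);
    }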
diff --git a/drivers/crypto/sa2ul.h b/drivers/crypto/sa2ul.h
index 92bf97232a29..12c17a68d350 100644
--- a/drivers/crypto/sa2ul.h
+++ b/drivers/crypto/sa2ul.h
@@ -170,7 +170,7 @@ struct sa_tfm_ctx;
* the following range, so avoid using it.
*/
#define SA_UNSAFE_DATA_SZ_MIN 240
-#define SA_UNSAFE_DATA_SZ_MAX 256
+#define SA_UNSAFE_DATA_SZ_MAX 255
struct sa_match_data;
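
The comment above the constants says sizes in this range must be avoided; dropping the maximum from 256 to 255 makes the window exactly the 16 sizes 240..255 when the bounds are tested inclusively. A sketch of the check this implies (assumption: the driver compares with inclusive bounds, as the MIN/MAX naming suggests):

    /* Hedged sketch: the unsafe window is [240, 255]; with MAX at 256 a
     * perfectly usable 256-byte size would be rejected as well. */
    static bool sa_size_is_unsafe(unsigned int len)
    {
            return len >= SA_UNSAFE_DATA_SZ_MIN &&
                   len <= SA_UNSAFE_DATA_SZ_MAX;
    }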
diff --git a/drivers/crypto/starfive/Kconfig b/drivers/crypto/starfive/Kconfig
new file mode 100644
index 000000000000..df745fcb09df
--- /dev/null
+++ b/drivers/crypto/starfive/Kconfig
@@ -0,0 +1,20 @@
+#
+# StarFive crypto drivers configuration
+#
+
+config CRYPTO_DEV_JH7110
+ tristate "StarFive JH7110 cryptographic engine driver"
+ depends on SOC_STARFIVE || AMBA_PL08X || COMPILE_TEST
+ depends on HAS_DMA
+ select CRYPTO_ENGINE
+ select CRYPTO_HMAC
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
+ select CRYPTO_SM3_GENERIC
+ select CRYPTO_RSA
+ help
+ Support for the StarFive JH7110 crypto hardware acceleration engine.
+ This module provides acceleration for public key algorithms,
+ skciphers, AEAD and hash functions.
+
+ If you choose 'M' here, this module will be called jh7110-crypto.
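
For reference, enabling the new driver as a module amounts to:

    CONFIG_CRYPTO_DEV_JH7110=m

The module name jh7110-crypto promised by the help text comes from the Makefile that follows.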
diff --git a/drivers/crypto/starfive/Makefile b/drivers/crypto/starfive/Makefile
new file mode 100644
index 000000000000..98b01d2f1ccf
--- /dev/null
+++ b/drivers/crypto/starfive/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_CRYPTO_DEV_JH7110) += jh7110-crypto.o
+jh7110-crypto-objs := jh7110-cryp.o jh7110-hash.o jh7110-rsa.o
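
kbuild links the three objects into the single jh7110-crypto module, so one modprobe registers both the hash and RSA algorithms. A future config-gated object would extend the list the same way the nx Makefile above does (the AES object and config symbol here are purely hypothetical):

    jh7110-crypto-$(CONFIG_CRYPTO_DEV_JH7110_AES) += jh7110-aes.o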
diff --git a/drivers/crypto/starfive/jh7110-cryp.c b/drivers/crypto/starfive/jh7110-cryp.c
new file mode 100644
index 000000000000..cc43556b6c80
--- /dev/null
+++ b/drivers/crypto/starfive/jh7110-cryp.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cryptographic API.
+ *
+ * Support for StarFive hardware cryptographic engine.
+ * Copyright (c) 2022 StarFive Technology
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include "jh7110-cryp.h"
+
+#define DRIVER_NAME "jh7110-crypto"
+
+struct starfive_dev_list {
+ struct list_head dev_list;
+ spinlock_t lock; /* protect dev_list */
+};
+
+static struct starfive_dev_list dev_list = {
+ .dev_list = LIST_HEAD_INIT(dev_list.dev_list),
+ .lock = __SPIN_LOCK_UNLOCKED(dev_list.lock),
+};
+
+struct starfive_cryp_dev *starfive_cryp_find_dev(struct starfive_cryp_ctx *ctx)
+{
+ struct starfive_cryp_dev *cryp = NULL, *tmp;
+
+ spin_lock_bh(&dev_list.lock);
+ if (!ctx->cryp) {
+ list_for_each_entry(tmp, &dev_list.dev_list, list) {
+ cryp = tmp;
+ break;
+ }
+ ctx->cryp = cryp;
+ } else {
+ cryp = ctx->cryp;
+ }
+
+ spin_unlock_bh(&dev_list.lock);
+
+ return cryp;
+}
+
+static int starfive_dma_init(struct starfive_cryp_dev *cryp)
+{
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ cryp->tx = dma_request_chan(cryp->dev, "tx");
+ if (IS_ERR(cryp->tx))
+ return dev_err_probe(cryp->dev, PTR_ERR(cryp->tx),
+ "Error requesting tx dma channel.\n");
+
+ cryp->rx = dma_request_chan(cryp->dev, "rx");
+ if (IS_ERR(cryp->rx)) {
+ dma_release_channel(cryp->tx);
+ return dev_err_probe(cryp->dev, PTR_ERR(cryp->rx),
+ "Error requesting rx dma channel.\n");
+ }
+
+ return 0;
+}
+
+static void starfive_dma_cleanup(struct starfive_cryp_dev *cryp)
+{
+ dma_release_channel(cryp->tx);
+ dma_release_channel(cryp->rx);
+}
+
+static irqreturn_t starfive_cryp_irq(int irq, void *priv)
+{
+ u32 status;
+ u32 mask;
+ struct starfive_cryp_dev *cryp = (struct starfive_cryp_dev *)priv;
+
+ /* Use a separate variable for the mask register so the PKA test
+ * below still sees the original flag snapshot. */
+ status = readl(cryp->base + STARFIVE_IE_FLAG_OFFSET);
+ if (status & STARFIVE_IE_FLAG_HASH_DONE) {
+ mask = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
+ mask |= STARFIVE_IE_MASK_HASH_DONE;
+ writel(mask, cryp->base + STARFIVE_IE_MASK_OFFSET);
+ tasklet_schedule(&cryp->hash_done);
+ }
+
+ if (status & STARFIVE_IE_FLAG_PKA_DONE) {
+ mask = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
+ mask |= STARFIVE_IE_MASK_PKA_DONE;
+ writel(mask, cryp->base + STARFIVE_IE_MASK_OFFSET);
+ complete(&cryp->pka_done);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int starfive_cryp_probe(struct platform_device *pdev)
+{
+ struct starfive_cryp_dev *cryp;
+ struct resource *res;
+ int irq;
+ int ret;
+
+ cryp = devm_kzalloc(&pdev->dev, sizeof(*cryp), GFP_KERNEL);
+ if (!cryp)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, cryp);
+ cryp->dev = &pdev->dev;
+
+ cryp->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(cryp->base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(cryp->base),
+ "Error remapping memory for platform device\n");
+
+ tasklet_init(&cryp->hash_done, starfive_hash_done_task, (unsigned long)cryp);
+
+ cryp->phys_base = res->start;
+ cryp->dma_maxburst = 32;
+
+ cryp->hclk = devm_clk_get(&pdev->dev, "hclk");
+ if (IS_ERR(cryp->hclk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(cryp->hclk),
+ "Error getting hardware reference clock\n");
+
+ cryp->ahb = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(cryp->ahb))
+ return dev_err_probe(&pdev->dev, PTR_ERR(cryp->ahb),
+ "Error getting ahb reference clock\n");
+
+ cryp->rst = devm_reset_control_get_shared(cryp->dev, NULL);
+ if (IS_ERR(cryp->rst))
+ return dev_err_probe(&pdev->dev, PTR_ERR(cryp->rst),
+ "Error getting hardware reset line\n");
+
+ init_completion(&cryp->pka_done);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(&pdev->dev, irq, starfive_cryp_irq, 0, pdev->name,
+ (void *)cryp);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to register interrupt handler\n");
+
+ clk_prepare_enable(cryp->hclk);
+ clk_prepare_enable(cryp->ahb);
+ reset_control_deassert(cryp->rst);
+
+ spin_lock(&dev_list.lock);
+ list_add(&cryp->list, &dev_list.dev_list);
+ spin_unlock(&dev_list.lock);
+
+ ret = starfive_dma_init(cryp);
+ if (ret)
+ goto err_dma_init;
+
+ /* Initialize crypto engine */
+ cryp->engine = crypto_engine_alloc_init(&pdev->dev, 1);
+ if (!cryp->engine) {
+ ret = -ENOMEM;
+ goto err_engine;
+ }
+
+ ret = crypto_engine_start(cryp->engine);
+ if (ret)
+ goto err_engine_start;
+
+ ret = starfive_hash_register_algs();
+ if (ret)
+ goto err_algs_hash;
+
+ ret = starfive_rsa_register_algs();
+ if (ret)
+ goto err_algs_rsa;
+
+ return 0;
+
+err_algs_rsa:
+ starfive_hash_unregister_algs();
+err_algs_hash:
+ crypto_engine_stop(cryp->engine);
+err_engine_start:
+ crypto_engine_exit(cryp->engine);
+err_engine:
+ starfive_dma_cleanup(cryp);
+err_dma_init:
+ spin_lock(&dev_list.lock);
+ list_del(&cryp->list);
+ spin_unlock(&dev_list.lock);
+
+ clk_disable_unprepare(cryp->hclk);
+ clk_disable_unprepare(cryp->ahb);
+ reset_control_assert(cryp->rst);
+
+ tasklet_kill(&cryp->hash_done);
+ return ret;
+}
+
+static int starfive_cryp_remove(struct platform_device *pdev)
+{
+ struct starfive_cryp_dev *cryp = platform_get_drvdata(pdev);
+
+ starfive_hash_unregister_algs();
+ starfive_rsa_unregister_algs();
+
+ tasklet_kill(&cryp->hash_done);
+
+ crypto_engine_stop(cryp->engine);
+ crypto_engine_exit(cryp->engine);
+
+ starfive_dma_cleanup(cryp);
+
+ spin_lock(&dev_list.lock);
+ list_del(&cryp->list);
+ spin_unlock(&dev_list.lock);
+
+ clk_disable_unprepare(cryp->hclk);
+ clk_disable_unprepare(cryp->ahb);
+ reset_control_assert(cryp->rst);
+
+ return 0;
+}
+
+static const struct of_device_id starfive_dt_ids[] __maybe_unused = {
+ { .compatible = "starfive,jh7110-crypto", .data = NULL},
+ {},
+};
+MODULE_DEVICE_TABLE(of, starfive_dt_ids);
+
+static struct platform_driver starfive_cryp_driver = {
+ .probe = starfive_cryp_probe,
+ .remove = starfive_cryp_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = starfive_dt_ids,
+ },
+};
+
+module_platform_driver(starfive_cryp_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("StarFive JH7110 Cryptographic Module");
diff --git a/drivers/crypto/starfive/jh7110-cryp.h b/drivers/crypto/starfive/jh7110-cryp.h
new file mode 100644
index 000000000000..0cdcffc0d7d4
--- /dev/null
+++ b/drivers/crypto/starfive/jh7110-cryp.h
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __STARFIVE_STR_H__
+#define __STARFIVE_STR_H__
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+
+#include <crypto/engine.h>
+#include <crypto/sha2.h>
+#include <crypto/sm3.h>
+
+#define STARFIVE_ALG_CR_OFFSET 0x0
+#define STARFIVE_ALG_FIFO_OFFSET 0x4
+#define STARFIVE_IE_MASK_OFFSET 0x8
+#define STARFIVE_IE_FLAG_OFFSET 0xc
+#define STARFIVE_DMA_IN_LEN_OFFSET 0x10
+#define STARFIVE_DMA_OUT_LEN_OFFSET 0x14
+
+#define STARFIVE_IE_MASK_HASH_DONE 0x4
+#define STARFIVE_IE_MASK_PKA_DONE 0x8
+#define STARFIVE_IE_FLAG_HASH_DONE 0x4
+#define STARFIVE_IE_FLAG_PKA_DONE 0x8
+
+#define STARFIVE_MSG_BUFFER_SIZE SZ_16K
+#define MAX_KEY_SIZE SHA512_BLOCK_SIZE
+
+union starfive_hash_csr {
+ u32 v;
+ struct {
+ u32 start :1;
+ u32 reset :1;
+ u32 ie :1;
+ u32 firstb :1;
+#define STARFIVE_HASH_SM3 0x0
+#define STARFIVE_HASH_SHA224 0x3
+#define STARFIVE_HASH_SHA256 0x4
+#define STARFIVE_HASH_SHA384 0x5
+#define STARFIVE_HASH_SHA512 0x6
+#define STARFIVE_HASH_MODE_MASK 0x7
+ u32 mode :3;
+ u32 rsvd_1 :1;
+ u32 final :1;
+ u32 rsvd_2 :2;
+#define STARFIVE_HASH_HMAC_FLAGS 0x800
+ u32 hmac :1;
+ u32 rsvd_3 :1;
+#define STARFIVE_HASH_KEY_DONE BIT(13)
+ u32 key_done :1;
+ u32 key_flag :1;
+ u32 hmac_done :1;
+#define STARFIVE_HASH_BUSY BIT(16)
+ u32 busy :1;
+ u32 hashdone :1;
+ u32 rsvd_4 :14;
+ };
+};
+
+union starfive_pka_cacr {
+ u32 v;
+ struct {
+ u32 start :1;
+ u32 reset :1;
+ u32 ie :1;
+ u32 rsvd_0 :1;
+ u32 fifo_mode :1;
+ u32 not_r2 :1;
+ u32 ecc_sub :1;
+ u32 pre_expf :1;
+ u32 cmd :4;
+ u32 rsvd_1 :1;
+ u32 ctrl_dummy :1;
+ u32 ctrl_false :1;
+ u32 cln_done :1;
+ u32 opsize :6;
+ u32 rsvd_2 :2;
+ u32 exposize :6;
+ u32 rsvd_3 :1;
+ u32 bigendian :1;
+ };
+};
+
+struct starfive_rsa_key {
+ u8 *n;
+ u8 *e;
+ u8 *d;
+ int e_bitlen;
+ int d_bitlen;
+ int bitlen;
+ size_t key_sz;
+};
+
+union starfive_alg_cr {
+ u32 v;
+ struct {
+ u32 start :1;
+ u32 aes_dma_en :1;
+ u32 rsvd_0 :1;
+ u32 hash_dma_en :1;
+ u32 alg_done :1;
+ u32 rsvd_1 :3;
+ u32 clear :1;
+ u32 rsvd_2 :23;
+ };
+};
+
+struct starfive_cryp_ctx {
+ struct crypto_engine_ctx enginectx;
+ struct starfive_cryp_dev *cryp;
+ struct starfive_cryp_request_ctx *rctx;
+
+ unsigned int hash_mode;
+ u8 key[MAX_KEY_SIZE];
+ int keylen;
+ bool is_hmac;
+ struct starfive_rsa_key rsa_key;
+ struct crypto_akcipher *akcipher_fbk;
+ struct crypto_ahash *ahash_fbk;
+};
+
+struct starfive_cryp_dev {
+ struct list_head list;
+ struct device *dev;
+ struct clk *hclk;
+ struct clk *ahb;
+ struct reset_control *rst;
+
+ void __iomem *base;
+ phys_addr_t phys_base;
+
+ u32 dma_maxburst;
+ struct dma_chan *tx;
+ struct dma_chan *rx;
+ struct dma_slave_config cfg_in;
+ struct dma_slave_config cfg_out;
+ struct crypto_engine *engine;
+ struct tasklet_struct hash_done;
+ struct completion pka_done;
+ int err;
+ union starfive_alg_cr alg_cr;
+ union {
+ struct ahash_request *hreq;
+ } req;
+};
+
+struct starfive_cryp_request_ctx {
+ union {
+ union starfive_hash_csr hash;
+ union starfive_pka_cacr pka;
+ } csr;
+
+ struct scatterlist *in_sg;
+ struct scatterlist *out_sg;
+ struct ahash_request ahash_fbk_req;
+ size_t total;
+ size_t nents;
+ unsigned int blksize;
+ unsigned int digsize;
+ unsigned long in_sg_len;
+ u8 rsa_data[] __aligned(sizeof(u32));
+};
+
+struct starfive_cryp_dev *starfive_cryp_find_dev(struct starfive_cryp_ctx *ctx);
+
+int starfive_hash_register_algs(void);
+void starfive_hash_unregister_algs(void);
+
+int starfive_rsa_register_algs(void);
+void starfive_rsa_unregister_algs(void);
+
+void starfive_hash_done_task(unsigned long param);
+#endif
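
The control registers are modeled as unions of a raw u32 (`v`) and per-field bitmaps, so a complete register image is composed in C and issued with a single writel(). A minimal sketch of kicking off a non-HMAC SHA-256 digest, mirroring starfive_hash_xmit() in jh7110-hash.c below (the SHACSR offset is defined there):

    union starfive_hash_csr csr;

    csr.v = 0;                        /* clean register image       */
    csr.mode = STARFIVE_HASH_SHA256;  /* 3-bit algorithm select     */
    csr.start = 1;
    csr.firstb = 1;                   /* first block of the message */
    csr.ie = 1;                       /* raise hash-done interrupt  */
    writel(csr.v, cryp->base + STARFIVE_HASH_SHACSR);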
diff --git a/drivers/crypto/starfive/jh7110-hash.c b/drivers/crypto/starfive/jh7110-hash.c
new file mode 100644
index 000000000000..5064150b8a1c
--- /dev/null
+++ b/drivers/crypto/starfive/jh7110-hash.c
@@ -0,0 +1,899 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hash function and HMAC support for StarFive driver
+ *
+ * Copyright (c) 2022 StarFive Technology
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/dma-direct.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/amba/pl080.h>
+
+#include <crypto/hash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/hash.h>
+
+#include "jh7110-cryp.h"
+
+#define STARFIVE_HASH_REGS_OFFSET 0x300
+#define STARFIVE_HASH_SHACSR (STARFIVE_HASH_REGS_OFFSET + 0x0)
+#define STARFIVE_HASH_SHAWDR (STARFIVE_HASH_REGS_OFFSET + 0x4)
+#define STARFIVE_HASH_SHARDR (STARFIVE_HASH_REGS_OFFSET + 0x8)
+#define STARFIVE_HASH_SHAWSR (STARFIVE_HASH_REGS_OFFSET + 0xC)
+#define STARFIVE_HASH_SHAWLEN3 (STARFIVE_HASH_REGS_OFFSET + 0x10)
+#define STARFIVE_HASH_SHAWLEN2 (STARFIVE_HASH_REGS_OFFSET + 0x14)
+#define STARFIVE_HASH_SHAWLEN1 (STARFIVE_HASH_REGS_OFFSET + 0x18)
+#define STARFIVE_HASH_SHAWLEN0 (STARFIVE_HASH_REGS_OFFSET + 0x1C)
+#define STARFIVE_HASH_SHAWKR (STARFIVE_HASH_REGS_OFFSET + 0x20)
+#define STARFIVE_HASH_SHAWKLEN (STARFIVE_HASH_REGS_OFFSET + 0x24)
+
+#define STARFIVE_HASH_BUFLEN SHA512_BLOCK_SIZE
+#define STARFIVE_HASH_RESET 0x2
+
+static inline int starfive_hash_wait_busy(struct starfive_cryp_ctx *ctx)
+{
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ u32 status;
+
+ return readl_relaxed_poll_timeout(cryp->base + STARFIVE_HASH_SHACSR, status,
+ !(status & STARFIVE_HASH_BUSY), 10, 100000);
+}
+
+static inline int starfive_hash_wait_key_done(struct starfive_cryp_ctx *ctx)
+{
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ u32 status;
+
+ return readl_relaxed_poll_timeout(cryp->base + STARFIVE_HASH_SHACSR, status,
+ (status & STARFIVE_HASH_KEY_DONE), 10, 100000);
+}
+
+static int starfive_hash_hmac_key(struct starfive_cryp_ctx *ctx)
+{
+ struct starfive_cryp_request_ctx *rctx = ctx->rctx;
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ int klen = ctx->keylen, loop;
+ unsigned int *key = (unsigned int *)ctx->key;
+ unsigned char *cl;
+
+ writel(ctx->keylen, cryp->base + STARFIVE_HASH_SHAWKLEN);
+
+ rctx->csr.hash.hmac = 1;
+ rctx->csr.hash.key_flag = 1;
+
+ writel(rctx->csr.hash.v, cryp->base + STARFIVE_HASH_SHACSR);
+
+ for (loop = 0; loop < klen / sizeof(unsigned int); loop++, key++)
+ writel(*key, cryp->base + STARFIVE_HASH_SHAWKR);
+
+ if (klen & 0x3) {
+ cl = (unsigned char *)key;
+ for (loop = 0; loop < (klen & 0x3); loop++, cl++)
+ writeb(*cl, cryp->base + STARFIVE_HASH_SHAWKR);
+ }
+
+ if (starfive_hash_wait_key_done(ctx))
+ return dev_err_probe(cryp->dev, -ETIMEDOUT, "starfive_hash_wait_key_done error\n");
+
+ return 0;
+}
+
+static void starfive_hash_start(void *param)
+{
+ struct starfive_cryp_ctx *ctx = param;
+ struct starfive_cryp_request_ctx *rctx = ctx->rctx;
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ union starfive_alg_cr alg_cr;
+ union starfive_hash_csr csr;
+ u32 stat;
+
+ dma_unmap_sg(cryp->dev, rctx->in_sg, rctx->in_sg_len, DMA_TO_DEVICE);
+
+ alg_cr.v = 0;
+ alg_cr.clear = 1;
+
+ writel(alg_cr.v, cryp->base + STARFIVE_ALG_CR_OFFSET);
+
+ csr.v = readl(cryp->base + STARFIVE_HASH_SHACSR);
+ csr.firstb = 0;
+ csr.final = 1;
+
+ stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
+ stat &= ~STARFIVE_IE_MASK_HASH_DONE;
+ writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET);
+ writel(csr.v, cryp->base + STARFIVE_HASH_SHACSR);
+}
+
+static int starfive_hash_xmit_dma(struct starfive_cryp_ctx *ctx)
+{
+ struct starfive_cryp_request_ctx *rctx = ctx->rctx;
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ struct dma_async_tx_descriptor *in_desc;
+ union starfive_alg_cr alg_cr;
+ int total_len;
+ int ret;
+
+ if (!rctx->total) {
+ starfive_hash_start(ctx);
+ return 0;
+ }
+
+ writel(rctx->total, cryp->base + STARFIVE_DMA_IN_LEN_OFFSET);
+
+ total_len = ALIGN(rctx->total, sizeof(u32));
+ sg_dma_len(rctx->in_sg) = total_len;
+
+ alg_cr.v = 0;
+ alg_cr.start = 1;
+ alg_cr.hash_dma_en = 1;
+
+ writel(alg_cr.v, cryp->base + STARFIVE_ALG_CR_OFFSET);
+
+ ret = dma_map_sg(cryp->dev, rctx->in_sg, rctx->in_sg_len, DMA_TO_DEVICE);
+ if (!ret)
+ return dev_err_probe(cryp->dev, -EINVAL, "dma_map_sg() error\n");
+
+ cryp->cfg_in.direction = DMA_MEM_TO_DEV;
+ cryp->cfg_in.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cryp->cfg_in.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cryp->cfg_in.src_maxburst = cryp->dma_maxburst;
+ cryp->cfg_in.dst_maxburst = cryp->dma_maxburst;
+ cryp->cfg_in.dst_addr = cryp->phys_base + STARFIVE_ALG_FIFO_OFFSET;
+
+ dmaengine_slave_config(cryp->tx, &cryp->cfg_in);
+
+ in_desc = dmaengine_prep_slave_sg(cryp->tx, rctx->in_sg,
+ ret, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+
+ if (!in_desc)
+ return -EINVAL;
+
+ in_desc->callback = starfive_hash_start;
+ in_desc->callback_param = ctx;
+
+ dmaengine_submit(in_desc);
+ dma_async_issue_pending(cryp->tx);
+
+ return 0;
+}
+
+static int starfive_hash_xmit(struct starfive_cryp_ctx *ctx)
+{
+ struct starfive_cryp_request_ctx *rctx = ctx->rctx;
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ int ret = 0;
+
+ rctx->csr.hash.v = 0;
+ rctx->csr.hash.reset = 1;
+ writel(rctx->csr.hash.v, cryp->base + STARFIVE_HASH_SHACSR);
+
+ if (starfive_hash_wait_busy(ctx))
+ return dev_err_probe(cryp->dev, -ETIMEDOUT, "Error resetting engine.\n");
+
+ rctx->csr.hash.v = 0;
+ rctx->csr.hash.mode = ctx->hash_mode;
+ rctx->csr.hash.ie = 1;
+
+ if (ctx->is_hmac) {
+ ret = starfive_hash_hmac_key(ctx);
+ if (ret)
+ return ret;
+ } else {
+ rctx->csr.hash.start = 1;
+ rctx->csr.hash.firstb = 1;
+ writel(rctx->csr.hash.v, cryp->base + STARFIVE_HASH_SHACSR);
+ }
+
+ return starfive_hash_xmit_dma(ctx);
+}
+
+static int starfive_hash_copy_hash(struct ahash_request *req)
+{
+ struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req);
+ struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ u32 *data;
+ int count;
+ int mlen;
+
+ if (!req->result)
+ return 0;
+
+ mlen = rctx->digsize / sizeof(u32);
+ data = (u32 *)req->result;
+
+ for (count = 0; count < mlen; count++)
+ data[count] = readl(ctx->cryp->base + STARFIVE_HASH_SHARDR);
+
+ return 0;
+}
+
+void starfive_hash_done_task(unsigned long param)
+{
+ struct starfive_cryp_dev *cryp = (struct starfive_cryp_dev *)param;
+ int err = cryp->err;
+
+ if (!err)
+ err = starfive_hash_copy_hash(cryp->req.hreq);
+
+ /* Reset to clear hash_done in irq register */
+ writel(STARFIVE_HASH_RESET, cryp->base + STARFIVE_HASH_SHACSR);
+
+ crypto_finalize_hash_request(cryp->engine, cryp->req.hreq, err);
+}
+
+static int starfive_hash_check_aligned(struct scatterlist *sg, size_t total, size_t align)
+{
+ int len = 0;
+
+ if (!total)
+ return 0;
+
+ if (!IS_ALIGNED(total, align))
+ return -EINVAL;
+
+ while (sg) {
+ if (!IS_ALIGNED(sg->offset, sizeof(u32)))
+ return -EINVAL;
+
+ if (!IS_ALIGNED(sg->length, align))
+ return -EINVAL;
+
+ len += sg->length;
+ sg = sg_next(sg);
+ }
+
+ if (len != total)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int starfive_hash_one_request(struct crypto_engine *engine, void *areq)
+{
+ struct ahash_request *req = container_of(areq, struct ahash_request,
+ base);
+ struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+
+ if (!cryp)
+ return -ENODEV;
+
+ return starfive_hash_xmit(ctx);
+}
+
+static int starfive_hash_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req);
+ struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->ahash_fbk_req, ctx->ahash_fbk);
+ ahash_request_set_callback(&rctx->ahash_fbk_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+
+ ahash_request_set_crypt(&rctx->ahash_fbk_req, req->src,
+ req->result, req->nbytes);
+
+ return crypto_ahash_init(&rctx->ahash_fbk_req);
+}
+
+static int starfive_hash_update(struct ahash_request *req)
+{
+ struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->ahash_fbk_req, ctx->ahash_fbk);
+ ahash_request_set_callback(&rctx->ahash_fbk_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+
+ ahash_request_set_crypt(&rctx->ahash_fbk_req, req->src,
+ req->result, req->nbytes);
+
+ return crypto_ahash_update(&rctx->ahash_fbk_req);
+}
+
+static int starfive_hash_final(struct ahash_request *req)
+{
+ struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->ahash_fbk_req, ctx->ahash_fbk);
+ ahash_request_set_callback(&rctx->ahash_fbk_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+
+ ahash_request_set_crypt(&rctx->ahash_fbk_req, req->src,
+ req->result, req->nbytes);
+
+ return crypto_ahash_final(&rctx->ahash_fbk_req);
+}
+
+static int starfive_hash_finup(struct ahash_request *req)
+{
+ struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->ahash_fbk_req, ctx->ahash_fbk);
+ ahash_request_set_callback(&rctx->ahash_fbk_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+
+ ahash_request_set_crypt(&rctx->ahash_fbk_req, req->src,
+ req->result, req->nbytes);
+
+ return crypto_ahash_finup(&rctx->ahash_fbk_req);
+}
+
+static int starfive_hash_digest_fb(struct ahash_request *req)
+{
+ struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->ahash_fbk_req, ctx->ahash_fbk);
+ ahash_request_set_callback(&rctx->ahash_fbk_req, req->base.flags,
+ req->base.complete, req->base.data);
+
+ ahash_request_set_crypt(&rctx->ahash_fbk_req, req->src,
+ req->result, req->nbytes);
+
+ return crypto_ahash_digest(&rctx->ahash_fbk_req);
+}
+
+static int starfive_hash_digest(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req);
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+
+ memset(rctx, 0, sizeof(struct starfive_cryp_request_ctx));
+
+ cryp->req.hreq = req;
+ rctx->total = req->nbytes;
+ rctx->in_sg = req->src;
+ rctx->blksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+ rctx->digsize = crypto_ahash_digestsize(tfm);
+ rctx->in_sg_len = sg_nents_for_len(rctx->in_sg, rctx->total);
+ ctx->rctx = rctx;
+
+ if (starfive_hash_check_aligned(rctx->in_sg, rctx->total, rctx->blksize))
+ return starfive_hash_digest_fb(req);
+
+ return crypto_transfer_hash_request_to_engine(cryp->engine, req);
+}
+
+static int starfive_hash_export(struct ahash_request *req, void *out)
+{
+ struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->ahash_fbk_req, ctx->ahash_fbk);
+ ahash_request_set_callback(&rctx->ahash_fbk_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+
+ return crypto_ahash_export(&rctx->ahash_fbk_req, out);
+}
+
+static int starfive_hash_import(struct ahash_request *req, const void *in)
+{
+ struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->ahash_fbk_req, ctx->ahash_fbk);
+ ahash_request_set_callback(&rctx->ahash_fbk_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+
+ return crypto_ahash_import(&rctx->ahash_fbk_req, in);
+}
+
+static int starfive_hash_init_tfm(struct crypto_ahash *hash,
+ const char *alg_name,
+ unsigned int mode)
+{
+ struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
+
+ ctx->cryp = starfive_cryp_find_dev(ctx);
+
+ if (!ctx->cryp)
+ return -ENODEV;
+
+ ctx->ahash_fbk = crypto_alloc_ahash(alg_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+
+ if (IS_ERR(ctx->ahash_fbk))
+ return dev_err_probe(ctx->cryp->dev, PTR_ERR(ctx->ahash_fbk),
+ "starfive_hash: Could not load fallback driver.\n");
+
+ crypto_ahash_set_statesize(hash, crypto_ahash_statesize(ctx->ahash_fbk));
+ crypto_ahash_set_reqsize(hash, sizeof(struct starfive_cryp_request_ctx) +
+ crypto_ahash_reqsize(ctx->ahash_fbk));
+
+ ctx->keylen = 0;
+ ctx->hash_mode = mode;
+
+ ctx->enginectx.op.do_one_request = starfive_hash_one_request;
+ ctx->enginectx.op.prepare_request = NULL;
+ ctx->enginectx.op.unprepare_request = NULL;
+
+ return 0;
+}
+
+static void starfive_hash_exit_tfm(struct crypto_ahash *hash)
+{
+ struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
+
+ crypto_free_ahash(ctx->ahash_fbk);
+
+ ctx->ahash_fbk = NULL;
+ ctx->enginectx.op.do_one_request = NULL;
+ ctx->enginectx.op.prepare_request = NULL;
+ ctx->enginectx.op.unprepare_request = NULL;
+}
+
+static int starfive_hash_long_setkey(struct starfive_cryp_ctx *ctx,
+ const u8 *key, unsigned int keylen,
+ const char *alg_name)
+{
+ struct crypto_wait wait;
+ struct ahash_request *req;
+ struct scatterlist sg;
+ struct crypto_ahash *ahash_tfm;
+ u8 *buf;
+ int ret;
+
+ ahash_tfm = crypto_alloc_ahash(alg_name, 0, 0);
+ if (IS_ERR(ahash_tfm))
+ return PTR_ERR(ahash_tfm);
+
+ req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
+ if (!req) {
+ ret = -ENOMEM;
+ goto err_free_ahash;
+ }
+
+ crypto_init_wait(&wait);
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &wait);
+ crypto_ahash_clear_flags(ahash_tfm, ~0);
+
+ buf = kzalloc(keylen + STARFIVE_HASH_BUFLEN, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_free_req;
+ }
+
+ memcpy(buf, key, keylen);
+ sg_init_one(&sg, buf, keylen);
+ ahash_request_set_crypt(req, &sg, ctx->key, keylen);
+
+ ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
+
+ kfree(buf);
+err_free_req:
+ ahash_request_free(req);
+err_free_ahash:
+ crypto_free_ahash(ahash_tfm);
+ return ret;
+}
+
+static int starfive_hash_setkey(struct crypto_ahash *hash,
+ const u8 *key, unsigned int keylen)
+{
+ struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
+ unsigned int digestsize = crypto_ahash_digestsize(hash);
+ unsigned int blocksize = crypto_ahash_blocksize(hash);
+ const char *alg_name;
+ int ret;
+
+ ret = crypto_ahash_setkey(ctx->ahash_fbk, key, keylen);
+ if (ret)
+ return ret;
+
+ if (keylen <= blocksize) {
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+ return 0;
+ }
+
+ ctx->keylen = digestsize;
+
+ switch (digestsize) {
+ case SHA224_DIGEST_SIZE:
+ alg_name = "sha224-starfive";
+ break;
+ case SHA256_DIGEST_SIZE:
+ if (ctx->hash_mode == STARFIVE_HASH_SM3)
+ alg_name = "sm3-starfive";
+ else
+ alg_name = "sha256-starfive";
+ break;
+ case SHA384_DIGEST_SIZE:
+ alg_name = "sha384-starfive";
+ break;
+ case SHA512_DIGEST_SIZE:
+ alg_name = "sha512-starfive";
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return starfive_hash_long_setkey(ctx, key, keylen, alg_name);
+}
+
+static int starfive_sha224_init_tfm(struct crypto_ahash *hash)
+{
+ return starfive_hash_init_tfm(hash, "sha224-generic",
+ STARFIVE_HASH_SHA224);
+}
+
+static int starfive_sha256_init_tfm(struct crypto_ahash *hash)
+{
+ return starfive_hash_init_tfm(hash, "sha256-generic",
+ STARFIVE_HASH_SHA256);
+}
+
+static int starfive_sha384_init_tfm(struct crypto_ahash *hash)
+{
+ return starfive_hash_init_tfm(hash, "sha384-generic",
+ STARFIVE_HASH_SHA384);
+}
+
+static int starfive_sha512_init_tfm(struct crypto_ahash *hash)
+{
+ return starfive_hash_init_tfm(hash, "sha512-generic",
+ STARFIVE_HASH_SHA512);
+}
+
+static int starfive_sm3_init_tfm(struct crypto_ahash *hash)
+{
+ return starfive_hash_init_tfm(hash, "sm3-generic",
+ STARFIVE_HASH_SM3);
+}
+
+static int starfive_hmac_sha224_init_tfm(struct crypto_ahash *hash)
+{
+ struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
+
+ ctx->is_hmac = true;
+
+ return starfive_hash_init_tfm(hash, "hmac(sha224-generic)",
+ STARFIVE_HASH_SHA224);
+}
+
+static int starfive_hmac_sha256_init_tfm(struct crypto_ahash *hash)
+{
+ struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
+
+ ctx->is_hmac = true;
+
+ return starfive_hash_init_tfm(hash, "hmac(sha256-generic)",
+ STARFIVE_HASH_SHA256);
+}
+
+static int starfive_hmac_sha384_init_tfm(struct crypto_ahash *hash)
+{
+ struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
+
+ ctx->is_hmac = true;
+
+ return starfive_hash_init_tfm(hash, "hmac(sha384-generic)",
+ STARFIVE_HASH_SHA384);
+}
+
+static int starfive_hmac_sha512_init_tfm(struct crypto_ahash *hash)
+{
+ struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
+
+ ctx->is_hmac = true;
+
+ return starfive_hash_init_tfm(hash, "hmac(sha512-generic)",
+ STARFIVE_HASH_SHA512);
+}
+
+static int starfive_hmac_sm3_init_tfm(struct crypto_ahash *hash)
+{
+ struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
+
+ ctx->is_hmac = true;
+
+ return starfive_hash_init_tfm(hash, "hmac(sm3-generic)",
+ STARFIVE_HASH_SM3);
+}
+
+static struct ahash_alg algs_sha2_sm3[] = {
+{
+ .init = starfive_hash_init,
+ .update = starfive_hash_update,
+ .final = starfive_hash_final,
+ .finup = starfive_hash_finup,
+ .digest = starfive_hash_digest,
+ .export = starfive_hash_export,
+ .import = starfive_hash_import,
+ .init_tfm = starfive_sha224_init_tfm,
+ .exit_tfm = starfive_hash_exit_tfm,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "sha224-starfive",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ }
+ }
+}, {
+ .init = starfive_hash_init,
+ .update = starfive_hash_update,
+ .final = starfive_hash_final,
+ .finup = starfive_hash_finup,
+ .digest = starfive_hash_digest,
+ .export = starfive_hash_export,
+ .import = starfive_hash_import,
+ .init_tfm = starfive_hmac_sha224_init_tfm,
+ .exit_tfm = starfive_hash_exit_tfm,
+ .setkey = starfive_hash_setkey,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "hmac(sha224)",
+ .cra_driver_name = "sha224-hmac-starfive",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ }
+ }
+}, {
+ .init = starfive_hash_init,
+ .update = starfive_hash_update,
+ .final = starfive_hash_final,
+ .finup = starfive_hash_finup,
+ .digest = starfive_hash_digest,
+ .export = starfive_hash_export,
+ .import = starfive_hash_import,
+ .init_tfm = starfive_sha256_init_tfm,
+ .exit_tfm = starfive_hash_exit_tfm,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-starfive",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ }
+ }
+}, {
+ .init = starfive_hash_init,
+ .update = starfive_hash_update,
+ .final = starfive_hash_final,
+ .finup = starfive_hash_finup,
+ .digest = starfive_hash_digest,
+ .export = starfive_hash_export,
+ .import = starfive_hash_import,
+ .init_tfm = starfive_hmac_sha256_init_tfm,
+ .exit_tfm = starfive_hash_exit_tfm,
+ .setkey = starfive_hash_setkey,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "hmac(sha256)",
+ .cra_driver_name = "sha256-hmac-starfive",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ }
+ }
+}, {
+ .init = starfive_hash_init,
+ .update = starfive_hash_update,
+ .final = starfive_hash_final,
+ .finup = starfive_hash_finup,
+ .digest = starfive_hash_digest,
+ .export = starfive_hash_export,
+ .import = starfive_hash_import,
+ .init_tfm = starfive_sha384_init_tfm,
+ .exit_tfm = starfive_hash_exit_tfm,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct sha512_state),
+ .base = {
+ .cra_name = "sha384",
+ .cra_driver_name = "sha384-starfive",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ }
+ }
+}, {
+ .init = starfive_hash_init,
+ .update = starfive_hash_update,
+ .final = starfive_hash_final,
+ .finup = starfive_hash_finup,
+ .digest = starfive_hash_digest,
+ .export = starfive_hash_export,
+ .import = starfive_hash_import,
+ .init_tfm = starfive_hmac_sha384_init_tfm,
+ .exit_tfm = starfive_hash_exit_tfm,
+ .setkey = starfive_hash_setkey,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct sha512_state),
+ .base = {
+ .cra_name = "hmac(sha384)",
+ .cra_driver_name = "sha384-hmac-starfive",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ }
+ }
+}, {
+ .init = starfive_hash_init,
+ .update = starfive_hash_update,
+ .final = starfive_hash_final,
+ .finup = starfive_hash_finup,
+ .digest = starfive_hash_digest,
+ .export = starfive_hash_export,
+ .import = starfive_hash_import,
+ .init_tfm = starfive_sha512_init_tfm,
+ .exit_tfm = starfive_hash_exit_tfm,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct sha512_state),
+ .base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "sha512-starfive",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ }
+ }
+}, {
+ .init = starfive_hash_init,
+ .update = starfive_hash_update,
+ .final = starfive_hash_final,
+ .finup = starfive_hash_finup,
+ .digest = starfive_hash_digest,
+ .export = starfive_hash_export,
+ .import = starfive_hash_import,
+ .init_tfm = starfive_hmac_sha512_init_tfm,
+ .exit_tfm = starfive_hash_exit_tfm,
+ .setkey = starfive_hash_setkey,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct sha512_state),
+ .base = {
+ .cra_name = "hmac(sha512)",
+ .cra_driver_name = "sha512-hmac-starfive",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ }
+ }
+}, {
+ .init = starfive_hash_init,
+ .update = starfive_hash_update,
+ .final = starfive_hash_final,
+ .finup = starfive_hash_finup,
+ .digest = starfive_hash_digest,
+ .export = starfive_hash_export,
+ .import = starfive_hash_import,
+ .init_tfm = starfive_sm3_init_tfm,
+ .exit_tfm = starfive_hash_exit_tfm,
+ .halg = {
+ .digestsize = SM3_DIGEST_SIZE,
+ .statesize = sizeof(struct sm3_state),
+ .base = {
+ .cra_name = "sm3",
+ .cra_driver_name = "sm3-starfive",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SM3_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ }
+ }
+}, {
+ .init = starfive_hash_init,
+ .update = starfive_hash_update,
+ .final = starfive_hash_final,
+ .finup = starfive_hash_finup,
+ .digest = starfive_hash_digest,
+ .export = starfive_hash_export,
+ .import = starfive_hash_import,
+ .init_tfm = starfive_hmac_sm3_init_tfm,
+ .exit_tfm = starfive_hash_exit_tfm,
+ .setkey = starfive_hash_setkey,
+ .halg = {
+ .digestsize = SM3_DIGEST_SIZE,
+ .statesize = sizeof(struct sm3_state),
+ .base = {
+ .cra_name = "hmac(sm3)",
+ .cra_driver_name = "sm3-hmac-starfive",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SM3_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ }
+ }
+},
+};
+
+int starfive_hash_register_algs(void)
+{
+ return crypto_register_ahashes(algs_sha2_sm3, ARRAY_SIZE(algs_sha2_sm3));
+}
+
+void starfive_hash_unregister_algs(void)
+{
+ crypto_unregister_ahashes(algs_sha2_sm3, ARRAY_SIZE(algs_sha2_sm3));
+}
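
Everything registered above is reachable through the generic ahash API; callers never name the driver directly, the crypto core selects "sha256-starfive" by cra_priority. A minimal synchronous caller using only standard API calls (nothing StarFive-specific assumed; @data must be addressable by a scatterlist, i.e. not on the stack):

    #include <crypto/hash.h>
    #include <linux/scatterlist.h>

    static int example_sha256(const u8 *data, unsigned int len, u8 *digest)
    {
            struct crypto_ahash *tfm;
            struct ahash_request *req;
            struct scatterlist sg;
            DECLARE_CRYPTO_WAIT(wait);
            int ret;

            tfm = crypto_alloc_ahash("sha256", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            req = ahash_request_alloc(tfm, GFP_KERNEL);
            if (!req) {
                    crypto_free_ahash(tfm);
                    return -ENOMEM;
            }

            sg_init_one(&sg, data, len);
            ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                       crypto_req_done, &wait);
            ahash_request_set_crypt(req, &sg, digest, len);

            ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

            ahash_request_free(req);
            crypto_free_ahash(tfm);
            return ret;
    }

Note that the driver only takes the hardware path from .digest on block-aligned scatterlists; init/update/final/finup chains are delegated wholesale to the software fallback.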
diff --git a/drivers/crypto/starfive/jh7110-rsa.c b/drivers/crypto/starfive/jh7110-rsa.c
new file mode 100644
index 000000000000..f31bbd825f88
--- /dev/null
+++ b/drivers/crypto/starfive/jh7110-rsa.c
@@ -0,0 +1,617 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * StarFive Public Key Algo acceleration driver
+ *
+ * Copyright (c) 2022 StarFive Technology
+ */
+
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-direct.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <crypto/akcipher.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/internal/rsa.h>
+#include <crypto/scatterwalk.h>
+
+#include "jh7110-cryp.h"
+
+#define STARFIVE_PKA_REGS_OFFSET 0x400
+#define STARFIVE_PKA_CACR_OFFSET (STARFIVE_PKA_REGS_OFFSET + 0x0)
+#define STARFIVE_PKA_CASR_OFFSET (STARFIVE_PKA_REGS_OFFSET + 0x4)
+#define STARFIVE_PKA_CAAR_OFFSET (STARFIVE_PKA_REGS_OFFSET + 0x8)
+#define STARFIVE_PKA_CAER_OFFSET (STARFIVE_PKA_REGS_OFFSET + 0x108)
+#define STARFIVE_PKA_CANR_OFFSET (STARFIVE_PKA_REGS_OFFSET + 0x208)
+
+// R^2 mod N and N0'
+#define CRYPTO_CMD_PRE 0x0
+// A * R mod N ==> A
+#define CRYPTO_CMD_ARN 0x5
+// A * E * R mod N ==> A
+#define CRYPTO_CMD_AERN 0x6
+// A * A * R mod N ==> A
+#define CRYPTO_CMD_AARN 0x7
+
+#define STARFIVE_RSA_MAX_KEYSZ 256
+#define STARFIVE_RSA_RESET 0x2
+
+static inline int starfive_pka_wait_done(struct starfive_cryp_ctx *ctx)
+{
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+
+ return wait_for_completion_timeout(&cryp->pka_done,
+ usecs_to_jiffies(100000));
+}
+
+static inline void starfive_pka_irq_mask_clear(struct starfive_cryp_ctx *ctx)
+{
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ u32 stat;
+
+ stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
+ stat &= ~STARFIVE_IE_MASK_PKA_DONE;
+ writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET);
+
+ reinit_completion(&cryp->pka_done);
+}
+
+static void starfive_rsa_free_key(struct starfive_rsa_key *key)
+{
+ kfree_sensitive(key->d);
+ kfree_sensitive(key->e);
+ kfree_sensitive(key->n);
+ memset(key, 0, sizeof(*key));
+}
+
+static unsigned int starfive_rsa_get_nbit(u8 *pa, u32 snum, int key_sz)
+{
+ u32 i;
+ u8 value;
+
+ i = snum >> 3;
+
+ value = pa[key_sz - i - 1];
+ value >>= snum & 0x7;
+ value &= 0x1;
+
+ return value;
+}
+
+static int starfive_rsa_montgomery_form(struct starfive_cryp_ctx *ctx,
+ u32 *out, u32 *in, u8 mont,
+ u32 *mod, int bit_len)
+{
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ struct starfive_cryp_request_ctx *rctx = ctx->rctx;
+ int count = rctx->total / sizeof(u32) - 1;
+ int loop;
+ u32 temp;
+ u8 opsize;
+
+ opsize = (bit_len - 1) >> 5;
+ rctx->csr.pka.v = 0;
+
+ writel(rctx->csr.pka.v, cryp->base + STARFIVE_PKA_CACR_OFFSET);
+
+ for (loop = 0; loop <= opsize; loop++)
+ writel(mod[opsize - loop], cryp->base + STARFIVE_PKA_CANR_OFFSET + loop * 4);
+
+ if (mont) {
+ rctx->csr.pka.v = 0;
+ rctx->csr.pka.cln_done = 1;
+ rctx->csr.pka.opsize = opsize;
+ rctx->csr.pka.exposize = opsize;
+ rctx->csr.pka.cmd = CRYPTO_CMD_PRE;
+ rctx->csr.pka.start = 1;
+ rctx->csr.pka.not_r2 = 1;
+ rctx->csr.pka.ie = 1;
+
+ starfive_pka_irq_mask_clear(ctx);
+ writel(rctx->csr.pka.v, cryp->base + STARFIVE_PKA_CACR_OFFSET);
+
+ if (!starfive_pka_wait_done(ctx))
+ return -ETIMEDOUT;
+
+ for (loop = 0; loop <= opsize; loop++)
+ writel(in[opsize - loop], cryp->base + STARFIVE_PKA_CAAR_OFFSET + loop * 4);
+
+ writel(0x1000000, cryp->base + STARFIVE_PKA_CAER_OFFSET);
+
+ for (loop = 1; loop <= opsize; loop++)
+ writel(0, cryp->base + STARFIVE_PKA_CAER_OFFSET + loop * 4);
+
+ rctx->csr.pka.v = 0;
+ rctx->csr.pka.cln_done = 1;
+ rctx->csr.pka.opsize = opsize;
+ rctx->csr.pka.exposize = opsize;
+ rctx->csr.pka.cmd = CRYPTO_CMD_AERN;
+ rctx->csr.pka.start = 1;
+ rctx->csr.pka.ie = 1;
+
+ starfive_pka_irq_mask_clear(ctx);
+ writel(rctx->csr.pka.v, cryp->base + STARFIVE_PKA_CACR_OFFSET);
+
+ if (!starfive_pka_wait_done(ctx))
+ return -ETIMEDOUT;
+ } else {
+ rctx->csr.pka.v = 0;
+ rctx->csr.pka.cln_done = 1;
+ rctx->csr.pka.opsize = opsize;
+ rctx->csr.pka.exposize = opsize;
+ rctx->csr.pka.cmd = CRYPTO_CMD_PRE;
+ rctx->csr.pka.start = 1;
+ rctx->csr.pka.pre_expf = 1;
+ rctx->csr.pka.ie = 1;
+
+ starfive_pka_irq_mask_clear(ctx);
+ writel(rctx->csr.pka.v, cryp->base + STARFIVE_PKA_CACR_OFFSET);
+
+ if (!starfive_pka_wait_done(ctx))
+ return -ETIMEDOUT;
+
+ for (loop = 0; loop <= count; loop++)
+ writel(in[count - loop], cryp->base + STARFIVE_PKA_CAER_OFFSET + loop * 4);
+
+ /* pad with 0 up to opsize */
+ for (loop = count + 1; loop <= opsize; loop++)
+ writel(0, cryp->base + STARFIVE_PKA_CAER_OFFSET + loop * 4);
+
+ rctx->csr.pka.v = 0;
+ rctx->csr.pka.cln_done = 1;
+ rctx->csr.pka.opsize = opsize;
+ rctx->csr.pka.exposize = opsize;
+ rctx->csr.pka.cmd = CRYPTO_CMD_ARN;
+ rctx->csr.pka.start = 1;
+ rctx->csr.pka.ie = 1;
+
+ starfive_pka_irq_mask_clear(ctx);
+ writel(rctx->csr.pka.v, cryp->base + STARFIVE_PKA_CACR_OFFSET);
+
+ if (!starfive_pka_wait_done(ctx))
+ return -ETIMEDOUT;
+ }
+
+ for (loop = 0; loop <= opsize; loop++) {
+ temp = readl(cryp->base + STARFIVE_PKA_CAAR_OFFSET + 0x4 * loop);
+ out[opsize - loop] = temp;
+ }
+
+ return 0;
+}
+
+static int starfive_rsa_cpu_start(struct starfive_cryp_ctx *ctx, u32 *result,
+ u8 *de, u32 *n, int key_sz)
+{
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ struct starfive_cryp_request_ctx *rctx = ctx->rctx;
+ struct starfive_rsa_key *key = &ctx->rsa_key;
+ u32 temp;
+ int ret = 0;
+ int opsize, mlen, loop;
+ unsigned int *mta;
+
+ opsize = (key_sz - 1) >> 2;
+
+ mta = kmalloc(key_sz, GFP_KERNEL);
+ if (!mta)
+ return -ENOMEM;
+
+ ret = starfive_rsa_montgomery_form(ctx, mta, (u32 *)rctx->rsa_data,
+ 0, n, key_sz << 3);
+ if (ret) {
+ dev_err_probe(cryp->dev, ret, "Conversion to Montgomery failed");
+ goto rsa_err;
+ }
+
+ for (loop = 0; loop <= opsize; loop++)
+ writel(mta[opsize - loop],
+ cryp->base + STARFIVE_PKA_CAER_OFFSET + loop * 4);
+
+ for (loop = key->bitlen - 1; loop > 0; loop--) {
+ mlen = starfive_rsa_get_nbit(de, loop - 1, key_sz);
+
+ rctx->csr.pka.v = 0;
+ rctx->csr.pka.cln_done = 1;
+ rctx->csr.pka.opsize = opsize;
+ rctx->csr.pka.exposize = opsize;
+ rctx->csr.pka.cmd = CRYPTO_CMD_AARN;
+ rctx->csr.pka.start = 1;
+ rctx->csr.pka.ie = 1;
+
+ starfive_pka_irq_mask_clear(ctx);
+ writel(rctx->csr.pka.v, cryp->base + STARFIVE_PKA_CACR_OFFSET);
+
+ ret = -ETIMEDOUT;
+ if (!starfive_pka_wait_done(ctx))
+ goto rsa_err;
+
+ if (mlen) {
+ rctx->csr.pka.v = 0;
+ rctx->csr.pka.cln_done = 1;
+ rctx->csr.pka.opsize = opsize;
+ rctx->csr.pka.exposize = opsize;
+ rctx->csr.pka.cmd = CRYPTO_CMD_AERN;
+ rctx->csr.pka.start = 1;
+ rctx->csr.pka.ie = 1;
+
+ starfive_pka_irq_mask_clear(ctx);
+ writel(rctx->csr.pka.v, cryp->base + STARFIVE_PKA_CACR_OFFSET);
+
+ if (!starfive_pka_wait_done(ctx))
+ goto rsa_err;
+ }
+ }
+
+ for (loop = 0; loop <= opsize; loop++) {
+ temp = readl(cryp->base + STARFIVE_PKA_CAAR_OFFSET + 0x4 * loop);
+ result[opsize - loop] = temp;
+ }
+
+ ret = starfive_rsa_montgomery_form(ctx, result, result, 1, n, key_sz << 3);
+ if (ret)
+ dev_err_probe(cryp->dev, ret, "Conversion from Montgomery failed");
+rsa_err:
+ kfree(mta);
+ return ret;
+}
+
+static int starfive_rsa_start(struct starfive_cryp_ctx *ctx, u8 *result,
+ u8 *de, u8 *n, int key_sz)
+{
+ return starfive_rsa_cpu_start(ctx, (u32 *)result, de, (u32 *)n, key_sz);
+}
+
+static int starfive_rsa_enc_core(struct starfive_cryp_ctx *ctx, int enc)
+{
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ struct starfive_cryp_request_ctx *rctx = ctx->rctx;
+ struct starfive_rsa_key *key = &ctx->rsa_key;
+ int ret = 0;
+
+ writel(STARFIVE_RSA_RESET, cryp->base + STARFIVE_PKA_CACR_OFFSET);
+
+ rctx->total = sg_copy_to_buffer(rctx->in_sg, rctx->nents,
+ rctx->rsa_data, rctx->total);
+
+ if (enc) {
+ key->bitlen = key->e_bitlen;
+ ret = starfive_rsa_start(ctx, rctx->rsa_data, key->e,
+ key->n, key->key_sz);
+ } else {
+ key->bitlen = key->d_bitlen;
+ ret = starfive_rsa_start(ctx, rctx->rsa_data, key->d,
+ key->n, key->key_sz);
+ }
+
+ if (ret)
+ goto err_rsa_crypt;
+
+ sg_copy_buffer(rctx->out_sg, sg_nents(rctx->out_sg),
+ rctx->rsa_data, key->key_sz, 0, 0);
+
+err_rsa_crypt:
+ writel(STARFIVE_RSA_RESET, cryp->base + STARFIVE_PKA_CACR_OFFSET);
+ return ret;
+}
+
+static int starfive_rsa_enc(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct starfive_cryp_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ struct starfive_rsa_key *key = &ctx->rsa_key;
+ struct starfive_cryp_request_ctx *rctx = akcipher_request_ctx(req);
+ int ret;
+
+ if (!key->key_sz) {
+ akcipher_request_set_tfm(req, ctx->akcipher_fbk);
+ ret = crypto_akcipher_encrypt(req);
+ akcipher_request_set_tfm(req, tfm);
+ return ret;
+ }
+
+ if (unlikely(!key->n || !key->e))
+ return -EINVAL;
+
+ if (req->dst_len < key->key_sz)
+ return dev_err_probe(cryp->dev, -EOVERFLOW,
+ "Output buffer length less than parameter n\n");
+
+ rctx->in_sg = req->src;
+ rctx->out_sg = req->dst;
+ rctx->total = req->src_len;
+ rctx->nents = sg_nents(rctx->in_sg);
+ ctx->rctx = rctx;
+
+ return starfive_rsa_enc_core(ctx, 1);
+}
+
+static int starfive_rsa_dec(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct starfive_cryp_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ struct starfive_rsa_key *key = &ctx->rsa_key;
+ struct starfive_cryp_request_ctx *rctx = akcipher_request_ctx(req);
+ int ret;
+
+ if (!key->key_sz) {
+ akcipher_request_set_tfm(req, ctx->akcipher_fbk);
+ ret = crypto_akcipher_decrypt(req);
+ akcipher_request_set_tfm(req, tfm);
+ return ret;
+ }
+
+ if (unlikely(!key->n || !key->d))
+ return -EINVAL;
+
+ if (req->dst_len < key->key_sz)
+ return dev_err_probe(cryp->dev, -EOVERFLOW,
+ "Output buffer length less than parameter n\n");
+
+ rctx->in_sg = req->src;
+ rctx->out_sg = req->dst;
+ ctx->rctx = rctx;
+ rctx->total = req->src_len;
+
+ return starfive_rsa_enc_core(ctx, 0);
+}
+
+static int starfive_rsa_set_n(struct starfive_rsa_key *rsa_key,
+ const char *value, size_t vlen)
+{
+ const char *ptr = value;
+ unsigned int bitslen;
+ int ret;
+
+ while (vlen && !*ptr) {
+ ptr++;
+ vlen--;
+ }
+ rsa_key->key_sz = vlen;
+ bitslen = rsa_key->key_sz << 3;
+
+ /* check valid key size */
+ if (bitslen & 0x1f)
+ return -EINVAL;
+
+ ret = -ENOMEM;
+ rsa_key->n = kmemdup(ptr, rsa_key->key_sz, GFP_KERNEL);
+ if (!rsa_key->n)
+ goto err;
+
+ return 0;
+ err:
+ rsa_key->key_sz = 0;
+ rsa_key->n = NULL;
+ starfive_rsa_free_key(rsa_key);
+ return ret;
+}
+
+static int starfive_rsa_set_e(struct starfive_rsa_key *rsa_key,
+ const char *value, size_t vlen)
+{
+ const char *ptr = value;
+ unsigned char pt;
+ int loop;
+
+ while (vlen && !*ptr) {
+ ptr++;
+ vlen--;
+ }
+ pt = *ptr;
+
+ if (!rsa_key->key_sz || !vlen || vlen > rsa_key->key_sz) {
+ rsa_key->e = NULL;
+ return -EINVAL;
+ }
+
+ rsa_key->e = kzalloc(rsa_key->key_sz, GFP_KERNEL);
+ if (!rsa_key->e)
+ return -ENOMEM;
+
+ for (loop = 8; loop > 0; loop--) {
+ if (pt >> (loop - 1))
+ break;
+ }
+
+ rsa_key->e_bitlen = (vlen - 1) * 8 + loop;
+
+ memcpy(rsa_key->e + (rsa_key->key_sz - vlen), ptr, vlen);
+
+ return 0;
+}
+
+static int starfive_rsa_set_d(struct starfive_rsa_key *rsa_key,
+ const char *value, size_t vlen)
+{
+ const char *ptr = value;
+ unsigned char pt;
+ int loop;
+ int ret;
+
+ while (vlen && !*ptr) {
+ ptr++;
+ vlen--;
+ }
+ pt = *ptr;
+
+ ret = -EINVAL;
+ if (!rsa_key->key_sz || !vlen || vlen > rsa_key->key_sz)
+ goto err;
+
+ ret = -ENOMEM;
+ rsa_key->d = kzalloc(rsa_key->key_sz, GFP_KERNEL);
+ if (!rsa_key->d)
+ goto err;
+
+ for (loop = 8; loop > 0; loop--) {
+ if (pt >> (loop - 1))
+ break;
+ }
+
+ rsa_key->d_bitlen = (vlen - 1) * 8 + loop;
+
+ memcpy(rsa_key->d + (rsa_key->key_sz - vlen), ptr, vlen);
+
+ return 0;
+ err:
+ rsa_key->d = NULL;
+ return ret;
+}
+
+static int starfive_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen, bool private)
+{
+ struct starfive_cryp_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct rsa_key raw_key = {NULL};
+ struct starfive_rsa_key *rsa_key = &ctx->rsa_key;
+ int ret;
+
+ if (private)
+ ret = rsa_parse_priv_key(&raw_key, key, keylen);
+ else
+ ret = rsa_parse_pub_key(&raw_key, key, keylen);
+ if (ret < 0)
+ goto err;
+
+ starfive_rsa_free_key(rsa_key);
+
+ /* Use fallback for mod > 256 + 1 byte prefix */
+ if (raw_key.n_sz > STARFIVE_RSA_MAX_KEYSZ + 1)
+ return 0;
+
+ ret = starfive_rsa_set_n(rsa_key, raw_key.n, raw_key.n_sz);
+ if (ret)
+ return ret;
+
+ ret = starfive_rsa_set_e(rsa_key, raw_key.e, raw_key.e_sz);
+ if (ret)
+ goto err;
+
+ if (private) {
+ ret = starfive_rsa_set_d(rsa_key, raw_key.d, raw_key.d_sz);
+ if (ret)
+ goto err;
+ }
+
+ if (!rsa_key->n || !rsa_key->e) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (private && !rsa_key->d) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ return 0;
+ err:
+ starfive_rsa_free_key(rsa_key);
+ return ret;
+}
+
+static int starfive_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen)
+{
+ struct starfive_cryp_ctx *ctx = akcipher_tfm_ctx(tfm);
+ int ret;
+
+ ret = crypto_akcipher_set_pub_key(ctx->akcipher_fbk, key, keylen);
+ if (ret)
+ return ret;
+
+ return starfive_rsa_setkey(tfm, key, keylen, false);
+}
+
+static int starfive_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen)
+{
+ struct starfive_cryp_ctx *ctx = akcipher_tfm_ctx(tfm);
+ int ret;
+
+ ret = crypto_akcipher_set_priv_key(ctx->akcipher_fbk, key, keylen);
+ if (ret)
+ return ret;
+
+ return starfive_rsa_setkey(tfm, key, keylen, true);
+}
+
+static unsigned int starfive_rsa_max_size(struct crypto_akcipher *tfm)
+{
+ struct starfive_cryp_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ if (ctx->rsa_key.key_sz)
+ return ctx->rsa_key.key_sz;
+
+ return crypto_akcipher_maxsize(ctx->akcipher_fbk);
+}
+
+static int starfive_rsa_init_tfm(struct crypto_akcipher *tfm)
+{
+ struct starfive_cryp_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ ctx->akcipher_fbk = crypto_alloc_akcipher("rsa-generic", 0, 0);
+ if (IS_ERR(ctx->akcipher_fbk))
+ return PTR_ERR(ctx->akcipher_fbk);
+
+ ctx->cryp = starfive_cryp_find_dev(ctx);
+ if (!ctx->cryp) {
+ crypto_free_akcipher(ctx->akcipher_fbk);
+ return -ENODEV;
+ }
+
+ akcipher_set_reqsize(tfm, sizeof(struct starfive_cryp_request_ctx) +
+ sizeof(struct crypto_akcipher) + 32);
+
+ return 0;
+}
+
+static void starfive_rsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+ struct starfive_cryp_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct starfive_rsa_key *key = (struct starfive_rsa_key *)&ctx->rsa_key;
+
+ crypto_free_akcipher(ctx->akcipher_fbk);
+ starfive_rsa_free_key(key);
+}
+
+static struct akcipher_alg starfive_rsa = {
+ .encrypt = starfive_rsa_enc,
+ .decrypt = starfive_rsa_dec,
+ .sign = starfive_rsa_dec,
+ .verify = starfive_rsa_enc,
+ .set_pub_key = starfive_rsa_set_pub_key,
+ .set_priv_key = starfive_rsa_set_priv_key,
+ .max_size = starfive_rsa_max_size,
+ .init = starfive_rsa_init_tfm,
+ .exit = starfive_rsa_exit_tfm,
+ .base = {
+ .cra_name = "rsa",
+ .cra_driver_name = "starfive-rsa",
+ .cra_flags = CRYPTO_ALG_TYPE_AKCIPHER |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_priority = 3000,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
+ },
+};
+
+int starfive_rsa_register_algs(void)
+{
+ return crypto_register_akcipher(&starfive_rsa);
+}
+
+void starfive_rsa_unregister_algs(void)
+{
+ crypto_unregister_akcipher(&starfive_rsa);
+}
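
The exponentiation loop in starfive_rsa_cpu_start() is a textbook left-to-right square-and-multiply in Montgomery form; the summary below is standard Montgomery arithmetic matched against the command comments above, not taken from hardware documentation. With $R = 2^{32(\mathrm{opsize}+1)}$ and the Montgomery representation $\bar{x} = xR \bmod N$, a Montgomery product returns

    $\mathrm{MonPro}(\bar{a}, \bar{b}) = abR^{-1}R^{2}\,/\,R = abR \bmod N$ for plain operands, i.e. $\mathrm{MonPro}(\bar{a}, \bar{b}) = abR \bmod N$

CRYPTO_CMD_PRE precomputes $R^2 \bmod N$ and $N_0'$ (per its comment), which is what lets the mont = 0 call at the top convert the message into Montgomery form (PRE, then ARN). Scanning the exponent bits $e_{k-1}, \dots, e_0$ from the top, the loop then performs

    $\bar{x} \leftarrow \mathrm{MonPro}(\bar{x}, \bar{x})$ (CRYPTO_CMD_AARN, every bit: square)
    if $e_i = 1$: $\bar{x} \leftarrow \mathrm{MonPro}(\bar{x}, \bar{m})$ (CRYPTO_CMD_AERN: multiply)

and the closing starfive_rsa_montgomery_form(..., mont = 1, ...) call appears to convert the accumulator back out of Montgomery form, leaving the plain $x = m^e \bmod N$.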
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 27f4aa8d10bd..1dc6227d353e 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -31,10 +31,10 @@
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
-#include <crypto/algapi.h>
#include <crypto/skcipher.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
+#include <crypto/utils.h>
#include <linux/rtnetlink.h> /* for struct rtattr and RTA macros only */
#include <linux/key-type.h>
#include <keys/user-type.h>
@@ -745,16 +745,23 @@ static int crypt_iv_eboiv_ctr(struct crypt_config *cc, struct dm_target *ti,
static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
struct dm_crypt_request *dmreq)
{
- u8 buf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(__le64));
+ struct crypto_skcipher *tfm = any_tfm(cc);
struct skcipher_request *req;
struct scatterlist src, dst;
DECLARE_CRYPTO_WAIT(wait);
+ unsigned int reqsize;
int err;
+ u8 *buf;
- req = skcipher_request_alloc(any_tfm(cc), GFP_NOIO);
+ reqsize = sizeof(*req) + crypto_skcipher_reqsize(tfm);
+ reqsize = ALIGN(reqsize, __alignof__(__le64));
+
+ req = kmalloc(reqsize + cc->iv_size, GFP_NOIO);
if (!req)
return -ENOMEM;
+ skcipher_request_set_tfm(req, tfm);
+
+ buf = (u8 *)req + reqsize;
memset(buf, 0, cc->iv_size);
*(__le64 *)buf = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
@@ -763,7 +770,7 @@ static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
skcipher_request_set_crypt(req, &src, &dst, cc->iv_size, buf);
skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
- skcipher_request_free(req);
+ kfree_sensitive(req);
return err;
}
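
The eboiv change replaces a fixed MAX_CIPHER_BLOCKSIZE stack buffer with an IV buffer co-allocated behind the skcipher request, so the buffer length tracks cc->iv_size exactly, and kfree_sensitive() then wipes the IV material together with the request state. Roughly, the single call stands in for (sketch, using the standard helper from <linux/string.h>):

    /* approximately what kfree_sensitive(req) does here */
    memzero_explicit(req, reqsize + cc->iv_size);
    kfree(req);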