author	Linus Torvalds	2022-03-21 16:02:36 -0700
committer	Linus Torvalds	2022-03-21 16:02:36 -0700
commit	93e220a62da36f766b3188e76e234607e41488f9 (patch)
tree	d56d5609e4b290baa9b46a48b123ab9c4f23f073
parent	5628b8de1228436d47491c662dc521bc138a3d43 (diff)
parent	0e03b8fd29363f2df44e2a7a176d486de550757a (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "API:
   - hwrng core now credits for low-quality RNG devices.

  Algorithms:
   - Optimisations for neon aes on arm/arm64.
   - Add accelerated crc32_be on arm64.
   - Add ffdheXYZ(dh) templates.
   - Disallow hmac keys < 112 bits in FIPS mode.
   - Add AVX assembly implementation for sm3 on x86.

  Drivers:
   - Add missing local_bh_disable calls for crypto_engine callback.
   - Ensure BH is disabled in crypto_engine callback path.
   - Fix zero length DMA mappings in ccree.
   - Add synchronization between mailbox accesses in octeontx2.
   - Add Xilinx SHA3 driver.
   - Add support for the TDES IP available on sama7g5 SoC in atmel"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (137 commits)
  crypto: xilinx - Turn SHA into a tristate and allow COMPILE_TEST
  MAINTAINERS: update HPRE/SEC2/TRNG driver maintainers list
  crypto: dh - Remove the unused function dh_safe_prime_dh_alg()
  hwrng: nomadik - Change clk_disable to clk_disable_unprepare
  crypto: arm64 - cleanup comments
  crypto: qat - fix initialization of pfvf rts_map_msg structures
  crypto: qat - fix initialization of pfvf cap_msg structures
  crypto: qat - remove unneeded assignment
  crypto: qat - disable registration of algorithms
  crypto: hisilicon/qm - fix memset during queues clearing
  crypto: xilinx: prevent probing on non-xilinx hardware
  crypto: marvell/octeontx - Use swap() instead of open coding it
  crypto: ccree - Fix use after free in cc_cipher_exit()
  crypto: ccp - ccp_dmaengine_unregister release dma channels
  crypto: octeontx2 - fix missing unlock
  hwrng: cavium - fix NULL but dereferenced coccicheck error
  crypto: cavium/nitrox - don't cast parameter in bit operations
  crypto: vmx - add missing dependencies
  MAINTAINERS: Add maintainer for Xilinx ZynqMP SHA3 driver
  crypto: xilinx - Add Xilinx SHA3 driver
  ...
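Among the API items above, the new ffdheXYZ(dh) templates plug into the existing kpp interface. A minimal, illustrative in-kernel sketch follows; the function name demo_ffdhe2048 is hypothetical, and the set_secret/generate_public_key steps plus most error handling are elided:

/*
 * Illustrative sketch only (not part of this pull): instantiating one
 * of the new ffdheXYZ(dh) templates through the kpp API.
 */
#include <crypto/kpp.h>
#include <linux/err.h>

static int demo_ffdhe2048(void)
{
	struct crypto_kpp *tfm;

	tfm = crypto_alloc_kpp("ffdhe2048(dh)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* ... crypto_kpp_set_secret() / crypto_kpp_generate_public_key() ... */

	crypto_free_kpp(tfm);
	return 0;
}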
-rw-r--r--  Documentation/ABI/testing/debugfs-hisi-hpre  178
-rw-r--r--  Documentation/ABI/testing/debugfs-hisi-sec  146
-rw-r--r--  Documentation/ABI/testing/debugfs-hisi-zip  146
-rw-r--r--  MAINTAINERS  11
-rw-r--r--  arch/alpha/include/asm/xor.h  53
-rw-r--r--  arch/arm/crypto/aes-neonbs-core.S  105
-rw-r--r--  arch/arm/crypto/aes-neonbs-glue.c  35
-rw-r--r--  arch/arm/include/asm/xor.h  42
-rw-r--r--  arch/arm/lib/xor-neon.c  12
-rw-r--r--  arch/arm64/crypto/Kconfig  2
-rw-r--r--  arch/arm64/crypto/aes-glue.c  22
-rw-r--r--  arch/arm64/crypto/aes-modes.S  18
-rw-r--r--  arch/arm64/crypto/aes-neonbs-core.S  264
-rw-r--r--  arch/arm64/crypto/aes-neonbs-glue.c  97
-rw-r--r--  arch/arm64/crypto/sha3-ce-glue.c  2
-rw-r--r--  arch/arm64/crypto/sha512-armv8.pl  2
-rw-r--r--  arch/arm64/crypto/sha512-ce-glue.c  2
-rw-r--r--  arch/arm64/crypto/sm3-ce-glue.c  28
-rw-r--r--  arch/arm64/include/asm/xor.h  21
-rw-r--r--  arch/arm64/lib/crc32.S  87
-rw-r--r--  arch/arm64/lib/xor-neon.c  46
-rw-r--r--  arch/ia64/include/asm/xor.h  21
-rw-r--r--  arch/powerpc/include/asm/xor_altivec.h  25
-rw-r--r--  arch/powerpc/lib/xor_vmx.c  28
-rw-r--r--  arch/powerpc/lib/xor_vmx.h  27
-rw-r--r--  arch/powerpc/lib/xor_vmx_glue.c  32
-rw-r--r--  arch/s390/lib/xor.c  21
-rw-r--r--  arch/sparc/include/asm/xor_32.h  21
-rw-r--r--  arch/sparc/include/asm/xor_64.h  42
-rw-r--r--  arch/x86/crypto/Makefile  3
-rw-r--r--  arch/x86/crypto/aes_ctrby8_avx-x86_64.S  63
-rw-r--r--  arch/x86/crypto/blowfish_glue.c  12
-rw-r--r--  arch/x86/crypto/des3_ede_glue.c  8
-rw-r--r--  arch/x86/crypto/sm3-avx-asm_64.S  517
-rw-r--r--  arch/x86/crypto/sm3_avx_glue.c  134
-rw-r--r--  arch/x86/include/asm/xor.h  42
-rw-r--r--  arch/x86/include/asm/xor_32.h  42
-rw-r--r--  arch/x86/include/asm/xor_avx.h  21
-rw-r--r--  crypto/Kconfig  25
-rw-r--r--  crypto/algapi.c  48
-rw-r--r--  crypto/api.c  19
-rw-r--r--  crypto/asymmetric_keys/signature.c  2
-rw-r--r--  crypto/asymmetric_keys/x509_parser.h  2
-rw-r--r--  crypto/async_tx/async_xor.c  8
-rw-r--r--  crypto/async_tx/raid6test.c  4
-rw-r--r--  crypto/authenc.c  2
-rw-r--r--  crypto/cfb.c  2
-rw-r--r--  crypto/crypto_engine.c  1
-rw-r--r--  crypto/dh.c  681
-rw-r--r--  crypto/dh_helper.c  42
-rw-r--r--  crypto/hmac.c  4
-rw-r--r--  crypto/kpp.c  29
-rw-r--r--  crypto/lrw.c  1
-rw-r--r--  crypto/memneq.c  22
-rw-r--r--  crypto/rsa-pkcs1pad.c  38
-rw-r--r--  crypto/sm2.c  40
-rw-r--r--  crypto/sm3_generic.c  142
-rw-r--r--  crypto/tcrypt.c  226
-rw-r--r--  crypto/testmgr.c  67
-rw-r--r--  crypto/testmgr.h  1456
-rw-r--r--  crypto/xts.c  1
-rw-r--r--  drivers/char/hw_random/Kconfig  2
-rw-r--r--  drivers/char/hw_random/atmel-rng.c  148
-rw-r--r--  drivers/char/hw_random/cavium-rng-vf.c  2
-rw-r--r--  drivers/char/hw_random/core.c  161
-rw-r--r--  drivers/char/hw_random/nomadik-rng.c  4
-rw-r--r--  drivers/crypto/Kconfig  10
-rw-r--r--  drivers/crypto/Makefile  2
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c  3
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c  3
-rw-r--r--  drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c  3
-rw-r--r--  drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c  2
-rw-r--r--  drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c  3
-rw-r--r--  drivers/crypto/amlogic/amlogic-gxl-cipher.c  2
-rw-r--r--  drivers/crypto/atmel-aes.c  1
-rw-r--r--  drivers/crypto/atmel-sha.c  1
-rw-r--r--  drivers/crypto/atmel-tdes.c  1
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_mbx.c  8
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_req.h  2
-rw-r--r--  drivers/crypto/cavium/zip/zip_main.c  83
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes.c  5
-rw-r--r--  drivers/crypto/ccp/ccp-dmaengine.c  16
-rw-r--r--  drivers/crypto/ccp/sev-dev.c  2
-rw-r--r--  drivers/crypto/ccree/cc_buffer_mgr.c  7
-rw-r--r--  drivers/crypto/ccree/cc_cipher.c  2
-rw-r--r--  drivers/crypto/gemini/sl3516-ce-cipher.c  6
-rw-r--r--  drivers/crypto/hisilicon/qm.c  4
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_crypto.c  43
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_crypto.h  6
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_main.c  59
-rw-r--r--  drivers/crypto/marvell/Kconfig  1
-rw-r--r--  drivers/crypto/marvell/octeontx/otx_cptvf_algs.c  5
-rw-r--r--  drivers/crypto/marvell/octeontx/otx_cptvf_main.c  1
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cpt_common.h  1
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c  14
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptlf.h  19
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptpf.h  1
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c  25
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c  27
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c  56
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c  17
-rw-r--r--  drivers/crypto/mxs-dcp.c  2
-rw-r--r--  drivers/crypto/nx/nx-common-pseries.c  4
-rw-r--r--  drivers/crypto/omap-aes.c  2
-rw-r--r--  drivers/crypto/omap-sham.c  2
-rw-r--r--  drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c  23
-rw-r--r--  drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h  24
-rw-r--r--  drivers/crypto/qat/qat_4xxx/adf_drv.c  7
-rw-r--r--  drivers/crypto/qat/qat_common/Makefile  1
-rw-r--r--  drivers/crypto/qat/qat_common/adf_accel_devices.h  2
-rw-r--r--  drivers/crypto/qat/qat_common/adf_admin.c  37
-rw-r--r--  drivers/crypto/qat/qat_common/adf_common_drv.h  4
-rw-r--r--  drivers/crypto/qat/qat_common/adf_ctl_drv.c  6
-rw-r--r--  drivers/crypto/qat/qat_common/adf_gen4_hw_data.h  14
-rw-r--r--  drivers/crypto/qat/qat_common/adf_gen4_pfvf.c  42
-rw-r--r--  drivers/crypto/qat/qat_common/adf_gen4_pm.c  137
-rw-r--r--  drivers/crypto/qat/qat_common/adf_gen4_pm.h  44
-rw-r--r--  drivers/crypto/qat/qat_common/adf_init.c  6
-rw-r--r--  drivers/crypto/qat/qat_common/adf_isr.c  42
-rw-r--r--  drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c  4
-rw-r--r--  drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h  1
-rw-r--r--  drivers/crypto/qat/qat_common/qat_crypto.c  7
-rw-r--r--  drivers/crypto/qat/qat_common/qat_uclo.c  9
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto_skcipher.c  1
-rw-r--r--  drivers/crypto/ux500/cryp/cryp_core.c  2
-rw-r--r--  drivers/crypto/ux500/hash/hash_core.c  2
-rw-r--r--  drivers/crypto/vmx/Kconfig  4
-rw-r--r--  drivers/crypto/xilinx/Makefile  1
-rw-r--r--  drivers/crypto/xilinx/zynqmp-sha.c  264
-rw-r--r--  drivers/firmware/xilinx/zynqmp.c  26
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c  1
-rw-r--r--  include/asm-generic/xor.h  84
-rw-r--r--  include/crypto/algapi.h  10
-rw-r--r--  include/crypto/dh.h  26
-rw-r--r--  include/crypto/internal/kpp.h  158
-rw-r--r--  include/crypto/sm3.h  34
-rw-r--r--  include/linux/crypto.h  9
-rw-r--r--  include/linux/firmware/xlnx-zynqmp.h  8
-rw-r--r--  include/linux/raid/xor.h  21
-rw-r--r--  kernel/padata.c  2
-rw-r--r--  lib/crc32.c  14
-rw-r--r--  lib/crc32test.c  2
-rw-r--r--  lib/crypto/Kconfig  3
-rw-r--r--  lib/crypto/Makefile  3
-rw-r--r--  lib/crypto/sm3.c  246
-rw-r--r--  lib/mpi/mpi-bit.c  1
-rw-r--r--  security/keys/dh.c  2
147 files changed, 5673 insertions, 1669 deletions
diff --git a/Documentation/ABI/testing/debugfs-hisi-hpre b/Documentation/ABI/testing/debugfs-hisi-hpre
index b4be5f1db4b7..396de7bc735d 100644
--- a/Documentation/ABI/testing/debugfs-hisi-hpre
+++ b/Documentation/ABI/testing/debugfs-hisi-hpre
@@ -1,140 +1,150 @@
-What: /sys/kernel/debug/hisi_hpre/<bdf>/cluster[0-3]/regs
-Date: Sep 2019
-Contact: linux-crypto@vger.kernel.org
-Description: Dump debug registers from the HPRE cluster.
+What: /sys/kernel/debug/hisi_hpre/<bdf>/cluster[0-3]/regs
+Date: Sep 2019
+Contact: linux-crypto@vger.kernel.org
+Description: Dump debug registers from the HPRE cluster.
Only available for PF.
-What: /sys/kernel/debug/hisi_hpre/<bdf>/cluster[0-3]/cluster_ctrl
-Date: Sep 2019
-Contact: linux-crypto@vger.kernel.org
-Description: Write the HPRE core selection in the cluster into this file,
+What: /sys/kernel/debug/hisi_hpre/<bdf>/cluster[0-3]/cluster_ctrl
+Date: Sep 2019
+Contact: linux-crypto@vger.kernel.org
+Description: Write the HPRE core selection in the cluster into this file,
and then we can read the debug information of the core.
Only available for PF.
-What: /sys/kernel/debug/hisi_hpre/<bdf>/rdclr_en
-Date: Sep 2019
-Contact: linux-crypto@vger.kernel.org
-Description: HPRE cores debug registers read clear control. 1 means enable
+What: /sys/kernel/debug/hisi_hpre/<bdf>/rdclr_en
+Date: Sep 2019
+Contact: linux-crypto@vger.kernel.org
+Description: HPRE cores debug registers read clear control. 1 means enable
register read clear, otherwise 0. Writing to this file has no
functional effect, only enable or disable counters clear after
reading of these registers.
Only available for PF.
-What: /sys/kernel/debug/hisi_hpre/<bdf>/current_qm
-Date: Sep 2019
-Contact: linux-crypto@vger.kernel.org
-Description: One HPRE controller has one PF and multiple VFs, each function
+What: /sys/kernel/debug/hisi_hpre/<bdf>/current_qm
+Date: Sep 2019
+Contact: linux-crypto@vger.kernel.org
+Description: One HPRE controller has one PF and multiple VFs, each function
has a QM. Select the QM which below qm refers to.
Only available for PF.
-What: /sys/kernel/debug/hisi_hpre/<bdf>/regs
-Date: Sep 2019
-Contact: linux-crypto@vger.kernel.org
-Description: Dump debug registers from the HPRE.
+What: /sys/kernel/debug/hisi_hpre/<bdf>/alg_qos
+Date: Jun 2021
+Contact: linux-crypto@vger.kernel.org
+Description: The <bdf> identifies the function, for both PF and VF.
+ The HPRE driver supports configuring each function's QoS.
+ In the host, write the <bdf> and QoS value to alg_qos, such as
+ "echo <bdf> value > alg_qos". The QoS value is 1~1000, meaning
+ 1/1000~1000/1000 of the total QoS. Read alg_qos to query the
+ configured QoS in the host and VM, such as "cat alg_qos".
+
+What: /sys/kernel/debug/hisi_hpre/<bdf>/regs
+Date: Sep 2019
+Contact: linux-crypto@vger.kernel.org
+Description: Dump debug registers from the HPRE.
Only available for PF.
-What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/regs
-Date: Sep 2019
-Contact: linux-crypto@vger.kernel.org
-Description: Dump debug registers from the QM.
+What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/regs
+Date: Sep 2019
+Contact: linux-crypto@vger.kernel.org
+Description: Dump debug registers from the QM.
Available for PF and VF in host. VF in guest currently only
has one debug register.
-What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/current_q
-Date: Sep 2019
-Contact: linux-crypto@vger.kernel.org
-Description: One QM may contain multiple queues. Select specific queue to
+What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/current_q
+Date: Sep 2019
+Contact: linux-crypto@vger.kernel.org
+Description: One QM may contain multiple queues. Select specific queue to
show its debug registers in above regs.
Only available for PF.
-What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/clear_enable
-Date: Sep 2019
-Contact: linux-crypto@vger.kernel.org
-Description: QM debug registers(regs) read clear control. 1 means enable
+What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/clear_enable
+Date: Sep 2019
+Contact: linux-crypto@vger.kernel.org
+Description: QM debug registers(regs) read clear control. 1 means enable
register read clear, otherwise 0.
Writing to this file has no functional effect, only enable or
disable counters clear after reading of these registers.
Only available for PF.
-What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/err_irq
-Date: Apr 2020
-Contact: linux-crypto@vger.kernel.org
-Description: Dump the number of invalid interrupts for
+What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/err_irq
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the number of invalid interrupts for
QM task completion.
Available for both PF and VF, and take no other effect on HPRE.
-What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/aeq_irq
-Date: Apr 2020
-Contact: linux-crypto@vger.kernel.org
-Description: Dump the number of QM async event queue interrupts.
+What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/aeq_irq
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the number of QM async event queue interrupts.
Available for both PF and VF, and take no other effect on HPRE.
-What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/abnormal_irq
-Date: Apr 2020
-Contact: linux-crypto@vger.kernel.org
-Description: Dump the number of interrupts for QM abnormal event.
+What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/abnormal_irq
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the number of interrupts for QM abnormal event.
Available for both PF and VF, and take no other effect on HPRE.
-What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/create_qp_err
-Date: Apr 2020
-Contact: linux-crypto@vger.kernel.org
-Description: Dump the number of queue allocation errors.
+What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/create_qp_err
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the number of queue allocation errors.
Available for both PF and VF, and take no other effect on HPRE.
-What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/mb_err
-Date: Apr 2020
-Contact: linux-crypto@vger.kernel.org
-Description: Dump the number of failed QM mailbox commands.
+What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/mb_err
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the number of failed QM mailbox commands.
Available for both PF and VF, and take no other effect on HPRE.
-What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/status
-Date: Apr 2020
-Contact: linux-crypto@vger.kernel.org
-Description: Dump the status of the QM.
+What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/status
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the status of the QM.
Four states: initiated, started, stopped and closed.
Available for both PF and VF, and take no other effect on HPRE.
-What: /sys/kernel/debug/hisi_hpre/<bdf>/hpre_dfx/send_cnt
-Date: Apr 2020
-Contact: linux-crypto@vger.kernel.org
-Description: Dump the total number of sent requests.
+What: /sys/kernel/debug/hisi_hpre/<bdf>/hpre_dfx/send_cnt
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the total number of sent requests.
Available for both PF and VF, and take no other effect on HPRE.
-What: /sys/kernel/debug/hisi_hpre/<bdf>/hpre_dfx/recv_cnt
-Date: Apr 2020
-Contact: linux-crypto@vger.kernel.org
-Description: Dump the total number of received requests.
+What: /sys/kernel/debug/hisi_hpre/<bdf>/hpre_dfx/recv_cnt
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the total number of received requests.
Available for both PF and VF, and take no other effect on HPRE.
-What: /sys/kernel/debug/hisi_hpre/<bdf>/hpre_dfx/send_busy_cnt
-Date: Apr 2020
-Contact: linux-crypto@vger.kernel.org
-Description: Dump the total number of requests sent
+What: /sys/kernel/debug/hisi_hpre/<bdf>/hpre_dfx/send_busy_cnt
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the total number of requests sent
with returning busy.
Available for both PF and VF, and take no other effect on HPRE.
-What: /sys/kernel/debug/hisi_hpre/<bdf>/hpre_dfx/send_fail_cnt
-Date: Apr 2020
-Contact: linux-crypto@vger.kernel.org
-Description: Dump the total number of completed but error requests.
+What: /sys/kernel/debug/hisi_hpre/<bdf>/hpre_dfx/send_fail_cnt
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the total number of completed but error requests.
Available for both PF and VF, and take no other effect on HPRE.
-What: /sys/kernel/debug/hisi_hpre/<bdf>/hpre_dfx/invalid_req_cnt
-Date: Apr 2020
-Contact: linux-crypto@vger.kernel.org
-Description: Dump the total number of invalid requests being received.
+What: /sys/kernel/debug/hisi_hpre/<bdf>/hpre_dfx/invalid_req_cnt
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the total number of invalid requests being received.
Available for both PF and VF, and take no other effect on HPRE.
-What: /sys/kernel/debug/hisi_hpre/<bdf>/hpre_dfx/overtime_thrhld
-Date: Apr 2020
-Contact: linux-crypto@vger.kernel.org
-Description: Set the threshold time for counting the request which is
+What: /sys/kernel/debug/hisi_hpre/<bdf>/hpre_dfx/overtime_thrhld
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Set the threshold time for counting the request which is
processed longer than the threshold.
0: disable(default), 1: 1 microsecond.
Available for both PF and VF, and take no other effect on HPRE.
-What: /sys/kernel/debug/hisi_hpre/<bdf>/hpre_dfx/over_thrhld_cnt
-Date: Apr 2020
-Contact: linux-crypto@vger.kernel.org
-Description: Dump the total number of time out requests.
+What: /sys/kernel/debug/hisi_hpre/<bdf>/hpre_dfx/over_thrhld_cnt
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the total number of time out requests.
Available for both PF and VF, and take no other effect on HPRE.
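The alg_qos file documented above (the same interface is added for SEC and ZIP below) is driven from userspace. A hypothetical helper, assuming the "<bdf> value" write format quoted in the description:

/* Hypothetical userspace helper for the alg_qos debugfs file; the
 * "<bdf> <value>" write format follows the usage quoted above. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int set_hpre_qos(const char *bdf, int qos)	/* qos in 1..1000 */
{
	char path[128], buf[64];
	int fd, n, ret;

	snprintf(path, sizeof(path),
		 "/sys/kernel/debug/hisi_hpre/%s/alg_qos", bdf);
	n = snprintf(buf, sizeof(buf), "%s %d", bdf, qos);

	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	ret = (write(fd, buf, n) == n) ? 0 : -1;
	close(fd);
	return ret;
}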
diff --git a/Documentation/ABI/testing/debugfs-hisi-sec b/Documentation/ABI/testing/debugfs-hisi-sec
index 85feb4408e0f..2bf84ced484b 100644
--- a/Documentation/ABI/testing/debugfs-hisi-sec
+++ b/Documentation/ABI/testing/debugfs-hisi-sec
@@ -1,113 +1,123 @@
-What: /sys/kernel/debug/hisi_sec2/<bdf>/clear_enable
-Date: Oct 2019
-Contact: linux-crypto@vger.kernel.org
-Description: Enabling/disabling of clear action after reading
+What: /sys/kernel/debug/hisi_sec2/<bdf>/clear_enable
+Date: Oct 2019
+Contact: linux-crypto@vger.kernel.org
+Description: Enabling/disabling of clear action after reading
the SEC debug registers.
0: disable, 1: enable.
Only available for PF, and take no other effect on SEC.
-What: /sys/kernel/debug/hisi_sec2/<bdf>/current_qm
-Date: Oct 2019
-Contact: linux-crypto@vger.kernel.org
-Description: One SEC controller has one PF and multiple VFs, each function
+What: /sys/kernel/debug/hisi_sec2/<bdf>/current_qm
+Date: Oct 2019
+Contact: linux-crypto@vger.kernel.org
+Description: One SEC controller has one PF and multiple VFs, each function
has a QM. This file can be used to select the QM which below
qm refers to.
Only available for PF.
-What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/qm_regs
-Date: Oct 2019
-Contact: linux-crypto@vger.kernel.org
-Description: Dump of QM related debug registers.
+What: /sys/kernel/debug/hisi_sec2/<bdf>/alg_qos
+Date: Jun 2021
+Contact: linux-crypto@vger.kernel.org
+Description: The <bdf> identifies the function, for both PF and VF.
+ The SEC driver supports configuring each function's QoS.
+ In the host, write the <bdf> and QoS value to alg_qos, such as
+ "echo <bdf> value > alg_qos". The QoS value is 1~1000, meaning
+ 1/1000~1000/1000 of the total QoS. Read alg_qos to query the
+ configured QoS in the host and VM, such as "cat alg_qos".
+
+What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/qm_regs
+Date: Oct 2019
+Contact: linux-crypto@vger.kernel.org
+Description: Dump of QM related debug registers.
Available for PF and VF in host. VF in guest currently only
has one debug register.
-What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/current_q
-Date: Oct 2019
-Contact: linux-crypto@vger.kernel.org
-Description: One QM of SEC may contain multiple queues. Select specific
+What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/current_q
+Date: Oct 2019
+Contact: linux-crypto@vger.kernel.org
+Description: One QM of SEC may contain multiple queues. Select specific
queue to show its debug registers in above 'regs'.
Only available for PF.
-What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/clear_enable
-Date: Oct 2019
-Contact: linux-crypto@vger.kernel.org
-Description: Enabling/disabling of clear action after reading
+What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/clear_enable
+Date: Oct 2019
+Contact: linux-crypto@vger.kernel.org
+Description: Enabling/disabling of clear action after reading
the SEC's QM debug registers.
0: disable, 1: enable.
Only available for PF, and take no other effect on SEC.
-What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/err_irq
-Date: Apr 2020
-Contact: linux-crypto@vger.kernel.org
-Description: Dump the number of invalid interrupts for
+What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/err_irq
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the number of invalid interrupts for
QM task completion.
Available for both PF and VF, and take no other effect on SEC.
-What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/aeq_irq
-Date: Apr 2020
-Contact: linux-crypto@vger.kernel.org
-Description: Dump the number of QM async event queue interrupts.
+What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/aeq_irq
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the number of QM async event queue interrupts.
Available for both PF and VF, and take no other effect on SEC.
-What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/abnormal_irq
-Date: Apr 2020
-Contact: linux-crypto@vger.kernel.org
-Description: Dump the number of interrupts for QM abnormal event.
+What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/abnormal_irq
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the number of interrupts for QM abnormal event.
Available for both PF and VF, and take no other effect on SEC.
-What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/create_qp_err
-Date: Apr 2020
-Contact: linux-crypto@vger.kernel.org
-Description: Dump the number of queue allocation errors.
+What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/create_qp_err
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the number of queue allocation errors.
Available for both PF and VF, and take no other effect on SEC.
-What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/mb_err
-Date: Apr 2020
-Contact: linux-crypto@vger.kernel.org
-Description: Dump the number of failed QM mailbox commands.
+What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/mb_err
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the number of failed QM mailbox commands.
Available for both PF and VF, and take no other effect on SEC.
-What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/status
-Date: Apr 2020
-Contact: linux-crypto@vger.kernel.org
-Description: Dump the status of the QM.
+What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/status
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the status of the QM.
Four states: initiated, started, stopped and closed.
Available for both PF and VF, and take no other effect on SEC.
-What: /sys/kernel/debug/hisi_sec2/<bdf>/sec_dfx/send_cnt
-Date: Apr 2020
-Contact: linux-crypto@vger.kernel.org
-Description: Dump the total number of sent requests.
+What: /sys/kernel/debug/hisi_sec2/<bdf>/sec_dfx/send_cnt
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the total number of sent requests.
Available for both PF and VF, and take no other effect on SEC.
-What: /sys/kernel/debug/hisi_sec2/<bdf>/sec_dfx/recv_cnt
-Date: Apr 2020
-Contact: linux-crypto@vger.kernel.org
-Description: Dump the total number of received requests.
+What: /sys/kernel/debug/hisi_sec2/<bdf>/sec_dfx/recv_cnt
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the total number of received requests.
Available for both PF and VF, and take no other effect on SEC.
-What: /sys/kernel/debug/hisi_sec2/<bdf>/sec_dfx/send_busy_cnt
-Date: Apr 2020
-Contact: linux-crypto@vger.kernel.org
-Description: Dump the total number of requests sent with returning busy.
+What: /sys/kernel/debug/hisi_sec2/<bdf>/sec_dfx/send_busy_cnt
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the total number of requests sent with returning busy.
Available for both PF and VF, and take no other effect on SEC.
-What: /sys/kernel/debug/hisi_sec2/<bdf>/sec_dfx/err_bd_cnt
-Date: Apr 2020
-Contact: linux-crypto@vger.kernel.org
-Description: Dump the total number of BD type error requests
+What: /sys/kernel/debug/hisi_sec2/<bdf>/sec_dfx/err_bd_cnt
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the total number of BD type error requests
to be received.
Available for both PF and VF, and take no other effect on SEC.
-What: /sys/kernel/debug/hisi_sec2/<bdf>/sec_dfx/invalid_req_cnt
-Date: Apr 2020
-Contact: linux-crypto@vger.kernel.org
-Description: Dump the total number of invalid requests being received.
+What: /sys/kernel/debug/hisi_sec2/<bdf>/sec_dfx/invalid_req_cnt
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the total number of invalid requests being received.
Available for both PF and VF, and take no other effect on SEC.
-What: /sys/kernel/debug/hisi_sec2/<bdf>/sec_dfx/done_flag_cnt
-Date: Apr 2020
-Contact: linux-crypto@vger.kernel.org
-Description: Dump the total number of completed but marked error requests
+What: /sys/kernel/debug/hisi_sec2/<bdf>/sec_dfx/done_flag_cnt
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the total number of completed but marked error requests
to be received.
Available for both PF and VF, and take no other effect on SEC.
diff --git a/Documentation/ABI/testing/debugfs-hisi-zip b/Documentation/ABI/testing/debugfs-hisi-zip
index 3034a2bf99ca..bf1258bc6495 100644
--- a/Documentation/ABI/testing/debugfs-hisi-zip
+++ b/Documentation/ABI/testing/debugfs-hisi-zip
@@ -1,114 +1,124 @@
-What: /sys/kernel/debug/hisi_zip/<bdf>/comp_core[01]/regs
-Date: Nov 2018
-Contact: linux-crypto@vger.kernel.org
-Description: Dump of compression cores related debug registers.
+What: /sys/kernel/debug/hisi_zip/<bdf>/comp_core[01]/regs
+Date: Nov 2018
+Contact: linux-crypto@vger.kernel.org
+Description: Dump of compression cores related debug registers.
Only available for PF.
-What: /sys/kernel/debug/hisi_zip/<bdf>/decomp_core[0-5]/regs
-Date: Nov 2018
-Contact: linux-crypto@vger.kernel.org
-Description: Dump of decompression cores related debug registers.
+What: /sys/kernel/debug/hisi_zip/<bdf>/decomp_core[0-5]/regs
+Date: Nov 2018
+Contact: linux-crypto@vger.kernel.org
+Description: Dump of decompression cores related debug registers.
Only available for PF.
-What: /sys/kernel/debug/hisi_zip/<bdf>/clear_enable
-Date: Nov 2018
-Contact: linux-crypto@vger.kernel.org
-Description: Compression/decompression core debug registers read clear
+What: /sys/kernel/debug/hisi_zip/<bdf>/clear_enable
+Date: Nov 2018
+Contact: linux-crypto@vger.kernel.org
+Description: Compression/decompression core debug registers read clear
control. 1 means enable register read clear, otherwise 0.
Writing to this file has no functional effect, only enable or
disable counters clear after reading of these registers.
Only available for PF.
-What: /sys/kernel/debug/hisi_zip/<bdf>/current_qm
-Date: Nov 2018
-Contact: linux-crypto@vger.kernel.org
-Description: One ZIP controller has one PF and multiple VFs, each function
+What: /sys/kernel/debug/hisi_zip/<bdf>/current_qm
+Date: Nov 2018
+Contact: linux-crypto@vger.kernel.org
+Description: One ZIP controller has one PF and multiple VFs, each function
has a QM. Select the QM which below qm refers to.
Only available for PF.
-What: /sys/kernel/debug/hisi_zip/<bdf>/qm/regs
-Date: Nov 2018
-Contact: linux-crypto@vger.kernel.org
-Description: Dump of QM related debug registers.
+What: /sys/kernel/debug/hisi_zip/<bdf>/alg_qos
+Date: Jun 2021
+Contact: linux-crypto@vger.kernel.org
+Description: The <bdf> identifies the function, for both PF and VF.
+ The ZIP driver supports configuring each function's QoS.
+ In the host, write the <bdf> and QoS value to alg_qos, such as
+ "echo <bdf> value > alg_qos". The QoS value is 1~1000, meaning
+ 1/1000~1000/1000 of the total QoS. Read alg_qos to query the
+ configured QoS in the host and VM, such as "cat alg_qos".
+
+What: /sys/kernel/debug/hisi_zip/<bdf>/qm/regs
+Date: Nov 2018
+Contact: linux-crypto@vger.kernel.org
+Description: Dump of QM related debug registers.
Available for PF and VF in host. VF in guest currently only
has one debug register.
-What: /sys/kernel/debug/hisi_zip/<bdf>/qm/current_q
-Date: Nov 2018
-Contact: linux-crypto@vger.kernel.org
-Description: One QM may contain multiple queues. Select specific queue to
+What: /sys/kernel/debug/hisi_zip/<bdf>/qm/current_q
+Date: Nov 2018
+Contact: linux-crypto@vger.kernel.org
+Description: One QM may contain multiple queues. Select specific queue to
show its debug registers in above regs.
Only available for PF.
-What: /sys/kernel/debug/hisi_zip/<bdf>/qm/clear_enable
-Date: Nov 2018
-Contact: linux-crypto@vger.kernel.org
-Description: QM debug registers(regs) read clear control. 1 means enable
+What: /sys/kernel/debug/hisi_zip/<bdf>/qm/clear_enable
+Date: Nov 2018
+Contact: linux-crypto@vger.kernel.org
+Description: QM debug registers(regs) read clear control. 1 means enable
register read clear, otherwise 0.
Writing to this file has no functional effect, only enable or
disable counters clear after reading of these registers.
Only available for PF.
-What: /sys/kernel/debug/hisi_zip/<bdf>/qm/err_irq
-Date: Apr 2020
-Contact: linux-crypto@vger.kernel.org
-Description: Dump the number of invalid interrupts for
+What: /sys/kernel/debug/hisi_zip/<bdf>/qm/err_irq
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the number of invalid interrupts for
QM task completion.
Available for both PF and VF, and take no other effect on ZIP.
-What: /sys/kernel/debug/hisi_zip/<bdf>/qm/aeq_irq
-Date: Apr 2020
-Contact: linux-crypto@vger.kernel.org
-Description: Dump the number of QM async event queue interrupts.
+What: /sys/kernel/debug/hisi_zip/<bdf>/qm/aeq_irq
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the number of QM async event queue interrupts.
Available for both PF and VF, and take no other effect on ZIP.
-What: /sys/kernel/debug/hisi_zip/<bdf>/qm/abnormal_irq
-Date: Apr 2020
-Contact: linux-crypto@vger.kernel.org
-Description: Dump the number of interrupts for QM abnormal event.
+What: /sys/kernel/debug/hisi_zip/<bdf>/qm/abnormal_irq
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the number of interrupts for QM abnormal event.
Available for both PF and VF, and take no other effect on ZIP.
-What: /sys/kernel/debug/hisi_zip/<bdf>/qm/create_qp_err
-Date: Apr 2020
-Contact: linux-crypto@vger.kernel.org
-Description: Dump the number of queue allocation errors.
+What: /sys/kernel/debug/hisi_zip/<bdf>/qm/create_qp_err
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the number of queue allocation errors.
Available for both PF and VF, and take no other effect on ZIP.
-What: /sys/kernel/debug/hisi_zip/<bdf>/qm/mb_err
-Date: Apr 2020
-Contact: linux-crypto@vger.kernel.org
-Description: Dump the number of failed QM mailbox commands.
+What: /sys/kernel/debug/hisi_zip/<bdf>/qm/mb_err
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the number of failed QM mailbox commands.
Available for both PF and VF, and take no other effect on ZIP.
-What: /sys/kernel/debug/hisi_zip/<bdf>/qm/status
-Date: Apr 2020
-Contact: linux-crypto@vger.kernel.org
-Description: Dump the status of the QM.
+What: /sys/kernel/debug/hisi_zip/<bdf>/qm/status
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the status of the QM.
Four states: initiated, started, stopped and closed.
Available for both PF and VF, and take no other effect on ZIP.
-What: /sys/kernel/debug/hisi_zip/<bdf>/zip_dfx/send_cnt
-Date: Apr 2020
-Contact: linux-crypto@vger.kernel.org
-Description: Dump the total number of sent requests.
+What: /sys/kernel/debug/hisi_zip/<bdf>/zip_dfx/send_cnt
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the total number of sent requests.
Available for both PF and VF, and take no other effect on ZIP.
-What: /sys/kernel/debug/hisi_zip/<bdf>/zip_dfx/recv_cnt
-Date: Apr 2020
-Contact: linux-crypto@vger.kernel.org
-Description: Dump the total number of received requests.
+What: /sys/kernel/debug/hisi_zip/<bdf>/zip_dfx/recv_cnt
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the total number of received requests.
Available for both PF and VF, and take no other effect on ZIP.
-What: /sys/kernel/debug/hisi_zip/<bdf>/zip_dfx/send_busy_cnt
-Date: Apr 2020
-Contact: linux-crypto@vger.kernel.org
-Description: Dump the total number of requests received
+What: /sys/kernel/debug/hisi_zip/<bdf>/zip_dfx/send_busy_cnt
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the total number of requests received
with returning busy.
Available for both PF and VF, and take no other effect on ZIP.
-What: /sys/kernel/debug/hisi_zip/<bdf>/zip_dfx/err_bd_cnt
-Date: Apr 2020
-Contact: linux-crypto@vger.kernel.org
-Description: Dump the total number of BD type error requests
+What: /sys/kernel/debug/hisi_zip/<bdf>/zip_dfx/err_bd_cnt
+Date: Apr 2020
+Contact: linux-crypto@vger.kernel.org
+Description: Dump the total number of BD type error requests
to be received.
Available for both PF and VF, and take no other effect on ZIP.
diff --git a/MAINTAINERS b/MAINTAINERS
index 7940c41f65d5..f39be89f9128 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8644,7 +8644,7 @@ S: Maintained
F: drivers/gpio/gpio-hisi.c
HISILICON HIGH PERFORMANCE RSA ENGINE DRIVER (HPRE)
-M: Zaibo Xu <xuzaibo@huawei.com>
+M: Longfang Liu <liulongfang@huawei.com>
L: linux-crypto@vger.kernel.org
S: Maintained
F: Documentation/ABI/testing/debugfs-hisi-hpre
@@ -8724,8 +8724,8 @@ F: Documentation/devicetree/bindings/scsi/hisilicon-sas.txt
F: drivers/scsi/hisi_sas/
HISILICON SECURITY ENGINE V2 DRIVER (SEC2)
-M: Zaibo Xu <xuzaibo@huawei.com>
M: Kai Ye <yekai13@huawei.com>
+M: Longfang Liu <liulongfang@huawei.com>
L: linux-crypto@vger.kernel.org
S: Maintained
F: Documentation/ABI/testing/debugfs-hisi-sec
@@ -8756,7 +8756,7 @@ F: Documentation/devicetree/bindings/mfd/hisilicon,hi6421-spmi-pmic.yaml
F: drivers/mfd/hi6421-spmi-pmic.c
HISILICON TRUE RANDOM NUMBER GENERATOR V2 SUPPORT
-M: Zaibo Xu <xuzaibo@huawei.com>
+M: Weili Qian <qianweili@huawei.com>
S: Maintained
F: drivers/crypto/hisilicon/trng/trng.c
@@ -21302,6 +21302,11 @@ T: git https://github.com/Xilinx/linux-xlnx.git
F: Documentation/devicetree/bindings/phy/xlnx,zynqmp-psgtr.yaml
F: drivers/phy/xilinx/phy-zynqmp.c
+XILINX ZYNQMP SHA3 DRIVER
+M: Harsha <harsha.harsha@xilinx.com>
+S: Maintained
+F: drivers/crypto/xilinx/zynqmp-sha.c
+
XILINX EVENT MANAGEMENT DRIVER
M: Abhyuday Godhasara <abhyuday.godhasara@xilinx.com>
S: Maintained
diff --git a/arch/alpha/include/asm/xor.h b/arch/alpha/include/asm/xor.h
index 5aeb4fb3cb7c..e0de0c233ab9 100644
--- a/arch/alpha/include/asm/xor.h
+++ b/arch/alpha/include/asm/xor.h
@@ -5,24 +5,43 @@
* Optimized RAID-5 checksumming functions for alpha EV5 and EV6
*/
-extern void xor_alpha_2(unsigned long, unsigned long *, unsigned long *);
-extern void xor_alpha_3(unsigned long, unsigned long *, unsigned long *,
- unsigned long *);
-extern void xor_alpha_4(unsigned long, unsigned long *, unsigned long *,
- unsigned long *, unsigned long *);
-extern void xor_alpha_5(unsigned long, unsigned long *, unsigned long *,
- unsigned long *, unsigned long *, unsigned long *);
+extern void
+xor_alpha_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2);
+extern void
+xor_alpha_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3);
+extern void
+xor_alpha_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4);
+extern void
+xor_alpha_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5);
-extern void xor_alpha_prefetch_2(unsigned long, unsigned long *,
- unsigned long *);
-extern void xor_alpha_prefetch_3(unsigned long, unsigned long *,
- unsigned long *, unsigned long *);
-extern void xor_alpha_prefetch_4(unsigned long, unsigned long *,
- unsigned long *, unsigned long *,
- unsigned long *);
-extern void xor_alpha_prefetch_5(unsigned long, unsigned long *,
- unsigned long *, unsigned long *,
- unsigned long *, unsigned long *);
+extern void
+xor_alpha_prefetch_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2);
+extern void
+xor_alpha_prefetch_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3);
+extern void
+xor_alpha_prefetch_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4);
+extern void
+xor_alpha_prefetch_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5);
asm(" \n\
.text \n\
diff --git a/arch/arm/crypto/aes-neonbs-core.S b/arch/arm/crypto/aes-neonbs-core.S
index 7d0cc7f226a5..7b61032f29fa 100644
--- a/arch/arm/crypto/aes-neonbs-core.S
+++ b/arch/arm/crypto/aes-neonbs-core.S
@@ -758,29 +758,24 @@ ENTRY(aesbs_cbc_decrypt)
ENDPROC(aesbs_cbc_decrypt)
.macro next_ctr, q
- vmov.32 \q\()h[1], r10
+ vmov \q\()h, r9, r10
adds r10, r10, #1
- vmov.32 \q\()h[0], r9
adcs r9, r9, #0
- vmov.32 \q\()l[1], r8
+ vmov \q\()l, r7, r8
adcs r8, r8, #0
- vmov.32 \q\()l[0], r7
adc r7, r7, #0
vrev32.8 \q, \q
.endm
/*
* aesbs_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
- * int rounds, int blocks, u8 ctr[], u8 final[])
+ * int rounds, int bytes, u8 ctr[])
*/
ENTRY(aesbs_ctr_encrypt)
mov ip, sp
push {r4-r10, lr}
- ldm ip, {r5-r7} // load args 4-6
- teq r7, #0
- addne r5, r5, #1 // one extra block if final != 0
-
+ ldm ip, {r5, r6} // load args 4-5
vld1.8 {q0}, [r6] // load counter
vrev32.8 q1, q0
vmov r9, r10, d3
@@ -792,20 +787,19 @@ ENTRY(aesbs_ctr_encrypt)
adc r7, r7, #0
99: vmov q1, q0
+ sub lr, r5, #1
vmov q2, q0
+ adr ip, 0f
vmov q3, q0
+ and lr, lr, #112
vmov q4, q0
+ cmp r5, #112
vmov q5, q0
+ sub ip, ip, lr, lsl #1
vmov q6, q0
+ add ip, ip, lr, lsr #2
vmov q7, q0
-
- adr ip, 0f
- sub lr, r5, #1
- and lr, lr, #7
- cmp r5, #8
- sub ip, ip, lr, lsl #5
- sub ip, ip, lr, lsl #2
- movlt pc, ip // computed goto if blocks < 8
+ movle pc, ip // computed goto if bytes < 112
next_ctr q1
next_ctr q2
@@ -820,12 +814,14 @@ ENTRY(aesbs_ctr_encrypt)
bl aesbs_encrypt8
adr ip, 1f
- and lr, r5, #7
- cmp r5, #8
- movgt r4, #0
- ldrle r4, [sp, #40] // load final in the last round
- sub ip, ip, lr, lsl #2
- movlt pc, ip // computed goto if blocks < 8
+ sub lr, r5, #1
+ cmp r5, #128
+ bic lr, lr, #15
+ ands r4, r5, #15 // preserves C flag
+ teqcs r5, r5 // set Z flag if not last iteration
+ sub ip, ip, lr, lsr #2
+ rsb r4, r4, #16
+ movcc pc, ip // computed goto if bytes < 128
vld1.8 {q8}, [r1]!
vld1.8 {q9}, [r1]!
@@ -834,46 +830,70 @@ ENTRY(aesbs_ctr_encrypt)
vld1.8 {q12}, [r1]!
vld1.8 {q13}, [r1]!
vld1.8 {q14}, [r1]!
- teq r4, #0 // skip last block if 'final'
-1: bne 2f
+1: subne r1, r1, r4
vld1.8 {q15}, [r1]!
-2: adr ip, 3f
- cmp r5, #8
- sub ip, ip, lr, lsl #3
- movlt pc, ip // computed goto if blocks < 8
+ add ip, ip, #2f - 1b
veor q0, q0, q8
- vst1.8 {q0}, [r0]!
veor q1, q1, q9
- vst1.8 {q1}, [r0]!
veor q4, q4, q10
- vst1.8 {q4}, [r0]!
veor q6, q6, q11
- vst1.8 {q6}, [r0]!
veor q3, q3, q12
- vst1.8 {q3}, [r0]!
veor q7, q7, q13
- vst1.8 {q7}, [r0]!
veor q2, q2, q14
+ bne 3f
+ veor q5, q5, q15
+
+ movcc pc, ip // computed goto if bytes < 128
+
+ vst1.8 {q0}, [r0]!
+ vst1.8 {q1}, [r0]!
+ vst1.8 {q4}, [r0]!
+ vst1.8 {q6}, [r0]!
+ vst1.8 {q3}, [r0]!
+ vst1.8 {q7}, [r0]!
vst1.8 {q2}, [r0]!
- teq r4, #0 // skip last block if 'final'
- W(bne) 5f
-3: veor q5, q5, q15
+2: subne r0, r0, r4
vst1.8 {q5}, [r0]!
-4: next_ctr q0
+ next_ctr q0
- subs r5, r5, #8
+ subs r5, r5, #128
bgt 99b
vst1.8 {q0}, [r6]
pop {r4-r10, pc}
-5: vst1.8 {q5}, [r4]
- b 4b
+3: adr lr, .Lpermute_table + 16
+ cmp r5, #16 // Z flag remains cleared
+ sub lr, lr, r4
+ vld1.8 {q8-q9}, [lr]
+ vtbl.8 d16, {q5}, d16
+ vtbl.8 d17, {q5}, d17
+ veor q5, q8, q15
+ bcc 4f // have to reload prev if R5 < 16
+ vtbx.8 d10, {q2}, d18
+ vtbx.8 d11, {q2}, d19
+ mov pc, ip // branch back to VST sequence
+
+4: sub r0, r0, r4
+ vshr.s8 q9, q9, #7 // create mask for VBIF
+ vld1.8 {q8}, [r0] // reload
+ vbif q5, q8, q9
+ vst1.8 {q5}, [r0]
+ pop {r4-r10, pc}
ENDPROC(aesbs_ctr_encrypt)
+ .align 6
+.Lpermute_table:
+ .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
+ .byte 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
+ .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+
.macro next_tweak, out, in, const, tmp
vshr.s64 \tmp, \in, #63
vand \tmp, \tmp, \const
@@ -888,6 +908,7 @@ ENDPROC(aesbs_ctr_encrypt)
* aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
* int blocks, u8 iv[], int reorder_last_tweak)
*/
+ .align 6
__xts_prepare8:
vld1.8 {q14}, [r7] // load iv
vmov.i32 d30, #0x87 // compose tweak mask vector
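The next_tweak macro in the hunk above advances the XTS tweak by multiplying it by x in GF(2^128), which is what the vshr.s64/vand sequence with the 0x87 constant implements. A scalar C model of that step, for illustration only:

/* Scalar model of next_tweak: multiply the 128-bit tweak by x in
 * GF(2^128), folding the carry back in with the 0x87 polynomial. */
#include <stdint.h>

static void xts_next_tweak(uint64_t t[2])	/* little-endian 64-bit halves */
{
	/* arithmetic shift: all-ones if the top bit of the tweak is set */
	uint64_t carry = (uint64_t)((int64_t)t[1] >> 63);

	t[1] = (t[1] << 1) | (t[0] >> 63);
	t[0] = (t[0] << 1) ^ (carry & 0x87);
}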
diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c
index 5c6cd3c63cbc..f00f042ef357 100644
--- a/arch/arm/crypto/aes-neonbs-glue.c
+++ b/arch/arm/crypto/aes-neonbs-glue.c
@@ -37,7 +37,7 @@ asmlinkage void aesbs_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[],
int rounds, int blocks, u8 iv[]);
asmlinkage void aesbs_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
- int rounds, int blocks, u8 ctr[], u8 final[]);
+ int rounds, int blocks, u8 ctr[]);
asmlinkage void aesbs_xts_encrypt(u8 out[], u8 const in[], u8 const rk[],
int rounds, int blocks, u8 iv[], int);
@@ -243,32 +243,25 @@ static int ctr_encrypt(struct skcipher_request *req)
err = skcipher_walk_virt(&walk, req, false);
while (walk.nbytes > 0) {
- unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
- u8 *final = (walk.total % AES_BLOCK_SIZE) ? buf : NULL;
+ const u8 *src = walk.src.virt.addr;
+ u8 *dst = walk.dst.virt.addr;
+ int bytes = walk.nbytes;
- if (walk.nbytes < walk.total) {
- blocks = round_down(blocks,
- walk.stride / AES_BLOCK_SIZE);
- final = NULL;
- }
+ if (unlikely(bytes < AES_BLOCK_SIZE))
+ src = dst = memcpy(buf + sizeof(buf) - bytes,
+ src, bytes);
+ else if (walk.nbytes < walk.total)
+ bytes &= ~(8 * AES_BLOCK_SIZE - 1);
kernel_neon_begin();
- aesbs_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
- ctx->rk, ctx->rounds, blocks, walk.iv, final);
+ aesbs_ctr_encrypt(dst, src, ctx->rk, ctx->rounds, bytes, walk.iv);
kernel_neon_end();
- if (final) {
- u8 *dst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
- u8 *src = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
+ if (unlikely(bytes < AES_BLOCK_SIZE))
+ memcpy(walk.dst.virt.addr,
+ buf + sizeof(buf) - bytes, bytes);
- crypto_xor_cpy(dst, src, final,
- walk.total % AES_BLOCK_SIZE);
-
- err = skcipher_walk_done(&walk, 0);
- break;
- }
- err = skcipher_walk_done(&walk,
- walk.nbytes - blocks * AES_BLOCK_SIZE);
+ err = skcipher_walk_done(&walk, walk.nbytes - bytes);
}
return err;
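The rewritten ctr_encrypt above (and its arm64 counterpart later in this pull) stages a sub-block tail at the end of a block-sized stack buffer so the assembly can always operate on whole 16-byte vectors. A standalone sketch of that idea, with a hypothetical ctr_core callback standing in for the assembly routine:

/* Sketch (not the kernel code) of the tail-staging trick used above:
 * a final partial block is copied to the *end* of a block-sized
 * buffer, processed in place, then copied back out. */
#include <stdint.h>
#include <string.h>

#define AES_BLOCK_SIZE 16

static void ctr_with_tail(uint8_t *dst, const uint8_t *src, size_t bytes,
			  void (*ctr_core)(uint8_t *, const uint8_t *, size_t))
{
	uint8_t buf[AES_BLOCK_SIZE];

	if (bytes < AES_BLOCK_SIZE) {
		uint8_t *p = buf + sizeof(buf) - bytes;

		memcpy(p, src, bytes);	/* stage input at end of buf */
		ctr_core(p, p, bytes);
		memcpy(dst, p, bytes);	/* copy the result back out */
	} else {
		ctr_core(dst, src, bytes);
	}
}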
diff --git a/arch/arm/include/asm/xor.h b/arch/arm/include/asm/xor.h
index aefddec79286..669cad5194d3 100644
--- a/arch/arm/include/asm/xor.h
+++ b/arch/arm/include/asm/xor.h
@@ -44,7 +44,8 @@
: "0" (dst), "r" (a1), "r" (a2), "r" (a3), "r" (a4))
static void
-xor_arm4regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+xor_arm4regs_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
unsigned int lines = bytes / sizeof(unsigned long) / 4;
register unsigned int a1 __asm__("r4");
@@ -64,8 +65,9 @@ xor_arm4regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
}
static void
-xor_arm4regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
+xor_arm4regs_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
unsigned int lines = bytes / sizeof(unsigned long) / 4;
register unsigned int a1 __asm__("r4");
@@ -86,8 +88,10 @@ xor_arm4regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_arm4regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
+xor_arm4regs_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
unsigned int lines = bytes / sizeof(unsigned long) / 2;
register unsigned int a1 __asm__("r8");
@@ -105,8 +109,11 @@ xor_arm4regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_arm4regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
+xor_arm4regs_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
unsigned int lines = bytes / sizeof(unsigned long) / 2;
register unsigned int a1 __asm__("r8");
@@ -146,7 +153,8 @@ static struct xor_block_template xor_block_arm4regs = {
extern struct xor_block_template const xor_block_neon_inner;
static void
-xor_neon_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+xor_neon_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
if (in_interrupt()) {
xor_arm4regs_2(bytes, p1, p2);
@@ -158,8 +166,9 @@ xor_neon_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
}
static void
-xor_neon_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
+xor_neon_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
if (in_interrupt()) {
xor_arm4regs_3(bytes, p1, p2, p3);
@@ -171,8 +180,10 @@ xor_neon_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_neon_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
+xor_neon_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
if (in_interrupt()) {
xor_arm4regs_4(bytes, p1, p2, p3, p4);
@@ -184,8 +195,11 @@ xor_neon_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_neon_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
+xor_neon_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
if (in_interrupt()) {
xor_arm4regs_5(bytes, p1, p2, p3, p4, p5);
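The prototype changes above add const and __restrict qualifiers so the compiler may assume the XOR operands never alias. A minimal illustration of the pattern (generic C, not the NEON inner loop):

/* Minimal illustration: with __restrict, the compiler is free to
 * vectorize this loop because p1 and p2 are guaranteed not to overlap. */
static void xor_blocks_2(unsigned long bytes,
			 unsigned long * __restrict p1,
			 const unsigned long * __restrict p2)
{
	unsigned long lines = bytes / sizeof(unsigned long);

	while (lines--)
		*p1++ ^= *p2++;
}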
diff --git a/arch/arm/lib/xor-neon.c b/arch/arm/lib/xor-neon.c
index b99dd8e1c93f..522510baed49 100644
--- a/arch/arm/lib/xor-neon.c
+++ b/arch/arm/lib/xor-neon.c
@@ -17,17 +17,11 @@ MODULE_LICENSE("GPL");
/*
* Pull in the reference implementations while instructing GCC (through
* -ftree-vectorize) to attempt to exploit implicit parallelism and emit
- * NEON instructions.
+ * NEON instructions. Clang does this by default at O2 so no pragma is
+ * needed.
*/
-#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
+#ifdef CONFIG_CC_IS_GCC
#pragma GCC optimize "tree-vectorize"
-#else
-/*
- * While older versions of GCC do not generate incorrect code, they fail to
- * recognize the parallel nature of these functions, and emit plain ARM code,
- * which is known to be slower than the optimized ARM code in asm-arm/xor.h.
- */
-#warning This code requires at least version 4.6 of GCC
#endif
#pragma GCC diagnostic ignored "-Wunused-variable"
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index addfa413650b..2a965aa0188d 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -45,7 +45,7 @@ config CRYPTO_SM3_ARM64_CE
tristate "SM3 digest algorithm (ARMv8.2 Crypto Extensions)"
depends on KERNEL_MODE_NEON
select CRYPTO_HASH
- select CRYPTO_SM3
+ select CRYPTO_LIB_SM3
config CRYPTO_SM4_ARM64_CE
tristate "SM4 symmetric cipher (ARMv8.2 Crypto Extensions)"
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 30b7cc6a7079..561dd2332571 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -24,7 +24,6 @@
#ifdef USE_V8_CRYPTO_EXTENSIONS
#define MODE "ce"
#define PRIO 300
-#define STRIDE 5
#define aes_expandkey ce_aes_expandkey
#define aes_ecb_encrypt ce_aes_ecb_encrypt
#define aes_ecb_decrypt ce_aes_ecb_decrypt
@@ -42,7 +41,6 @@ MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
#else
#define MODE "neon"
#define PRIO 200
-#define STRIDE 4
#define aes_ecb_encrypt neon_aes_ecb_encrypt
#define aes_ecb_decrypt neon_aes_ecb_decrypt
#define aes_cbc_encrypt neon_aes_cbc_encrypt
@@ -89,7 +87,7 @@ asmlinkage void aes_cbc_cts_decrypt(u8 out[], u8 const in[], u32 const rk[],
int rounds, int bytes, u8 const iv[]);
asmlinkage void aes_ctr_encrypt(u8 out[], u8 const in[], u32 const rk[],
- int rounds, int bytes, u8 ctr[], u8 finalbuf[]);
+ int rounds, int bytes, u8 ctr[]);
asmlinkage void aes_xts_encrypt(u8 out[], u8 const in[], u32 const rk1[],
int rounds, int bytes, u32 const rk2[], u8 iv[],
@@ -458,26 +456,21 @@ static int __maybe_unused ctr_encrypt(struct skcipher_request *req)
unsigned int nbytes = walk.nbytes;
u8 *dst = walk.dst.virt.addr;
u8 buf[AES_BLOCK_SIZE];
- unsigned int tail;
if (unlikely(nbytes < AES_BLOCK_SIZE))
- src = memcpy(buf, src, nbytes);
+ src = dst = memcpy(buf + sizeof(buf) - nbytes,
+ src, nbytes);
else if (nbytes < walk.total)
nbytes &= ~(AES_BLOCK_SIZE - 1);
kernel_neon_begin();
aes_ctr_encrypt(dst, src, ctx->key_enc, rounds, nbytes,
- walk.iv, buf);
+ walk.iv);
kernel_neon_end();
- tail = nbytes % (STRIDE * AES_BLOCK_SIZE);
- if (tail > 0 && tail < AES_BLOCK_SIZE)
- /*
- * The final partial block could not be returned using
- * an overlapping store, so it was passed via buf[]
- * instead.
- */
- memcpy(dst + nbytes - tail, buf, tail);
+ if (unlikely(nbytes < AES_BLOCK_SIZE))
+ memcpy(walk.dst.virt.addr,
+ buf + sizeof(buf) - nbytes, nbytes);
err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
}
@@ -983,6 +976,7 @@ module_cpu_feature_match(AES, aes_init);
module_init(aes_init);
EXPORT_SYMBOL(neon_aes_ecb_encrypt);
EXPORT_SYMBOL(neon_aes_cbc_encrypt);
+EXPORT_SYMBOL(neon_aes_ctr_encrypt);
EXPORT_SYMBOL(neon_aes_xts_encrypt);
EXPORT_SYMBOL(neon_aes_xts_decrypt);
#endif
diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
index ff01f0167ba2..dc35eb0245c5 100644
--- a/arch/arm64/crypto/aes-modes.S
+++ b/arch/arm64/crypto/aes-modes.S
@@ -321,7 +321,7 @@ AES_FUNC_END(aes_cbc_cts_decrypt)
/*
* aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
- * int bytes, u8 ctr[], u8 finalbuf[])
+ * int bytes, u8 ctr[])
*/
AES_FUNC_START(aes_ctr_encrypt)
@@ -414,8 +414,8 @@ ST5( st1 {v4.16b}, [x0], #16 )
.Lctrtail:
/* XOR up to MAX_STRIDE * 16 - 1 bytes of in/output with v0 ... v3/v4 */
mov x16, #16
- ands x13, x4, #0xf
- csel x13, x13, x16, ne
+ ands x6, x4, #0xf
+ csel x13, x6, x16, ne
ST5( cmp w4, #64 - (MAX_STRIDE << 4) )
ST5( csel x14, x16, xzr, gt )
@@ -424,10 +424,10 @@ ST5( csel x14, x16, xzr, gt )
cmp w4, #32 - (MAX_STRIDE << 4)
csel x16, x16, xzr, gt
cmp w4, #16 - (MAX_STRIDE << 4)
- ble .Lctrtail1x
adr_l x12, .Lcts_permute_table
add x12, x12, x13
+ ble .Lctrtail1x
ST5( ld1 {v5.16b}, [x1], x14 )
ld1 {v6.16b}, [x1], x15
@@ -462,11 +462,19 @@ ST5( st1 {v5.16b}, [x0], x14 )
b .Lctrout
.Lctrtail1x:
- csel x0, x0, x6, eq // use finalbuf if less than a full block
+ sub x7, x6, #16
+ csel x6, x6, x7, eq
+ add x1, x1, x6
+ add x0, x0, x6
ld1 {v5.16b}, [x1]
+ ld1 {v6.16b}, [x0]
ST5( mov v3.16b, v4.16b )
encrypt_block v3, w3, x2, x8, w7
+ ld1 {v10.16b-v11.16b}, [x12]
+ tbl v3.16b, {v3.16b}, v10.16b
+ sshr v11.16b, v11.16b, #7
eor v5.16b, v5.16b, v3.16b
+ bif v5.16b, v6.16b, v11.16b
st1 {v5.16b}, [x0]
b .Lctrout
AES_FUNC_END(aes_ctr_encrypt)
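The new .Lctrtail1x path above handles a sub-block tail without the old bounce buffer: it permutes the keystream with a table lookup (tbl), derives a byte mask from the table's sign bits (sshr #7), and merges with the previously stored bytes via bif. A scalar model of that merge, illustrative only:

/* Scalar model of the tbl/sshr/bif tail merge: write the n valid
 * ciphertext bytes and preserve whatever was already at dst beyond
 * the tail, so a full 16-byte store never corrupts adjacent data. */
#include <stdint.h>

static void masked_tail_store(uint8_t dst[16], const uint8_t ct[16],
			      const uint8_t prev[16], unsigned int n)
{
	unsigned int i;

	for (i = 0; i < 16; i++)
		dst[i] = (i < n) ? ct[i] : prev[i];
}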
diff --git a/arch/arm64/crypto/aes-neonbs-core.S b/arch/arm64/crypto/aes-neonbs-core.S
index a3405b8c344b..d427f4556b6e 100644
--- a/arch/arm64/crypto/aes-neonbs-core.S
+++ b/arch/arm64/crypto/aes-neonbs-core.S
@@ -735,119 +735,67 @@ SYM_FUNC_END(aesbs_cbc_decrypt)
* int blocks, u8 iv[])
*/
SYM_FUNC_START_LOCAL(__xts_crypt8)
- mov x6, #1
- lsl x6, x6, x23
- subs w23, w23, #8
- csel x23, x23, xzr, pl
- csel x6, x6, xzr, mi
+ movi v18.2s, #0x1
+ movi v19.2s, #0x87
+ uzp1 v18.4s, v18.4s, v19.4s
+
+ ld1 {v0.16b-v3.16b}, [x1], #64
+ ld1 {v4.16b-v7.16b}, [x1], #64
+
+ next_tweak v26, v25, v18, v19
+ next_tweak v27, v26, v18, v19
+ next_tweak v28, v27, v18, v19
+ next_tweak v29, v28, v18, v19
+ next_tweak v30, v29, v18, v19
+ next_tweak v31, v30, v18, v19
+ next_tweak v16, v31, v18, v19
+ next_tweak v17, v16, v18, v19
- ld1 {v0.16b}, [x20], #16
- next_tweak v26, v25, v30, v31
eor v0.16b, v0.16b, v25.16b
- tbnz x6, #1, 0f
-
- ld1 {v1.16b}, [x20], #16
- next_tweak v27, v26, v30, v31
eor v1.16b, v1.16b, v26.16b
- tbnz x6, #2, 0f
-
- ld1 {v2.16b}, [x20], #16
- next_tweak v28, v27, v30, v31
eor v2.16b, v2.16b, v27.16b
- tbnz x6, #3, 0f
-
- ld1 {v3.16b}, [x20], #16
- next_tweak v29, v28, v30, v31
eor v3.16b, v3.16b, v28.16b
- tbnz x6, #4, 0f
-
- ld1 {v4.16b}, [x20], #16
- str q29, [sp, #.Lframe_local_offset]
eor v4.16b, v4.16b, v29.16b
- next_tweak v29, v29, v30, v31
- tbnz x6, #5, 0f
-
- ld1 {v5.16b}, [x20], #16
- str q29, [sp, #.Lframe_local_offset + 16]
- eor v5.16b, v5.16b, v29.16b
- next_tweak v29, v29, v30, v31
- tbnz x6, #6, 0f
-
- ld1 {v6.16b}, [x20], #16
- str q29, [sp, #.Lframe_local_offset + 32]
- eor v6.16b, v6.16b, v29.16b
- next_tweak v29, v29, v30, v31
- tbnz x6, #7, 0f
+ eor v5.16b, v5.16b, v30.16b
+ eor v6.16b, v6.16b, v31.16b
+ eor v7.16b, v7.16b, v16.16b
- ld1 {v7.16b}, [x20], #16
- str q29, [sp, #.Lframe_local_offset + 48]
- eor v7.16b, v7.16b, v29.16b
- next_tweak v29, v29, v30, v31
+ stp q16, q17, [sp, #16]
-0: mov bskey, x21
- mov rounds, x22
+ mov bskey, x2
+ mov rounds, x3
br x16
SYM_FUNC_END(__xts_crypt8)
.macro __xts_crypt, do8, o0, o1, o2, o3, o4, o5, o6, o7
- frame_push 6, 64
-
- mov x19, x0
- mov x20, x1
- mov x21, x2
- mov x22, x3
- mov x23, x4
- mov x24, x5
+ stp x29, x30, [sp, #-48]!
+ mov x29, sp
- movi v30.2s, #0x1
- movi v25.2s, #0x87
- uzp1 v30.4s, v30.4s, v25.4s
- ld1 {v25.16b}, [x24]
+ ld1 {v25.16b}, [x5]
-99: adr x16, \do8
+0: adr x16, \do8
bl __xts_crypt8
- ldp q16, q17, [sp, #.Lframe_local_offset]
- ldp q18, q19, [sp, #.Lframe_local_offset + 32]
+ eor v16.16b, \o0\().16b, v25.16b
+ eor v17.16b, \o1\().16b, v26.16b
+ eor v18.16b, \o2\().16b, v27.16b
+ eor v19.16b, \o3\().16b, v28.16b
- eor \o0\().16b, \o0\().16b, v25.16b
- eor \o1\().16b, \o1\().16b, v26.16b
- eor \o2\().16b, \o2\().16b, v27.16b
- eor \o3\().16b, \o3\().16b, v28.16b
+ ldp q24, q25, [sp, #16]
- st1 {\o0\().16b}, [x19], #16
- mov v25.16b, v26.16b
- tbnz x6, #1, 1f
- st1 {\o1\().16b}, [x19], #16
- mov v25.16b, v27.16b
- tbnz x6, #2, 1f
- st1 {\o2\().16b}, [x19], #16
- mov v25.16b, v28.16b
- tbnz x6, #3, 1f
- st1 {\o3\().16b}, [x19], #16
- mov v25.16b, v29.16b
- tbnz x6, #4, 1f
+ eor v20.16b, \o4\().16b, v29.16b
+ eor v21.16b, \o5\().16b, v30.16b
+ eor v22.16b, \o6\().16b, v31.16b
+ eor v23.16b, \o7\().16b, v24.16b
- eor \o4\().16b, \o4\().16b, v16.16b
- eor \o5\().16b, \o5\().16b, v17.16b
- eor \o6\().16b, \o6\().16b, v18.16b
- eor \o7\().16b, \o7\().16b, v19.16b
+ st1 {v16.16b-v19.16b}, [x0], #64
+ st1 {v20.16b-v23.16b}, [x0], #64
- st1 {\o4\().16b}, [x19], #16
- tbnz x6, #5, 1f
- st1 {\o5\().16b}, [x19], #16
- tbnz x6, #6, 1f
- st1 {\o6\().16b}, [x19], #16
- tbnz x6, #7, 1f
- st1 {\o7\().16b}, [x19], #16
+ subs x4, x4, #8
+ b.gt 0b
- cbz x23, 1f
- st1 {v25.16b}, [x24]
-
- b 99b
-
-1: st1 {v25.16b}, [x24]
- frame_pop
+ st1 {v25.16b}, [x5]
+ ldp x29, x30, [sp], #48
ret
.endm
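next_tweak advances the XTS tweak by multiplying it by x in GF(2^128),
folding the carry back in with the polynomial constant 0x87 (the pair of
constants built with movi/uzp1 at the top of __xts_crypt8). A minimal
byte-wise C sketch of the same step, assuming the XTS little-endian
block convention (the kernel's generic helper for this is
gf128mul_x_ble() in <crypto/gf128mul.h>):

	#include <linux/types.h>

	/* Multiply a 128-bit tweak by x modulo x^128 + x^7 + x^2 + x + 1. */
	static void xts_next_tweak(u8 t[16])
	{
		int carry = t[15] >> 7;
		int i;

		for (i = 15; i > 0; i--)
			t[i] = (u8)(t[i] << 1) | (t[i - 1] >> 7);
		t[0] = (u8)(t[0] << 1) ^ (carry ? 0x87 : 0);
	}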
@@ -869,133 +817,51 @@ SYM_FUNC_END(aesbs_xts_decrypt)
/*
* aesbs_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
- * int rounds, int blocks, u8 iv[], u8 final[])
+ * int rounds, int blocks, u8 iv[])
*/
SYM_FUNC_START(aesbs_ctr_encrypt)
- frame_push 8
-
- mov x19, x0
- mov x20, x1
- mov x21, x2
- mov x22, x3
- mov x23, x4
- mov x24, x5
- mov x25, x6
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
- cmp x25, #0
- cset x26, ne
- add x23, x23, x26 // do one extra block if final
-
- ldp x7, x8, [x24]
- ld1 {v0.16b}, [x24]
+ ldp x7, x8, [x5]
+ ld1 {v0.16b}, [x5]
CPU_LE( rev x7, x7 )
CPU_LE( rev x8, x8 )
adds x8, x8, #1
adc x7, x7, xzr
-99: mov x9, #1
- lsl x9, x9, x23
- subs w23, w23, #8
- csel x23, x23, xzr, pl
- csel x9, x9, xzr, le
-
- tbnz x9, #1, 0f
- next_ctr v1
- tbnz x9, #2, 0f
+0: next_ctr v1
next_ctr v2
- tbnz x9, #3, 0f
next_ctr v3
- tbnz x9, #4, 0f
next_ctr v4
- tbnz x9, #5, 0f
next_ctr v5
- tbnz x9, #6, 0f
next_ctr v6
- tbnz x9, #7, 0f
next_ctr v7
-0: mov bskey, x21
- mov rounds, x22
+ mov bskey, x2
+ mov rounds, x3
bl aesbs_encrypt8
- lsr x9, x9, x26 // disregard the extra block
- tbnz x9, #0, 0f
-
- ld1 {v8.16b}, [x20], #16
- eor v0.16b, v0.16b, v8.16b
- st1 {v0.16b}, [x19], #16
- tbnz x9, #1, 1f
+ ld1 { v8.16b-v11.16b}, [x1], #64
+ ld1 {v12.16b-v15.16b}, [x1], #64
- ld1 {v9.16b}, [x20], #16
- eor v1.16b, v1.16b, v9.16b
- st1 {v1.16b}, [x19], #16
- tbnz x9, #2, 2f
+ eor v8.16b, v0.16b, v8.16b
+ eor v9.16b, v1.16b, v9.16b
+ eor v10.16b, v4.16b, v10.16b
+ eor v11.16b, v6.16b, v11.16b
+ eor v12.16b, v3.16b, v12.16b
+ eor v13.16b, v7.16b, v13.16b
+ eor v14.16b, v2.16b, v14.16b
+ eor v15.16b, v5.16b, v15.16b
- ld1 {v10.16b}, [x20], #16
- eor v4.16b, v4.16b, v10.16b
- st1 {v4.16b}, [x19], #16
- tbnz x9, #3, 3f
+ st1 { v8.16b-v11.16b}, [x0], #64
+ st1 {v12.16b-v15.16b}, [x0], #64
- ld1 {v11.16b}, [x20], #16
- eor v6.16b, v6.16b, v11.16b
- st1 {v6.16b}, [x19], #16
- tbnz x9, #4, 4f
-
- ld1 {v12.16b}, [x20], #16
- eor v3.16b, v3.16b, v12.16b
- st1 {v3.16b}, [x19], #16
- tbnz x9, #5, 5f
-
- ld1 {v13.16b}, [x20], #16
- eor v7.16b, v7.16b, v13.16b
- st1 {v7.16b}, [x19], #16
- tbnz x9, #6, 6f
+ next_ctr v0
+ subs x4, x4, #8
+ b.gt 0b
- ld1 {v14.16b}, [x20], #16
- eor v2.16b, v2.16b, v14.16b
- st1 {v2.16b}, [x19], #16
- tbnz x9, #7, 7f
-
- ld1 {v15.16b}, [x20], #16
- eor v5.16b, v5.16b, v15.16b
- st1 {v5.16b}, [x19], #16
-
-8: next_ctr v0
- st1 {v0.16b}, [x24]
- cbz x23, .Lctr_done
-
- b 99b
-
-.Lctr_done:
- frame_pop
+ st1 {v0.16b}, [x5]
+ ldp x29, x30, [sp], #16
ret
-
- /*
- * If we are handling the tail of the input (x6 != NULL), return the
- * final keystream block back to the caller.
- */
-0: cbz x25, 8b
- st1 {v0.16b}, [x25]
- b 8b
-1: cbz x25, 8b
- st1 {v1.16b}, [x25]
- b 8b
-2: cbz x25, 8b
- st1 {v4.16b}, [x25]
- b 8b
-3: cbz x25, 8b
- st1 {v6.16b}, [x25]
- b 8b
-4: cbz x25, 8b
- st1 {v3.16b}, [x25]
- b 8b
-5: cbz x25, 8b
- st1 {v7.16b}, [x25]
- b 8b
-6: cbz x25, 8b
- st1 {v2.16b}, [x25]
- b 8b
-7: cbz x25, 8b
- st1 {v5.16b}, [x25]
- b 8b
SYM_FUNC_END(aesbs_ctr_encrypt)
diff --git a/arch/arm64/crypto/aes-neonbs-glue.c b/arch/arm64/crypto/aes-neonbs-glue.c
index 8df6ad8cb09d..bac4cabef607 100644
--- a/arch/arm64/crypto/aes-neonbs-glue.c
+++ b/arch/arm64/crypto/aes-neonbs-glue.c
@@ -34,7 +34,7 @@ asmlinkage void aesbs_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[],
int rounds, int blocks, u8 iv[]);
asmlinkage void aesbs_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
- int rounds, int blocks, u8 iv[], u8 final[]);
+ int rounds, int blocks, u8 iv[]);
asmlinkage void aesbs_xts_encrypt(u8 out[], u8 const in[], u8 const rk[],
int rounds, int blocks, u8 iv[]);
@@ -46,6 +46,8 @@ asmlinkage void neon_aes_ecb_encrypt(u8 out[], u8 const in[], u32 const rk[],
int rounds, int blocks);
asmlinkage void neon_aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[],
int rounds, int blocks, u8 iv[]);
+asmlinkage void neon_aes_ctr_encrypt(u8 out[], u8 const in[], u32 const rk[],
+ int rounds, int bytes, u8 ctr[]);
asmlinkage void neon_aes_xts_encrypt(u8 out[], u8 const in[],
u32 const rk1[], int rounds, int bytes,
u32 const rk2[], u8 iv[], int first);
@@ -58,7 +60,7 @@ struct aesbs_ctx {
int rounds;
} __aligned(AES_BLOCK_SIZE);
-struct aesbs_cbc_ctx {
+struct aesbs_cbc_ctr_ctx {
struct aesbs_ctx key;
u32 enc[AES_MAX_KEYLENGTH_U32];
};
@@ -128,10 +130,10 @@ static int ecb_decrypt(struct skcipher_request *req)
return __ecb_crypt(req, aesbs_ecb_decrypt);
}
-static int aesbs_cbc_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
+static int aesbs_cbc_ctr_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
- struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct aesbs_cbc_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_aes_ctx rk;
int err;
@@ -154,7 +156,7 @@ static int aesbs_cbc_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
static int cbc_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct aesbs_cbc_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
int err;
@@ -177,7 +179,7 @@ static int cbc_encrypt(struct skcipher_request *req)
static int cbc_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct aesbs_cbc_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
int err;
@@ -205,40 +207,32 @@ static int cbc_decrypt(struct skcipher_request *req)
static int ctr_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct aesbs_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct aesbs_cbc_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
- u8 buf[AES_BLOCK_SIZE];
int err;
err = skcipher_walk_virt(&walk, req, false);
while (walk.nbytes > 0) {
- unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
- u8 *final = (walk.total % AES_BLOCK_SIZE) ? buf : NULL;
-
- if (walk.nbytes < walk.total) {
- blocks = round_down(blocks,
- walk.stride / AES_BLOCK_SIZE);
- final = NULL;
- }
+ int blocks = (walk.nbytes / AES_BLOCK_SIZE) & ~7;
+ int nbytes = walk.nbytes % (8 * AES_BLOCK_SIZE);
+ const u8 *src = walk.src.virt.addr;
+ u8 *dst = walk.dst.virt.addr;
kernel_neon_begin();
- aesbs_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
- ctx->rk, ctx->rounds, blocks, walk.iv, final);
- kernel_neon_end();
-
- if (final) {
- u8 *dst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
- u8 *src = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
-
- crypto_xor_cpy(dst, src, final,
- walk.total % AES_BLOCK_SIZE);
-
- err = skcipher_walk_done(&walk, 0);
- break;
+ if (blocks >= 8) {
+ aesbs_ctr_encrypt(dst, src, ctx->key.rk, ctx->key.rounds,
+ blocks, walk.iv);
+ dst += blocks * AES_BLOCK_SIZE;
+ src += blocks * AES_BLOCK_SIZE;
}
- err = skcipher_walk_done(&walk,
- walk.nbytes - blocks * AES_BLOCK_SIZE);
+ if (nbytes && walk.nbytes == walk.total) {
+ neon_aes_ctr_encrypt(dst, src, ctx->enc, ctx->key.rounds,
+ nbytes, walk.iv);
+ nbytes = 0;
+ }
+ kernel_neon_end();
+ err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
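With .walksize set to 8 * AES_BLOCK_SIZE, every walk chunk except the
last is a multiple of eight blocks, so `blocks & ~7` covers the whole
chunk, and the sub-eight-block remainder (including any partial block)
can only appear on the final chunk, where the plain NEON helper finishes
it. A worked example of the split, assuming a 300-byte final chunk:

	unsigned int nbytes = 300;		/* final walk chunk */
	int blocks = (nbytes / 16) & ~7;	/* 16 blocks (256 bytes)
						 * for the bit-sliced path */
	int tail = nbytes % (8 * 16);		/* 44 bytes for plain NEON */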
@@ -308,23 +302,18 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt,
return err;
while (walk.nbytes >= AES_BLOCK_SIZE) {
- unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
-
- if (walk.nbytes < walk.total || walk.nbytes % AES_BLOCK_SIZE)
- blocks = round_down(blocks,
- walk.stride / AES_BLOCK_SIZE);
-
+ int blocks = (walk.nbytes / AES_BLOCK_SIZE) & ~7;
out = walk.dst.virt.addr;
in = walk.src.virt.addr;
nbytes = walk.nbytes;
kernel_neon_begin();
- if (likely(blocks > 6)) { /* plain NEON is faster otherwise */
- if (first)
+ if (blocks >= 8) {
+ if (first == 1)
neon_aes_ecb_encrypt(walk.iv, walk.iv,
ctx->twkey,
ctx->key.rounds, 1);
- first = 0;
+ first = 2;
fn(out, in, ctx->key.rk, ctx->key.rounds, blocks,
walk.iv);
@@ -333,10 +322,17 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt,
in += blocks * AES_BLOCK_SIZE;
nbytes -= blocks * AES_BLOCK_SIZE;
}
-
- if (walk.nbytes == walk.total && nbytes > 0)
- goto xts_tail;
-
+ if (walk.nbytes == walk.total && nbytes > 0) {
+ if (encrypt)
+ neon_aes_xts_encrypt(out, in, ctx->cts.key_enc,
+ ctx->key.rounds, nbytes,
+ ctx->twkey, walk.iv, first);
+ else
+ neon_aes_xts_decrypt(out, in, ctx->cts.key_dec,
+ ctx->key.rounds, nbytes,
+ ctx->twkey, walk.iv, first);
+ nbytes = first = 0;
+ }
kernel_neon_end();
err = skcipher_walk_done(&walk, nbytes);
}
@@ -361,13 +357,12 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt,
nbytes = walk.nbytes;
kernel_neon_begin();
-xts_tail:
if (encrypt)
neon_aes_xts_encrypt(out, in, ctx->cts.key_enc, ctx->key.rounds,
- nbytes, ctx->twkey, walk.iv, first ?: 2);
+ nbytes, ctx->twkey, walk.iv, first);
else
neon_aes_xts_decrypt(out, in, ctx->cts.key_dec, ctx->key.rounds,
- nbytes, ctx->twkey, walk.iv, first ?: 2);
+ nbytes, ctx->twkey, walk.iv, first);
kernel_neon_end();
return skcipher_walk_done(&walk, 0);
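After this rewrite, `first` doubles as a small state machine for the
neon_aes_xts_{en,de}crypt calls; a sketch of one reading, with
illustrative enum names that do not exist in the kernel:

	enum xts_first_state {
		XTS_DERIVE_TWEAK = 1,	/* first call: encrypt the IV to
					 * produce the initial tweak */
		XTS_TWEAK_READY = 2,	/* later call: walk.iv already
					 * holds the current tweak */
	};

The explicit `first = 2` after the bulk pass replaces the old
`first ?: 2` fix-up at the two tail call sites.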
@@ -402,14 +397,14 @@ static struct skcipher_alg aes_algs[] = { {
.base.cra_driver_name = "cbc-aes-neonbs",
.base.cra_priority = 250,
.base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct aesbs_cbc_ctx),
+ .base.cra_ctxsize = sizeof(struct aesbs_cbc_ctr_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.walksize = 8 * AES_BLOCK_SIZE,
.ivsize = AES_BLOCK_SIZE,
- .setkey = aesbs_cbc_setkey,
+ .setkey = aesbs_cbc_ctr_setkey,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
}, {
@@ -417,7 +412,7 @@ static struct skcipher_alg aes_algs[] = { {
.base.cra_driver_name = "ctr-aes-neonbs",
.base.cra_priority = 250,
.base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct aesbs_ctx),
+ .base.cra_ctxsize = sizeof(struct aesbs_cbc_ctr_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -425,7 +420,7 @@ static struct skcipher_alg aes_algs[] = { {
.chunksize = AES_BLOCK_SIZE,
.walksize = 8 * AES_BLOCK_SIZE,
.ivsize = AES_BLOCK_SIZE,
- .setkey = aesbs_setkey,
+ .setkey = aesbs_cbc_ctr_setkey,
.encrypt = ctr_encrypt,
.decrypt = ctr_encrypt,
}, {
diff --git a/arch/arm64/crypto/sha3-ce-glue.c b/arch/arm64/crypto/sha3-ce-glue.c
index 8c65cecf560a..250e1377c481 100644
--- a/arch/arm64/crypto/sha3-ce-glue.c
+++ b/arch/arm64/crypto/sha3-ce-glue.c
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: GPL-2.0
/*
* sha3-ce-glue.c - core SHA-3 transform using v8.2 Crypto Extensions
*
diff --git a/arch/arm64/crypto/sha512-armv8.pl b/arch/arm64/crypto/sha512-armv8.pl
index 2d8655d5b1af..35ec9ae99fe1 100644
--- a/arch/arm64/crypto/sha512-armv8.pl
+++ b/arch/arm64/crypto/sha512-armv8.pl
@@ -43,7 +43,7 @@
# on Cortex-A53 (or by 4 cycles per round).
# (***) Super-impressive coefficients over gcc-generated code are
# indication of some compiler "pathology", most notably code
-# generated with -mgeneral-regs-only is significanty faster
+# generated with -mgeneral-regs-only is significantly faster
# and the gap is only 40-90%.
#
# October 2016.
diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c
index e62a094a9d52..94cb7580deb7 100644
--- a/arch/arm64/crypto/sha512-ce-glue.c
+++ b/arch/arm64/crypto/sha512-ce-glue.c
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: GPL-2.0
/*
* sha512-ce-glue.c - SHA-384/SHA-512 using ARMv8 Crypto Extensions
*
diff --git a/arch/arm64/crypto/sm3-ce-glue.c b/arch/arm64/crypto/sm3-ce-glue.c
index d71faca322f2..ee98954ae8ca 100644
--- a/arch/arm64/crypto/sm3-ce-glue.c
+++ b/arch/arm64/crypto/sm3-ce-glue.c
@@ -26,8 +26,10 @@ asmlinkage void sm3_ce_transform(struct sm3_state *sst, u8 const *src,
static int sm3_ce_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- if (!crypto_simd_usable())
- return crypto_sm3_update(desc, data, len);
+ if (!crypto_simd_usable()) {
+ sm3_update(shash_desc_ctx(desc), data, len);
+ return 0;
+ }
kernel_neon_begin();
sm3_base_do_update(desc, data, len, sm3_ce_transform);
@@ -38,8 +40,10 @@ static int sm3_ce_update(struct shash_desc *desc, const u8 *data,
static int sm3_ce_final(struct shash_desc *desc, u8 *out)
{
- if (!crypto_simd_usable())
- return crypto_sm3_finup(desc, NULL, 0, out);
+ if (!crypto_simd_usable()) {
+ sm3_final(shash_desc_ctx(desc), out);
+ return 0;
+ }
kernel_neon_begin();
sm3_base_do_finalize(desc, sm3_ce_transform);
@@ -51,14 +55,22 @@ static int sm3_ce_final(struct shash_desc *desc, u8 *out)
static int sm3_ce_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- if (!crypto_simd_usable())
- return crypto_sm3_finup(desc, data, len, out);
+ if (!crypto_simd_usable()) {
+ struct sm3_state *sctx = shash_desc_ctx(desc);
+
+ if (len)
+ sm3_update(sctx, data, len);
+ sm3_final(sctx, out);
+ return 0;
+ }
kernel_neon_begin();
- sm3_base_do_update(desc, data, len, sm3_ce_transform);
+ if (len)
+ sm3_base_do_update(desc, data, len, sm3_ce_transform);
+ sm3_base_do_finalize(desc, sm3_ce_transform);
kernel_neon_end();
- return sm3_ce_final(desc, out);
+ return sm3_base_finish(desc, out);
}
static struct shash_alg sm3_alg = {
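The !crypto_simd_usable() fallback paths now call the SM3 library
helpers (sm3_update()/sm3_final()) directly rather than going through
the crypto_sm3_* shash wrappers. For reference, a usage sketch of
consuming the resulting "sm3" shash from other kernel code (error
handling trimmed; these are standard crypto API calls):

	#include <crypto/hash.h>

	static int sm3_digest_example(const u8 *data, unsigned int len,
				      u8 out[32])
	{
		struct crypto_shash *tfm = crypto_alloc_shash("sm3", 0, 0);
		int err;

		if (IS_ERR(tfm))
			return PTR_ERR(tfm);
		err = crypto_shash_tfm_digest(tfm, data, len, out);
		crypto_free_shash(tfm);
		return err;
	}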
diff --git a/arch/arm64/include/asm/xor.h b/arch/arm64/include/asm/xor.h
index 947f6a4f1aa0..befcd8a7abc9 100644
--- a/arch/arm64/include/asm/xor.h
+++ b/arch/arm64/include/asm/xor.h
@@ -16,7 +16,8 @@
extern struct xor_block_template const xor_block_inner_neon;
static void
-xor_neon_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+xor_neon_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
kernel_neon_begin();
xor_block_inner_neon.do_2(bytes, p1, p2);
@@ -24,8 +25,9 @@ xor_neon_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
}
static void
-xor_neon_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
+xor_neon_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
kernel_neon_begin();
xor_block_inner_neon.do_3(bytes, p1, p2, p3);
@@ -33,8 +35,10 @@ xor_neon_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_neon_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
+xor_neon_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
kernel_neon_begin();
xor_block_inner_neon.do_4(bytes, p1, p2, p3, p4);
@@ -42,8 +46,11 @@ xor_neon_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_neon_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
+xor_neon_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
kernel_neon_begin();
xor_block_inner_neon.do_5(bytes, p1, p2, p3, p4, p5);
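The const/__restrict qualifiers tell the compiler the source buffers
never alias the destination, so it can keep loads in registers and
auto-vectorize instead of reloading after every store; the same
signature change is applied to each raid-xor template below (ia64,
powerpc, s390, sparc). A minimal sketch of the effect, with an
illustrative function name:

	void xor_blocks_2(unsigned long bytes,
			  unsigned long * __restrict p1,
			  const unsigned long * __restrict p2)
	{
		unsigned long n = bytes / sizeof(unsigned long);

		/* Without __restrict, the store through p1 might alias
		 * p2, forcing a reload of *p2 on every iteration. */
		while (n--)
			*p1++ ^= *p2++;
	}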
diff --git a/arch/arm64/lib/crc32.S b/arch/arm64/lib/crc32.S
index 0f9e10ecda23..8340dccff46f 100644
--- a/arch/arm64/lib/crc32.S
+++ b/arch/arm64/lib/crc32.S
@@ -11,7 +11,44 @@
.arch armv8-a+crc
- .macro __crc32, c
+ .macro byteorder, reg, be
+ .if \be
+CPU_LE( rev \reg, \reg )
+ .else
+CPU_BE( rev \reg, \reg )
+ .endif
+ .endm
+
+ .macro byteorder16, reg, be
+ .if \be
+CPU_LE( rev16 \reg, \reg )
+ .else
+CPU_BE( rev16 \reg, \reg )
+ .endif
+ .endm
+
+ .macro bitorder, reg, be
+ .if \be
+ rbit \reg, \reg
+ .endif
+ .endm
+
+ .macro bitorder16, reg, be
+ .if \be
+ rbit \reg, \reg
+ lsr \reg, \reg, #16
+ .endif
+ .endm
+
+ .macro bitorder8, reg, be
+ .if \be
+ rbit \reg, \reg
+ lsr \reg, \reg, #24
+ .endif
+ .endm
+
+ .macro __crc32, c, be=0
+ bitorder w0, \be
cmp x2, #16
b.lt 8f // less than 16 bytes
@@ -24,10 +61,14 @@
add x8, x8, x1
add x1, x1, x7
ldp x5, x6, [x8]
-CPU_BE( rev x3, x3 )
-CPU_BE( rev x4, x4 )
-CPU_BE( rev x5, x5 )
-CPU_BE( rev x6, x6 )
+ byteorder x3, \be
+ byteorder x4, \be
+ byteorder x5, \be
+ byteorder x6, \be
+ bitorder x3, \be
+ bitorder x4, \be
+ bitorder x5, \be
+ bitorder x6, \be
tst x7, #8
crc32\c\()x w8, w0, x3
@@ -55,33 +96,43 @@ CPU_BE( rev x6, x6 )
32: ldp x3, x4, [x1], #32
sub x2, x2, #32
ldp x5, x6, [x1, #-16]
-CPU_BE( rev x3, x3 )
-CPU_BE( rev x4, x4 )
-CPU_BE( rev x5, x5 )
-CPU_BE( rev x6, x6 )
+ byteorder x3, \be
+ byteorder x4, \be
+ byteorder x5, \be
+ byteorder x6, \be
+ bitorder x3, \be
+ bitorder x4, \be
+ bitorder x5, \be
+ bitorder x6, \be
crc32\c\()x w0, w0, x3
crc32\c\()x w0, w0, x4
crc32\c\()x w0, w0, x5
crc32\c\()x w0, w0, x6
cbnz x2, 32b
-0: ret
+0: bitorder w0, \be
+ ret
8: tbz x2, #3, 4f
ldr x3, [x1], #8
-CPU_BE( rev x3, x3 )
+ byteorder x3, \be
+ bitorder x3, \be
crc32\c\()x w0, w0, x3
4: tbz x2, #2, 2f
ldr w3, [x1], #4
-CPU_BE( rev w3, w3 )
+ byteorder w3, \be
+ bitorder w3, \be
crc32\c\()w w0, w0, w3
2: tbz x2, #1, 1f
ldrh w3, [x1], #2
-CPU_BE( rev16 w3, w3 )
+ byteorder16 w3, \be
+ bitorder16 w3, \be
crc32\c\()h w0, w0, w3
1: tbz x2, #0, 0f
ldrb w3, [x1]
+ bitorder8 w3, \be
crc32\c\()b w0, w0, w3
-0: ret
+0: bitorder w0, \be
+ ret
.endm
.align 5
@@ -99,3 +150,11 @@ alternative_if_not ARM64_HAS_CRC32
alternative_else_nop_endif
__crc32 c
SYM_FUNC_END(__crc32c_le)
+
+ .align 5
+SYM_FUNC_START(crc32_be)
+alternative_if_not ARM64_HAS_CRC32
+ b crc32_be_base
+alternative_else_nop_endif
+ __crc32 be=1
+SYM_FUNC_END(crc32_be)
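The arm64 CRC32 instructions compute the bit-reflected (little-endian)
CRC-32, so crc32_be is obtained from them by bit-reversing the
accumulator and each input word with rbit on the way in and reversing
the result on the way out; CRC is linear, so the reflection maps one bit
order onto the other exactly. A C model of the identity, where
crc32_le_byte() is an assumed stand-in for the CRC32B instruction:

	#include <linux/bitrev.h>

	static u32 crc32_be_byte(u32 crc, u8 byte)
	{
		return bitrev32(crc32_le_byte(bitrev32(crc),
					      bitrev8(byte)));
	}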
diff --git a/arch/arm64/lib/xor-neon.c b/arch/arm64/lib/xor-neon.c
index d189cf4e70ea..96b171995d19 100644
--- a/arch/arm64/lib/xor-neon.c
+++ b/arch/arm64/lib/xor-neon.c
@@ -10,8 +10,8 @@
#include <linux/module.h>
#include <asm/neon-intrinsics.h>
-void xor_arm64_neon_2(unsigned long bytes, unsigned long *p1,
- unsigned long *p2)
+void xor_arm64_neon_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
uint64_t *dp1 = (uint64_t *)p1;
uint64_t *dp2 = (uint64_t *)p2;
@@ -37,8 +37,9 @@ void xor_arm64_neon_2(unsigned long bytes, unsigned long *p1,
} while (--lines > 0);
}
-void xor_arm64_neon_3(unsigned long bytes, unsigned long *p1,
- unsigned long *p2, unsigned long *p3)
+void xor_arm64_neon_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
uint64_t *dp1 = (uint64_t *)p1;
uint64_t *dp2 = (uint64_t *)p2;
@@ -72,8 +73,10 @@ void xor_arm64_neon_3(unsigned long bytes, unsigned long *p1,
} while (--lines > 0);
}
-void xor_arm64_neon_4(unsigned long bytes, unsigned long *p1,
- unsigned long *p2, unsigned long *p3, unsigned long *p4)
+void xor_arm64_neon_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
uint64_t *dp1 = (uint64_t *)p1;
uint64_t *dp2 = (uint64_t *)p2;
@@ -115,9 +118,11 @@ void xor_arm64_neon_4(unsigned long bytes, unsigned long *p1,
} while (--lines > 0);
}
-void xor_arm64_neon_5(unsigned long bytes, unsigned long *p1,
- unsigned long *p2, unsigned long *p3,
- unsigned long *p4, unsigned long *p5)
+void xor_arm64_neon_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
uint64_t *dp1 = (uint64_t *)p1;
uint64_t *dp2 = (uint64_t *)p2;
@@ -186,8 +191,10 @@ static inline uint64x2_t eor3(uint64x2_t p, uint64x2_t q, uint64x2_t r)
return res;
}
-static void xor_arm64_eor3_3(unsigned long bytes, unsigned long *p1,
- unsigned long *p2, unsigned long *p3)
+static void xor_arm64_eor3_3(unsigned long bytes,
+ unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
uint64_t *dp1 = (uint64_t *)p1;
uint64_t *dp2 = (uint64_t *)p2;
@@ -219,9 +226,11 @@ static void xor_arm64_eor3_3(unsigned long bytes, unsigned long *p1,
} while (--lines > 0);
}
-static void xor_arm64_eor3_4(unsigned long bytes, unsigned long *p1,
- unsigned long *p2, unsigned long *p3,
- unsigned long *p4)
+static void xor_arm64_eor3_4(unsigned long bytes,
+ unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
uint64_t *dp1 = (uint64_t *)p1;
uint64_t *dp2 = (uint64_t *)p2;
@@ -261,9 +270,12 @@ static void xor_arm64_eor3_4(unsigned long bytes, unsigned long *p1,
} while (--lines > 0);
}
-static void xor_arm64_eor3_5(unsigned long bytes, unsigned long *p1,
- unsigned long *p2, unsigned long *p3,
- unsigned long *p4, unsigned long *p5)
+static void xor_arm64_eor3_5(unsigned long bytes,
+ unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
uint64_t *dp1 = (uint64_t *)p1;
uint64_t *dp2 = (uint64_t *)p2;
diff --git a/arch/ia64/include/asm/xor.h b/arch/ia64/include/asm/xor.h
index 673051bf9d7d..6785f70d3208 100644
--- a/arch/ia64/include/asm/xor.h
+++ b/arch/ia64/include/asm/xor.h
@@ -4,13 +4,20 @@
*/
-extern void xor_ia64_2(unsigned long, unsigned long *, unsigned long *);
-extern void xor_ia64_3(unsigned long, unsigned long *, unsigned long *,
- unsigned long *);
-extern void xor_ia64_4(unsigned long, unsigned long *, unsigned long *,
- unsigned long *, unsigned long *);
-extern void xor_ia64_5(unsigned long, unsigned long *, unsigned long *,
- unsigned long *, unsigned long *, unsigned long *);
+extern void xor_ia64_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2);
+extern void xor_ia64_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3);
+extern void xor_ia64_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4);
+extern void xor_ia64_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5);
static struct xor_block_template xor_block_ia64 = {
.name = "ia64",
diff --git a/arch/powerpc/include/asm/xor_altivec.h b/arch/powerpc/include/asm/xor_altivec.h
index 6ca923510b59..294620a25f80 100644
--- a/arch/powerpc/include/asm/xor_altivec.h
+++ b/arch/powerpc/include/asm/xor_altivec.h
@@ -3,17 +3,20 @@
#define _ASM_POWERPC_XOR_ALTIVEC_H
#ifdef CONFIG_ALTIVEC
-
-void xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in);
-void xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in);
-void xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in,
- unsigned long *v4_in);
-void xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in,
- unsigned long *v4_in, unsigned long *v5_in);
+void xor_altivec_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2);
+void xor_altivec_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3);
+void xor_altivec_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4);
+void xor_altivec_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5);
#endif
#endif /* _ASM_POWERPC_XOR_ALTIVEC_H */
diff --git a/arch/powerpc/lib/xor_vmx.c b/arch/powerpc/lib/xor_vmx.c
index 54e61979e80e..aab49d056d18 100644
--- a/arch/powerpc/lib/xor_vmx.c
+++ b/arch/powerpc/lib/xor_vmx.c
@@ -49,8 +49,9 @@ typedef vector signed char unative_t;
V1##_3 = vec_xor(V1##_3, V2##_3); \
} while (0)
-void __xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in)
+void __xor_altivec_2(unsigned long bytes,
+ unsigned long * __restrict v1_in,
+ const unsigned long * __restrict v2_in)
{
DEFINE(v1);
DEFINE(v2);
@@ -67,8 +68,10 @@ void __xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
} while (--lines > 0);
}
-void __xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in)
+void __xor_altivec_3(unsigned long bytes,
+ unsigned long * __restrict v1_in,
+ const unsigned long * __restrict v2_in,
+ const unsigned long * __restrict v3_in)
{
DEFINE(v1);
DEFINE(v2);
@@ -89,9 +92,11 @@ void __xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
} while (--lines > 0);
}
-void __xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in,
- unsigned long *v4_in)
+void __xor_altivec_4(unsigned long bytes,
+ unsigned long * __restrict v1_in,
+ const unsigned long * __restrict v2_in,
+ const unsigned long * __restrict v3_in,
+ const unsigned long * __restrict v4_in)
{
DEFINE(v1);
DEFINE(v2);
@@ -116,9 +121,12 @@ void __xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
} while (--lines > 0);
}
-void __xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in,
- unsigned long *v4_in, unsigned long *v5_in)
+void __xor_altivec_5(unsigned long bytes,
+ unsigned long * __restrict v1_in,
+ const unsigned long * __restrict v2_in,
+ const unsigned long * __restrict v3_in,
+ const unsigned long * __restrict v4_in,
+ const unsigned long * __restrict v5_in)
{
DEFINE(v1);
DEFINE(v2);
diff --git a/arch/powerpc/lib/xor_vmx.h b/arch/powerpc/lib/xor_vmx.h
index 5c2b0839b179..573c41d90dac 100644
--- a/arch/powerpc/lib/xor_vmx.h
+++ b/arch/powerpc/lib/xor_vmx.h
@@ -6,16 +6,17 @@
* outside of the enable/disable altivec block.
*/
-void __xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in);
-
-void __xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in);
-
-void __xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in,
- unsigned long *v4_in);
-
-void __xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in,
- unsigned long *v4_in, unsigned long *v5_in);
+void __xor_altivec_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2);
+void __xor_altivec_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3);
+void __xor_altivec_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4);
+void __xor_altivec_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5);
diff --git a/arch/powerpc/lib/xor_vmx_glue.c b/arch/powerpc/lib/xor_vmx_glue.c
index 80dba916c367..35d917ece4d1 100644
--- a/arch/powerpc/lib/xor_vmx_glue.c
+++ b/arch/powerpc/lib/xor_vmx_glue.c
@@ -12,47 +12,51 @@
#include <asm/xor_altivec.h>
#include "xor_vmx.h"
-void xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in)
+void xor_altivec_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
preempt_disable();
enable_kernel_altivec();
- __xor_altivec_2(bytes, v1_in, v2_in);
+ __xor_altivec_2(bytes, p1, p2);
disable_kernel_altivec();
preempt_enable();
}
EXPORT_SYMBOL(xor_altivec_2);
-void xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in)
+void xor_altivec_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
preempt_disable();
enable_kernel_altivec();
- __xor_altivec_3(bytes, v1_in, v2_in, v3_in);
+ __xor_altivec_3(bytes, p1, p2, p3);
disable_kernel_altivec();
preempt_enable();
}
EXPORT_SYMBOL(xor_altivec_3);
-void xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in,
- unsigned long *v4_in)
+void xor_altivec_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
preempt_disable();
enable_kernel_altivec();
- __xor_altivec_4(bytes, v1_in, v2_in, v3_in, v4_in);
+ __xor_altivec_4(bytes, p1, p2, p3, p4);
disable_kernel_altivec();
preempt_enable();
}
EXPORT_SYMBOL(xor_altivec_4);
-void xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in,
- unsigned long *v4_in, unsigned long *v5_in)
+void xor_altivec_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
preempt_disable();
enable_kernel_altivec();
- __xor_altivec_5(bytes, v1_in, v2_in, v3_in, v4_in, v5_in);
+ __xor_altivec_5(bytes, p1, p2, p3, p4, p5);
disable_kernel_altivec();
preempt_enable();
}
diff --git a/arch/s390/lib/xor.c b/arch/s390/lib/xor.c
index a963c3d8ad0d..fb924a8041dc 100644
--- a/arch/s390/lib/xor.c
+++ b/arch/s390/lib/xor.c
@@ -11,7 +11,8 @@
#include <linux/raid/xor.h>
#include <asm/xor.h>
-static void xor_xc_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+static void xor_xc_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
asm volatile(
" larl 1,2f\n"
@@ -32,8 +33,9 @@ static void xor_xc_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
: "0", "1", "cc", "memory");
}
-static void xor_xc_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
+static void xor_xc_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
asm volatile(
" larl 1,2f\n"
@@ -58,8 +60,10 @@ static void xor_xc_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
: : "0", "1", "cc", "memory");
}
-static void xor_xc_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
+static void xor_xc_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
asm volatile(
" larl 1,2f\n"
@@ -88,8 +92,11 @@ static void xor_xc_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
: : "0", "1", "cc", "memory");
}
-static void xor_xc_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
+static void xor_xc_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
asm volatile(
" larl 1,2f\n"
diff --git a/arch/sparc/include/asm/xor_32.h b/arch/sparc/include/asm/xor_32.h
index 3e5af37e4b9c..0351813cf3af 100644
--- a/arch/sparc/include/asm/xor_32.h
+++ b/arch/sparc/include/asm/xor_32.h
@@ -13,7 +13,8 @@
*/
static void
-sparc_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+sparc_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
int lines = bytes / (sizeof (long)) / 8;
@@ -50,8 +51,9 @@ sparc_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
}
static void
-sparc_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
+sparc_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
int lines = bytes / (sizeof (long)) / 8;
@@ -101,8 +103,10 @@ sparc_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-sparc_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
+sparc_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
int lines = bytes / (sizeof (long)) / 8;
@@ -165,8 +169,11 @@ sparc_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-sparc_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
+sparc_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
int lines = bytes / (sizeof (long)) / 8;
diff --git a/arch/sparc/include/asm/xor_64.h b/arch/sparc/include/asm/xor_64.h
index 16169f3edcd5..caaddea8ad79 100644
--- a/arch/sparc/include/asm/xor_64.h
+++ b/arch/sparc/include/asm/xor_64.h
@@ -12,13 +12,20 @@
#include <asm/spitfire.h>
-void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
-void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
- unsigned long *);
-void xor_vis_4(unsigned long, unsigned long *, unsigned long *,
- unsigned long *, unsigned long *);
-void xor_vis_5(unsigned long, unsigned long *, unsigned long *,
- unsigned long *, unsigned long *, unsigned long *);
+void xor_vis_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2);
+void xor_vis_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3);
+void xor_vis_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4);
+void xor_vis_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5);
/* XXX Ugh, write cheetah versions... -DaveM */
@@ -30,13 +37,20 @@ static struct xor_block_template xor_block_VIS = {
.do_5 = xor_vis_5,
};
-void xor_niagara_2(unsigned long, unsigned long *, unsigned long *);
-void xor_niagara_3(unsigned long, unsigned long *, unsigned long *,
- unsigned long *);
-void xor_niagara_4(unsigned long, unsigned long *, unsigned long *,
- unsigned long *, unsigned long *);
-void xor_niagara_5(unsigned long, unsigned long *, unsigned long *,
- unsigned long *, unsigned long *, unsigned long *);
+void xor_niagara_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2);
+void xor_niagara_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3);
+void xor_niagara_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4);
+void xor_niagara_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5);
static struct xor_block_template xor_block_niagara = {
.name = "Niagara",
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index c3af959648e6..2831685adf6f 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -90,6 +90,9 @@ nhpoly1305-avx2-y := nh-avx2-x86_64.o nhpoly1305-avx2-glue.o
obj-$(CONFIG_CRYPTO_CURVE25519_X86) += curve25519-x86_64.o
+obj-$(CONFIG_CRYPTO_SM3_AVX_X86_64) += sm3-avx-x86_64.o
+sm3-avx-x86_64-y := sm3-avx-asm_64.o sm3_avx_glue.o
+
obj-$(CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64) += sm4-aesni-avx-x86_64.o
sm4-aesni-avx-x86_64-y := sm4-aesni-avx-asm_64.o sm4_aesni_avx_glue.o
diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
index c799838242a6..43852ba6e19c 100644
--- a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
+++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
@@ -1,65 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause */
/*
- * Implement AES CTR mode by8 optimization with AVX instructions. (x86_64)
- *
- * This is AES128/192/256 CTR mode optimization implementation. It requires
- * the support of Intel(R) AESNI and AVX instructions.
- *
- * This work was inspired by the AES CTR mode optimization published
- * in Intel Optimized IPSEC Cryptograhpic library.
- * Additional information on it can be found at:
- * http://downloadcenter.intel.com/Detail_Desc.aspx?agr=Y&DwnldID=22972
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
+ * AES CTR mode by8 optimization with AVX instructions. (x86_64)
*
* Copyright(c) 2014 Intel Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
* Contact Information:
* James Guilford <james.guilford@intel.com>
* Sean Gulley <sean.m.gulley@intel.com>
* Chandramouli Narayanan <mouli@linux.intel.com>
+ */
+/*
+ * This is AES128/192/256 CTR mode optimization implementation. It requires
+ * the support of Intel(R) AESNI and AVX instructions.
*
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
+ * This work was inspired by the AES CTR mode optimization published
+ * in Intel Optimized IPSEC Cryptographic library.
+ * Additional information on it can be found at:
+ * https://github.com/intel/intel-ipsec-mb
*/
#include <linux/linkage.h>
diff --git a/arch/x86/crypto/blowfish_glue.c b/arch/x86/crypto/blowfish_glue.c
index a880e0b1c255..fda6066437aa 100644
--- a/arch/x86/crypto/blowfish_glue.c
+++ b/arch/x86/crypto/blowfish_glue.c
@@ -32,24 +32,12 @@ static inline void blowfish_enc_blk(struct bf_ctx *ctx, u8 *dst, const u8 *src)
__blowfish_enc_blk(ctx, dst, src, false);
}
-static inline void blowfish_enc_blk_xor(struct bf_ctx *ctx, u8 *dst,
- const u8 *src)
-{
- __blowfish_enc_blk(ctx, dst, src, true);
-}
-
static inline void blowfish_enc_blk_4way(struct bf_ctx *ctx, u8 *dst,
const u8 *src)
{
__blowfish_enc_blk_4way(ctx, dst, src, false);
}
-static inline void blowfish_enc_blk_xor_4way(struct bf_ctx *ctx, u8 *dst,
- const u8 *src)
-{
- __blowfish_enc_blk_4way(ctx, dst, src, true);
-}
-
static void blowfish_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
blowfish_enc_blk(crypto_tfm_ctx(tfm), dst, src);
diff --git a/arch/x86/crypto/des3_ede_glue.c b/arch/x86/crypto/des3_ede_glue.c
index 787c234d2469..abb8b1fe123b 100644
--- a/arch/x86/crypto/des3_ede_glue.c
+++ b/arch/x86/crypto/des3_ede_glue.c
@@ -45,14 +45,6 @@ static inline void des3_ede_dec_blk(struct des3_ede_x86_ctx *ctx, u8 *dst,
des3_ede_x86_64_crypt_blk(dec_ctx, dst, src);
}
-static inline void des3_ede_enc_blk_3way(struct des3_ede_x86_ctx *ctx, u8 *dst,
- const u8 *src)
-{
- u32 *enc_ctx = ctx->enc.expkey;
-
- des3_ede_x86_64_crypt_blk_3way(enc_ctx, dst, src);
-}
-
static inline void des3_ede_dec_blk_3way(struct des3_ede_x86_ctx *ctx, u8 *dst,
const u8 *src)
{
diff --git a/arch/x86/crypto/sm3-avx-asm_64.S b/arch/x86/crypto/sm3-avx-asm_64.S
new file mode 100644
index 000000000000..71e6aae23e17
--- /dev/null
+++ b/arch/x86/crypto/sm3-avx-asm_64.S
@@ -0,0 +1,517 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * SM3 AVX accelerated transform.
+ * specified in: https://datatracker.ietf.org/doc/html/draft-sca-cfrg-sm3-02
+ *
+ * Copyright (C) 2021 Jussi Kivilinna <jussi.kivilinna@iki.fi>
+ * Copyright (C) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+ */
+
+/* Based on SM3 AES/BMI2 accelerated work by libgcrypt at:
+ * https://gnupg.org/software/libgcrypt/index.html
+ */
+
+#include <linux/linkage.h>
+#include <asm/frame.h>
+
+/* Context structure */
+
+#define state_h0 0
+#define state_h1 4
+#define state_h2 8
+#define state_h3 12
+#define state_h4 16
+#define state_h5 20
+#define state_h6 24
+#define state_h7 28
+
+/* Constants */
+
+/* Round constant macros */
+
+#define K0 2043430169 /* 0x79cc4519 */
+#define K1 -208106958 /* 0xf3988a32 */
+#define K2 -416213915 /* 0xe7311465 */
+#define K3 -832427829 /* 0xce6228cb */
+#define K4 -1664855657 /* 0x9cc45197 */
+#define K5 965255983 /* 0x3988a32f */
+#define K6 1930511966 /* 0x7311465e */
+#define K7 -433943364 /* 0xe6228cbc */
+#define K8 -867886727 /* 0xcc451979 */
+#define K9 -1735773453 /* 0x988a32f3 */
+#define K10 823420391 /* 0x311465e7 */
+#define K11 1646840782 /* 0x6228cbce */
+#define K12 -1001285732 /* 0xc451979c */
+#define K13 -2002571463 /* 0x88a32f39 */
+#define K14 289824371 /* 0x11465e73 */
+#define K15 579648742 /* 0x228cbce6 */
+#define K16 -1651869049 /* 0x9d8a7a87 */
+#define K17 991229199 /* 0x3b14f50f */
+#define K18 1982458398 /* 0x7629ea1e */
+#define K19 -330050500 /* 0xec53d43c */
+#define K20 -660100999 /* 0xd8a7a879 */
+#define K21 -1320201997 /* 0xb14f50f3 */
+#define K22 1654563303 /* 0x629ea1e7 */
+#define K23 -985840690 /* 0xc53d43ce */
+#define K24 -1971681379 /* 0x8a7a879d */
+#define K25 351604539 /* 0x14f50f3b */
+#define K26 703209078 /* 0x29ea1e76 */
+#define K27 1406418156 /* 0x53d43cec */
+#define K28 -1482130984 /* 0xa7a879d8 */
+#define K29 1330705329 /* 0x4f50f3b1 */
+#define K30 -1633556638 /* 0x9ea1e762 */
+#define K31 1027854021 /* 0x3d43cec5 */
+#define K32 2055708042 /* 0x7a879d8a */
+#define K33 -183551212 /* 0xf50f3b14 */
+#define K34 -367102423 /* 0xea1e7629 */
+#define K35 -734204845 /* 0xd43cec53 */
+#define K36 -1468409689 /* 0xa879d8a7 */
+#define K37 1358147919 /* 0x50f3b14f */
+#define K38 -1578671458 /* 0xa1e7629e */
+#define K39 1137624381 /* 0x43cec53d */
+#define K40 -2019718534 /* 0x879d8a7a */
+#define K41 255530229 /* 0x0f3b14f5 */
+#define K42 511060458 /* 0x1e7629ea */
+#define K43 1022120916 /* 0x3cec53d4 */
+#define K44 2044241832 /* 0x79d8a7a8 */
+#define K45 -206483632 /* 0xf3b14f50 */
+#define K46 -412967263 /* 0xe7629ea1 */
+#define K47 -825934525 /* 0xcec53d43 */
+#define K48 -1651869049 /* 0x9d8a7a87 */
+#define K49 991229199 /* 0x3b14f50f */
+#define K50 1982458398 /* 0x7629ea1e */
+#define K51 -330050500 /* 0xec53d43c */
+#define K52 -660100999 /* 0xd8a7a879 */
+#define K53 -1320201997 /* 0xb14f50f3 */
+#define K54 1654563303 /* 0x629ea1e7 */
+#define K55 -985840690 /* 0xc53d43ce */
+#define K56 -1971681379 /* 0x8a7a879d */
+#define K57 351604539 /* 0x14f50f3b */
+#define K58 703209078 /* 0x29ea1e76 */
+#define K59 1406418156 /* 0x53d43cec */
+#define K60 -1482130984 /* 0xa7a879d8 */
+#define K61 1330705329 /* 0x4f50f3b1 */
+#define K62 -1633556638 /* 0x9ea1e762 */
+#define K63 1027854021 /* 0x3d43cec5 */
+
+/* Register macros */
+
+#define RSTATE %rdi
+#define RDATA %rsi
+#define RNBLKS %rdx
+
+#define t0 %eax
+#define t1 %ebx
+#define t2 %ecx
+
+#define a %r8d
+#define b %r9d
+#define c %r10d
+#define d %r11d
+#define e %r12d
+#define f %r13d
+#define g %r14d
+#define h %r15d
+
+#define W0 %xmm0
+#define W1 %xmm1
+#define W2 %xmm2
+#define W3 %xmm3
+#define W4 %xmm4
+#define W5 %xmm5
+
+#define XTMP0 %xmm6
+#define XTMP1 %xmm7
+#define XTMP2 %xmm8
+#define XTMP3 %xmm9
+#define XTMP4 %xmm10
+#define XTMP5 %xmm11
+#define XTMP6 %xmm12
+
+#define BSWAP_REG %xmm15
+
+/* Stack structure */
+
+#define STACK_W_SIZE (32 * 2 * 3)
+#define STACK_REG_SAVE_SIZE (64)
+
+#define STACK_W (0)
+#define STACK_REG_SAVE (STACK_W + STACK_W_SIZE)
+#define STACK_SIZE (STACK_REG_SAVE + STACK_REG_SAVE_SIZE)
+
+/* Instruction helpers. */
+
+#define roll2(v, reg) \
+ roll $(v), reg;
+
+#define roll3mov(v, src, dst) \
+ movl src, dst; \
+ roll $(v), dst;
+
+#define roll3(v, src, dst) \
+ rorxl $(32-(v)), src, dst;
+
+#define addl2(a, out) \
+ leal (a, out), out;
+
+/* Round function macros. */
+
+#define GG1(x, y, z, o, t) \
+ movl x, o; \
+ xorl y, o; \
+ xorl z, o;
+
+#define FF1(x, y, z, o, t) GG1(x, y, z, o, t)
+
+#define GG2(x, y, z, o, t) \
+ andnl z, x, o; \
+ movl y, t; \
+ andl x, t; \
+ addl2(t, o);
+
+#define FF2(x, y, z, o, t) \
+ movl y, o; \
+ xorl x, o; \
+ movl y, t; \
+ andl x, t; \
+ andl z, o; \
+ xorl t, o;
+
+#define R(i, a, b, c, d, e, f, g, h, round, widx, wtype) \
+ /* rol(a, 12) => t0 */ \
+ roll3mov(12, a, t0); /* rorxl here would reduce perf by 6% on zen3 */ \
+ /* rol (t0 + e + t), 7) => t1 */ \
+ leal K##round(t0, e, 1), t1; \
+ roll2(7, t1); \
+ /* h + w1 => h */ \
+ addl wtype##_W1_ADDR(round, widx), h; \
+ /* h + t1 => h */ \
+ addl2(t1, h); \
+ /* t1 ^ t0 => t0 */ \
+ xorl t1, t0; \
+ /* w1w2 + d => d */ \
+ addl wtype##_W1W2_ADDR(round, widx), d; \
+ /* FF##i(a,b,c) => t1 */ \
+ FF##i(a, b, c, t1, t2); \
+ /* d + t1 => d */ \
+ addl2(t1, d); \
+ /* GG#i(e,f,g) => t2 */ \
+ GG##i(e, f, g, t2, t1); \
+ /* h + t2 => h */ \
+ addl2(t2, h); \
+ /* rol (f, 19) => f */ \
+ roll2(19, f); \
+ /* d + t0 => d */ \
+ addl2(t0, d); \
+ /* rol (b, 9) => b */ \
+ roll2(9, b); \
+ /* P0(h) => h */ \
+ roll3(9, h, t2); \
+ roll3(17, h, t1); \
+ xorl t2, h; \
+ xorl t1, h;
+
+#define R1(a, b, c, d, e, f, g, h, round, widx, wtype) \
+ R(1, a, b, c, d, e, f, g, h, round, widx, wtype)
+
+#define R2(a, b, c, d, e, f, g, h, round, widx, wtype) \
+ R(2, a, b, c, d, e, f, g, h, round, widx, wtype)
+
+/* Input expansion macros. */
+
+/* Byte-swapped input address. */
+#define IW_W_ADDR(round, widx, offs) \
+ (STACK_W + ((round) / 4) * 64 + (offs) + ((widx) * 4))(%rsp)
+
+/* Expanded input address. */
+#define XW_W_ADDR(round, widx, offs) \
+ (STACK_W + ((((round) / 3) - 4) % 2) * 64 + (offs) + ((widx) * 4))(%rsp)
+
+/* Rounds 1-12, byte-swapped input block addresses. */
+#define IW_W1_ADDR(round, widx) IW_W_ADDR(round, widx, 0)
+#define IW_W1W2_ADDR(round, widx) IW_W_ADDR(round, widx, 32)
+
+/* Rounds 1-12, expanded input block addresses. */
+#define XW_W1_ADDR(round, widx) XW_W_ADDR(round, widx, 0)
+#define XW_W1W2_ADDR(round, widx) XW_W_ADDR(round, widx, 32)
+
+/* Input block loading. */
+#define LOAD_W_XMM_1() \
+ vmovdqu 0*16(RDATA), XTMP0; /* XTMP0: w3, w2, w1, w0 */ \
+ vmovdqu 1*16(RDATA), XTMP1; /* XTMP1: w7, w6, w5, w4 */ \
+ vmovdqu 2*16(RDATA), XTMP2; /* XTMP2: w11, w10, w9, w8 */ \
+ vmovdqu 3*16(RDATA), XTMP3; /* XTMP3: w15, w14, w13, w12 */ \
+ vpshufb BSWAP_REG, XTMP0, XTMP0; \
+ vpshufb BSWAP_REG, XTMP1, XTMP1; \
+ vpshufb BSWAP_REG, XTMP2, XTMP2; \
+ vpshufb BSWAP_REG, XTMP3, XTMP3; \
+ vpxor XTMP0, XTMP1, XTMP4; \
+ vpxor XTMP1, XTMP2, XTMP5; \
+ vpxor XTMP2, XTMP3, XTMP6; \
+ leaq 64(RDATA), RDATA; \
+ vmovdqa XTMP0, IW_W1_ADDR(0, 0); \
+ vmovdqa XTMP4, IW_W1W2_ADDR(0, 0); \
+ vmovdqa XTMP1, IW_W1_ADDR(4, 0); \
+ vmovdqa XTMP5, IW_W1W2_ADDR(4, 0);
+
+#define LOAD_W_XMM_2() \
+ vmovdqa XTMP2, IW_W1_ADDR(8, 0); \
+ vmovdqa XTMP6, IW_W1W2_ADDR(8, 0);
+
+#define LOAD_W_XMM_3() \
+ vpshufd $0b00000000, XTMP0, W0; /* W0: xx, w0, xx, xx */ \
+ vpshufd $0b11111001, XTMP0, W1; /* W1: xx, w3, w2, w1 */ \
+ vmovdqa XTMP1, W2; /* W2: xx, w6, w5, w4 */ \
+ vpalignr $12, XTMP1, XTMP2, W3; /* W3: xx, w9, w8, w7 */ \
+ vpalignr $8, XTMP2, XTMP3, W4; /* W4: xx, w12, w11, w10 */ \
+ vpshufd $0b11111001, XTMP3, W5; /* W5: xx, w15, w14, w13 */
+
+/* Message scheduling. Note: 3 words per XMM register. */
+#define SCHED_W_0(round, w0, w1, w2, w3, w4, w5) \
+ /* Load (w[i - 16]) => XTMP0 */ \
+ vpshufd $0b10111111, w0, XTMP0; \
+ vpalignr $12, XTMP0, w1, XTMP0; /* XTMP0: xx, w2, w1, w0 */ \
+ /* Load (w[i - 13]) => XTMP1 */ \
+ vpshufd $0b10111111, w1, XTMP1; \
+ vpalignr $12, XTMP1, w2, XTMP1; \
+ /* w[i - 9] == w3 */ \
+ /* XMM3 ^ XTMP0 => XTMP0 */ \
+ vpxor w3, XTMP0, XTMP0;
+
+#define SCHED_W_1(round, w0, w1, w2, w3, w4, w5) \
+ /* w[i - 3] == w5 */ \
+ /* rol(XMM5, 15) ^ XTMP0 => XTMP0 */ \
+ vpslld $15, w5, XTMP2; \
+ vpsrld $(32-15), w5, XTMP3; \
+ vpxor XTMP2, XTMP3, XTMP3; \
+ vpxor XTMP3, XTMP0, XTMP0; \
+ /* rol(XTMP1, 7) => XTMP1 */ \
+ vpslld $7, XTMP1, XTMP5; \
+ vpsrld $(32-7), XTMP1, XTMP1; \
+ vpxor XTMP5, XTMP1, XTMP1; \
+ /* XMM4 ^ XTMP1 => XTMP1 */ \
+ vpxor w4, XTMP1, XTMP1; \
+ /* w[i - 6] == XMM4 */ \
+ /* P1(XTMP0) ^ XTMP1 => XMM0 */ \
+ vpslld $15, XTMP0, XTMP5; \
+ vpsrld $(32-15), XTMP0, XTMP6; \
+ vpslld $23, XTMP0, XTMP2; \
+ vpsrld $(32-23), XTMP0, XTMP3; \
+ vpxor XTMP0, XTMP1, XTMP1; \
+ vpxor XTMP6, XTMP5, XTMP5; \
+ vpxor XTMP3, XTMP2, XTMP2; \
+ vpxor XTMP2, XTMP5, XTMP5; \
+ vpxor XTMP5, XTMP1, w0;
+
+#define SCHED_W_2(round, w0, w1, w2, w3, w4, w5) \
+ /* W1 in XMM12 */ \
+ vpshufd $0b10111111, w4, XTMP4; \
+ vpalignr $12, XTMP4, w5, XTMP4; \
+ vmovdqa XTMP4, XW_W1_ADDR((round), 0); \
+ /* W1 ^ W2 => XTMP1 */ \
+ vpxor w0, XTMP4, XTMP1; \
+ vmovdqa XTMP1, XW_W1W2_ADDR((round), 0);
+
+
+.section .rodata.cst16, "aM", @progbits, 16
+.align 16
+
+.Lbe32mask:
+ .long 0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f
+
+.text
+
+/*
+ * Transform nblocks*64 bytes (nblocks*16 32-bit words) at DATA.
+ *
+ * void sm3_transform_avx(struct sm3_state *state,
+ * const u8 *data, int nblocks);
+ */
+.align 16
+SYM_FUNC_START(sm3_transform_avx)
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: data (64*nblks bytes)
+ * %rdx: nblocks
+ */
+ vzeroupper;
+
+ pushq %rbp;
+ movq %rsp, %rbp;
+
+ movq %rdx, RNBLKS;
+
+ subq $STACK_SIZE, %rsp;
+ andq $(~63), %rsp;
+
+ movq %rbx, (STACK_REG_SAVE + 0 * 8)(%rsp);
+ movq %r15, (STACK_REG_SAVE + 1 * 8)(%rsp);
+ movq %r14, (STACK_REG_SAVE + 2 * 8)(%rsp);
+ movq %r13, (STACK_REG_SAVE + 3 * 8)(%rsp);
+ movq %r12, (STACK_REG_SAVE + 4 * 8)(%rsp);
+
+ vmovdqa .Lbe32mask (%rip), BSWAP_REG;
+
+ /* Get the values of the chaining variables. */
+ movl state_h0(RSTATE), a;
+ movl state_h1(RSTATE), b;
+ movl state_h2(RSTATE), c;
+ movl state_h3(RSTATE), d;
+ movl state_h4(RSTATE), e;
+ movl state_h5(RSTATE), f;
+ movl state_h6(RSTATE), g;
+ movl state_h7(RSTATE), h;
+
+.align 16
+.Loop:
+ /* Load data part1. */
+ LOAD_W_XMM_1();
+
+ leaq -1(RNBLKS), RNBLKS;
+
+ /* Transform 0-3 + Load data part2. */
+ R1(a, b, c, d, e, f, g, h, 0, 0, IW); LOAD_W_XMM_2();
+ R1(d, a, b, c, h, e, f, g, 1, 1, IW);
+ R1(c, d, a, b, g, h, e, f, 2, 2, IW);
+ R1(b, c, d, a, f, g, h, e, 3, 3, IW); LOAD_W_XMM_3();
+
+ /* Transform 4-7 + Precalc 12-14. */
+ R1(a, b, c, d, e, f, g, h, 4, 0, IW);
+ R1(d, a, b, c, h, e, f, g, 5, 1, IW);
+ R1(c, d, a, b, g, h, e, f, 6, 2, IW); SCHED_W_0(12, W0, W1, W2, W3, W4, W5);
+ R1(b, c, d, a, f, g, h, e, 7, 3, IW); SCHED_W_1(12, W0, W1, W2, W3, W4, W5);
+
+ /* Transform 8-11 + Precalc 12-17. */
+ R1(a, b, c, d, e, f, g, h, 8, 0, IW); SCHED_W_2(12, W0, W1, W2, W3, W4, W5);
+ R1(d, a, b, c, h, e, f, g, 9, 1, IW); SCHED_W_0(15, W1, W2, W3, W4, W5, W0);
+ R1(c, d, a, b, g, h, e, f, 10, 2, IW); SCHED_W_1(15, W1, W2, W3, W4, W5, W0);
+ R1(b, c, d, a, f, g, h, e, 11, 3, IW); SCHED_W_2(15, W1, W2, W3, W4, W5, W0);
+
+ /* Transform 12-14 + Precalc 18-20 */
+ R1(a, b, c, d, e, f, g, h, 12, 0, XW); SCHED_W_0(18, W2, W3, W4, W5, W0, W1);
+ R1(d, a, b, c, h, e, f, g, 13, 1, XW); SCHED_W_1(18, W2, W3, W4, W5, W0, W1);
+ R1(c, d, a, b, g, h, e, f, 14, 2, XW); SCHED_W_2(18, W2, W3, W4, W5, W0, W1);
+
+ /* Transform 15-17 + Precalc 21-23 */
+ R1(b, c, d, a, f, g, h, e, 15, 0, XW); SCHED_W_0(21, W3, W4, W5, W0, W1, W2);
+ R2(a, b, c, d, e, f, g, h, 16, 1, XW); SCHED_W_1(21, W3, W4, W5, W0, W1, W2);
+ R2(d, a, b, c, h, e, f, g, 17, 2, XW); SCHED_W_2(21, W3, W4, W5, W0, W1, W2);
+
+ /* Transform 18-20 + Precalc 24-26 */
+ R2(c, d, a, b, g, h, e, f, 18, 0, XW); SCHED_W_0(24, W4, W5, W0, W1, W2, W3);
+ R2(b, c, d, a, f, g, h, e, 19, 1, XW); SCHED_W_1(24, W4, W5, W0, W1, W2, W3);
+ R2(a, b, c, d, e, f, g, h, 20, 2, XW); SCHED_W_2(24, W4, W5, W0, W1, W2, W3);
+
+ /* Transform 21-23 + Precalc 27-29 */
+ R2(d, a, b, c, h, e, f, g, 21, 0, XW); SCHED_W_0(27, W5, W0, W1, W2, W3, W4);
+ R2(c, d, a, b, g, h, e, f, 22, 1, XW); SCHED_W_1(27, W5, W0, W1, W2, W3, W4);
+ R2(b, c, d, a, f, g, h, e, 23, 2, XW); SCHED_W_2(27, W5, W0, W1, W2, W3, W4);
+
+ /* Transform 24-26 + Precalc 30-32 */
+ R2(a, b, c, d, e, f, g, h, 24, 0, XW); SCHED_W_0(30, W0, W1, W2, W3, W4, W5);
+ R2(d, a, b, c, h, e, f, g, 25, 1, XW); SCHED_W_1(30, W0, W1, W2, W3, W4, W5);
+ R2(c, d, a, b, g, h, e, f, 26, 2, XW); SCHED_W_2(30, W0, W1, W2, W3, W4, W5);
+
+ /* Transform 27-29 + Precalc 33-35 */
+ R2(b, c, d, a, f, g, h, e, 27, 0, XW); SCHED_W_0(33, W1, W2, W3, W4, W5, W0);
+ R2(a, b, c, d, e, f, g, h, 28, 1, XW); SCHED_W_1(33, W1, W2, W3, W4, W5, W0);
+ R2(d, a, b, c, h, e, f, g, 29, 2, XW); SCHED_W_2(33, W1, W2, W3, W4, W5, W0);
+
+ /* Transform 30-32 + Precalc 36-38 */
+ R2(c, d, a, b, g, h, e, f, 30, 0, XW); SCHED_W_0(36, W2, W3, W4, W5, W0, W1);
+ R2(b, c, d, a, f, g, h, e, 31, 1, XW); SCHED_W_1(36, W2, W3, W4, W5, W0, W1);
+ R2(a, b, c, d, e, f, g, h, 32, 2, XW); SCHED_W_2(36, W2, W3, W4, W5, W0, W1);
+
+ /* Transform 33-35 + Precalc 39-41 */
+ R2(d, a, b, c, h, e, f, g, 33, 0, XW); SCHED_W_0(39, W3, W4, W5, W0, W1, W2);
+ R2(c, d, a, b, g, h, e, f, 34, 1, XW); SCHED_W_1(39, W3, W4, W5, W0, W1, W2);
+ R2(b, c, d, a, f, g, h, e, 35, 2, XW); SCHED_W_2(39, W3, W4, W5, W0, W1, W2);
+
+ /* Transform 36-38 + Precalc 42-44 */
+ R2(a, b, c, d, e, f, g, h, 36, 0, XW); SCHED_W_0(42, W4, W5, W0, W1, W2, W3);
+ R2(d, a, b, c, h, e, f, g, 37, 1, XW); SCHED_W_1(42, W4, W5, W0, W1, W2, W3);
+ R2(c, d, a, b, g, h, e, f, 38, 2, XW); SCHED_W_2(42, W4, W5, W0, W1, W2, W3);
+
+ /* Transform 39-41 + Precalc 45-47 */
+ R2(b, c, d, a, f, g, h, e, 39, 0, XW); SCHED_W_0(45, W5, W0, W1, W2, W3, W4);
+ R2(a, b, c, d, e, f, g, h, 40, 1, XW); SCHED_W_1(45, W5, W0, W1, W2, W3, W4);
+ R2(d, a, b, c, h, e, f, g, 41, 2, XW); SCHED_W_2(45, W5, W0, W1, W2, W3, W4);
+
+ /* Transform 42-44 + Precalc 48-50 */
+ R2(c, d, a, b, g, h, e, f, 42, 0, XW); SCHED_W_0(48, W0, W1, W2, W3, W4, W5);
+ R2(b, c, d, a, f, g, h, e, 43, 1, XW); SCHED_W_1(48, W0, W1, W2, W3, W4, W5);
+ R2(a, b, c, d, e, f, g, h, 44, 2, XW); SCHED_W_2(48, W0, W1, W2, W3, W4, W5);
+
+ /* Transform 45-47 + Precalc 51-53 */
+ R2(d, a, b, c, h, e, f, g, 45, 0, XW); SCHED_W_0(51, W1, W2, W3, W4, W5, W0);
+ R2(c, d, a, b, g, h, e, f, 46, 1, XW); SCHED_W_1(51, W1, W2, W3, W4, W5, W0);
+ R2(b, c, d, a, f, g, h, e, 47, 2, XW); SCHED_W_2(51, W1, W2, W3, W4, W5, W0);
+
+ /* Transform 48-50 + Precalc 54-56 */
+ R2(a, b, c, d, e, f, g, h, 48, 0, XW); SCHED_W_0(54, W2, W3, W4, W5, W0, W1);
+ R2(d, a, b, c, h, e, f, g, 49, 1, XW); SCHED_W_1(54, W2, W3, W4, W5, W0, W1);
+ R2(c, d, a, b, g, h, e, f, 50, 2, XW); SCHED_W_2(54, W2, W3, W4, W5, W0, W1);
+
+ /* Transform 51-53 + Precalc 57-59 */
+ R2(b, c, d, a, f, g, h, e, 51, 0, XW); SCHED_W_0(57, W3, W4, W5, W0, W1, W2);
+ R2(a, b, c, d, e, f, g, h, 52, 1, XW); SCHED_W_1(57, W3, W4, W5, W0, W1, W2);
+ R2(d, a, b, c, h, e, f, g, 53, 2, XW); SCHED_W_2(57, W3, W4, W5, W0, W1, W2);
+
+ /* Transform 54-56 + Precalc 60-62 */
+ R2(c, d, a, b, g, h, e, f, 54, 0, XW); SCHED_W_0(60, W4, W5, W0, W1, W2, W3);
+ R2(b, c, d, a, f, g, h, e, 55, 1, XW); SCHED_W_1(60, W4, W5, W0, W1, W2, W3);
+ R2(a, b, c, d, e, f, g, h, 56, 2, XW); SCHED_W_2(60, W4, W5, W0, W1, W2, W3);
+
+ /* Transform 57-59 + Precalc 63 */
+ R2(d, a, b, c, h, e, f, g, 57, 0, XW); SCHED_W_0(63, W5, W0, W1, W2, W3, W4);
+ R2(c, d, a, b, g, h, e, f, 58, 1, XW);
+ R2(b, c, d, a, f, g, h, e, 59, 2, XW); SCHED_W_1(63, W5, W0, W1, W2, W3, W4);
+
+ /* Transform 60-62 + Precalc 63 */
+ R2(a, b, c, d, e, f, g, h, 60, 0, XW);
+ R2(d, a, b, c, h, e, f, g, 61, 1, XW); SCHED_W_2(63, W5, W0, W1, W2, W3, W4);
+ R2(c, d, a, b, g, h, e, f, 62, 2, XW);
+
+ /* Transform 63 */
+ R2(b, c, d, a, f, g, h, e, 63, 0, XW);
+
+ /* Update the chaining variables. */
+ xorl state_h0(RSTATE), a;
+ xorl state_h1(RSTATE), b;
+ xorl state_h2(RSTATE), c;
+ xorl state_h3(RSTATE), d;
+ movl a, state_h0(RSTATE);
+ movl b, state_h1(RSTATE);
+ movl c, state_h2(RSTATE);
+ movl d, state_h3(RSTATE);
+ xorl state_h4(RSTATE), e;
+ xorl state_h5(RSTATE), f;
+ xorl state_h6(RSTATE), g;
+ xorl state_h7(RSTATE), h;
+ movl e, state_h4(RSTATE);
+ movl f, state_h5(RSTATE);
+ movl g, state_h6(RSTATE);
+ movl h, state_h7(RSTATE);
+
+ cmpq $0, RNBLKS;
+ jne .Loop;
+
+ vzeroall;
+
+ movq (STACK_REG_SAVE + 0 * 8)(%rsp), %rbx;
+ movq (STACK_REG_SAVE + 1 * 8)(%rsp), %r15;
+ movq (STACK_REG_SAVE + 2 * 8)(%rsp), %r14;
+ movq (STACK_REG_SAVE + 3 * 8)(%rsp), %r13;
+ movq (STACK_REG_SAVE + 4 * 8)(%rsp), %r12;
+
+ vmovdqa %xmm0, IW_W1_ADDR(0, 0);
+ vmovdqa %xmm0, IW_W1W2_ADDR(0, 0);
+ vmovdqa %xmm0, IW_W1_ADDR(4, 0);
+ vmovdqa %xmm0, IW_W1W2_ADDR(4, 0);
+ vmovdqa %xmm0, IW_W1_ADDR(8, 0);
+ vmovdqa %xmm0, IW_W1W2_ADDR(8, 0);
+
+ movq %rbp, %rsp;
+ popq %rbp;
+ ret;
+SYM_FUNC_END(sm3_transform_avx)
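
The round macros above keep SM3's eight working variables in general-purpose registers and interleave each round triple with one third of a message-schedule step (SCHED_W_0/1/2), so the expanded W words are ready several rounds before they are consumed. The xorl/movl epilogue is SM3's feed-forward: unlike SHA-2, which adds the working variables into the chaining value, SM3 XORs them. A minimal C sketch of that per-block structure, where sm3_rounds() is a hypothetical stand-in for the 64 rounds implemented in the assembly:

  /* Editorial sketch, not part of the patch; sm3_rounds() is a
   * hypothetical stand-in for the 64 asm rounds above. */
  static void sm3_block(u32 state[8], const u8 block[64])
  {
  	u32 w[8];
  	int i;

  	memcpy(w, state, sizeof(w));	/* a..h start from the chaining value */
  	sm3_rounds(w, block);		/* rounds 0..63, as in the loop above */
  	for (i = 0; i < 8; i++)
  		state[i] ^= w[i];	/* feed-forward: V(i+1) = R(V(i)) ^ V(i) */
  }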
diff --git a/arch/x86/crypto/sm3_avx_glue.c b/arch/x86/crypto/sm3_avx_glue.c
new file mode 100644
index 000000000000..661b6f22ffcd
--- /dev/null
+++ b/arch/x86/crypto/sm3_avx_glue.c
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * SM3 Secure Hash Algorithm, AVX assembler accelerated,
+ * as specified in: https://datatracker.ietf.org/doc/html/draft-sca-cfrg-sm3-02
+ *
+ * Copyright (C) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <crypto/sm3.h>
+#include <crypto/sm3_base.h>
+#include <asm/simd.h>
+
+asmlinkage void sm3_transform_avx(struct sm3_state *state,
+ const u8 *data, int nblocks);
+
+static int sm3_avx_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ struct sm3_state *sctx = shash_desc_ctx(desc);
+
+ if (!crypto_simd_usable() ||
+ (sctx->count % SM3_BLOCK_SIZE) + len < SM3_BLOCK_SIZE) {
+ sm3_update(sctx, data, len);
+ return 0;
+ }
+
+ /*
+ * Make sure struct sm3_state begins directly with the SM3
+ * 256-bit internal state, as this is what the asm functions expect.
+ */
+ BUILD_BUG_ON(offsetof(struct sm3_state, state) != 0);
+
+ kernel_fpu_begin();
+ sm3_base_do_update(desc, data, len, sm3_transform_avx);
+ kernel_fpu_end();
+
+ return 0;
+}
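
The (sctx->count % SM3_BLOCK_SIZE) + len < SM3_BLOCK_SIZE test keeps short updates on the generic path. Worked through: with 40 bytes already buffered and len == 20, 40 + 20 = 60 < 64, so no complete block would reach the AVX transform anyway, and the kernel_fpu_begin()/kernel_fpu_end() round-trip, which saves and restores FPU state, would be pure overhead.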
+
+static int sm3_avx_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ if (!crypto_simd_usable()) {
+ struct sm3_state *sctx = shash_desc_ctx(desc);
+
+ if (len)
+ sm3_update(sctx, data, len);
+
+ sm3_final(sctx, out);
+ return 0;
+ }
+
+ kernel_fpu_begin();
+ if (len)
+ sm3_base_do_update(desc, data, len, sm3_transform_avx);
+ sm3_base_do_finalize(desc, sm3_transform_avx);
+ kernel_fpu_end();
+
+ return sm3_base_finish(desc, out);
+}
+
+static int sm3_avx_final(struct shash_desc *desc, u8 *out)
+{
+ if (!crypto_simd_usable()) {
+ sm3_final(shash_desc_ctx(desc), out);
+ return 0;
+ }
+
+ kernel_fpu_begin();
+ sm3_base_do_finalize(desc, sm3_transform_avx);
+ kernel_fpu_end();
+
+ return sm3_base_finish(desc, out);
+}
+
+static struct shash_alg sm3_avx_alg = {
+ .digestsize = SM3_DIGEST_SIZE,
+ .init = sm3_base_init,
+ .update = sm3_avx_update,
+ .final = sm3_avx_final,
+ .finup = sm3_avx_finup,
+ .descsize = sizeof(struct sm3_state),
+ .base = {
+ .cra_name = "sm3",
+ .cra_driver_name = "sm3-avx",
+ .cra_priority = 300,
+ .cra_blocksize = SM3_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+static int __init sm3_avx_mod_init(void)
+{
+ const char *feature_name;
+
+ if (!boot_cpu_has(X86_FEATURE_AVX)) {
+ pr_info("AVX instruction are not detected.\n");
+ return -ENODEV;
+ }
+
+ if (!boot_cpu_has(X86_FEATURE_BMI2)) {
+ pr_info("BMI2 instruction are not detected.\n");
+ return -ENODEV;
+ }
+
+ if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
+ &feature_name)) {
+ pr_info("CPU feature '%s' is not supported.\n", feature_name);
+ return -ENODEV;
+ }
+
+ return crypto_register_shash(&sm3_avx_alg);
+}
+
+static void __exit sm3_avx_mod_exit(void)
+{
+ crypto_unregister_shash(&sm3_avx_alg);
+}
+
+module_init(sm3_avx_mod_init);
+module_exit(sm3_avx_mod_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Tianjia Zhang <tianjia.zhang@linux.alibaba.com>");
+MODULE_DESCRIPTION("SM3 Secure Hash Algorithm, AVX assembler accelerated");
+MODULE_ALIAS_CRYPTO("sm3");
+MODULE_ALIAS_CRYPTO("sm3-avx");
diff --git a/arch/x86/include/asm/xor.h b/arch/x86/include/asm/xor.h
index 2ee95a7769e6..7b0307acc410 100644
--- a/arch/x86/include/asm/xor.h
+++ b/arch/x86/include/asm/xor.h
@@ -57,7 +57,8 @@
op(i + 3, 3)
static void
-xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+xor_sse_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
unsigned long lines = bytes >> 8;
@@ -108,7 +109,8 @@ xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
}
static void
-xor_sse_2_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+xor_sse_2_pf64(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
unsigned long lines = bytes >> 8;
@@ -142,8 +144,9 @@ xor_sse_2_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2)
}
static void
-xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
+xor_sse_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
unsigned long lines = bytes >> 8;
@@ -201,8 +204,9 @@ xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_sse_3_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
+xor_sse_3_pf64(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
unsigned long lines = bytes >> 8;
@@ -238,8 +242,10 @@ xor_sse_3_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
+xor_sse_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
unsigned long lines = bytes >> 8;
@@ -304,8 +310,10 @@ xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_sse_4_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
+xor_sse_4_pf64(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
unsigned long lines = bytes >> 8;
@@ -343,8 +351,11 @@ xor_sse_4_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
+xor_sse_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
unsigned long lines = bytes >> 8;
@@ -416,8 +427,11 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_sse_5_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
+xor_sse_5_pf64(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
unsigned long lines = bytes >> 8;
diff --git a/arch/x86/include/asm/xor_32.h b/arch/x86/include/asm/xor_32.h
index 67ceb790e639..7a6b9474591e 100644
--- a/arch/x86/include/asm/xor_32.h
+++ b/arch/x86/include/asm/xor_32.h
@@ -21,7 +21,8 @@
#include <asm/fpu/api.h>
static void
-xor_pII_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+xor_pII_mmx_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
unsigned long lines = bytes >> 7;
@@ -64,8 +65,9 @@ xor_pII_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
}
static void
-xor_pII_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
+xor_pII_mmx_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
unsigned long lines = bytes >> 7;
@@ -113,8 +115,10 @@ xor_pII_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_pII_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
+xor_pII_mmx_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
unsigned long lines = bytes >> 7;
@@ -168,8 +172,11 @@ xor_pII_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
static void
-xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
+xor_pII_mmx_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
unsigned long lines = bytes >> 7;
@@ -248,7 +255,8 @@ xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
#undef BLOCK
static void
-xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+xor_p5_mmx_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
unsigned long lines = bytes >> 6;
@@ -295,8 +303,9 @@ xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
}
static void
-xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
+xor_p5_mmx_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
unsigned long lines = bytes >> 6;
@@ -352,8 +361,10 @@ xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
+xor_p5_mmx_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
unsigned long lines = bytes >> 6;
@@ -418,8 +429,11 @@ xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
+xor_p5_mmx_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
unsigned long lines = bytes >> 6;
diff --git a/arch/x86/include/asm/xor_avx.h b/arch/x86/include/asm/xor_avx.h
index 0c4e5b5e3852..7f81dd5897f4 100644
--- a/arch/x86/include/asm/xor_avx.h
+++ b/arch/x86/include/asm/xor_avx.h
@@ -26,7 +26,8 @@
BLOCK4(8) \
BLOCK4(12)
-static void xor_avx_2(unsigned long bytes, unsigned long *p0, unsigned long *p1)
+static void xor_avx_2(unsigned long bytes, unsigned long * __restrict p0,
+ const unsigned long * __restrict p1)
{
unsigned long lines = bytes >> 9;
@@ -52,8 +53,9 @@ do { \
kernel_fpu_end();
}
-static void xor_avx_3(unsigned long bytes, unsigned long *p0, unsigned long *p1,
- unsigned long *p2)
+static void xor_avx_3(unsigned long bytes, unsigned long * __restrict p0,
+ const unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
unsigned long lines = bytes >> 9;
@@ -82,8 +84,10 @@ do { \
kernel_fpu_end();
}
-static void xor_avx_4(unsigned long bytes, unsigned long *p0, unsigned long *p1,
- unsigned long *p2, unsigned long *p3)
+static void xor_avx_4(unsigned long bytes, unsigned long * __restrict p0,
+ const unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
unsigned long lines = bytes >> 9;
@@ -115,8 +119,11 @@ do { \
kernel_fpu_end();
}
-static void xor_avx_5(unsigned long bytes, unsigned long *p0, unsigned long *p1,
- unsigned long *p2, unsigned long *p3, unsigned long *p4)
+static void xor_avx_5(unsigned long bytes, unsigned long * __restrict p0,
+ const unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
unsigned long lines = bytes >> 9;
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 442765219c37..d6d7e84bb7f8 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -231,6 +231,13 @@ config CRYPTO_DH
help
Generic implementation of the Diffie-Hellman algorithm.
+config CRYPTO_DH_RFC7919_GROUPS
+ bool "Support for RFC 7919 FFDHE group parameters"
+ depends on CRYPTO_DH
+ select CRYPTO_RNG_DEFAULT
+ help
+ Provide support for RFC 7919 FFDHE group parameters.
+
+ If unsure, say N.
+
config CRYPTO_ECC
tristate
select CRYPTO_RNG_DEFAULT
@@ -267,7 +274,7 @@ config CRYPTO_ECRDSA
config CRYPTO_SM2
tristate "SM2 algorithm"
- select CRYPTO_SM3
+ select CRYPTO_LIB_SM3
select CRYPTO_AKCIPHER
select CRYPTO_MANAGER
select MPILIB
@@ -425,6 +432,7 @@ config CRYPTO_LRW
select CRYPTO_SKCIPHER
select CRYPTO_MANAGER
select CRYPTO_GF128MUL
+ select CRYPTO_ECB
help
LRW: Liskov Rivest Wagner, a tweakable, non malleable, non movable
narrow block cipher mode for dm-crypt. Use it with cipher
@@ -999,6 +1007,7 @@ config CRYPTO_SHA3
config CRYPTO_SM3
tristate "SM3 digest algorithm"
select CRYPTO_HASH
+ select CRYPTO_LIB_SM3
help
SM3 secure hash function as defined by OSCCA GM/T 0004-2012 (SM3).
It is part of the Chinese Commercial Cryptography suite.
@@ -1007,6 +1016,19 @@ config CRYPTO_SM3
http://www.oscca.gov.cn/UpFile/20101222141857786.pdf
https://datatracker.ietf.org/doc/html/draft-shen-sm3-hash
+config CRYPTO_SM3_AVX_X86_64
+ tristate "SM3 digest algorithm (x86_64/AVX)"
+ depends on X86 && 64BIT
+ select CRYPTO_HASH
+ select CRYPTO_LIB_SM3
+ help
+ SM3 secure hash function as defined by OSCCA GM/T 0004-2012 (SM3).
+ It is part of the Chinese Commercial Cryptography suite. This is
+ SM3 optimized implementation using Advanced Vector Extensions (AVX)
+ when available.
+
+ If unsure, say N.
+
config CRYPTO_STREEBOG
tristate "Streebog Hash Function"
select CRYPTO_HASH
@@ -1847,6 +1869,7 @@ config CRYPTO_JITTERENTROPY
config CRYPTO_KDF800108_CTR
tristate
+ select CRYPTO_HMAC
select CRYPTO_SHA256
config CRYPTO_USER_API
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 76fdaa16bd4a..d1c99288af3e 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -6,6 +6,7 @@
*/
#include <crypto/algapi.h>
+#include <crypto/internal/simd.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/fips.h>
@@ -21,6 +22,11 @@
static LIST_HEAD(crypto_template_list);
+#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+DEFINE_PER_CPU(bool, crypto_simd_disabled_for_test);
+EXPORT_PER_CPU_SYMBOL_GPL(crypto_simd_disabled_for_test);
+#endif
+
static inline void crypto_check_module_sig(struct module *mod)
{
if (fips_enabled && mod && !module_sig_ok(mod))
@@ -322,9 +328,17 @@ void crypto_alg_tested(const char *name, int err)
found:
q->cra_flags |= CRYPTO_ALG_DEAD;
alg = test->adult;
- if (err || list_empty(&alg->cra_list))
+
+ if (list_empty(&alg->cra_list))
goto complete;
+ if (err == -ECANCELED)
+ alg->cra_flags |= CRYPTO_ALG_FIPS_INTERNAL;
+ else if (err)
+ goto complete;
+ else
+ alg->cra_flags &= ~CRYPTO_ALG_FIPS_INTERNAL;
+
alg->cra_flags |= CRYPTO_ALG_TESTED;
/* Only satisfy larval waiters if we are the best. */
@@ -604,6 +618,7 @@ int crypto_register_instance(struct crypto_template *tmpl,
{
struct crypto_larval *larval;
struct crypto_spawn *spawn;
+ u32 fips_internal = 0;
int err;
err = crypto_check_alg(&inst->alg);
@@ -626,11 +641,15 @@ int crypto_register_instance(struct crypto_template *tmpl,
spawn->inst = inst;
spawn->registered = true;
+ fips_internal |= spawn->alg->cra_flags;
+
crypto_mod_put(spawn->alg);
spawn = next;
}
+ inst->alg.cra_flags |= (fips_internal & CRYPTO_ALG_FIPS_INTERNAL);
+
larval = __crypto_register_alg(&inst->alg);
if (IS_ERR(larval))
goto unlock;
@@ -683,7 +702,8 @@ int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst,
if (IS_ERR(name))
return PTR_ERR(name);
- alg = crypto_find_alg(name, spawn->frontend, type, mask);
+ alg = crypto_find_alg(name, spawn->frontend,
+ type | CRYPTO_ALG_FIPS_INTERNAL, mask);
if (IS_ERR(alg))
return PTR_ERR(alg);
@@ -1002,7 +1022,13 @@ void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len)
}
while (IS_ENABLED(CONFIG_64BIT) && len >= 8 && !(relalign & 7)) {
- *(u64 *)dst = *(u64 *)src1 ^ *(u64 *)src2;
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
+ u64 l = get_unaligned((u64 *)src1) ^
+ get_unaligned((u64 *)src2);
+ put_unaligned(l, (u64 *)dst);
+ } else {
+ *(u64 *)dst = *(u64 *)src1 ^ *(u64 *)src2;
+ }
dst += 8;
src1 += 8;
src2 += 8;
@@ -1010,7 +1036,13 @@ void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len)
}
while (len >= 4 && !(relalign & 3)) {
- *(u32 *)dst = *(u32 *)src1 ^ *(u32 *)src2;
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
+ u32 l = get_unaligned((u32 *)src1) ^
+ get_unaligned((u32 *)src2);
+ put_unaligned(l, (u32 *)dst);
+ } else {
+ *(u32 *)dst = *(u32 *)src1 ^ *(u32 *)src2;
+ }
dst += 4;
src1 += 4;
src2 += 4;
@@ -1018,7 +1050,13 @@ void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len)
}
while (len >= 2 && !(relalign & 1)) {
- *(u16 *)dst = *(u16 *)src1 ^ *(u16 *)src2;
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
+ u16 l = get_unaligned((u16 *)src1) ^
+ get_unaligned((u16 *)src2);
+ put_unaligned(l, (u16 *)dst);
+ } else {
+ *(u16 *)dst = *(u16 *)src1 ^ *(u16 *)src2;
+ }
dst += 2;
src1 += 2;
src2 += 2;
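
get_unaligned()/put_unaligned() keep the word-at-a-time XOR well-defined when the pointers are not word-aligned; on CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS architectures they compile down to ordinary loads and stores. The idiom in isolation, as a minimal sketch:

  #include <asm/unaligned.h>

  /* XOR one 64-bit word through possibly misaligned pointers. */
  static inline void xor_u64(u8 *dst, const u8 *a, const u8 *b)
  {
  	u64 l = get_unaligned((const u64 *)a) ^ get_unaligned((const u64 *)b);

  	put_unaligned(l, (u64 *)dst);
  }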
diff --git a/crypto/api.c b/crypto/api.c
index 7ddfe946dd56..69508ae9345e 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -223,6 +223,8 @@ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
else if (crypto_is_test_larval(larval) &&
!(alg->cra_flags & CRYPTO_ALG_TESTED))
alg = ERR_PTR(-EAGAIN);
+ else if (alg->cra_flags & CRYPTO_ALG_FIPS_INTERNAL)
+ alg = ERR_PTR(-EAGAIN);
else if (!crypto_mod_get(alg))
alg = ERR_PTR(-EAGAIN);
crypto_mod_put(&larval->alg);
@@ -233,6 +235,7 @@ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type,
u32 mask)
{
+ const u32 fips = CRYPTO_ALG_FIPS_INTERNAL;
struct crypto_alg *alg;
u32 test = 0;
@@ -240,8 +243,20 @@ static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type,
test |= CRYPTO_ALG_TESTED;
down_read(&crypto_alg_sem);
- alg = __crypto_alg_lookup(name, type | test, mask | test);
- if (!alg && test) {
+ alg = __crypto_alg_lookup(name, (type | test) & ~fips,
+ (mask | test) & ~fips);
+ if (alg) {
+ if (((type | mask) ^ fips) & fips)
+ mask |= fips;
+ mask &= fips;
+
+ if (!crypto_is_larval(alg) &&
+ ((type ^ alg->cra_flags) & mask)) {
+ /* Algorithm is disallowed in FIPS mode. */
+ crypto_mod_put(alg);
+ alg = ERR_PTR(-ENOENT);
+ }
+ } else if (test) {
alg = __crypto_alg_lookup(name, type, mask);
if (alg && !crypto_is_larval(alg)) {
/* Test failed */
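
A walk-through of the new lookup masking, abbreviating CRYPTO_ALG_FIPS_INTERNAL as FIPS (editorial notes derived from the hunk above):

  /*
   * - Caller sets FIPS in neither type nor mask (a normal lookup):
   *   ((type | mask) ^ FIPS) & FIPS is non-zero, so mask |= FIPS;
   *   mask &= FIPS then leaves exactly the FIPS bit. An algorithm
   *   carrying CRYPTO_ALG_FIPS_INTERNAL mismatches in
   *   (type ^ alg->cra_flags) & mask and the lookup fails -ENOENT.
   *
   * - Caller passes FIPS in type, as crypto_grab_spawn() now does for
   *   template-internal uses: the condition is false and mask &= FIPS
   *   clears the mask, so FIPS-internal algorithms stay reachable from
   *   inside templates such as ffdheXYZ(dh).
   */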
diff --git a/crypto/asymmetric_keys/signature.c b/crypto/asymmetric_keys/signature.c
index 4aff3eebec17..2deff81f8af5 100644
--- a/crypto/asymmetric_keys/signature.c
+++ b/crypto/asymmetric_keys/signature.c
@@ -35,7 +35,7 @@ void public_key_signature_free(struct public_key_signature *sig)
EXPORT_SYMBOL_GPL(public_key_signature_free);
/**
- * query_asymmetric_key - Get information about an aymmetric key.
+ * query_asymmetric_key - Get information about an asymmetric key.
* @params: Various parameters.
* @info: Where to put the information.
*/
diff --git a/crypto/asymmetric_keys/x509_parser.h b/crypto/asymmetric_keys/x509_parser.h
index da854c94f111..97a886cbe01c 100644
--- a/crypto/asymmetric_keys/x509_parser.h
+++ b/crypto/asymmetric_keys/x509_parser.h
@@ -22,7 +22,7 @@ struct x509_certificate {
time64_t valid_to;
const void *tbs; /* Signed data */
unsigned tbs_size; /* Size of signed data */
- unsigned raw_sig_size; /* Size of sigature */
+ unsigned raw_sig_size; /* Size of signature */
const void *raw_sig; /* Signature data */
const void *raw_serial; /* Raw serial number in ASN.1 */
unsigned raw_serial_size;
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index d8a91521144e..1a3855284091 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -170,8 +170,8 @@ dma_xor_aligned_offsets(struct dma_device *device, unsigned int offset,
*
* xor_blocks always uses the dest as a source so the
* ASYNC_TX_XOR_ZERO_DST flag must be set to not include dest data in
- * the calculation. The assumption with dma eninges is that they only
- * use the destination buffer as a source when it is explicity specified
+ * the calculation. The assumption with dma engines is that they only
+ * use the destination buffer as a source when it is explicitly specified
* in the source list.
*
* src_list note: if the dest is also a source it must be at index zero.
@@ -261,8 +261,8 @@ EXPORT_SYMBOL_GPL(async_xor_offs);
*
* xor_blocks always uses the dest as a source so the
* ASYNC_TX_XOR_ZERO_DST flag must be set to not include dest data in
- * the calculation. The assumption with dma eninges is that they only
- * use the destination buffer as a source when it is explicity specified
+ * the calculation. The assumption with dma engines is that they only
+ * use the destination buffer as a source when it is explicitly specified
* in the source list.
*
* src_list note: if the dest is also a source it must be at index zero.
diff --git a/crypto/async_tx/raid6test.c b/crypto/async_tx/raid6test.c
index 66db82e5a3b1..c9d218e53bcb 100644
--- a/crypto/async_tx/raid6test.c
+++ b/crypto/async_tx/raid6test.c
@@ -217,7 +217,7 @@ static int raid6_test(void)
err += test(12, &tests);
}
- /* the 24 disk case is special for ioatdma as it is the boudary point
+ /* the 24 disk case is special for ioatdma as it is the boundary point
* at which it needs to switch from 8-source ops to 16-source
* ops for continuation (assumes DMA_HAS_PQ_CONTINUE is not set)
*/
@@ -241,7 +241,7 @@ static void raid6_test_exit(void)
}
/* when compiled-in wait for drivers to load first (assumes dma drivers
- * are also compliled-in)
+ * are also compiled-in)
*/
late_initcall(raid6_test);
module_exit(raid6_test_exit);
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 670bf1a01d00..17f674a7cdff 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -253,7 +253,7 @@ static int crypto_authenc_decrypt_tail(struct aead_request *req,
dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
skcipher_request_set_tfm(skreq, ctx->enc);
- skcipher_request_set_callback(skreq, aead_request_flags(req),
+ skcipher_request_set_callback(skreq, flags,
req->base.complete, req->base.data);
skcipher_request_set_crypt(skreq, src, dst,
req->cryptlen - authsize, req->iv);
diff --git a/crypto/cfb.c b/crypto/cfb.c
index 0d664dfb47bc..5c36b7b65e2a 100644
--- a/crypto/cfb.c
+++ b/crypto/cfb.c
@@ -1,4 +1,4 @@
-//SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0
/*
* CFB: Cipher FeedBack mode
*
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index fb07da9920ee..6056a990c9f2 100644
--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c
@@ -53,6 +53,7 @@ static void crypto_finalize_request(struct crypto_engine *engine,
dev_err(engine->dev, "failed to unprepare request\n");
}
}
+ lockdep_assert_in_softirq();
req->complete(req, err);
kthread_queue_work(engine->kworker, &engine->pump_requests);
diff --git a/crypto/dh.c b/crypto/dh.c
index 27e62a2a8027..4406aeb1ff61 100644
--- a/crypto/dh.c
+++ b/crypto/dh.c
@@ -10,11 +10,11 @@
#include <crypto/internal/kpp.h>
#include <crypto/kpp.h>
#include <crypto/dh.h>
+#include <crypto/rng.h>
#include <linux/mpi.h>
struct dh_ctx {
MPI p; /* Value is guaranteed to be set. */
- MPI q; /* Value is optional. */
MPI g; /* Value is guaranteed to be set. */
MPI xa; /* Value is guaranteed to be set. */
};
@@ -22,7 +22,6 @@ struct dh_ctx {
static void dh_clear_ctx(struct dh_ctx *ctx)
{
mpi_free(ctx->p);
- mpi_free(ctx->q);
mpi_free(ctx->g);
mpi_free(ctx->xa);
memset(ctx, 0, sizeof(*ctx));
@@ -62,12 +61,6 @@ static int dh_set_params(struct dh_ctx *ctx, struct dh *params)
if (!ctx->p)
return -EINVAL;
- if (params->q && params->q_size) {
- ctx->q = mpi_read_raw_data(params->q, params->q_size);
- if (!ctx->q)
- return -EINVAL;
- }
-
ctx->g = mpi_read_raw_data(params->g, params->g_size);
if (!ctx->g)
return -EINVAL;
@@ -104,11 +97,12 @@ err_clear_ctx:
/*
* SP800-56A public key verification:
*
- * * If Q is provided as part of the domain paramenters, a full validation
- * according to SP800-56A section 5.6.2.3.1 is performed.
+ * * For the safe-prime groups in FIPS mode, Q can be computed
+ * trivially from P and a full validation according to SP800-56A
+ * section 5.6.2.3.1 is performed.
*
- * * If Q is not provided, a partial validation according to SP800-56A section
- * 5.6.2.3.2 is performed.
+ * * For all other sets of group parameters, only a partial validation
+ * according to SP800-56A section 5.6.2.3.2 is performed.
*/
static int dh_is_pubkey_valid(struct dh_ctx *ctx, MPI y)
{
@@ -119,21 +113,40 @@ static int dh_is_pubkey_valid(struct dh_ctx *ctx, MPI y)
* Step 1: Verify that 2 <= y <= p - 2.
*
* The upper limit check is actually y < p instead of y < p - 1
- * as the mpi_sub_ui function is yet missing.
+ * in order to save one mpi_sub_ui() invocation here. Note that
+ * p - 1 is the non-trivial element of the subgroup of order 2 and
+ * thus, the check on y^q below would fail if y == p - 1.
*/
if (mpi_cmp_ui(y, 1) < 1 || mpi_cmp(y, ctx->p) >= 0)
return -EINVAL;
- /* Step 2: Verify that 1 = y^q mod p */
- if (ctx->q) {
- MPI val = mpi_alloc(0);
+ /*
+ * Step 2: Verify that 1 = y^q mod p
+ *
+ * For the safe-prime groups q = (p - 1)/2.
+ */
+ if (fips_enabled) {
+ MPI val, q;
int ret;
+ val = mpi_alloc(0);
if (!val)
return -ENOMEM;
- ret = mpi_powm(val, y, ctx->q, ctx->p);
+ q = mpi_alloc(mpi_get_nlimbs(ctx->p));
+ if (!q) {
+ mpi_free(val);
+ return -ENOMEM;
+ }
+
+ /*
+ * ->p is odd, so no need to explicitly subtract one
+ * from it before shifting to the right.
+ */
+ mpi_rshift(q, ctx->p, 1);
+ ret = mpi_powm(val, y, q, ctx->p);
+ mpi_free(q);
if (ret) {
mpi_free(val);
return ret;
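
For a safe prime p = 2q + 1, the squares modulo p form the subgroup of order q, and y^q == 1 (mod p) holds exactly for its members, so this check performs the full SP800-56A subgroup validation without a stored Q. The comment's remark about y == p - 1 follows because q is odd:

  (p - 1)^q == (-1)^q == -1 (mod p)

hence the cheaper upper bound y < p is safe: y = p - 1 would fail the y^q test anyway.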
@@ -263,13 +276,645 @@ static struct kpp_alg dh = {
},
};
+
+struct dh_safe_prime {
+ unsigned int max_strength;
+ unsigned int p_size;
+ const char *p;
+};
+
+static const char safe_prime_g[] = { 2 };
+
+struct dh_safe_prime_instance_ctx {
+ struct crypto_kpp_spawn dh_spawn;
+ const struct dh_safe_prime *safe_prime;
+};
+
+struct dh_safe_prime_tfm_ctx {
+ struct crypto_kpp *dh_tfm;
+};
+
+static void dh_safe_prime_free_instance(struct kpp_instance *inst)
+{
+ struct dh_safe_prime_instance_ctx *ctx = kpp_instance_ctx(inst);
+
+ crypto_drop_kpp(&ctx->dh_spawn);
+ kfree(inst);
+}
+
+static inline struct dh_safe_prime_instance_ctx *dh_safe_prime_instance_ctx(
+ struct crypto_kpp *tfm)
+{
+ return kpp_instance_ctx(kpp_alg_instance(tfm));
+}
+
+static int dh_safe_prime_init_tfm(struct crypto_kpp *tfm)
+{
+ struct dh_safe_prime_instance_ctx *inst_ctx =
+ dh_safe_prime_instance_ctx(tfm);
+ struct dh_safe_prime_tfm_ctx *tfm_ctx = kpp_tfm_ctx(tfm);
+
+ tfm_ctx->dh_tfm = crypto_spawn_kpp(&inst_ctx->dh_spawn);
+ if (IS_ERR(tfm_ctx->dh_tfm))
+ return PTR_ERR(tfm_ctx->dh_tfm);
+
+ return 0;
+}
+
+static void dh_safe_prime_exit_tfm(struct crypto_kpp *tfm)
+{
+ struct dh_safe_prime_tfm_ctx *tfm_ctx = kpp_tfm_ctx(tfm);
+
+ crypto_free_kpp(tfm_ctx->dh_tfm);
+}
+
+static u64 __add_u64_to_be(__be64 *dst, unsigned int n, u64 val)
+{
+ unsigned int i;
+
+ for (i = n; val && i > 0; --i) {
+ u64 tmp = be64_to_cpu(dst[i - 1]);
+
+ tmp += val;
+ val = tmp >= val ? 0 : 1;
+ dst[i - 1] = cpu_to_be64(tmp);
+ }
+
+ return val;
+}
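
__add_u64_to_be() adds val into the n-limb big-endian integer at dst, carrying towards the most significant limb and returning whatever carries out; the wrap test works because an unsigned sum is smaller than an addend iff the addition overflowed. A two-limb example, values abbreviated:

  /* dst = { 0x..01, 0xff..ff }, val = 1:
   *   i = 2: 0xff..ff + 1 wraps to 0, tmp < val -> carry, val = 1
   *   i = 1: 0x..01 + 1 = 2, no wrap            -> val = 0
   * dst = { 0x..02, 0x..00 }, return value 0 (no carry out).
   */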
+
+static void *dh_safe_prime_gen_privkey(const struct dh_safe_prime *safe_prime,
+ unsigned int *key_size)
+{
+ unsigned int n, oversampling_size;
+ __be64 *key;
+ int err;
+ u64 h, o;
+
+ /*
+ * Generate a private key following NIST SP800-56Ar3,
+ * sec. 5.6.1.1.1 and 5.6.1.1.3, respectively.
+ *
+ * 5.6.1.1.1: choose key length N such that
+ * 2 * ->max_strength <= N <= log2(q) + 1 = ->p_size * 8 - 1
+ * with q = (p - 1) / 2 for the safe-prime groups.
+ * Choose the lower bound's next power of two for N in order to
+ * avoid excessively large private keys while still
+ * maintaining some extra reserve beyond the bare minimum in
+ * most cases. Note that for each entry in safe_prime_groups[],
+ * the following holds for such N:
+ * - N >= 256, in particular it is a multiple of 2^6 = 64
+ * bits and
+ * - N < log2(q) + 1, i.e. N respects the upper bound.
+ */
+ n = roundup_pow_of_two(2 * safe_prime->max_strength);
+ WARN_ON_ONCE(n & ((1u << 6) - 1));
+ n >>= 6; /* Convert N into units of u64. */
+
+ /*
+ * Reserve one extra u64 to hold the extra random bits
+ * required as per 5.6.1.1.3.
+ */
+ oversampling_size = (n + 1) * sizeof(__be64);
+ key = kmalloc(oversampling_size, GFP_KERNEL);
+ if (!key)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * 5.6.1.1.3, step 3 (and implicitly step 4): obtain N + 64
+ * random bits and interpret them as a big endian integer.
+ */
+ err = -EFAULT;
+ if (crypto_get_default_rng())
+ goto out_err;
+
+ err = crypto_rng_get_bytes(crypto_default_rng, (u8 *)key,
+ oversampling_size);
+ crypto_put_default_rng();
+ if (err)
+ goto out_err;
+
+ /*
+ * 5.6.1.1.3, step 5 is implicit: 2^N < q and thus,
+ * M = min(2^N, q) = 2^N.
+ *
+ * For step 6, calculate
+ * key = (key[] mod (M - 1)) + 1 = (key[] mod (2^N - 1)) + 1.
+ *
+ * In order to avoid expensive divisions, note that
+ * 2^N mod (2^N - 1) = 1 and thus, for any integer h,
+ * 2^N * h mod (2^N - 1) = h mod (2^N - 1) always holds.
+ * The big endian integer key[] composed of n + 1 64bit words
+ * may be written as key[] = h * 2^N + l, with h = key[0]
+ * representing the 64 most significant bits and l
+ * corresponding to the remaining 2^N bits. With the remark
+ * from above,
+ * h * 2^N + l mod (2^N - 1) = l + h mod (2^N - 1).
+ * As both, l and h are less than 2^N, their sum after
+ * this first reduction is guaranteed to be <= 2^(N + 1) - 2.
+ * Or equivalently, that their sum can again be written as
+ * h' * 2^N + l' with h' now either zero or one and if one,
+ * then l' <= 2^N - 2. Thus, all bits at positions >= N will
+ * be zero after a second reduction:
+ * h' * 2^N + l' mod (2^N - 1) = l' + h' mod (2^N - 1).
+ * At this point, it is still possible that
+ * l' + h' = 2^N - 1, i.e. that l' + h' mod (2^N - 1)
+ * is zero. This condition will be detected below by means of
+ * the final increment overflowing in this case.
+ */
+ h = be64_to_cpu(key[0]);
+ h = __add_u64_to_be(key + 1, n, h);
+ h = __add_u64_to_be(key + 1, n, h);
+ WARN_ON_ONCE(h);
+
+ /* Increment to obtain the final result. */
+ o = __add_u64_to_be(key + 1, n, 1);
+ /*
+ * The overflow bit o from the increment is either zero or
+ * one. If zero, key[1:n] holds the final result in big-endian
+ * order. If one, key[1:n] is zero now, but needs to be set to
+ * one, c.f. above.
+ */
+ if (o)
+ key[n] = cpu_to_be64(1);
+
+ /* n is in units of u64, convert to bytes. */
+ *key_size = n << 3;
+ /* Strip the leading extra __be64, which is (virtually) zero by now. */
+ memmove(key, &key[1], *key_size);
+
+ return key;
+
+out_err:
+ kfree_sensitive(key);
+ return ERR_PTR(err);
+}
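
The double fold plus increment computes (key mod (2^N - 1)) + 1 without a division. A toy instance with N = 8 (illustrative only; the real N is >= 256): take key = h * 2^8 + l with h = 2 and l = 0xfe, i.e. key = 766. The first fold gives l + h = 0x100; folding again gives 0 + 1 = 1; the increment yields 2, and indeed (766 mod 255) + 1 = 1 + 1 = 2.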
+
+static int dh_safe_prime_set_secret(struct crypto_kpp *tfm, const void *buffer,
+ unsigned int len)
+{
+ struct dh_safe_prime_instance_ctx *inst_ctx =
+ dh_safe_prime_instance_ctx(tfm);
+ struct dh_safe_prime_tfm_ctx *tfm_ctx = kpp_tfm_ctx(tfm);
+ struct dh params = {};
+ void *buf = NULL, *key = NULL;
+ unsigned int buf_size;
+ int err;
+
+ if (buffer) {
+ err = __crypto_dh_decode_key(buffer, len, &params);
+ if (err)
+ return err;
+ if (params.p_size || params.g_size)
+ return -EINVAL;
+ }
+
+ params.p = inst_ctx->safe_prime->p;
+ params.p_size = inst_ctx->safe_prime->p_size;
+ params.g = safe_prime_g;
+ params.g_size = sizeof(safe_prime_g);
+
+ if (!params.key_size) {
+ key = dh_safe_prime_gen_privkey(inst_ctx->safe_prime,
+ &params.key_size);
+ if (IS_ERR(key))
+ return PTR_ERR(key);
+ params.key = key;
+ }
+
+ buf_size = crypto_dh_key_len(&params);
+ buf = kmalloc(buf_size, GFP_KERNEL);
+ if (!buf) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = crypto_dh_encode_key(buf, buf_size, &params);
+ if (err)
+ goto out;
+
+ err = crypto_kpp_set_secret(tfm_ctx->dh_tfm, buf, buf_size);
+out:
+ kfree_sensitive(buf);
+ kfree_sensitive(key);
+ return err;
+}
+
+static void dh_safe_prime_complete_req(struct crypto_async_request *dh_req,
+ int err)
+{
+ struct kpp_request *req = dh_req->data;
+
+ kpp_request_complete(req, err);
+}
+
+static struct kpp_request *dh_safe_prime_prepare_dh_req(struct kpp_request *req)
+{
+ struct dh_safe_prime_tfm_ctx *tfm_ctx =
+ kpp_tfm_ctx(crypto_kpp_reqtfm(req));
+ struct kpp_request *dh_req = kpp_request_ctx(req);
+
+ kpp_request_set_tfm(dh_req, tfm_ctx->dh_tfm);
+ kpp_request_set_callback(dh_req, req->base.flags,
+ dh_safe_prime_complete_req, req);
+
+ kpp_request_set_input(dh_req, req->src, req->src_len);
+ kpp_request_set_output(dh_req, req->dst, req->dst_len);
+
+ return dh_req;
+}
+
+static int dh_safe_prime_generate_public_key(struct kpp_request *req)
+{
+ struct kpp_request *dh_req = dh_safe_prime_prepare_dh_req(req);
+
+ return crypto_kpp_generate_public_key(dh_req);
+}
+
+static int dh_safe_prime_compute_shared_secret(struct kpp_request *req)
+{
+ struct kpp_request *dh_req = dh_safe_prime_prepare_dh_req(req);
+
+ return crypto_kpp_compute_shared_secret(dh_req);
+}
+
+static unsigned int dh_safe_prime_max_size(struct crypto_kpp *tfm)
+{
+ struct dh_safe_prime_tfm_ctx *tfm_ctx = kpp_tfm_ctx(tfm);
+
+ return crypto_kpp_maxsize(tfm_ctx->dh_tfm);
+}
+
+static int __maybe_unused __dh_safe_prime_create(
+ struct crypto_template *tmpl, struct rtattr **tb,
+ const struct dh_safe_prime *safe_prime)
+{
+ struct kpp_instance *inst;
+ struct dh_safe_prime_instance_ctx *ctx;
+ const char *dh_name;
+ struct kpp_alg *dh_alg;
+ u32 mask;
+ int err;
+
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_KPP, &mask);
+ if (err)
+ return err;
+
+ dh_name = crypto_attr_alg_name(tb[1]);
+ if (IS_ERR(dh_name))
+ return PTR_ERR(dh_name);
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ ctx = kpp_instance_ctx(inst);
+
+ err = crypto_grab_kpp(&ctx->dh_spawn, kpp_crypto_instance(inst),
+ dh_name, 0, mask);
+ if (err)
+ goto err_free_inst;
+
+ err = -EINVAL;
+ dh_alg = crypto_spawn_kpp_alg(&ctx->dh_spawn);
+ if (strcmp(dh_alg->base.cra_name, "dh"))
+ goto err_free_inst;
+
+ ctx->safe_prime = safe_prime;
+
+ err = crypto_inst_setname(kpp_crypto_instance(inst),
+ tmpl->name, &dh_alg->base);
+ if (err)
+ goto err_free_inst;
+
+ inst->alg.set_secret = dh_safe_prime_set_secret;
+ inst->alg.generate_public_key = dh_safe_prime_generate_public_key;
+ inst->alg.compute_shared_secret = dh_safe_prime_compute_shared_secret;
+ inst->alg.max_size = dh_safe_prime_max_size;
+ inst->alg.init = dh_safe_prime_init_tfm;
+ inst->alg.exit = dh_safe_prime_exit_tfm;
+ inst->alg.reqsize = sizeof(struct kpp_request) + dh_alg->reqsize;
+ inst->alg.base.cra_priority = dh_alg->base.cra_priority;
+ inst->alg.base.cra_module = THIS_MODULE;
+ inst->alg.base.cra_ctxsize = sizeof(struct dh_safe_prime_tfm_ctx);
+
+ inst->free = dh_safe_prime_free_instance;
+
+ err = kpp_register_instance(tmpl, inst);
+ if (err)
+ goto err_free_inst;
+
+ return 0;
+
+err_free_inst:
+ dh_safe_prime_free_instance(inst);
+
+ return err;
+}
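
The templates created here are instantiated by name like any other. A hedged kernel-side sketch, with error handling abbreviated; crypto_alloc_kpp() is the standard kpp allocator:

  struct crypto_kpp *tfm;

  tfm = crypto_alloc_kpp("ffdhe2048(dh)", 0, 0);
  if (IS_ERR(tfm))
  	return PTR_ERR(tfm);

  /* An empty key passed to set_secret() now triggers the SP800-56A
   * private key generation above, against the fixed RFC 7919 group. */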
+
+#ifdef CONFIG_CRYPTO_DH_RFC7919_GROUPS
+
+static const struct dh_safe_prime ffdhe2048_prime = {
+ .max_strength = 112,
+ .p_size = 256,
+ .p =
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xad\xf8\x54\x58\xa2\xbb\x4a\x9a"
+ "\xaf\xdc\x56\x20\x27\x3d\x3c\xf1\xd8\xb9\xc5\x83\xce\x2d\x36\x95"
+ "\xa9\xe1\x36\x41\x14\x64\x33\xfb\xcc\x93\x9d\xce\x24\x9b\x3e\xf9"
+ "\x7d\x2f\xe3\x63\x63\x0c\x75\xd8\xf6\x81\xb2\x02\xae\xc4\x61\x7a"
+ "\xd3\xdf\x1e\xd5\xd5\xfd\x65\x61\x24\x33\xf5\x1f\x5f\x06\x6e\xd0"
+ "\x85\x63\x65\x55\x3d\xed\x1a\xf3\xb5\x57\x13\x5e\x7f\x57\xc9\x35"
+ "\x98\x4f\x0c\x70\xe0\xe6\x8b\x77\xe2\xa6\x89\xda\xf3\xef\xe8\x72"
+ "\x1d\xf1\x58\xa1\x36\xad\xe7\x35\x30\xac\xca\x4f\x48\x3a\x79\x7a"
+ "\xbc\x0a\xb1\x82\xb3\x24\xfb\x61\xd1\x08\xa9\x4b\xb2\xc8\xe3\xfb"
+ "\xb9\x6a\xda\xb7\x60\xd7\xf4\x68\x1d\x4f\x42\xa3\xde\x39\x4d\xf4"
+ "\xae\x56\xed\xe7\x63\x72\xbb\x19\x0b\x07\xa7\xc8\xee\x0a\x6d\x70"
+ "\x9e\x02\xfc\xe1\xcd\xf7\xe2\xec\xc0\x34\x04\xcd\x28\x34\x2f\x61"
+ "\x91\x72\xfe\x9c\xe9\x85\x83\xff\x8e\x4f\x12\x32\xee\xf2\x81\x83"
+ "\xc3\xfe\x3b\x1b\x4c\x6f\xad\x73\x3b\xb5\xfc\xbc\x2e\xc2\x20\x05"
+ "\xc5\x8e\xf1\x83\x7d\x16\x83\xb2\xc6\xf3\x4a\x26\xc1\xb2\xef\xfa"
+ "\x88\x6b\x42\x38\x61\x28\x5c\x97\xff\xff\xff\xff\xff\xff\xff\xff",
+};
+
+static const struct dh_safe_prime ffdhe3072_prime = {
+ .max_strength = 128,
+ .p_size = 384,
+ .p =
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xad\xf8\x54\x58\xa2\xbb\x4a\x9a"
+ "\xaf\xdc\x56\x20\x27\x3d\x3c\xf1\xd8\xb9\xc5\x83\xce\x2d\x36\x95"
+ "\xa9\xe1\x36\x41\x14\x64\x33\xfb\xcc\x93\x9d\xce\x24\x9b\x3e\xf9"
+ "\x7d\x2f\xe3\x63\x63\x0c\x75\xd8\xf6\x81\xb2\x02\xae\xc4\x61\x7a"
+ "\xd3\xdf\x1e\xd5\xd5\xfd\x65\x61\x24\x33\xf5\x1f\x5f\x06\x6e\xd0"
+ "\x85\x63\x65\x55\x3d\xed\x1a\xf3\xb5\x57\x13\x5e\x7f\x57\xc9\x35"
+ "\x98\x4f\x0c\x70\xe0\xe6\x8b\x77\xe2\xa6\x89\xda\xf3\xef\xe8\x72"
+ "\x1d\xf1\x58\xa1\x36\xad\xe7\x35\x30\xac\xca\x4f\x48\x3a\x79\x7a"
+ "\xbc\x0a\xb1\x82\xb3\x24\xfb\x61\xd1\x08\xa9\x4b\xb2\xc8\xe3\xfb"
+ "\xb9\x6a\xda\xb7\x60\xd7\xf4\x68\x1d\x4f\x42\xa3\xde\x39\x4d\xf4"
+ "\xae\x56\xed\xe7\x63\x72\xbb\x19\x0b\x07\xa7\xc8\xee\x0a\x6d\x70"
+ "\x9e\x02\xfc\xe1\xcd\xf7\xe2\xec\xc0\x34\x04\xcd\x28\x34\x2f\x61"
+ "\x91\x72\xfe\x9c\xe9\x85\x83\xff\x8e\x4f\x12\x32\xee\xf2\x81\x83"
+ "\xc3\xfe\x3b\x1b\x4c\x6f\xad\x73\x3b\xb5\xfc\xbc\x2e\xc2\x20\x05"
+ "\xc5\x8e\xf1\x83\x7d\x16\x83\xb2\xc6\xf3\x4a\x26\xc1\xb2\xef\xfa"
+ "\x88\x6b\x42\x38\x61\x1f\xcf\xdc\xde\x35\x5b\x3b\x65\x19\x03\x5b"
+ "\xbc\x34\xf4\xde\xf9\x9c\x02\x38\x61\xb4\x6f\xc9\xd6\xe6\xc9\x07"
+ "\x7a\xd9\x1d\x26\x91\xf7\xf7\xee\x59\x8c\xb0\xfa\xc1\x86\xd9\x1c"
+ "\xae\xfe\x13\x09\x85\x13\x92\x70\xb4\x13\x0c\x93\xbc\x43\x79\x44"
+ "\xf4\xfd\x44\x52\xe2\xd7\x4d\xd3\x64\xf2\xe2\x1e\x71\xf5\x4b\xff"
+ "\x5c\xae\x82\xab\x9c\x9d\xf6\x9e\xe8\x6d\x2b\xc5\x22\x36\x3a\x0d"
+ "\xab\xc5\x21\x97\x9b\x0d\xea\xda\x1d\xbf\x9a\x42\xd5\xc4\x48\x4e"
+ "\x0a\xbc\xd0\x6b\xfa\x53\xdd\xef\x3c\x1b\x20\xee\x3f\xd5\x9d\x7c"
+ "\x25\xe4\x1d\x2b\x66\xc6\x2e\x37\xff\xff\xff\xff\xff\xff\xff\xff",
+};
+
+static const struct dh_safe_prime ffdhe4096_prime = {
+ .max_strength = 152,
+ .p_size = 512,
+ .p =
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xad\xf8\x54\x58\xa2\xbb\x4a\x9a"
+ "\xaf\xdc\x56\x20\x27\x3d\x3c\xf1\xd8\xb9\xc5\x83\xce\x2d\x36\x95"
+ "\xa9\xe1\x36\x41\x14\x64\x33\xfb\xcc\x93\x9d\xce\x24\x9b\x3e\xf9"
+ "\x7d\x2f\xe3\x63\x63\x0c\x75\xd8\xf6\x81\xb2\x02\xae\xc4\x61\x7a"
+ "\xd3\xdf\x1e\xd5\xd5\xfd\x65\x61\x24\x33\xf5\x1f\x5f\x06\x6e\xd0"
+ "\x85\x63\x65\x55\x3d\xed\x1a\xf3\xb5\x57\x13\x5e\x7f\x57\xc9\x35"
+ "\x98\x4f\x0c\x70\xe0\xe6\x8b\x77\xe2\xa6\x89\xda\xf3\xef\xe8\x72"
+ "\x1d\xf1\x58\xa1\x36\xad\xe7\x35\x30\xac\xca\x4f\x48\x3a\x79\x7a"
+ "\xbc\x0a\xb1\x82\xb3\x24\xfb\x61\xd1\x08\xa9\x4b\xb2\xc8\xe3\xfb"
+ "\xb9\x6a\xda\xb7\x60\xd7\xf4\x68\x1d\x4f\x42\xa3\xde\x39\x4d\xf4"
+ "\xae\x56\xed\xe7\x63\x72\xbb\x19\x0b\x07\xa7\xc8\xee\x0a\x6d\x70"
+ "\x9e\x02\xfc\xe1\xcd\xf7\xe2\xec\xc0\x34\x04\xcd\x28\x34\x2f\x61"
+ "\x91\x72\xfe\x9c\xe9\x85\x83\xff\x8e\x4f\x12\x32\xee\xf2\x81\x83"
+ "\xc3\xfe\x3b\x1b\x4c\x6f\xad\x73\x3b\xb5\xfc\xbc\x2e\xc2\x20\x05"
+ "\xc5\x8e\xf1\x83\x7d\x16\x83\xb2\xc6\xf3\x4a\x26\xc1\xb2\xef\xfa"
+ "\x88\x6b\x42\x38\x61\x1f\xcf\xdc\xde\x35\x5b\x3b\x65\x19\x03\x5b"
+ "\xbc\x34\xf4\xde\xf9\x9c\x02\x38\x61\xb4\x6f\xc9\xd6\xe6\xc9\x07"
+ "\x7a\xd9\x1d\x26\x91\xf7\xf7\xee\x59\x8c\xb0\xfa\xc1\x86\xd9\x1c"
+ "\xae\xfe\x13\x09\x85\x13\x92\x70\xb4\x13\x0c\x93\xbc\x43\x79\x44"
+ "\xf4\xfd\x44\x52\xe2\xd7\x4d\xd3\x64\xf2\xe2\x1e\x71\xf5\x4b\xff"
+ "\x5c\xae\x82\xab\x9c\x9d\xf6\x9e\xe8\x6d\x2b\xc5\x22\x36\x3a\x0d"
+ "\xab\xc5\x21\x97\x9b\x0d\xea\xda\x1d\xbf\x9a\x42\xd5\xc4\x48\x4e"
+ "\x0a\xbc\xd0\x6b\xfa\x53\xdd\xef\x3c\x1b\x20\xee\x3f\xd5\x9d\x7c"
+ "\x25\xe4\x1d\x2b\x66\x9e\x1e\xf1\x6e\x6f\x52\xc3\x16\x4d\xf4\xfb"
+ "\x79\x30\xe9\xe4\xe5\x88\x57\xb6\xac\x7d\x5f\x42\xd6\x9f\x6d\x18"
+ "\x77\x63\xcf\x1d\x55\x03\x40\x04\x87\xf5\x5b\xa5\x7e\x31\xcc\x7a"
+ "\x71\x35\xc8\x86\xef\xb4\x31\x8a\xed\x6a\x1e\x01\x2d\x9e\x68\x32"
+ "\xa9\x07\x60\x0a\x91\x81\x30\xc4\x6d\xc7\x78\xf9\x71\xad\x00\x38"
+ "\x09\x29\x99\xa3\x33\xcb\x8b\x7a\x1a\x1d\xb9\x3d\x71\x40\x00\x3c"
+ "\x2a\x4e\xce\xa9\xf9\x8d\x0a\xcc\x0a\x82\x91\xcd\xce\xc9\x7d\xcf"
+ "\x8e\xc9\xb5\x5a\x7f\x88\xa4\x6b\x4d\xb5\xa8\x51\xf4\x41\x82\xe1"
+ "\xc6\x8a\x00\x7e\x5e\x65\x5f\x6a\xff\xff\xff\xff\xff\xff\xff\xff",
+};
+
+static const struct dh_safe_prime ffdhe6144_prime = {
+ .max_strength = 176,
+ .p_size = 768,
+ .p =
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xad\xf8\x54\x58\xa2\xbb\x4a\x9a"
+ "\xaf\xdc\x56\x20\x27\x3d\x3c\xf1\xd8\xb9\xc5\x83\xce\x2d\x36\x95"
+ "\xa9\xe1\x36\x41\x14\x64\x33\xfb\xcc\x93\x9d\xce\x24\x9b\x3e\xf9"
+ "\x7d\x2f\xe3\x63\x63\x0c\x75\xd8\xf6\x81\xb2\x02\xae\xc4\x61\x7a"
+ "\xd3\xdf\x1e\xd5\xd5\xfd\x65\x61\x24\x33\xf5\x1f\x5f\x06\x6e\xd0"
+ "\x85\x63\x65\x55\x3d\xed\x1a\xf3\xb5\x57\x13\x5e\x7f\x57\xc9\x35"
+ "\x98\x4f\x0c\x70\xe0\xe6\x8b\x77\xe2\xa6\x89\xda\xf3\xef\xe8\x72"
+ "\x1d\xf1\x58\xa1\x36\xad\xe7\x35\x30\xac\xca\x4f\x48\x3a\x79\x7a"
+ "\xbc\x0a\xb1\x82\xb3\x24\xfb\x61\xd1\x08\xa9\x4b\xb2\xc8\xe3\xfb"
+ "\xb9\x6a\xda\xb7\x60\xd7\xf4\x68\x1d\x4f\x42\xa3\xde\x39\x4d\xf4"
+ "\xae\x56\xed\xe7\x63\x72\xbb\x19\x0b\x07\xa7\xc8\xee\x0a\x6d\x70"
+ "\x9e\x02\xfc\xe1\xcd\xf7\xe2\xec\xc0\x34\x04\xcd\x28\x34\x2f\x61"
+ "\x91\x72\xfe\x9c\xe9\x85\x83\xff\x8e\x4f\x12\x32\xee\xf2\x81\x83"
+ "\xc3\xfe\x3b\x1b\x4c\x6f\xad\x73\x3b\xb5\xfc\xbc\x2e\xc2\x20\x05"
+ "\xc5\x8e\xf1\x83\x7d\x16\x83\xb2\xc6\xf3\x4a\x26\xc1\xb2\xef\xfa"
+ "\x88\x6b\x42\x38\x61\x1f\xcf\xdc\xde\x35\x5b\x3b\x65\x19\x03\x5b"
+ "\xbc\x34\xf4\xde\xf9\x9c\x02\x38\x61\xb4\x6f\xc9\xd6\xe6\xc9\x07"
+ "\x7a\xd9\x1d\x26\x91\xf7\xf7\xee\x59\x8c\xb0\xfa\xc1\x86\xd9\x1c"
+ "\xae\xfe\x13\x09\x85\x13\x92\x70\xb4\x13\x0c\x93\xbc\x43\x79\x44"
+ "\xf4\xfd\x44\x52\xe2\xd7\x4d\xd3\x64\xf2\xe2\x1e\x71\xf5\x4b\xff"
+ "\x5c\xae\x82\xab\x9c\x9d\xf6\x9e\xe8\x6d\x2b\xc5\x22\x36\x3a\x0d"
+ "\xab\xc5\x21\x97\x9b\x0d\xea\xda\x1d\xbf\x9a\x42\xd5\xc4\x48\x4e"
+ "\x0a\xbc\xd0\x6b\xfa\x53\xdd\xef\x3c\x1b\x20\xee\x3f\xd5\x9d\x7c"
+ "\x25\xe4\x1d\x2b\x66\x9e\x1e\xf1\x6e\x6f\x52\xc3\x16\x4d\xf4\xfb"
+ "\x79\x30\xe9\xe4\xe5\x88\x57\xb6\xac\x7d\x5f\x42\xd6\x9f\x6d\x18"
+ "\x77\x63\xcf\x1d\x55\x03\x40\x04\x87\xf5\x5b\xa5\x7e\x31\xcc\x7a"
+ "\x71\x35\xc8\x86\xef\xb4\x31\x8a\xed\x6a\x1e\x01\x2d\x9e\x68\x32"
+ "\xa9\x07\x60\x0a\x91\x81\x30\xc4\x6d\xc7\x78\xf9\x71\xad\x00\x38"
+ "\x09\x29\x99\xa3\x33\xcb\x8b\x7a\x1a\x1d\xb9\x3d\x71\x40\x00\x3c"
+ "\x2a\x4e\xce\xa9\xf9\x8d\x0a\xcc\x0a\x82\x91\xcd\xce\xc9\x7d\xcf"
+ "\x8e\xc9\xb5\x5a\x7f\x88\xa4\x6b\x4d\xb5\xa8\x51\xf4\x41\x82\xe1"
+ "\xc6\x8a\x00\x7e\x5e\x0d\xd9\x02\x0b\xfd\x64\xb6\x45\x03\x6c\x7a"
+ "\x4e\x67\x7d\x2c\x38\x53\x2a\x3a\x23\xba\x44\x42\xca\xf5\x3e\xa6"
+ "\x3b\xb4\x54\x32\x9b\x76\x24\xc8\x91\x7b\xdd\x64\xb1\xc0\xfd\x4c"
+ "\xb3\x8e\x8c\x33\x4c\x70\x1c\x3a\xcd\xad\x06\x57\xfc\xcf\xec\x71"
+ "\x9b\x1f\x5c\x3e\x4e\x46\x04\x1f\x38\x81\x47\xfb\x4c\xfd\xb4\x77"
+ "\xa5\x24\x71\xf7\xa9\xa9\x69\x10\xb8\x55\x32\x2e\xdb\x63\x40\xd8"
+ "\xa0\x0e\xf0\x92\x35\x05\x11\xe3\x0a\xbe\xc1\xff\xf9\xe3\xa2\x6e"
+ "\x7f\xb2\x9f\x8c\x18\x30\x23\xc3\x58\x7e\x38\xda\x00\x77\xd9\xb4"
+ "\x76\x3e\x4e\x4b\x94\xb2\xbb\xc1\x94\xc6\x65\x1e\x77\xca\xf9\x92"
+ "\xee\xaa\xc0\x23\x2a\x28\x1b\xf6\xb3\xa7\x39\xc1\x22\x61\x16\x82"
+ "\x0a\xe8\xdb\x58\x47\xa6\x7c\xbe\xf9\xc9\x09\x1b\x46\x2d\x53\x8c"
+ "\xd7\x2b\x03\x74\x6a\xe7\x7f\x5e\x62\x29\x2c\x31\x15\x62\xa8\x46"
+ "\x50\x5d\xc8\x2d\xb8\x54\x33\x8a\xe4\x9f\x52\x35\xc9\x5b\x91\x17"
+ "\x8c\xcf\x2d\xd5\xca\xce\xf4\x03\xec\x9d\x18\x10\xc6\x27\x2b\x04"
+ "\x5b\x3b\x71\xf9\xdc\x6b\x80\xd6\x3f\xdd\x4a\x8e\x9a\xdb\x1e\x69"
+ "\x62\xa6\x95\x26\xd4\x31\x61\xc1\xa4\x1d\x57\x0d\x79\x38\xda\xd4"
+ "\xa4\x0e\x32\x9c\xd0\xe4\x0e\x65\xff\xff\xff\xff\xff\xff\xff\xff",
+};
+
+static const struct dh_safe_prime ffdhe8192_prime = {
+ .max_strength = 200,
+ .p_size = 1024,
+ .p =
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xad\xf8\x54\x58\xa2\xbb\x4a\x9a"
+ "\xaf\xdc\x56\x20\x27\x3d\x3c\xf1\xd8\xb9\xc5\x83\xce\x2d\x36\x95"
+ "\xa9\xe1\x36\x41\x14\x64\x33\xfb\xcc\x93\x9d\xce\x24\x9b\x3e\xf9"
+ "\x7d\x2f\xe3\x63\x63\x0c\x75\xd8\xf6\x81\xb2\x02\xae\xc4\x61\x7a"
+ "\xd3\xdf\x1e\xd5\xd5\xfd\x65\x61\x24\x33\xf5\x1f\x5f\x06\x6e\xd0"
+ "\x85\x63\x65\x55\x3d\xed\x1a\xf3\xb5\x57\x13\x5e\x7f\x57\xc9\x35"
+ "\x98\x4f\x0c\x70\xe0\xe6\x8b\x77\xe2\xa6\x89\xda\xf3\xef\xe8\x72"
+ "\x1d\xf1\x58\xa1\x36\xad\xe7\x35\x30\xac\xca\x4f\x48\x3a\x79\x7a"
+ "\xbc\x0a\xb1\x82\xb3\x24\xfb\x61\xd1\x08\xa9\x4b\xb2\xc8\xe3\xfb"
+ "\xb9\x6a\xda\xb7\x60\xd7\xf4\x68\x1d\x4f\x42\xa3\xde\x39\x4d\xf4"
+ "\xae\x56\xed\xe7\x63\x72\xbb\x19\x0b\x07\xa7\xc8\xee\x0a\x6d\x70"
+ "\x9e\x02\xfc\xe1\xcd\xf7\xe2\xec\xc0\x34\x04\xcd\x28\x34\x2f\x61"
+ "\x91\x72\xfe\x9c\xe9\x85\x83\xff\x8e\x4f\x12\x32\xee\xf2\x81\x83"
+ "\xc3\xfe\x3b\x1b\x4c\x6f\xad\x73\x3b\xb5\xfc\xbc\x2e\xc2\x20\x05"
+ "\xc5\x8e\xf1\x83\x7d\x16\x83\xb2\xc6\xf3\x4a\x26\xc1\xb2\xef\xfa"
+ "\x88\x6b\x42\x38\x61\x1f\xcf\xdc\xde\x35\x5b\x3b\x65\x19\x03\x5b"
+ "\xbc\x34\xf4\xde\xf9\x9c\x02\x38\x61\xb4\x6f\xc9\xd6\xe6\xc9\x07"
+ "\x7a\xd9\x1d\x26\x91\xf7\xf7\xee\x59\x8c\xb0\xfa\xc1\x86\xd9\x1c"
+ "\xae\xfe\x13\x09\x85\x13\x92\x70\xb4\x13\x0c\x93\xbc\x43\x79\x44"
+ "\xf4\xfd\x44\x52\xe2\xd7\x4d\xd3\x64\xf2\xe2\x1e\x71\xf5\x4b\xff"
+ "\x5c\xae\x82\xab\x9c\x9d\xf6\x9e\xe8\x6d\x2b\xc5\x22\x36\x3a\x0d"
+ "\xab\xc5\x21\x97\x9b\x0d\xea\xda\x1d\xbf\x9a\x42\xd5\xc4\x48\x4e"
+ "\x0a\xbc\xd0\x6b\xfa\x53\xdd\xef\x3c\x1b\x20\xee\x3f\xd5\x9d\x7c"
+ "\x25\xe4\x1d\x2b\x66\x9e\x1e\xf1\x6e\x6f\x52\xc3\x16\x4d\xf4\xfb"
+ "\x79\x30\xe9\xe4\xe5\x88\x57\xb6\xac\x7d\x5f\x42\xd6\x9f\x6d\x18"
+ "\x77\x63\xcf\x1d\x55\x03\x40\x04\x87\xf5\x5b\xa5\x7e\x31\xcc\x7a"
+ "\x71\x35\xc8\x86\xef\xb4\x31\x8a\xed\x6a\x1e\x01\x2d\x9e\x68\x32"
+ "\xa9\x07\x60\x0a\x91\x81\x30\xc4\x6d\xc7\x78\xf9\x71\xad\x00\x38"
+ "\x09\x29\x99\xa3\x33\xcb\x8b\x7a\x1a\x1d\xb9\x3d\x71\x40\x00\x3c"
+ "\x2a\x4e\xce\xa9\xf9\x8d\x0a\xcc\x0a\x82\x91\xcd\xce\xc9\x7d\xcf"
+ "\x8e\xc9\xb5\x5a\x7f\x88\xa4\x6b\x4d\xb5\xa8\x51\xf4\x41\x82\xe1"
+ "\xc6\x8a\x00\x7e\x5e\x0d\xd9\x02\x0b\xfd\x64\xb6\x45\x03\x6c\x7a"
+ "\x4e\x67\x7d\x2c\x38\x53\x2a\x3a\x23\xba\x44\x42\xca\xf5\x3e\xa6"
+ "\x3b\xb4\x54\x32\x9b\x76\x24\xc8\x91\x7b\xdd\x64\xb1\xc0\xfd\x4c"
+ "\xb3\x8e\x8c\x33\x4c\x70\x1c\x3a\xcd\xad\x06\x57\xfc\xcf\xec\x71"
+ "\x9b\x1f\x5c\x3e\x4e\x46\x04\x1f\x38\x81\x47\xfb\x4c\xfd\xb4\x77"
+ "\xa5\x24\x71\xf7\xa9\xa9\x69\x10\xb8\x55\x32\x2e\xdb\x63\x40\xd8"
+ "\xa0\x0e\xf0\x92\x35\x05\x11\xe3\x0a\xbe\xc1\xff\xf9\xe3\xa2\x6e"
+ "\x7f\xb2\x9f\x8c\x18\x30\x23\xc3\x58\x7e\x38\xda\x00\x77\xd9\xb4"
+ "\x76\x3e\x4e\x4b\x94\xb2\xbb\xc1\x94\xc6\x65\x1e\x77\xca\xf9\x92"
+ "\xee\xaa\xc0\x23\x2a\x28\x1b\xf6\xb3\xa7\x39\xc1\x22\x61\x16\x82"
+ "\x0a\xe8\xdb\x58\x47\xa6\x7c\xbe\xf9\xc9\x09\x1b\x46\x2d\x53\x8c"
+ "\xd7\x2b\x03\x74\x6a\xe7\x7f\x5e\x62\x29\x2c\x31\x15\x62\xa8\x46"
+ "\x50\x5d\xc8\x2d\xb8\x54\x33\x8a\xe4\x9f\x52\x35\xc9\x5b\x91\x17"
+ "\x8c\xcf\x2d\xd5\xca\xce\xf4\x03\xec\x9d\x18\x10\xc6\x27\x2b\x04"
+ "\x5b\x3b\x71\xf9\xdc\x6b\x80\xd6\x3f\xdd\x4a\x8e\x9a\xdb\x1e\x69"
+ "\x62\xa6\x95\x26\xd4\x31\x61\xc1\xa4\x1d\x57\x0d\x79\x38\xda\xd4"
+ "\xa4\x0e\x32\x9c\xcf\xf4\x6a\xaa\x36\xad\x00\x4c\xf6\x00\xc8\x38"
+ "\x1e\x42\x5a\x31\xd9\x51\xae\x64\xfd\xb2\x3f\xce\xc9\x50\x9d\x43"
+ "\x68\x7f\xeb\x69\xed\xd1\xcc\x5e\x0b\x8c\xc3\xbd\xf6\x4b\x10\xef"
+ "\x86\xb6\x31\x42\xa3\xab\x88\x29\x55\x5b\x2f\x74\x7c\x93\x26\x65"
+ "\xcb\x2c\x0f\x1c\xc0\x1b\xd7\x02\x29\x38\x88\x39\xd2\xaf\x05\xe4"
+ "\x54\x50\x4a\xc7\x8b\x75\x82\x82\x28\x46\xc0\xba\x35\xc3\x5f\x5c"
+ "\x59\x16\x0c\xc0\x46\xfd\x82\x51\x54\x1f\xc6\x8c\x9c\x86\xb0\x22"
+ "\xbb\x70\x99\x87\x6a\x46\x0e\x74\x51\xa8\xa9\x31\x09\x70\x3f\xee"
+ "\x1c\x21\x7e\x6c\x38\x26\xe5\x2c\x51\xaa\x69\x1e\x0e\x42\x3c\xfc"
+ "\x99\xe9\xe3\x16\x50\xc1\x21\x7b\x62\x48\x16\xcd\xad\x9a\x95\xf9"
+ "\xd5\xb8\x01\x94\x88\xd9\xc0\xa0\xa1\xfe\x30\x75\xa5\x77\xe2\x31"
+ "\x83\xf8\x1d\x4a\x3f\x2f\xa4\x57\x1e\xfc\x8c\xe0\xba\x8a\x4f\xe8"
+ "\xb6\x85\x5d\xfe\x72\xb0\xa6\x6e\xde\xd2\xfb\xab\xfb\xe5\x8a\x30"
+ "\xfa\xfa\xbe\x1c\x5d\x71\xa8\x7e\x2f\x74\x1e\xf8\xc1\xfe\x86\xfe"
+ "\xa6\xbb\xfd\xe5\x30\x67\x7f\x0d\x97\xd1\x1d\x49\xf7\xa8\x44\x3d"
+ "\x08\x22\xe5\x06\xa9\xf4\x61\x4e\x01\x1e\x2a\x94\x83\x8f\xf8\x8c"
+ "\xd6\x8c\x8b\xb7\xc5\xc6\x42\x4c\xff\xff\xff\xff\xff\xff\xff\xff",
+};
+
+static int dh_ffdhe2048_create(struct crypto_template *tmpl,
+ struct rtattr **tb)
+{
+ return __dh_safe_prime_create(tmpl, tb, &ffdhe2048_prime);
+}
+
+static int dh_ffdhe3072_create(struct crypto_template *tmpl,
+ struct rtattr **tb)
+{
+ return __dh_safe_prime_create(tmpl, tb, &ffdhe3072_prime);
+}
+
+static int dh_ffdhe4096_create(struct crypto_template *tmpl,
+ struct rtattr **tb)
+{
+ return __dh_safe_prime_create(tmpl, tb, &ffdhe4096_prime);
+}
+
+static int dh_ffdhe6144_create(struct crypto_template *tmpl,
+ struct rtattr **tb)
+{
+ return __dh_safe_prime_create(tmpl, tb, &ffdhe6144_prime);
+}
+
+static int dh_ffdhe8192_create(struct crypto_template *tmpl,
+ struct rtattr **tb)
+{
+ return __dh_safe_prime_create(tmpl, tb, &ffdhe8192_prime);
+}
+
+static struct crypto_template crypto_ffdhe_templates[] = {
+ {
+ .name = "ffdhe2048",
+ .create = dh_ffdhe2048_create,
+ .module = THIS_MODULE,
+ },
+ {
+ .name = "ffdhe3072",
+ .create = dh_ffdhe3072_create,
+ .module = THIS_MODULE,
+ },
+ {
+ .name = "ffdhe4096",
+ .create = dh_ffdhe4096_create,
+ .module = THIS_MODULE,
+ },
+ {
+ .name = "ffdhe6144",
+ .create = dh_ffdhe6144_create,
+ .module = THIS_MODULE,
+ },
+ {
+ .name = "ffdhe8192",
+ .create = dh_ffdhe8192_create,
+ .module = THIS_MODULE,
+ },
+};
+
+#else /* ! CONFIG_CRYPTO_DH_RFC7919_GROUPS */
+
+static struct crypto_template crypto_ffdhe_templates[] = {};
+
+#endif /* CONFIG_CRYPTO_DH_RFC7919_GROUPS */
+
+
static int dh_init(void)
{
- return crypto_register_kpp(&dh);
+ int err;
+
+ err = crypto_register_kpp(&dh);
+ if (err)
+ return err;
+
+ err = crypto_register_templates(crypto_ffdhe_templates,
+ ARRAY_SIZE(crypto_ffdhe_templates));
+ if (err) {
+ crypto_unregister_kpp(&dh);
+ return err;
+ }
+
+ return 0;
}
static void dh_exit(void)
{
+ crypto_unregister_templates(crypto_ffdhe_templates,
+ ARRAY_SIZE(crypto_ffdhe_templates));
crypto_unregister_kpp(&dh);
}
diff --git a/crypto/dh_helper.c b/crypto/dh_helper.c
index 9fd5a42eea15..2d499879328b 100644
--- a/crypto/dh_helper.c
+++ b/crypto/dh_helper.c
@@ -10,7 +10,7 @@
#include <crypto/dh.h>
#include <crypto/kpp.h>
-#define DH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + 4 * sizeof(int))
+#define DH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + 3 * sizeof(int))
static inline u8 *dh_pack_data(u8 *dst, u8 *end, const void *src, size_t size)
{
@@ -28,7 +28,7 @@ static inline const u8 *dh_unpack_data(void *dst, const void *src, size_t size)
static inline unsigned int dh_data_size(const struct dh *p)
{
- return p->key_size + p->p_size + p->q_size + p->g_size;
+ return p->key_size + p->p_size + p->g_size;
}
unsigned int crypto_dh_key_len(const struct dh *p)
@@ -53,11 +53,9 @@ int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params)
ptr = dh_pack_data(ptr, end, &params->key_size,
sizeof(params->key_size));
ptr = dh_pack_data(ptr, end, &params->p_size, sizeof(params->p_size));
- ptr = dh_pack_data(ptr, end, &params->q_size, sizeof(params->q_size));
ptr = dh_pack_data(ptr, end, &params->g_size, sizeof(params->g_size));
ptr = dh_pack_data(ptr, end, params->key, params->key_size);
ptr = dh_pack_data(ptr, end, params->p, params->p_size);
- ptr = dh_pack_data(ptr, end, params->q, params->q_size);
ptr = dh_pack_data(ptr, end, params->g, params->g_size);
if (ptr != end)
return -EINVAL;
@@ -65,7 +63,7 @@ int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params)
}
EXPORT_SYMBOL_GPL(crypto_dh_encode_key);
-int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
+int __crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
{
const u8 *ptr = buf;
struct kpp_secret secret;
@@ -79,28 +77,36 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
ptr = dh_unpack_data(&params->key_size, ptr, sizeof(params->key_size));
ptr = dh_unpack_data(&params->p_size, ptr, sizeof(params->p_size));
- ptr = dh_unpack_data(&params->q_size, ptr, sizeof(params->q_size));
ptr = dh_unpack_data(&params->g_size, ptr, sizeof(params->g_size));
if (secret.len != crypto_dh_key_len(params))
return -EINVAL;
+ /* Don't allocate memory. Set pointers to data within
+ * the given buffer
+ */
+ params->key = (void *)ptr;
+ params->p = (void *)(ptr + params->key_size);
+ params->g = (void *)(ptr + params->key_size + params->p_size);
+
+ return 0;
+}
+
+int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
+{
+ int err;
+
+ err = __crypto_dh_decode_key(buf, len, params);
+ if (err)
+ return err;
+
/*
* Don't permit the buffer for 'key' or 'g' to be larger than 'p', since
* some drivers assume otherwise.
*/
if (params->key_size > params->p_size ||
- params->g_size > params->p_size || params->q_size > params->p_size)
+ params->g_size > params->p_size)
return -EINVAL;
- /* Don't allocate memory. Set pointers to data within
- * the given buffer
- */
- params->key = (void *)ptr;
- params->p = (void *)(ptr + params->key_size);
- params->q = (void *)(ptr + params->key_size + params->p_size);
- params->g = (void *)(ptr + params->key_size + params->p_size +
- params->q_size);
-
/*
* Don't permit 'p' to be 0. It's not a prime number, and it's subject
* to corner cases such as 'mod 0' being undefined or
@@ -109,10 +115,6 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
if (memchr_inv(params->p, 0, params->p_size) == NULL)
return -EINVAL;
- /* It is permissible to not provide Q. */
- if (params->q_size == 0)
- params->q = NULL;
-
return 0;
}
EXPORT_SYMBOL_GPL(crypto_dh_decode_key);
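
With q gone, the packed secret that __crypto_dh_decode_key() walks has the layout below; note the decoder only aims pointers into the caller's buffer and allocates nothing. A pseudo-declaration of the format implied by the pack/unpack calls above:

  struct kpp_secret header;	/* .len must equal crypto_dh_key_len() */
  unsigned int key_size, p_size, g_size;
  u8 key[key_size];		/* private key */
  u8 p[p_size];			/* modulus */
  u8 g[g_size];			/* generator */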
diff --git a/crypto/hmac.c b/crypto/hmac.c
index 25856aa7ccbf..3610ff0b6739 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -15,6 +15,7 @@
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
+#include <linux/fips.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -51,6 +52,9 @@ static int hmac_setkey(struct crypto_shash *parent,
SHASH_DESC_ON_STACK(shash, hash);
unsigned int i;
+ if (fips_enabled && (keylen < 112 / 8))
+ return -EINVAL;
+
shash->tfm = hash;
if (keylen > bs) {
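
112 bits is the usual NIST floor for HMAC key strength (SP 800-131A),
i.e. 112 / 8 = 14 bytes: in FIPS mode a 13-byte key is now rejected while
a 14-byte key still passes. The gate in isolation (standalone sketch, not
kernel code):

	#include <stdbool.h>
	#include <stddef.h>

	/* Mirrors the new check: reject short keys only when FIPS
	 * mode is enabled. */
	static bool hmac_key_len_ok(bool fips_enabled, size_t keylen)
	{
		return !fips_enabled || keylen >= 112 / 8;
	}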
diff --git a/crypto/kpp.c b/crypto/kpp.c
index 313b2c699963..7aa6ba4b60a4 100644
--- a/crypto/kpp.c
+++ b/crypto/kpp.c
@@ -68,9 +68,17 @@ static int crypto_kpp_init_tfm(struct crypto_tfm *tfm)
return 0;
}
+static void crypto_kpp_free_instance(struct crypto_instance *inst)
+{
+ struct kpp_instance *kpp = kpp_instance(inst);
+
+ kpp->free(kpp);
+}
+
static const struct crypto_type crypto_kpp_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_kpp_init_tfm,
+ .free = crypto_kpp_free_instance,
#ifdef CONFIG_PROC_FS
.show = crypto_kpp_show,
#endif
@@ -87,6 +95,15 @@ struct crypto_kpp *crypto_alloc_kpp(const char *alg_name, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(crypto_alloc_kpp);
+int crypto_grab_kpp(struct crypto_kpp_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask)
+{
+ spawn->base.frontend = &crypto_kpp_type;
+ return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_grab_kpp);
+
static void kpp_prepare_alg(struct kpp_alg *alg)
{
struct crypto_alg *base = &alg->base;
@@ -111,5 +128,17 @@ void crypto_unregister_kpp(struct kpp_alg *alg)
}
EXPORT_SYMBOL_GPL(crypto_unregister_kpp);
+int kpp_register_instance(struct crypto_template *tmpl,
+ struct kpp_instance *inst)
+{
+ if (WARN_ON(!inst->free))
+ return -EINVAL;
+
+ kpp_prepare_alg(&inst->alg);
+
+ return crypto_register_instance(tmpl, kpp_crypto_instance(inst));
+}
+EXPORT_SYMBOL_GPL(kpp_register_instance);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Key-agreement Protocol Primitives");
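
These hooks give KPP the template plumbing other algorithm types already
have: grab a spawn in ->create(), set ->free on the instance, then
register it. A hedged sketch of a template's create path (the my_* names
and context layout are illustrative; kpp_instance_ctx() and
crypto_drop_kpp() are assumed to come from the same series):

	struct my_instance_ctx {
		struct crypto_kpp_spawn spawn;
	};

	static void my_kpp_free(struct kpp_instance *inst)
	{
		struct my_instance_ctx *ctx = kpp_instance_ctx(inst);

		crypto_drop_kpp(&ctx->spawn);
		kfree(inst);
	}

	static int my_kpp_create(struct crypto_template *tmpl, struct rtattr **tb)
	{
		struct my_instance_ctx *ctx;
		struct kpp_instance *inst;
		int err;

		inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
		if (!inst)
			return -ENOMEM;
		ctx = kpp_instance_ctx(inst);

		err = crypto_grab_kpp(&ctx->spawn, kpp_crypto_instance(inst),
				      crypto_attr_alg_name(tb[1]), 0, 0);
		if (err)
			goto err_free;

		/* ... fill in inst->alg names, sizes and callbacks ... */

		inst->free = my_kpp_free;	/* mandatory, see WARN_ON above */

		err = kpp_register_instance(tmpl, inst);
		if (err)
			goto err_free;
		return 0;

	err_free:
		my_kpp_free(inst);
		return err;
	}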
diff --git a/crypto/lrw.c b/crypto/lrw.c
index bcf09fbc750a..8d59a66b6525 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -428,3 +428,4 @@ module_exit(lrw_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LRW block cipher mode");
MODULE_ALIAS_CRYPTO("lrw");
+MODULE_SOFTDEP("pre: ecb");
diff --git a/crypto/memneq.c b/crypto/memneq.c
index afed1bd16aee..fb11608b1ec1 100644
--- a/crypto/memneq.c
+++ b/crypto/memneq.c
@@ -60,6 +60,7 @@
*/
#include <crypto/algapi.h>
+#include <asm/unaligned.h>
#ifndef __HAVE_ARCH_CRYPTO_MEMNEQ
@@ -71,7 +72,8 @@ __crypto_memneq_generic(const void *a, const void *b, size_t size)
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
while (size >= sizeof(unsigned long)) {
- neq |= *(unsigned long *)a ^ *(unsigned long *)b;
+ neq |= get_unaligned((unsigned long *)a) ^
+ get_unaligned((unsigned long *)b);
OPTIMIZER_HIDE_VAR(neq);
a += sizeof(unsigned long);
b += sizeof(unsigned long);
@@ -95,18 +97,24 @@ static inline unsigned long __crypto_memneq_16(const void *a, const void *b)
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
if (sizeof(unsigned long) == 8) {
- neq |= *(unsigned long *)(a) ^ *(unsigned long *)(b);
+ neq |= get_unaligned((unsigned long *)a) ^
+ get_unaligned((unsigned long *)b);
OPTIMIZER_HIDE_VAR(neq);
- neq |= *(unsigned long *)(a+8) ^ *(unsigned long *)(b+8);
+ neq |= get_unaligned((unsigned long *)(a + 8)) ^
+ get_unaligned((unsigned long *)(b + 8));
OPTIMIZER_HIDE_VAR(neq);
} else if (sizeof(unsigned int) == 4) {
- neq |= *(unsigned int *)(a) ^ *(unsigned int *)(b);
+ neq |= get_unaligned((unsigned int *)a) ^
+ get_unaligned((unsigned int *)b);
OPTIMIZER_HIDE_VAR(neq);
- neq |= *(unsigned int *)(a+4) ^ *(unsigned int *)(b+4);
+ neq |= get_unaligned((unsigned int *)(a + 4)) ^
+ get_unaligned((unsigned int *)(b + 4));
OPTIMIZER_HIDE_VAR(neq);
- neq |= *(unsigned int *)(a+8) ^ *(unsigned int *)(b+8);
+ neq |= get_unaligned((unsigned int *)(a + 8)) ^
+ get_unaligned((unsigned int *)(b + 8));
OPTIMIZER_HIDE_VAR(neq);
- neq |= *(unsigned int *)(a+12) ^ *(unsigned int *)(b+12);
+ neq |= get_unaligned((unsigned int *)(a + 12)) ^
+ get_unaligned((unsigned int *)(b + 12));
OPTIMIZER_HIDE_VAR(neq);
} else
#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
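
The old casts dereferenced pointers with no alignment guarantee, which is
undefined behaviour and can fault on strict-alignment architectures;
get_unaligned() emits whatever access the architecture actually needs.
The fixed word loop, reduced to its core (sketch only):

	#include <linux/compiler.h>	/* OPTIMIZER_HIDE_VAR */
	#include <asm/unaligned.h>

	/* Constant-time word-wise inequality accumulator that is safe
	 * for arbitrarily aligned buffers. */
	static unsigned long memneq_words(const u8 *a, const u8 *b, size_t n)
	{
		unsigned long neq = 0;

		while (n >= sizeof(unsigned long)) {
			neq |= get_unaligned((unsigned long *)a) ^
			       get_unaligned((unsigned long *)b);
			OPTIMIZER_HIDE_VAR(neq);
			a += sizeof(unsigned long);
			b += sizeof(unsigned long);
			n -= sizeof(unsigned long);
		}
		return neq;
	}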
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index 8ac3e73e8ea6..3285e3af43e1 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -385,15 +385,15 @@ static int pkcs1pad_sign(struct akcipher_request *req)
struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst);
const struct rsa_asn1_template *digest_info = ictx->digest_info;
int err;
- unsigned int ps_end, digest_size = 0;
+ unsigned int ps_end, digest_info_size = 0;
if (!ctx->key_size)
return -EINVAL;
if (digest_info)
- digest_size = digest_info->size;
+ digest_info_size = digest_info->size;
- if (req->src_len + digest_size > ctx->key_size - 11)
+ if (req->src_len + digest_info_size > ctx->key_size - 11)
return -EOVERFLOW;
if (req->dst_len < ctx->key_size) {
@@ -406,7 +406,7 @@ static int pkcs1pad_sign(struct akcipher_request *req)
if (!req_ctx->in_buf)
return -ENOMEM;
- ps_end = ctx->key_size - digest_size - req->src_len - 2;
+ ps_end = ctx->key_size - digest_info_size - req->src_len - 2;
req_ctx->in_buf[0] = 0x01;
memset(req_ctx->in_buf + 1, 0xff, ps_end - 1);
req_ctx->in_buf[ps_end] = 0x00;
@@ -441,6 +441,8 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
struct akcipher_instance *inst = akcipher_alg_instance(tfm);
struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst);
const struct rsa_asn1_template *digest_info = ictx->digest_info;
+ const unsigned int sig_size = req->src_len;
+ const unsigned int digest_size = req->dst_len;
unsigned int dst_len;
unsigned int pos;
u8 *out_buf;
@@ -476,6 +478,8 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
pos++;
if (digest_info) {
+ if (digest_info->size > dst_len - pos)
+ goto done;
if (crypto_memneq(out_buf + pos, digest_info->data,
digest_info->size))
goto done;
@@ -485,20 +489,19 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
err = 0;
- if (req->dst_len != dst_len - pos) {
+ if (digest_size != dst_len - pos) {
err = -EKEYREJECTED;
req->dst_len = dst_len - pos;
goto done;
}
/* Extract appended digest. */
sg_pcopy_to_buffer(req->src,
- sg_nents_for_len(req->src,
- req->src_len + req->dst_len),
+ sg_nents_for_len(req->src, sig_size + digest_size),
req_ctx->out_buf + ctx->key_size,
- req->dst_len, ctx->key_size);
+ digest_size, sig_size);
/* Do the actual verification step. */
if (memcmp(req_ctx->out_buf + ctx->key_size, out_buf + pos,
- req->dst_len) != 0)
+ digest_size) != 0)
err = -EKEYREJECTED;
done:
kfree_sensitive(req_ctx->out_buf);
@@ -534,14 +537,15 @@ static int pkcs1pad_verify(struct akcipher_request *req)
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+ const unsigned int sig_size = req->src_len;
+ const unsigned int digest_size = req->dst_len;
int err;
- if (WARN_ON(req->dst) ||
- WARN_ON(!req->dst_len) ||
- !ctx->key_size || req->src_len < ctx->key_size)
+ if (WARN_ON(req->dst) || WARN_ON(!digest_size) ||
+ !ctx->key_size || sig_size != ctx->key_size)
return -EINVAL;
- req_ctx->out_buf = kmalloc(ctx->key_size + req->dst_len, GFP_KERNEL);
+ req_ctx->out_buf = kmalloc(ctx->key_size + digest_size, GFP_KERNEL);
if (!req_ctx->out_buf)
return -ENOMEM;
@@ -554,8 +558,7 @@ static int pkcs1pad_verify(struct akcipher_request *req)
/* Reuse input buffer, output to a new buffer */
akcipher_request_set_crypt(&req_ctx->child_req, req->src,
- req_ctx->out_sg, req->src_len,
- ctx->key_size);
+ req_ctx->out_sg, sig_size, ctx->key_size);
err = crypto_akcipher_encrypt(&req_ctx->child_req);
if (err != -EINPROGRESS && err != -EBUSY)
@@ -621,6 +624,11 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
rsa_alg = crypto_spawn_akcipher_alg(&ctx->spawn);
+ if (strcmp(rsa_alg->base.cra_name, "rsa") != 0) {
+ err = -EINVAL;
+ goto err_free_inst;
+ }
+
err = -ENAMETOOLONG;
hash_name = crypto_attr_alg_name(tb[2]);
if (IS_ERR(hash_name)) {
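
The rename is worth the churn because two distinct lengths were being
conflated: digest_info->size is the ASN.1 DigestInfo prefix, while
req->dst_len is the hash itself. Worked numbers, assuming RSA-2048 with
SHA-256 (key_size = 256, digest_info_size = 19, src_len = 32):

	/*
	 *   ps_end = 256 - 19 - 32 - 2 = 203
	 *
	 * so the signing input is
	 *
	 *   0x01 || 0xff * 202 || 0x00 || DigestInfo(19) || digest(32)
	 *
	 * = 255 bytes: the EMSA-PKCS1-v1_5 encoding minus its leading
	 * 0x00 octet, which is implied because the value is smaller
	 * than the modulus.
	 */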
diff --git a/crypto/sm2.c b/crypto/sm2.c
index db8a4a265669..f3e1592965c0 100644
--- a/crypto/sm2.c
+++ b/crypto/sm2.c
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* SM2 asymmetric public-key algorithm
* as specified by OSCCA GM/T 0003.1-2012 -- 0003.5-2012 SM2 and
@@ -13,7 +13,7 @@
#include <crypto/internal/akcipher.h>
#include <crypto/akcipher.h>
#include <crypto/hash.h>
-#include <crypto/sm3_base.h>
+#include <crypto/sm3.h>
#include <crypto/rng.h>
#include <crypto/sm2.h>
#include "sm2signature.asn1.h"
@@ -213,7 +213,7 @@ int sm2_get_signature_s(void *context, size_t hdrlen, unsigned char tag,
return 0;
}
-static int sm2_z_digest_update(struct shash_desc *desc,
+static int sm2_z_digest_update(struct sm3_state *sctx,
MPI m, unsigned int pbytes)
{
static const unsigned char zero[32];
@@ -226,20 +226,20 @@ static int sm2_z_digest_update(struct shash_desc *desc,
if (inlen < pbytes) {
/* padding with zero */
- crypto_sm3_update(desc, zero, pbytes - inlen);
- crypto_sm3_update(desc, in, inlen);
+ sm3_update(sctx, zero, pbytes - inlen);
+ sm3_update(sctx, in, inlen);
} else if (inlen > pbytes) {
/* skip the starting zero */
- crypto_sm3_update(desc, in + inlen - pbytes, pbytes);
+ sm3_update(sctx, in + inlen - pbytes, pbytes);
} else {
- crypto_sm3_update(desc, in, inlen);
+ sm3_update(sctx, in, inlen);
}
kfree(in);
return 0;
}
-static int sm2_z_digest_update_point(struct shash_desc *desc,
+static int sm2_z_digest_update_point(struct sm3_state *sctx,
MPI_POINT point, struct mpi_ec_ctx *ec, unsigned int pbytes)
{
MPI x, y;
@@ -249,8 +249,8 @@ static int sm2_z_digest_update_point(struct shash_desc *desc,
y = mpi_new(0);
if (!mpi_ec_get_affine(x, y, point, ec) &&
- !sm2_z_digest_update(desc, x, pbytes) &&
- !sm2_z_digest_update(desc, y, pbytes))
+ !sm2_z_digest_update(sctx, x, pbytes) &&
+ !sm2_z_digest_update(sctx, y, pbytes))
ret = 0;
mpi_free(x);
@@ -265,7 +265,7 @@ int sm2_compute_z_digest(struct crypto_akcipher *tfm,
struct mpi_ec_ctx *ec = akcipher_tfm_ctx(tfm);
uint16_t bits_len;
unsigned char entl[2];
- SHASH_DESC_ON_STACK(desc, NULL);
+ struct sm3_state sctx;
unsigned int pbytes;
if (id_len > (USHRT_MAX / 8) || !ec->Q)
@@ -278,17 +278,17 @@ int sm2_compute_z_digest(struct crypto_akcipher *tfm,
pbytes = MPI_NBYTES(ec->p);
/* ZA = H256(ENTLA | IDA | a | b | xG | yG | xA | yA) */
- sm3_base_init(desc);
- crypto_sm3_update(desc, entl, 2);
- crypto_sm3_update(desc, id, id_len);
-
- if (sm2_z_digest_update(desc, ec->a, pbytes) ||
- sm2_z_digest_update(desc, ec->b, pbytes) ||
- sm2_z_digest_update_point(desc, ec->G, ec, pbytes) ||
- sm2_z_digest_update_point(desc, ec->Q, ec, pbytes))
+ sm3_init(&sctx);
+ sm3_update(&sctx, entl, 2);
+ sm3_update(&sctx, id, id_len);
+
+ if (sm2_z_digest_update(&sctx, ec->a, pbytes) ||
+ sm2_z_digest_update(&sctx, ec->b, pbytes) ||
+ sm2_z_digest_update_point(&sctx, ec->G, ec, pbytes) ||
+ sm2_z_digest_update_point(&sctx, ec->Q, ec, pbytes))
return -EINVAL;
- crypto_sm3_final(desc, dgst);
+ sm3_final(&sctx, dgst);
return 0;
}
EXPORT_SYMBOL(sm2_compute_z_digest);
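
Switching to the SM3 library also retires the awkward
SHASH_DESC_ON_STACK(desc, NULL) pattern: a struct sm3_state on the stack
needs no tfm and has no failure paths. One-shot usage in sketch form:

	#include <crypto/sm3.h>

	/* Hash a flat buffer with the library API; sm3_init() is a
	 * plain state reset, so nothing here can fail. */
	static void sm3_digest_buf(const u8 *data, unsigned int len,
				   u8 out[SM3_DIGEST_SIZE])
	{
		struct sm3_state sctx;

		sm3_init(&sctx);
		sm3_update(&sctx, data, len);
		sm3_final(&sctx, out);
	}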
diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c
index 193c4584bd00..a215c1c37e73 100644
--- a/crypto/sm3_generic.c
+++ b/crypto/sm3_generic.c
@@ -5,6 +5,7 @@
*
* Copyright (C) 2017 ARM Limited or its affiliates.
* Written by Gilad Ben-Yossef <gilad@benyossef.com>
+ * Copyright (C) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
*/
#include <crypto/internal/hash.h>
@@ -26,143 +27,29 @@ const u8 sm3_zero_message_hash[SM3_DIGEST_SIZE] = {
};
EXPORT_SYMBOL_GPL(sm3_zero_message_hash);
-static inline u32 p0(u32 x)
-{
- return x ^ rol32(x, 9) ^ rol32(x, 17);
-}
-
-static inline u32 p1(u32 x)
-{
- return x ^ rol32(x, 15) ^ rol32(x, 23);
-}
-
-static inline u32 ff(unsigned int n, u32 a, u32 b, u32 c)
-{
- return (n < 16) ? (a ^ b ^ c) : ((a & b) | (a & c) | (b & c));
-}
-
-static inline u32 gg(unsigned int n, u32 e, u32 f, u32 g)
-{
- return (n < 16) ? (e ^ f ^ g) : ((e & f) | ((~e) & g));
-}
-
-static inline u32 t(unsigned int n)
-{
- return (n < 16) ? SM3_T1 : SM3_T2;
-}
-
-static void sm3_expand(u32 *t, u32 *w, u32 *wt)
-{
- int i;
- unsigned int tmp;
-
- /* load the input */
- for (i = 0; i <= 15; i++)
- w[i] = get_unaligned_be32((__u32 *)t + i);
-
- for (i = 16; i <= 67; i++) {
- tmp = w[i - 16] ^ w[i - 9] ^ rol32(w[i - 3], 15);
- w[i] = p1(tmp) ^ (rol32(w[i - 13], 7)) ^ w[i - 6];
- }
-
- for (i = 0; i <= 63; i++)
- wt[i] = w[i] ^ w[i + 4];
-}
-
-static void sm3_compress(u32 *w, u32 *wt, u32 *m)
-{
- u32 ss1;
- u32 ss2;
- u32 tt1;
- u32 tt2;
- u32 a, b, c, d, e, f, g, h;
- int i;
-
- a = m[0];
- b = m[1];
- c = m[2];
- d = m[3];
- e = m[4];
- f = m[5];
- g = m[6];
- h = m[7];
-
- for (i = 0; i <= 63; i++) {
-
- ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i & 31)), 7);
-
- ss2 = ss1 ^ rol32(a, 12);
-
- tt1 = ff(i, a, b, c) + d + ss2 + *wt;
- wt++;
-
- tt2 = gg(i, e, f, g) + h + ss1 + *w;
- w++;
-
- d = c;
- c = rol32(b, 9);
- b = a;
- a = tt1;
- h = g;
- g = rol32(f, 19);
- f = e;
- e = p0(tt2);
- }
-
- m[0] = a ^ m[0];
- m[1] = b ^ m[1];
- m[2] = c ^ m[2];
- m[3] = d ^ m[3];
- m[4] = e ^ m[4];
- m[5] = f ^ m[5];
- m[6] = g ^ m[6];
- m[7] = h ^ m[7];
-
- a = b = c = d = e = f = g = h = ss1 = ss2 = tt1 = tt2 = 0;
-}
-
-static void sm3_transform(struct sm3_state *sst, u8 const *src)
-{
- unsigned int w[68];
- unsigned int wt[64];
-
- sm3_expand((u32 *)src, w, wt);
- sm3_compress(w, wt, sst->state);
-
- memzero_explicit(w, sizeof(w));
- memzero_explicit(wt, sizeof(wt));
-}
-
-static void sm3_generic_block_fn(struct sm3_state *sst, u8 const *src,
- int blocks)
-{
- while (blocks--) {
- sm3_transform(sst, src);
- src += SM3_BLOCK_SIZE;
- }
-}
-
-int crypto_sm3_update(struct shash_desc *desc, const u8 *data,
+static int crypto_sm3_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- return sm3_base_do_update(desc, data, len, sm3_generic_block_fn);
+ sm3_update(shash_desc_ctx(desc), data, len);
+ return 0;
}
-EXPORT_SYMBOL(crypto_sm3_update);
-int crypto_sm3_final(struct shash_desc *desc, u8 *out)
+static int crypto_sm3_final(struct shash_desc *desc, u8 *out)
{
- sm3_base_do_finalize(desc, sm3_generic_block_fn);
- return sm3_base_finish(desc, out);
+ sm3_final(shash_desc_ctx(desc), out);
+ return 0;
}
-EXPORT_SYMBOL(crypto_sm3_final);
-int crypto_sm3_finup(struct shash_desc *desc, const u8 *data,
+static int crypto_sm3_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *hash)
{
- sm3_base_do_update(desc, data, len, sm3_generic_block_fn);
- return crypto_sm3_final(desc, hash);
+ struct sm3_state *sctx = shash_desc_ctx(desc);
+
+ if (len)
+ sm3_update(sctx, data, len);
+ sm3_final(sctx, hash);
+ return 0;
}
-EXPORT_SYMBOL(crypto_sm3_finup);
static struct shash_alg sm3_alg = {
.digestsize = SM3_DIGEST_SIZE,
@@ -174,6 +61,7 @@ static struct shash_alg sm3_alg = {
.base = {
.cra_name = "sm3",
.cra_driver_name = "sm3-generic",
+ .cra_priority = 100,
.cra_blocksize = SM3_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
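
sm3_generic is now a thin shash wrapper around lib/crypto/sm3, and it
finally advertises a cra_priority (100), so an accelerated driver
registered at a higher priority (such as the x86 AVX sm3 added in this
series) wins the "sm3" lookup. Illustrative resolution by name:

	#include <crypto/hash.h>

	/* The core resolves "sm3" to the highest-priority registered
	 * implementation; the driver name shows which one won. */
	static int sm3_report_driver(void)
	{
		struct crypto_shash *tfm = crypto_alloc_shash("sm3", 0, 0);

		if (IS_ERR(tfm))
			return PTR_ERR(tfm);
		pr_info("sm3 resolved to %s\n",
			crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm)));
		crypto_free_shash(tfm);
		return 0;
	}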
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 00149657a4bc..2bacf8384f59 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -724,200 +724,6 @@ static inline int do_one_ahash_op(struct ahash_request *req, int ret)
return crypto_wait_req(ret, wait);
}
-struct test_mb_ahash_data {
- struct scatterlist sg[XBUFSIZE];
- char result[64];
- struct ahash_request *req;
- struct crypto_wait wait;
- char *xbuf[XBUFSIZE];
-};
-
-static inline int do_mult_ahash_op(struct test_mb_ahash_data *data, u32 num_mb,
- int *rc)
-{
- int i, err = 0;
-
- /* Fire up a bunch of concurrent requests */
- for (i = 0; i < num_mb; i++)
- rc[i] = crypto_ahash_digest(data[i].req);
-
- /* Wait for all requests to finish */
- for (i = 0; i < num_mb; i++) {
- rc[i] = crypto_wait_req(rc[i], &data[i].wait);
-
- if (rc[i]) {
- pr_info("concurrent request %d error %d\n", i, rc[i]);
- err = rc[i];
- }
- }
-
- return err;
-}
-
-static int test_mb_ahash_jiffies(struct test_mb_ahash_data *data, int blen,
- int secs, u32 num_mb)
-{
- unsigned long start, end;
- int bcount;
- int ret = 0;
- int *rc;
-
- rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL);
- if (!rc)
- return -ENOMEM;
-
- for (start = jiffies, end = start + secs * HZ, bcount = 0;
- time_before(jiffies, end); bcount++) {
- ret = do_mult_ahash_op(data, num_mb, rc);
- if (ret)
- goto out;
- }
-
- pr_cont("%d operations in %d seconds (%llu bytes)\n",
- bcount * num_mb, secs, (u64)bcount * blen * num_mb);
-
-out:
- kfree(rc);
- return ret;
-}
-
-static int test_mb_ahash_cycles(struct test_mb_ahash_data *data, int blen,
- u32 num_mb)
-{
- unsigned long cycles = 0;
- int ret = 0;
- int i;
- int *rc;
-
- rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL);
- if (!rc)
- return -ENOMEM;
-
- /* Warm-up run. */
- for (i = 0; i < 4; i++) {
- ret = do_mult_ahash_op(data, num_mb, rc);
- if (ret)
- goto out;
- }
-
- /* The real thing. */
- for (i = 0; i < 8; i++) {
- cycles_t start, end;
-
- start = get_cycles();
- ret = do_mult_ahash_op(data, num_mb, rc);
- end = get_cycles();
-
- if (ret)
- goto out;
-
- cycles += end - start;
- }
-
- pr_cont("1 operation in %lu cycles (%d bytes)\n",
- (cycles + 4) / (8 * num_mb), blen);
-
-out:
- kfree(rc);
- return ret;
-}
-
-static void test_mb_ahash_speed(const char *algo, unsigned int secs,
- struct hash_speed *speed, u32 num_mb)
-{
- struct test_mb_ahash_data *data;
- struct crypto_ahash *tfm;
- unsigned int i, j, k;
- int ret;
-
- data = kcalloc(num_mb, sizeof(*data), GFP_KERNEL);
- if (!data)
- return;
-
- tfm = crypto_alloc_ahash(algo, 0, 0);
- if (IS_ERR(tfm)) {
- pr_err("failed to load transform for %s: %ld\n",
- algo, PTR_ERR(tfm));
- goto free_data;
- }
-
- for (i = 0; i < num_mb; ++i) {
- if (testmgr_alloc_buf(data[i].xbuf))
- goto out;
-
- crypto_init_wait(&data[i].wait);
-
- data[i].req = ahash_request_alloc(tfm, GFP_KERNEL);
- if (!data[i].req) {
- pr_err("alg: hash: Failed to allocate request for %s\n",
- algo);
- goto out;
- }
-
- ahash_request_set_callback(data[i].req, 0, crypto_req_done,
- &data[i].wait);
-
- sg_init_table(data[i].sg, XBUFSIZE);
- for (j = 0; j < XBUFSIZE; j++) {
- sg_set_buf(data[i].sg + j, data[i].xbuf[j], PAGE_SIZE);
- memset(data[i].xbuf[j], 0xff, PAGE_SIZE);
- }
- }
-
- pr_info("\ntesting speed of multibuffer %s (%s)\n", algo,
- get_driver_name(crypto_ahash, tfm));
-
- for (i = 0; speed[i].blen != 0; i++) {
- /* For some reason this only tests digests. */
- if (speed[i].blen != speed[i].plen)
- continue;
-
- if (speed[i].blen > XBUFSIZE * PAGE_SIZE) {
- pr_err("template (%u) too big for tvmem (%lu)\n",
- speed[i].blen, XBUFSIZE * PAGE_SIZE);
- goto out;
- }
-
- if (klen)
- crypto_ahash_setkey(tfm, tvmem[0], klen);
-
- for (k = 0; k < num_mb; k++)
- ahash_request_set_crypt(data[k].req, data[k].sg,
- data[k].result, speed[i].blen);
-
- pr_info("test%3u "
- "(%5u byte blocks,%5u bytes per update,%4u updates): ",
- i, speed[i].blen, speed[i].plen,
- speed[i].blen / speed[i].plen);
-
- if (secs) {
- ret = test_mb_ahash_jiffies(data, speed[i].blen, secs,
- num_mb);
- cond_resched();
- } else {
- ret = test_mb_ahash_cycles(data, speed[i].blen, num_mb);
- }
-
-
- if (ret) {
- pr_err("At least one hashing failed ret=%d\n", ret);
- break;
- }
- }
-
-out:
- for (k = 0; k < num_mb; ++k)
- ahash_request_free(data[k].req);
-
- for (k = 0; k < num_mb; ++k)
- testmgr_free_buf(data[k].xbuf);
-
- crypto_free_ahash(tfm);
-
-free_data:
- kfree(data);
-}
-
static int test_ahash_jiffies_digest(struct ahash_request *req, int blen,
char *out, int secs)
{
@@ -1667,8 +1473,8 @@ static inline int tcrypt_test(const char *alg)
pr_debug("testing %s\n", alg);
ret = alg_test(alg, alg, 0, 0);
- /* non-fips algs return -EINVAL in fips mode */
- if (fips_enabled && ret == -EINVAL)
+ /* non-fips algs return -EINVAL or -ECANCELED in fips mode */
+ if (fips_enabled && (ret == -EINVAL || ret == -ECANCELED))
ret = 0;
return ret;
}
@@ -2571,33 +2377,7 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
if (mode > 400 && mode < 500) break;
fallthrough;
case 422:
- test_mb_ahash_speed("sha1", sec, generic_hash_speed_template,
- num_mb);
- if (mode > 400 && mode < 500) break;
- fallthrough;
- case 423:
- test_mb_ahash_speed("sha256", sec, generic_hash_speed_template,
- num_mb);
- if (mode > 400 && mode < 500) break;
- fallthrough;
- case 424:
- test_mb_ahash_speed("sha512", sec, generic_hash_speed_template,
- num_mb);
- if (mode > 400 && mode < 500) break;
- fallthrough;
- case 425:
- test_mb_ahash_speed("sm3", sec, generic_hash_speed_template,
- num_mb);
- if (mode > 400 && mode < 500) break;
- fallthrough;
- case 426:
- test_mb_ahash_speed("streebog256", sec,
- generic_hash_speed_template, num_mb);
- if (mode > 400 && mode < 500) break;
- fallthrough;
- case 427:
- test_mb_ahash_speed("streebog512", sec,
- generic_hash_speed_template, num_mb);
+ test_ahash_speed("sm3", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
fallthrough;
case 499:
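
The multibuffer ahash speed tests (old modes 422-427) are gone along with
their helpers above; mode 422 is reused for a plain sm3 ahash speed run.
As with the other speed tests, this would be driven by something like
"modprobe tcrypt mode=422 sec=1", with sec=0 switching the measurement
from jiffies to cycles.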
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 5831d4bbc64f..2d632a285869 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -55,9 +55,6 @@ MODULE_PARM_DESC(noextratests, "disable expensive crypto self-tests");
static unsigned int fuzz_iterations = 100;
module_param(fuzz_iterations, uint, 0644);
MODULE_PARM_DESC(fuzz_iterations, "number of fuzz test iterations");
-
-DEFINE_PER_CPU(bool, crypto_simd_disabled_for_test);
-EXPORT_PER_CPU_SYMBOL_GPL(crypto_simd_disabled_for_test);
#endif
#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
@@ -1854,6 +1851,9 @@ static int __alg_test_hash(const struct hash_testvec *vecs,
}
for (i = 0; i < num_vecs; i++) {
+ if (fips_enabled && vecs[i].fips_skip)
+ continue;
+
err = test_hash_vec(&vecs[i], i, req, desc, tsgl, hashstate);
if (err)
goto out;
@@ -4650,7 +4650,6 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
.alg = "dh",
.test = alg_test_kpp,
- .fips_allowed = 1,
.suite = {
.kpp = __VECS(dh_tv_template)
}
@@ -4973,6 +4972,43 @@ static const struct alg_test_desc alg_test_descs[] = {
.cipher = __VECS(essiv_aes_cbc_tv_template)
}
}, {
+#if IS_ENABLED(CONFIG_CRYPTO_DH_RFC7919_GROUPS)
+ .alg = "ffdhe2048(dh)",
+ .test = alg_test_kpp,
+ .fips_allowed = 1,
+ .suite = {
+ .kpp = __VECS(ffdhe2048_dh_tv_template)
+ }
+ }, {
+ .alg = "ffdhe3072(dh)",
+ .test = alg_test_kpp,
+ .fips_allowed = 1,
+ .suite = {
+ .kpp = __VECS(ffdhe3072_dh_tv_template)
+ }
+ }, {
+ .alg = "ffdhe4096(dh)",
+ .test = alg_test_kpp,
+ .fips_allowed = 1,
+ .suite = {
+ .kpp = __VECS(ffdhe4096_dh_tv_template)
+ }
+ }, {
+ .alg = "ffdhe6144(dh)",
+ .test = alg_test_kpp,
+ .fips_allowed = 1,
+ .suite = {
+ .kpp = __VECS(ffdhe6144_dh_tv_template)
+ }
+ }, {
+ .alg = "ffdhe8192(dh)",
+ .test = alg_test_kpp,
+ .fips_allowed = 1,
+ .suite = {
+ .kpp = __VECS(ffdhe8192_dh_tv_template)
+ }
+ }, {
+#endif /* CONFIG_CRYPTO_DH_RFC7919_GROUPS */
.alg = "gcm(aes)",
.generic_driver = "gcm_base(ctr(aes-generic),ghash-generic)",
.test = alg_test_aead,
@@ -5613,6 +5649,13 @@ static int alg_find_test(const char *alg)
return -1;
}
+static int alg_fips_disabled(const char *driver, const char *alg)
+{
+ pr_info("alg: %s (%s) is disabled due to FIPS\n", alg, driver);
+
+ return -ECANCELED;
+}
+
int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
{
int i;
@@ -5649,9 +5692,13 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
if (i < 0 && j < 0)
goto notest;
- if (fips_enabled && ((i >= 0 && !alg_test_descs[i].fips_allowed) ||
- (j >= 0 && !alg_test_descs[j].fips_allowed)))
- goto non_fips_alg;
+ if (fips_enabled) {
+ if (j >= 0 && !alg_test_descs[j].fips_allowed)
+ return -EINVAL;
+
+ if (i >= 0 && !alg_test_descs[i].fips_allowed)
+ goto non_fips_alg;
+ }
rc = 0;
if (i >= 0)
@@ -5681,9 +5728,13 @@ test_done:
notest:
printk(KERN_INFO "alg: No test for %s (%s)\n", alg, driver);
+
+ if (type & CRYPTO_ALG_FIPS_INTERNAL)
+ return alg_fips_disabled(driver, alg);
+
return 0;
non_fips_alg:
- return -EINVAL;
+ return alg_fips_disabled(driver, alg);
}
#endif /* CONFIG_CRYPTO_MANAGER_DISABLE_TESTS */
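
The net effect: in FIPS mode one lookup path still fails hard with
-EINVAL, while the other, together with CRYPTO_ALG_FIPS_INTERNAL
algorithms that have no test, is logged by alg_fips_disabled() and
returns -ECANCELED, which tcrypt_test() above now treats as a skip rather
than a failure. A caller-side sketch of the three outcomes
(report_alg_test() is illustrative, not part of the patch):

	static void report_alg_test(const char *driver, const char *alg)
	{
		int err = alg_test(driver, alg, 0, 0);

		if (!err)
			pr_info("alg: %s (%s) passed\n", alg, driver);
		else if (err == -ECANCELED)
			pr_info("alg: %s (%s) disabled by FIPS policy\n",
				alg, driver);
		else
			pr_err("alg: %s (%s) failed: %d\n", alg, driver, err);
	}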
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index a253d66ba1c1..d1aa90993bbd 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -33,6 +33,7 @@
* @ksize: Length of @key in bytes (0 if no key)
* @setkey_error: Expected error from setkey()
* @digest_error: Expected error from digest()
+ * @fips_skip: Skip the test vector in FIPS mode
*/
struct hash_testvec {
const char *key;
@@ -42,6 +43,7 @@ struct hash_testvec {
unsigned short ksize;
int setkey_error;
int digest_error;
+ bool fips_skip;
};
/*
@@ -1244,17 +1246,15 @@ static const struct kpp_testvec dh_tv_template[] = {
.secret =
#ifdef __LITTLE_ENDIAN
"\x01\x00" /* type */
- "\x15\x02" /* len */
+ "\x11\x02" /* len */
"\x00\x01\x00\x00" /* key_size */
"\x00\x01\x00\x00" /* p_size */
- "\x00\x00\x00\x00" /* q_size */
"\x01\x00\x00\x00" /* g_size */
#else
"\x00\x01" /* type */
- "\x02\x15" /* len */
+ "\x02\x11" /* len */
"\x00\x00\x01\x00" /* key_size */
"\x00\x00\x01\x00" /* p_size */
- "\x00\x00\x00\x00" /* q_size */
"\x00\x00\x00\x01" /* g_size */
#endif
/* xa */
@@ -1344,7 +1344,7 @@ static const struct kpp_testvec dh_tv_template[] = {
"\xd3\x34\x49\xad\x64\xa6\xb1\xc0\x59\x28\x75\x60\xa7\x8a\xb0\x11"
"\x56\x89\x42\x74\x11\xf5\xf6\x5e\x6f\x16\x54\x6a\xb1\x76\x4d\x50"
"\x8a\x68\xc1\x5b\x82\xb9\x0d\x00\x32\x50\xed\x88\x87\x48\x92\x17",
- .secret_size = 533,
+ .secret_size = 529,
.b_public_size = 256,
.expected_a_public_size = 256,
.expected_ss_size = 256,
@@ -1353,17 +1353,15 @@ static const struct kpp_testvec dh_tv_template[] = {
.secret =
#ifdef __LITTLE_ENDIAN
"\x01\x00" /* type */
- "\x15\x02" /* len */
+ "\x11\x02" /* len */
"\x00\x01\x00\x00" /* key_size */
"\x00\x01\x00\x00" /* p_size */
- "\x00\x00\x00\x00" /* q_size */
"\x01\x00\x00\x00" /* g_size */
#else
"\x00\x01" /* type */
- "\x02\x15" /* len */
+ "\x02\x11" /* len */
"\x00\x00\x01\x00" /* key_size */
"\x00\x00\x01\x00" /* p_size */
- "\x00\x00\x00\x00" /* q_size */
"\x00\x00\x00\x01" /* g_size */
#endif
/* xa */
@@ -1453,13 +1451,1442 @@ static const struct kpp_testvec dh_tv_template[] = {
"\x5e\x5a\x64\xbd\xf6\x85\x04\xe8\x28\x6a\xac\xef\xce\x19\x8e\x9a"
"\xfe\x75\xc0\x27\x69\xe3\xb3\x7b\x21\xa7\xb1\x16\xa4\x85\x23\xee"
"\xb0\x1b\x04\x6e\xbd\xab\x16\xde\xfd\x86\x6b\xa9\x95\xd7\x0b\xfd",
- .secret_size = 533,
+ .secret_size = 529,
.b_public_size = 256,
.expected_a_public_size = 256,
.expected_ss_size = 256,
}
};
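
The updated sizes check out against the new dh_helper packing:

	/*
	 * header:  2 (type) + 2 (len) + 3 * 4 (key/p/g sizes) = 16 bytes
	 * payload: 256 (key) + 256 (p) + 1 (g)                = 513 bytes
	 * total:   16 + 513 = 529 = 0x0211,
	 * matching the "\x11\x02" little-endian len field above.
	 */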
+static const struct kpp_testvec ffdhe2048_dh_tv_template[] __maybe_unused = {
+ {
+ .secret =
+#ifdef __LITTLE_ENDIAN
+ "\x01\x00" /* type */
+ "\x10\x01" /* len */
+ "\x00\x01\x00\x00" /* key_size */
+ "\x00\x00\x00\x00" /* p_size */
+ "\x00\x00\x00\x00" /* g_size */
+#else
+ "\x00\x01" /* type */
+ "\x01\x10" /* len */
+ "\x00\x00\x01\x00" /* key_size */
+ "\x00\x00\x00\x00" /* p_size */
+ "\x00\x00\x00\x00" /* g_size */
+#endif
+ /* xa */
+ "\x23\x7d\xd0\x06\xfd\x7a\xe5\x7a\x08\xda\x98\x31\xc0\xb3\xd5\x85"
+ "\xe2\x0d\x2a\x91\x5f\x78\x4b\xa6\x62\xd0\xa6\x35\xd4\xef\x86\x39"
+ "\xf1\xdb\x71\x5e\xb0\x11\x2e\xee\x91\x3a\xaa\xf9\xe3\xdf\x8d\x8b"
+ "\x48\x41\xde\xe8\x78\x53\xc5\x5f\x93\xd2\x79\x0d\xbe\x8d\x83\xe8"
+ "\x8f\x00\xd2\xde\x13\x18\x04\x05\x20\x6d\xda\xfa\x1d\x0b\x24\x52"
+ "\x3a\x18\x2b\xe1\x1e\xae\x15\x3b\x0f\xaa\x09\x09\xf6\x01\x98\xe9"
+ "\x81\x5d\x6b\x83\x6e\x55\xf1\x5d\x6f\x6f\x0d\x9d\xa8\x72\x32\x63"
+ "\x60\xe6\x0b\xc5\x22\xe2\xf9\x46\x58\xa2\x1c\x2a\xb0\xd5\xaf\xe3"
+ "\x5b\x03\xb7\x36\xb7\xba\x55\x20\x08\x7c\x51\xd4\x89\x42\x9c\x14"
+ "\x23\xe2\x71\x3e\x15\x2a\x0d\x34\x8a\xde\xad\x84\x11\x15\x72\x18"
+ "\x42\x43\x0a\xe2\x58\x29\xb3\x90\x0f\x56\xd8\x8a\x0f\x0e\xbc\x0e"
+ "\x9c\xe7\xd5\xe6\x5b\xbf\x06\x64\x38\x12\xa5\x8d\x5b\x68\x34\xdd"
+ "\x75\x48\xc9\xa7\xa3\x58\x5a\x1c\xe1\xb2\xc5\xe3\x39\x03\xcf\xab"
+ "\xc2\x14\x07\xaf\x55\x80\xc7\x63\xe4\x03\xeb\xe9\x0a\x25\x61\x85"
+ "\x1d\x0e\x81\x52\x7b\xbc\x4a\x0c\xc8\x59\x6a\xac\x18\xfb\x8c\x0c"
+ "\xb4\x79\xbd\xa1\x4c\xbb\x02\xc9\xd5\x13\x88\x3d\x25\xaa\x77\x49",
+ .b_public =
+ "\x5c\x00\x6f\xda\xfe\x4c\x0c\xc2\x18\xff\xa9\xec\x7a\xbe\x8a\x51"
+ "\x64\x6b\x57\xf8\xed\xe2\x36\x77\xc1\x23\xbf\x56\xa6\x48\x76\x34"
+ "\x0e\xf3\x68\x05\x45\x6a\x98\x5b\x9e\x8b\xc0\x11\x29\xcb\x5b\x66"
+ "\x2d\xc2\xeb\x4c\xf1\x7d\x85\x30\xaa\xd5\xf5\xb8\xd3\x62\x1e\x97"
+ "\x1e\x34\x18\xf8\x76\x8c\x10\xca\x1f\xe4\x5d\x62\xe1\xbe\x61\xef"
+ "\xaf\x2c\x8d\x97\x15\xa5\x86\xd5\xd3\x12\x6f\xec\xe2\xa4\xb2\x5a"
+ "\x35\x1d\xd4\x91\xa6\xef\x13\x09\x65\x9c\x45\xc0\x12\xad\x7f\xee"
+ "\x93\x5d\xfa\x89\x26\x7d\xae\xee\xea\x8c\xa3\xcf\x04\x2d\xa0\xc7"
+ "\xd9\x14\x62\xaf\xdf\xa0\x33\xd7\x5e\x83\xa2\xe6\x0e\x0e\x5d\x77"
+ "\xce\xe6\x72\xe4\xec\x9d\xff\x72\x9f\x38\x95\x19\x96\xba\x4c\xe3"
+ "\x5f\xb8\x46\x4a\x1d\xe9\x62\x7b\xa8\xdc\xe7\x61\x90\x6b\xb9\xd4"
+ "\xad\x0b\xa3\x06\xb3\x70\xfa\xea\x2b\xc4\x2c\xde\x43\x37\xf6\x8d"
+ "\x72\xf0\x86\x9a\xbb\x3b\x8e\x7a\x71\x03\x30\x30\x2a\x5d\xcd\x1e"
+ "\xe4\xd3\x08\x07\x75\x17\x17\x72\x1e\x77\x6c\x98\x0d\x29\x7f\xac"
+ "\xe7\xb2\xee\xa9\x1c\x33\x9d\x08\x39\xe1\xd8\x5b\xe5\xbc\x48\xb2"
+ "\xb6\xdf\xcd\xa0\x42\x06\xcc\xfb\xed\x60\x6f\xbc\x57\xac\x09\x45",
+ .expected_a_public =
+ "\x8b\xdb\xc1\xf7\xc6\xba\xa1\x38\x95\x6a\xa1\xb6\x04\x5e\xae\x52"
+ "\x72\xfc\xef\x2d\x9d\x71\x05\x9c\xd3\x02\xa9\xfb\x55\x0f\xfa\xc9"
+ "\xb4\x34\x51\xa3\x28\x89\x8d\x93\x92\xcb\xd9\xb5\xb9\x66\xfc\x67"
+ "\x15\x92\x6f\x73\x85\x15\xe2\xfc\x11\x6b\x97\x8c\x4b\x0f\x12\xfa"
+ "\x8d\x72\x76\x9b\x8f\x3b\xfe\x31\xbe\x42\x88\x4c\xd2\xb2\x70\xa6"
+ "\xa5\xe3\x7e\x73\x07\x12\x36\xaa\xc9\x5c\x83\xe1\xf1\x46\x41\x4f"
+ "\x7c\x52\xaf\xdc\xa4\xe6\x82\xa3\x86\x83\x47\x5a\x12\x3a\x0c\xe3"
+ "\xdd\xdb\x94\x03\x2a\x59\x91\xa0\x19\xe5\xf8\x07\xdd\x54\x6a\x22"
+ "\x43\xb7\xf3\x74\xd7\xb9\x30\xfe\x9c\xe8\xd1\xcf\x06\x43\x68\xb9"
+ "\x54\x8f\x54\xa2\xe5\x3c\xf2\xc3\x4c\xee\xd4\x7c\x5d\x0e\xb1\x7b"
+ "\x16\x68\xb5\xb3\x7d\xd4\x11\x83\x5c\x77\x17\xc4\xf0\x59\x76\x7a"
+ "\x83\x40\xe5\xd9\x4c\x76\x23\x5b\x17\x6d\xee\x4a\x92\x68\x4b\x89"
+ "\xa0\x6d\x23\x8c\x80\x31\x33\x3a\x12\xf4\x50\xa6\xcb\x13\x97\x01"
+ "\xb8\x2c\xe6\xd2\x38\xdf\xd0\x7f\xc6\x27\x19\x0e\xb2\x07\xfd\x1f"
+ "\x1b\x9c\x1b\x87\xf9\x73\x6a\x3f\x7f\xb0\xf9\x2f\x3c\x19\x9f\xc9"
+ "\x8f\x97\x21\x0e\x8e\xbb\x1a\x17\x20\x15\xdd\xc6\x42\x60\xae\x4d",
+ .expected_ss =
+ "\xf3\x0e\x64\x7b\x66\xd7\x82\x7e\xab\x7e\x4a\xbe\x13\x6f\x43\x3d"
+ "\xea\x4f\x1f\x8b\x9d\x41\x56\x71\xe1\x06\x96\x02\x68\xfa\x44\x6e"
+ "\xe7\xf2\x26\xd4\x01\x4a\xf0\x28\x25\x76\xad\xd7\xe0\x17\x74\xfe"
+ "\xf9\xe1\x6d\xd3\xf7\xc7\xdf\xc0\x62\xa5\xf3\x4e\x1b\x5c\x77\x2a"
+ "\xfb\x0b\x87\xc3\xde\x1e\xc1\xe0\xd3\x7a\xb8\x02\x02\xec\x9c\x97"
+ "\xfb\x34\xa0\x20\x10\x23\x87\xb2\x9a\x72\xe3\x3d\xb2\x18\x50\xf3"
+ "\x6a\xd3\xd3\x19\xc4\x36\xd5\x59\xd6\xd6\xa7\x5c\xc3\xf9\x09\x33"
+ "\xa1\xf5\xb9\x4b\xf3\x0b\xe1\x4f\x79\x6b\x45\xf2\xec\x8b\xe5\x69"
+ "\x9f\xc6\x05\x01\xfe\x3a\x13\xfd\x6d\xea\x03\x83\x29\x7c\x7f\xf5"
+ "\x41\x55\x95\xde\x7e\x62\xae\xaf\x28\xdb\x7c\xa9\x90\x1e\xb2\xb1"
+ "\x1b\xef\xf1\x2e\xde\x47\xaa\xa8\x92\x9a\x49\x3d\xc0\xe0\x8d\xbb"
+ "\x0c\x42\x86\xaf\x00\xce\xb0\xab\x22\x7c\xe9\xbe\xb9\x72\x2f\xcf"
+ "\x5e\x5d\x62\x52\x2a\xd1\xfe\xcc\xa2\xf3\x40\xfd\x01\xa7\x54\x0a"
+ "\xa1\xfb\x1c\xf2\x44\xa6\x47\x30\x5a\xba\x2a\x05\xff\xd0\x6c\xab"
+ "\xeb\xe6\x8f\xf6\xd7\x73\xa3\x0e\x6c\x0e\xcf\xfd\x8e\x16\x5d\xe0"
+ "\x2c\x11\x05\x82\x3c\x22\x16\x6c\x52\x61\xcf\xbb\xff\xf8\x06\xd0",
+ .secret_size = 272,
+ .b_public_size = 256,
+ .expected_a_public_size = 256,
+ .expected_ss_size = 256,
+ },
+ {
+ .secret =
+#ifdef __LITTLE_ENDIAN
+ "\x01\x00" /* type */
+ "\x10\x00" /* len */
+ "\x00\x00\x00\x00" /* key_size */
+ "\x00\x00\x00\x00" /* p_size */
+ "\x00\x00\x00\x00", /* g_size */
+#else
+ "\x00\x01" /* type */
+ "\x00\x10" /* len */
+ "\x00\x00\x00\x00" /* key_size */
+ "\x00\x00\x00\x00" /* p_size */
+ "\x00\x00\x00\x00", /* g_size */
+#endif
+ .b_secret =
+#ifdef __LITTLE_ENDIAN
+ "\x01\x00" /* type */
+ "\x10\x01" /* len */
+ "\x00\x01\x00\x00" /* key_size */
+ "\x00\x00\x00\x00" /* p_size */
+ "\x00\x00\x00\x00" /* g_size */
+#else
+ "\x00\x01" /* type */
+ "\x01\x10" /* len */
+ "\x00\x00\x01\x00" /* key_size */
+ "\x00\x00\x00\x00" /* p_size */
+ "\x00\x00\x00\x00" /* g_size */
+#endif
+ /* xa */
+ "\x23\x7d\xd0\x06\xfd\x7a\xe5\x7a\x08\xda\x98\x31\xc0\xb3\xd5\x85"
+ "\xe2\x0d\x2a\x91\x5f\x78\x4b\xa6\x62\xd0\xa6\x35\xd4\xef\x86\x39"
+ "\xf1\xdb\x71\x5e\xb0\x11\x2e\xee\x91\x3a\xaa\xf9\xe3\xdf\x8d\x8b"
+ "\x48\x41\xde\xe8\x78\x53\xc5\x5f\x93\xd2\x79\x0d\xbe\x8d\x83\xe8"
+ "\x8f\x00\xd2\xde\x13\x18\x04\x05\x20\x6d\xda\xfa\x1d\x0b\x24\x52"
+ "\x3a\x18\x2b\xe1\x1e\xae\x15\x3b\x0f\xaa\x09\x09\xf6\x01\x98\xe9"
+ "\x81\x5d\x6b\x83\x6e\x55\xf1\x5d\x6f\x6f\x0d\x9d\xa8\x72\x32\x63"
+ "\x60\xe6\x0b\xc5\x22\xe2\xf9\x46\x58\xa2\x1c\x2a\xb0\xd5\xaf\xe3"
+ "\x5b\x03\xb7\x36\xb7\xba\x55\x20\x08\x7c\x51\xd4\x89\x42\x9c\x14"
+ "\x23\xe2\x71\x3e\x15\x2a\x0d\x34\x8a\xde\xad\x84\x11\x15\x72\x18"
+ "\x42\x43\x0a\xe2\x58\x29\xb3\x90\x0f\x56\xd8\x8a\x0f\x0e\xbc\x0e"
+ "\x9c\xe7\xd5\xe6\x5b\xbf\x06\x64\x38\x12\xa5\x8d\x5b\x68\x34\xdd"
+ "\x75\x48\xc9\xa7\xa3\x58\x5a\x1c\xe1\xb2\xc5\xe3\x39\x03\xcf\xab"
+ "\xc2\x14\x07\xaf\x55\x80\xc7\x63\xe4\x03\xeb\xe9\x0a\x25\x61\x85"
+ "\x1d\x0e\x81\x52\x7b\xbc\x4a\x0c\xc8\x59\x6a\xac\x18\xfb\x8c\x0c"
+ "\xb4\x79\xbd\xa1\x4c\xbb\x02\xc9\xd5\x13\x88\x3d\x25\xaa\x77\x49",
+ .b_public =
+ "\x8b\xdb\xc1\xf7\xc6\xba\xa1\x38\x95\x6a\xa1\xb6\x04\x5e\xae\x52"
+ "\x72\xfc\xef\x2d\x9d\x71\x05\x9c\xd3\x02\xa9\xfb\x55\x0f\xfa\xc9"
+ "\xb4\x34\x51\xa3\x28\x89\x8d\x93\x92\xcb\xd9\xb5\xb9\x66\xfc\x67"
+ "\x15\x92\x6f\x73\x85\x15\xe2\xfc\x11\x6b\x97\x8c\x4b\x0f\x12\xfa"
+ "\x8d\x72\x76\x9b\x8f\x3b\xfe\x31\xbe\x42\x88\x4c\xd2\xb2\x70\xa6"
+ "\xa5\xe3\x7e\x73\x07\x12\x36\xaa\xc9\x5c\x83\xe1\xf1\x46\x41\x4f"
+ "\x7c\x52\xaf\xdc\xa4\xe6\x82\xa3\x86\x83\x47\x5a\x12\x3a\x0c\xe3"
+ "\xdd\xdb\x94\x03\x2a\x59\x91\xa0\x19\xe5\xf8\x07\xdd\x54\x6a\x22"
+ "\x43\xb7\xf3\x74\xd7\xb9\x30\xfe\x9c\xe8\xd1\xcf\x06\x43\x68\xb9"
+ "\x54\x8f\x54\xa2\xe5\x3c\xf2\xc3\x4c\xee\xd4\x7c\x5d\x0e\xb1\x7b"
+ "\x16\x68\xb5\xb3\x7d\xd4\x11\x83\x5c\x77\x17\xc4\xf0\x59\x76\x7a"
+ "\x83\x40\xe5\xd9\x4c\x76\x23\x5b\x17\x6d\xee\x4a\x92\x68\x4b\x89"
+ "\xa0\x6d\x23\x8c\x80\x31\x33\x3a\x12\xf4\x50\xa6\xcb\x13\x97\x01"
+ "\xb8\x2c\xe6\xd2\x38\xdf\xd0\x7f\xc6\x27\x19\x0e\xb2\x07\xfd\x1f"
+ "\x1b\x9c\x1b\x87\xf9\x73\x6a\x3f\x7f\xb0\xf9\x2f\x3c\x19\x9f\xc9"
+ "\x8f\x97\x21\x0e\x8e\xbb\x1a\x17\x20\x15\xdd\xc6\x42\x60\xae\x4d",
+ .secret_size = 16,
+ .b_secret_size = 272,
+ .b_public_size = 256,
+ .expected_a_public_size = 256,
+ .expected_ss_size = 256,
+ .genkey = true,
+ },
+};
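
For the ffdheXYZ(dh) templates the group is implied by the template name,
so p_size and g_size stay zero and only the private key is packed:
16 + 256 = 272 = 0x0110, matching the "\x10\x01" little-endian len. The
second vector goes further: a bare 16-byte header with key_size = 0,
paired with .genkey = true, asks the implementation to generate an
ephemeral private key itself.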
+
+static const struct kpp_testvec ffdhe3072_dh_tv_template[] __maybe_unused = {
+ {
+ .secret =
+#ifdef __LITTLE_ENDIAN
+ "\x01\x00" /* type */
+ "\x90\x01" /* len */
+ "\x80\x01\x00\x00" /* key_size */
+ "\x00\x00\x00\x00" /* p_size */
+ "\x00\x00\x00\x00" /* g_size */
+#else
+ "\x00\x01" /* type */
+ "\x01\x90" /* len */
+ "\x00\x00\x01\x80" /* key_size */
+ "\x00\x00\x00\x00" /* p_size */
+ "\x00\x00\x00\x00" /* g_size */
+#endif
+ /* xa */
+ "\x6b\xb4\x97\x23\xfa\xc8\x5e\xa9\x7b\x63\xe7\x3e\x0e\x99\xc3\xb9"
+ "\xda\xb7\x48\x0d\xc3\xb1\xbf\x4f\x17\xc7\xa9\x51\xf6\x64\xff\xc4"
+ "\x31\x58\x87\x25\x83\x2c\x00\xf0\x41\x29\xf7\xee\xf9\xe6\x36\x76"
+ "\xd6\x3a\x24\xbe\xa7\x07\x0b\x93\xc7\x9f\x6c\x75\x0a\x26\x75\x76"
+ "\xe3\x0c\x42\xe0\x00\x04\x69\xd9\xec\x0b\x59\x54\x28\x8f\xd7\x9a"
+ "\x63\xf4\x5b\xdf\x85\x65\xc4\xe1\x95\x27\x4a\x42\xad\x36\x47\xa9"
+ "\x0a\xf8\x14\x1c\xf3\x94\x3b\x7e\x47\x99\x35\xa8\x18\xec\x70\x10"
+ "\xdf\xcb\xd2\x78\x88\xc1\x2d\x59\x93\xc1\xa4\x6d\xd7\x1d\xb9\xd5"
+ "\xf8\x30\x06\x7f\x98\x90\x0c\x74\x5e\x89\x2f\x64\x5a\xad\x5f\x53"
+ "\xb2\xa3\xa8\x83\xbf\xfc\x37\xef\xb8\x36\x0a\x5c\x62\x81\x64\x74"
+ "\x16\x2f\x45\x39\x2a\x91\x26\x87\xc0\x12\xcc\x75\x11\xa3\xa1\xc5"
+ "\xae\x20\xcf\xcb\x20\x25\x6b\x7a\x31\x93\x9d\x38\xb9\x57\x72\x46"
+ "\xd4\x84\x65\x87\xf1\xb5\xd3\xab\xfc\xc3\x4d\x40\x92\x94\x1e\xcd"
+ "\x1c\x87\xec\x3f\xcd\xbe\xd0\x95\x6b\x40\x02\xdd\x62\xeb\x0a\xda"
+ "\x4f\xbe\x8e\x32\x48\x8b\x6d\x83\xa0\x96\x62\x23\xec\x83\x91\x44"
+ "\xf9\x72\x01\xac\xa0\xe4\x72\x1d\x5a\x75\x05\x57\x90\xae\x7e\xb4"
+ "\x71\x39\x01\x05\xdc\xe9\xee\xcb\xf0\x61\x28\x91\x69\x8c\x31\x03"
+ "\x7a\x92\x15\xa1\x58\x67\x3d\x70\x82\xa6\x2c\xfe\x10\x56\x58\xd3"
+ "\x94\x67\xe1\xbe\xee\xc1\x64\x5c\x4b\xc8\x28\x3d\xc5\x66\x3a\xab"
+ "\x22\xc1\x7e\xa1\xbb\xf3\x19\x3b\xda\x46\x82\x45\xd4\x3c\x7c\xc6"
+ "\xce\x1f\x7f\x95\xa2\x17\xff\x88\xba\xd6\x4d\xdb\xd2\xea\xde\x39"
+ "\xd6\xa5\x18\x73\xbb\x64\x6e\x79\xe9\xdc\x3f\x92\x7f\xda\x1f\x49"
+ "\x33\x70\x65\x73\xa2\xd9\x06\xb8\x1b\x29\x29\x1a\xe0\xa3\xe6\x05"
+ "\x9a\xa8\xc2\x4e\x7a\x78\x1d\x22\x57\x21\xc8\xa3\x8d\x66\x3e\x23",
+ .b_public =
+ "\x73\x40\x8b\xce\xe8\x6a\x1c\x03\x50\x54\x42\x36\x22\xc6\x1d\xe8"
+ "\xe1\xef\x5c\x89\xa5\x55\xc1\xc4\x1c\xd7\x4f\xee\x5d\xba\x62\x60"
+ "\xfe\x93\x2f\xfd\x93\x2c\x8f\x70\xc6\x47\x17\x25\xb2\x95\xd7\x7d"
+ "\x41\x81\x4d\x52\x1c\xbe\x4d\x57\x3e\x26\x51\x28\x03\x8f\x67\xf5"
+ "\x22\x16\x1c\x67\xf7\x62\xcb\xfd\xa3\xee\x8d\xe0\xfa\x15\x9a\x53"
+ "\xbe\x7b\x9f\xc0\x12\x7a\xfc\x5e\x77\x2d\x60\x06\xba\x71\xc5\xca"
+ "\xd7\x26\xaf\x3b\xba\x6f\xd3\xc4\x82\x57\x19\x26\xb0\x16\x7b\xbd"
+ "\x83\xf2\x21\x03\x79\xff\x0a\x6f\xc5\x7b\x00\x15\xad\x5b\xf4\x42"
+ "\x1f\xcb\x7f\x3d\x34\x77\x3c\xc3\xe0\x38\xa5\x40\x51\xbe\x6f\xd9"
+ "\xc9\x77\x9c\xfc\x0d\xc1\x8e\xef\x0f\xaa\x5e\xa8\xbb\x16\x4a\x3e"
+ "\x26\x55\xae\xc1\xb6\x3e\xfd\x73\xf7\x59\xd2\xe5\x4b\x91\x8e\x28"
+ "\x77\x1e\x5a\xe2\xcd\xce\x92\x35\xbb\x1e\xbb\xcf\x79\x94\xdf\x31"
+ "\xde\x31\xa8\x75\xf6\xe0\xaa\x2e\xe9\x4f\x44\xc8\xba\xb9\xab\x80"
+ "\x29\xa1\xea\x58\x2e\x40\x96\xa0\x1a\xf5\x2c\x38\x47\x43\x5d\x26"
+ "\x2c\xd8\xad\xea\xd3\xad\xe8\x51\x49\xad\x45\x2b\x25\x7c\xde\xe4"
+ "\xaf\x03\x2a\x39\x26\x86\x66\x10\xbc\xa8\x71\xda\xe0\xe8\xf1\xdd"
+ "\x50\xff\x44\xb2\xd3\xc7\xff\x66\x63\xf6\x42\xe3\x97\x9d\x9e\xf4"
+ "\xa6\x89\xb9\xab\x12\x17\xf2\x85\x56\x9c\x6b\x24\x71\x83\x57\x7d"
+ "\x3c\x7b\x2b\x88\x92\x19\xd7\x1a\x00\xd5\x38\x94\x43\x60\x4d\xa7"
+ "\x12\x9e\x0d\xf6\x5c\x9a\xd3\xe2\x9e\xb1\x21\xe8\xe2\x9e\xe9\x1e"
+ "\x9d\xa5\x94\x95\xa6\x3d\x12\x15\xd8\x8b\xac\xe0\x8c\xde\xe6\x40"
+ "\x98\xaa\x5e\x55\x4f\x3d\x86\x87\x0d\xe3\xc6\x68\x15\xe6\xde\x17"
+ "\x78\x21\xc8\x6c\x06\xc7\x94\x56\xb4\xaf\xa2\x35\x0b\x0c\x97\xd7"
+ "\xa4\x12\xee\xf4\xd2\xef\x80\x28\xb3\xee\xe9\x15\x8b\x01\x32\x79",
+ .expected_a_public =
+ "\x1b\x6a\xba\xea\xa3\xcc\x50\x69\xa9\x41\x89\xaf\x04\xe1\x44\x22"
+ "\x97\x20\xd1\xf6\x1e\xcb\x64\x36\x6f\xee\x0b\x16\xc1\xd9\x91\xbe"
+ "\x57\xc8\xd9\xf2\xa1\x96\x91\xec\x41\xc7\x79\x00\x1a\x48\x25\x55"
+ "\xbe\xf3\x20\x8c\x38\xc6\x7b\xf2\x8b\x5a\xc3\xb5\x87\x0a\x86\x3d"
+ "\xb7\xd6\xce\xb0\x96\x2e\x5d\xc4\x00\x5e\x42\xe4\xe5\x50\x4f\xb8"
+ "\x6f\x18\xa4\xe1\xd3\x20\xfc\x3c\xf5\x0a\xff\x23\xa6\x5b\xb4\x17"
+ "\x3e\x7b\xdf\xb9\xb5\x3c\x1b\x76\x29\xcd\xb4\x46\x4f\x27\x8f\xd2"
+ "\xe8\x27\x66\xdb\xe8\xb3\xf5\xe1\xd0\x04\xcd\x89\xff\xba\x76\x67"
+ "\xe8\x4d\xcf\x86\x1c\x8a\xd1\xcf\x99\x27\xfb\xa9\x78\xcc\x94\xaf"
+ "\x3d\x04\xfd\x25\xc0\x47\xfa\x29\x80\x05\xf4\xde\xad\xdb\xab\x12"
+ "\xb0\x2b\x8e\xca\x02\x06\x6d\xad\x3e\x09\xb1\x22\xa3\xf5\x4c\x6d"
+ "\x69\x99\x58\x8b\xd8\x45\x2e\xe0\xc9\x3c\xf7\x92\xce\x21\x90\x6b"
+ "\x3b\x65\x9f\x64\x79\x8d\x67\x22\x1a\x37\xd3\xee\x51\xe2\xe7\x5a"
+ "\x93\x51\xaa\x3c\x4b\x04\x16\x32\xef\xe3\x66\xbe\x18\x94\x88\x64"
+ "\x79\xce\x06\x3f\xb8\xd6\xee\xdc\x13\x79\x6f\x20\x14\xc2\x6b\xce"
+ "\xc8\xda\x42\xa5\x93\x5b\xe4\x7f\x1a\xe6\xda\x0f\xb3\xc1\x5f\x30"
+ "\x50\x76\xe8\x37\x3d\xca\x77\x2c\xa8\xe4\x3b\xf9\x6f\xe0\x17\xed"
+ "\x0e\xef\xb7\x31\x14\xb5\xea\xd9\x39\x22\x89\xb6\x40\x57\xcc\x84"
+ "\xef\x73\xa7\xe9\x27\x21\x85\x89\xfa\xaf\x03\xda\x9c\x8b\xfd\x52"
+ "\x7d\xb0\xa4\xe4\xf9\xd8\x90\x55\xc4\x39\xd6\x9d\xaf\x3b\xce\xac"
+ "\xaa\x36\x14\x7a\x9b\x8b\x12\x43\xe1\xca\x61\xae\x46\x5b\xe7\xe5"
+ "\x88\x32\x80\xa0\x2d\x51\xbb\x2f\xea\xeb\x3c\x71\xb2\xae\xce\xca"
+ "\x61\xd2\x76\xe0\x45\x46\x78\x4e\x09\x2d\xc2\x54\xc2\xa9\xc7\xa8"
+ "\x55\x8e\x72\xa4\x8b\x8a\xc9\x01\xdb\xe9\x58\x11\xa1\xc4\xe7\x12",
+ .expected_ss =
+ "\x47\x8e\xb2\x19\x09\xf0\x46\x99\x6b\x41\x86\xf7\x34\xad\xbf\x2a"
+ "\x18\x1b\x7d\xec\xa9\xb2\x47\x2f\x40\xfb\x9a\x64\x30\x44\xf3\x4c"
+ "\x01\x67\xad\x57\x5a\xbc\xd4\xc8\xef\x7e\x8a\x14\x74\x1d\x6d\x8c"
+ "\x7b\xce\xc5\x57\x5f\x95\xe8\x72\xba\xdf\xa3\xcd\x00\xbe\x09\x4c"
+ "\x06\x72\xe7\x17\xb0\xe5\xe5\xb7\x20\xa5\xcb\xd9\x68\x99\xad\x3f"
+ "\xde\xf3\xde\x1d\x1c\x00\x74\xd2\xd1\x57\x55\x5d\xce\x76\x0c\xc4"
+ "\x7a\xc4\x65\x7c\x19\x17\x0a\x09\x66\x7d\x3a\xab\xf7\x61\x3a\xe3"
+ "\x5b\xac\xcf\x69\xb0\x8b\xee\x5d\x28\x36\xbb\x3f\x74\xce\x6e\x38"
+ "\x1e\x39\xab\x26\xca\x89\xdc\x58\x59\xcb\x95\xe4\xbc\xd6\x19\x48"
+ "\xd0\x55\x68\x7b\xb4\x27\x95\x3c\xd9\x58\x10\x4f\x8f\x55\x1c\x3f"
+ "\x04\xce\x89\x1f\x82\x28\xe9\x48\x17\x47\x8f\xee\xb7\x8f\xeb\xb1"
+ "\x29\xa8\x23\x18\x73\x33\x9f\x83\x08\xca\xcd\x54\x6e\xca\xec\x78"
+ "\x7b\x16\x83\x3f\xdb\x0a\xef\xfd\x87\x94\x19\x08\x6e\x6e\x22\x57"
+ "\xd7\xd2\x79\xf9\xf6\xeb\xe0\x6c\x93\x9d\x95\xfa\x41\x7a\xa9\xd6"
+ "\x2a\xa3\x26\x9b\x24\x1b\x8b\xa0\xed\x04\xb2\xe4\x6c\x4e\xc4\x3f"
+ "\x61\xe5\xe0\x4d\x09\x28\xaf\x58\x35\x25\x0b\xd5\x38\x18\x69\x51"
+ "\x18\x51\x73\x7b\x28\x19\x9f\xe4\x69\xfc\x2c\x25\x08\x99\x8f\x62"
+ "\x65\x62\xa5\x28\xf1\xf4\xfb\x02\x29\x27\xb0\x5e\xbb\x4f\xf9\x1a"
+ "\xa7\xc4\x38\x63\x5b\x01\xfe\x00\x66\xe3\x47\x77\x21\x85\x17\xd5"
+ "\x34\x19\xd3\x87\xab\x44\x62\x08\x59\xb2\x6b\x1f\x21\x0c\x23\x84"
+ "\xf7\xba\x92\x67\xf9\x16\x85\x6a\xe0\xeb\xe7\x4f\x06\x80\x81\x81"
+ "\x28\x9c\xe8\x2e\x71\x97\x48\xe0\xd1\xbc\xce\xe9\x42\x2c\x89\xdf"
+ "\x0b\xa9\xa1\x07\x84\x33\x78\x7f\x49\x2f\x1c\x55\xc3\x7f\xc3\x37"
+ "\x40\xdf\x13\xf4\xa0\x21\x79\x6e\x3a\xe3\xb8\x23\x9e\x8a\x6e\x9c",
+ .secret_size = 400,
+ .b_public_size = 384,
+ .expected_a_public_size = 384,
+ .expected_ss_size = 384,
+ },
+ {
+ .secret =
+#ifdef __LITTLE_ENDIAN
+ "\x01\x00" /* type */
+ "\x10\x00" /* len */
+ "\x00\x00\x00\x00" /* key_size */
+ "\x00\x00\x00\x00" /* p_size */
+ "\x00\x00\x00\x00", /* g_size */
+#else
+ "\x00\x01" /* type */
+ "\x00\x10" /* len */
+ "\x00\x00\x00\x00" /* key_size */
+ "\x00\x00\x00\x00" /* p_size */
+ "\x00\x00\x00\x00", /* g_size */
+#endif
+ .b_secret =
+#ifdef __LITTLE_ENDIAN
+ "\x01\x00" /* type */
+ "\x90\x01" /* len */
+ "\x80\x01\x00\x00" /* key_size */
+ "\x00\x00\x00\x00" /* p_size */
+ "\x00\x00\x00\x00" /* g_size */
+#else
+ "\x00\x01" /* type */
+ "\x01\x90" /* len */
+ "\x00\x00\x01\x80" /* key_size */
+ "\x00\x00\x00\x00" /* p_size */
+ "\x00\x00\x00\x00" /* g_size */
+#endif
+ /* xa */
+ "\x6b\xb4\x97\x23\xfa\xc8\x5e\xa9\x7b\x63\xe7\x3e\x0e\x99\xc3\xb9"
+ "\xda\xb7\x48\x0d\xc3\xb1\xbf\x4f\x17\xc7\xa9\x51\xf6\x64\xff\xc4"
+ "\x31\x58\x87\x25\x83\x2c\x00\xf0\x41\x29\xf7\xee\xf9\xe6\x36\x76"
+ "\xd6\x3a\x24\xbe\xa7\x07\x0b\x93\xc7\x9f\x6c\x75\x0a\x26\x75\x76"
+ "\xe3\x0c\x42\xe0\x00\x04\x69\xd9\xec\x0b\x59\x54\x28\x8f\xd7\x9a"
+ "\x63\xf4\x5b\xdf\x85\x65\xc4\xe1\x95\x27\x4a\x42\xad\x36\x47\xa9"
+ "\x0a\xf8\x14\x1c\xf3\x94\x3b\x7e\x47\x99\x35\xa8\x18\xec\x70\x10"
+ "\xdf\xcb\xd2\x78\x88\xc1\x2d\x59\x93\xc1\xa4\x6d\xd7\x1d\xb9\xd5"
+ "\xf8\x30\x06\x7f\x98\x90\x0c\x74\x5e\x89\x2f\x64\x5a\xad\x5f\x53"
+ "\xb2\xa3\xa8\x83\xbf\xfc\x37\xef\xb8\x36\x0a\x5c\x62\x81\x64\x74"
+ "\x16\x2f\x45\x39\x2a\x91\x26\x87\xc0\x12\xcc\x75\x11\xa3\xa1\xc5"
+ "\xae\x20\xcf\xcb\x20\x25\x6b\x7a\x31\x93\x9d\x38\xb9\x57\x72\x46"
+ "\xd4\x84\x65\x87\xf1\xb5\xd3\xab\xfc\xc3\x4d\x40\x92\x94\x1e\xcd"
+ "\x1c\x87\xec\x3f\xcd\xbe\xd0\x95\x6b\x40\x02\xdd\x62\xeb\x0a\xda"
+ "\x4f\xbe\x8e\x32\x48\x8b\x6d\x83\xa0\x96\x62\x23\xec\x83\x91\x44"
+ "\xf9\x72\x01\xac\xa0\xe4\x72\x1d\x5a\x75\x05\x57\x90\xae\x7e\xb4"
+ "\x71\x39\x01\x05\xdc\xe9\xee\xcb\xf0\x61\x28\x91\x69\x8c\x31\x03"
+ "\x7a\x92\x15\xa1\x58\x67\x3d\x70\x82\xa6\x2c\xfe\x10\x56\x58\xd3"
+ "\x94\x67\xe1\xbe\xee\xc1\x64\x5c\x4b\xc8\x28\x3d\xc5\x66\x3a\xab"
+ "\x22\xc1\x7e\xa1\xbb\xf3\x19\x3b\xda\x46\x82\x45\xd4\x3c\x7c\xc6"
+ "\xce\x1f\x7f\x95\xa2\x17\xff\x88\xba\xd6\x4d\xdb\xd2\xea\xde\x39"
+ "\xd6\xa5\x18\x73\xbb\x64\x6e\x79\xe9\xdc\x3f\x92\x7f\xda\x1f\x49"
+ "\x33\x70\x65\x73\xa2\xd9\x06\xb8\x1b\x29\x29\x1a\xe0\xa3\xe6\x05"
+ "\x9a\xa8\xc2\x4e\x7a\x78\x1d\x22\x57\x21\xc8\xa3\x8d\x66\x3e\x23",
+ .b_public =
+ "\x1b\x6a\xba\xea\xa3\xcc\x50\x69\xa9\x41\x89\xaf\x04\xe1\x44\x22"
+ "\x97\x20\xd1\xf6\x1e\xcb\x64\x36\x6f\xee\x0b\x16\xc1\xd9\x91\xbe"
+ "\x57\xc8\xd9\xf2\xa1\x96\x91\xec\x41\xc7\x79\x00\x1a\x48\x25\x55"
+ "\xbe\xf3\x20\x8c\x38\xc6\x7b\xf2\x8b\x5a\xc3\xb5\x87\x0a\x86\x3d"
+ "\xb7\xd6\xce\xb0\x96\x2e\x5d\xc4\x00\x5e\x42\xe4\xe5\x50\x4f\xb8"
+ "\x6f\x18\xa4\xe1\xd3\x20\xfc\x3c\xf5\x0a\xff\x23\xa6\x5b\xb4\x17"
+ "\x3e\x7b\xdf\xb9\xb5\x3c\x1b\x76\x29\xcd\xb4\x46\x4f\x27\x8f\xd2"
+ "\xe8\x27\x66\xdb\xe8\xb3\xf5\xe1\xd0\x04\xcd\x89\xff\xba\x76\x67"
+ "\xe8\x4d\xcf\x86\x1c\x8a\xd1\xcf\x99\x27\xfb\xa9\x78\xcc\x94\xaf"
+ "\x3d\x04\xfd\x25\xc0\x47\xfa\x29\x80\x05\xf4\xde\xad\xdb\xab\x12"
+ "\xb0\x2b\x8e\xca\x02\x06\x6d\xad\x3e\x09\xb1\x22\xa3\xf5\x4c\x6d"
+ "\x69\x99\x58\x8b\xd8\x45\x2e\xe0\xc9\x3c\xf7\x92\xce\x21\x90\x6b"
+ "\x3b\x65\x9f\x64\x79\x8d\x67\x22\x1a\x37\xd3\xee\x51\xe2\xe7\x5a"
+ "\x93\x51\xaa\x3c\x4b\x04\x16\x32\xef\xe3\x66\xbe\x18\x94\x88\x64"
+ "\x79\xce\x06\x3f\xb8\xd6\xee\xdc\x13\x79\x6f\x20\x14\xc2\x6b\xce"
+ "\xc8\xda\x42\xa5\x93\x5b\xe4\x7f\x1a\xe6\xda\x0f\xb3\xc1\x5f\x30"
+ "\x50\x76\xe8\x37\x3d\xca\x77\x2c\xa8\xe4\x3b\xf9\x6f\xe0\x17\xed"
+ "\x0e\xef\xb7\x31\x14\xb5\xea\xd9\x39\x22\x89\xb6\x40\x57\xcc\x84"
+ "\xef\x73\xa7\xe9\x27\x21\x85\x89\xfa\xaf\x03\xda\x9c\x8b\xfd\x52"
+ "\x7d\xb0\xa4\xe4\xf9\xd8\x90\x55\xc4\x39\xd6\x9d\xaf\x3b\xce\xac"
+ "\xaa\x36\x14\x7a\x9b\x8b\x12\x43\xe1\xca\x61\xae\x46\x5b\xe7\xe5"
+ "\x88\x32\x80\xa0\x2d\x51\xbb\x2f\xea\xeb\x3c\x71\xb2\xae\xce\xca"
+ "\x61\xd2\x76\xe0\x45\x46\x78\x4e\x09\x2d\xc2\x54\xc2\xa9\xc7\xa8"
+ "\x55\x8e\x72\xa4\x8b\x8a\xc9\x01\xdb\xe9\x58\x11\xa1\xc4\xe7\x12",
+ .secret_size = 16,
+ .b_secret_size = 400,
+ .b_public_size = 384,
+ .expected_a_public_size = 384,
+ .expected_ss_size = 384,
+ .genkey = true,
+ },
+};
+
+static const struct kpp_testvec ffdhe4096_dh_tv_template[] __maybe_unused = {
+ {
+ .secret =
+#ifdef __LITTLE_ENDIAN
+ "\x01\x00" /* type */
+ "\x10\x02" /* len */
+ "\x00\x02\x00\x00" /* key_size */
+ "\x00\x00\x00\x00" /* p_size */
+ "\x00\x00\x00\x00" /* g_size */
+#else
+ "\x00\x01" /* type */
+ "\x02\x10" /* len */
+ "\x00\x00\x02\x00" /* key_size */
+ "\x00\x00\x00\x00" /* p_size */
+ "\x00\x00\x00\x00" /* g_size */
+#endif
+ /* xa */
+ "\x1a\x48\xf3\x6c\x61\x03\x42\x43\xd7\x42\x3b\xfa\xdb\x55\x6f\xa2"
+ "\xe1\x79\x52\x0b\x47\xc5\x03\x60\x2f\x26\xb9\x1a\x14\x15\x1a\xd9"
+ "\xe0\xbb\xa7\x82\x63\x41\xec\x26\x55\x00\xab\xe5\x21\x9d\x31\x14"
+ "\x0e\xe2\xc2\xb2\xb8\x37\xe6\xc3\x5a\xab\xae\x25\xdb\x71\x1e\xed"
+ "\xe8\x75\x9a\x04\xa7\x92\x2a\x99\x7e\xc0\x5b\x64\x75\x7f\xe5\xb5"
+ "\xdb\x6c\x95\x4f\xe9\xdc\x39\x76\x79\xb0\xf7\x00\x30\x8e\x86\xe7"
+ "\x36\xd1\xd2\x0c\x68\x7b\x94\xe9\x91\x85\x08\x86\xbc\x64\x87\xd2"
+ "\xf5\x5b\xaf\x03\xf6\x5f\x28\x25\xf1\xa3\x20\x5c\x1b\xb5\x26\x45"
+ "\x9a\x47\xab\xd6\xad\x49\xab\x92\x8e\x62\x6f\x48\x31\xea\xf6\x76"
+ "\xff\xa2\xb6\x28\x78\xef\x59\xc3\x71\x5d\xa8\xd9\x70\x89\xcc\xe2"
+ "\x63\x58\x5e\x3a\xa2\xa2\x88\xbf\x77\x20\x84\x33\x65\x64\x4e\x73"
+ "\xe5\x08\xd5\x89\x23\xd6\x07\xac\x29\x65\x2e\x02\xa8\x35\x96\x48"
+ "\xe7\x5d\x43\x6a\x42\xcc\xda\x98\xc4\x75\x90\x2e\xf6\xc4\xbf\xd4"
+ "\xbc\x31\x14\x0d\x54\x30\x11\xb2\xc9\xcf\xbb\xba\xbc\xc6\xf2\xcf"
+ "\xfe\x4a\x9d\xf3\xec\x78\x5d\x5d\xb4\x99\xd0\x67\x0f\x5a\x21\x1c"
+ "\x7b\x95\x2b\xcf\x49\x44\x94\x05\x1a\x21\x81\x25\x7f\xe3\x8a\x2a"
+ "\xdd\x88\xac\x44\x94\x23\x20\x3b\x75\xf6\x2a\x8a\x45\xf8\xb5\x1f"
+ "\xb9\x8b\xeb\xab\x9b\x38\x23\x26\xf1\x0f\x34\x47\x4f\x7f\xe1\x9e"
+ "\x84\x84\x78\xe5\xe3\x49\xeb\xcc\x2f\x02\x85\xa4\x18\x91\xde\x1a"
+ "\x60\x54\x33\x81\xd5\xae\xdb\x23\x9c\x4d\xa4\xdb\x22\x5b\xdf\xf4"
+ "\x8e\x05\x2b\x60\xba\xe8\x75\xfc\x34\x99\xcf\x35\xe1\x06\xba\xdc"
+ "\x79\x2a\x5e\xec\x1c\xbe\x79\x33\x63\x1c\xe7\x5f\x1e\x30\xd6\x1b"
+ "\xdb\x11\xb8\xea\x63\xff\xfe\x1a\x3c\x24\xf4\x78\x9c\xcc\x5d\x9a"
+ "\xc9\x2d\xc4\x9a\xd4\xa7\x65\x84\x98\xdb\x66\x76\xf0\x34\x31\x9f"
+ "\xce\xb5\xfb\x28\x07\xde\x1e\x0d\x9b\x01\x64\xeb\x2a\x37\x2f\x20"
+ "\xa5\x95\x72\x2b\x54\x51\x59\x91\xea\x50\x54\x0f\x2e\xb0\x1d\xf6"
+ "\xb9\x46\x43\xf9\xd0\x13\x21\x20\x47\x61\x1a\x1c\x30\xc6\x9e\x75"
+ "\x22\xe4\xf2\xb1\xab\x01\xdc\x5b\x3c\x1e\xa2\x6d\xc0\xb9\x9a\x2a"
+ "\x84\x61\xea\x85\x63\xa0\x77\xd0\xeb\x20\x68\xd5\x95\x6a\x1b\x8f"
+ "\x1f\x9a\xba\x44\x49\x8c\x77\xa6\xd9\xa0\x14\xf8\x7d\x9b\x4e\xfa"
+ "\xdc\x4f\x1c\x4d\x60\x50\x26\x7f\xd6\xc1\x91\x2b\xa6\x37\x5d\x94"
+ "\x69\xb2\x47\x59\xd6\xc3\x59\xbb\xd6\x9b\x71\x52\x85\x7a\xcb\x2d",
+ .b_public =
+ "\x24\x38\x02\x02\x2f\xeb\x54\xdd\x73\x21\x91\x4a\xd8\xa4\x0a\xbf"
+ "\xf4\xf5\x9a\x45\xb5\xcd\x42\xa3\x57\xcc\x65\x4a\x23\x2e\xee\x59"
+ "\xba\x6f\x14\x89\xae\x2e\x14\x0a\x72\x77\x23\x7f\x6c\x2e\xba\x52"
+ "\x3f\x71\xbf\xe4\x60\x03\x16\xaa\x61\xf5\x80\x1d\x8a\x45\x9e\x53"
+ "\x7b\x07\xd9\x7e\xfe\xaf\xcb\xda\xff\x20\x71\xba\x89\x39\x75\xc3"
+ "\xb3\x65\x0c\xb1\xa7\xfa\x4a\xe7\xe0\x85\xc5\x4e\x91\x47\x41\xf4"
+ "\xdd\xcd\xc5\x3d\x17\x12\xed\xee\xc0\x31\xb1\xaf\xc1\xd5\x3c\x07"
+ "\xa1\x5a\xc4\x05\x45\xe3\x10\x0c\xc3\x14\xae\x65\xca\x40\xae\x31"
+ "\x5c\x13\x0d\x32\x85\xa7\x6e\xf4\x5e\x29\x3d\x4e\xd3\xd7\x49\x58"
+ "\xe1\x73\xbb\x0a\x7b\xd6\x13\xea\x49\xd7\x20\x3d\x31\xaa\x77\xab"
+ "\x21\x74\xe9\x2f\xe9\x5e\xbe\x2f\xb4\xa2\x79\xf2\xbc\xcc\x51\x94"
+ "\xd2\x1d\xb2\xe6\xc5\x39\x66\xd7\xe5\x46\x75\x53\x76\xed\x49\xea"
+ "\x3b\xdd\x01\x27\xdb\x83\xa5\x9f\xd2\xee\xc8\xde\x9e\xde\xd2\xe7"
+ "\x99\xad\x9c\xe0\x71\x66\x29\xd8\x0d\xfe\xdc\xd1\xbc\xc7\x9a\xbe"
+ "\x8b\x26\x46\x57\xb6\x79\xfa\xad\x8b\x45\x2e\xb5\xe5\x89\x34\x01"
+ "\x93\x00\x9d\xe9\x58\x74\x8b\xda\x07\x92\xb5\x01\x4a\xe1\x44\x36"
+ "\xc7\x6c\xde\xc8\x7a\x17\xd0\xde\xee\x68\x92\xb5\xde\x21\x2b\x1c"
+ "\xbc\x65\x30\x1e\xae\x15\x3d\x9a\xaf\x20\xa3\xc4\x21\x70\xfb\x2f"
+ "\x36\x72\x31\xc0\xe8\x85\xdf\xc5\x50\x4c\x90\x10\x32\xa4\xc7\xee"
+ "\x59\x5a\x21\xf4\xf1\x33\xcf\xbe\xac\x67\xb1\x40\x7c\x0b\x3f\x64"
+ "\xe5\xd2\x2d\xb7\x7d\x0f\xce\xf7\x9b\x05\xee\x37\x61\xd2\x61\x9e"
+ "\x1a\x80\x2e\x79\xe6\x1b\x25\xb3\x61\x3d\x53\xe7\xe5\x97\x9a\xc2"
+ "\x39\xb1\xe3\x91\xc6\xee\x96\x2e\xa9\xb4\xb8\xad\xd8\x04\x3e\x11"
+ "\x31\x67\xb8\x6a\xcb\x6e\x1a\x4c\x7f\x74\xc7\x1f\x09\xd1\xd0\x6b"
+ "\x17\xde\xea\xe8\x0b\xe6\x6a\xee\x2f\xe3\x5b\x9c\x59\x5d\x00\x57"
+ "\xbf\x24\x25\xba\x22\x34\xb9\xc5\x3c\xc4\x57\x26\xd0\x6d\x89\xee"
+ "\x67\x79\x3c\x70\xf9\xc3\xb4\x30\xf0\x2e\xca\xfa\x74\x00\xd1\x00"
+ "\x6d\x03\x97\xd5\x08\x3f\x0b\x8e\xb8\x1d\xa3\x91\x7f\xa9\x3a\xf0"
+ "\x37\x57\x46\x87\x82\xa3\xb5\x8f\x51\xaa\xc7\x7b\xfe\x86\x26\xb9"
+ "\xfa\xe6\x1e\xee\x92\x9d\x3a\xed\x5b\x5e\x3f\xe5\xca\x5e\x13\x01"
+ "\xdd\x4c\x8d\x85\xf0\x60\x61\xb7\x60\x24\x83\x9f\xbe\x72\x21\x81"
+ "\x55\x7e\x7e\x6d\xf3\x28\xc8\x77\x5a\xae\x5a\x32\x86\xd5\x61\xad",
+ .expected_a_public =
+ "\x1f\xff\xd6\xc4\x59\xf3\x4a\x9e\x81\x74\x4d\x27\xa7\xc6\x6b\x35"
+ "\xd8\xf5\xb3\x24\x97\x82\xe7\x2e\xf3\x21\x91\x23\x2f\x3d\x57\x7f"
+ "\x15\x8c\x84\x71\xe7\x25\x35\xe8\x07\x14\x06\x4c\x83\xdc\x55\x4a"
+ "\xf8\x45\xc5\xe9\xfa\x6e\xae\x6e\xcf\x4d\x11\x91\x26\x16\x6f\x86"
+ "\x89\x78\xaa\xb4\x25\x54\xb2\x74\x07\xe5\x26\x26\x0c\xad\xa4\x57"
+ "\x59\x61\x66\x71\x43\x22\xff\x49\x51\xa4\x76\x0e\x55\x7b\x60\x45"
+ "\x4f\xaf\xbd\x9c\xec\x64\x3f\x80\x0b\x0c\x31\x41\xf0\xfe\x2c\xb7"
+ "\x0a\xbe\xa5\x71\x08\x0d\x8d\x1e\x8a\x77\x9a\xd2\x90\x31\x96\xd0"
+ "\x3b\x31\xdc\xc6\x18\x59\x43\xa1\x19\x5a\x84\x68\x29\xad\x5e\x58"
+ "\xa2\x50\x3e\x83\xf5\x7a\xbd\x88\x17\x60\x89\x98\x9c\x19\x89\x27"
+ "\x89\xfc\x33\x87\x42\xd5\xde\x19\x14\xf2\x95\x82\x10\x87\xad\x82"
+ "\xdd\x6b\x51\x2d\x8d\x0e\x81\x4b\xde\xb3\x35\x6c\x0f\x4b\x56\x45"
+ "\x48\x87\xe9\x5a\xf9\x70\x10\x30\x8e\xa1\xbb\xa4\x70\xbf\xa0\xab"
+ "\x10\x31\x3c\x2c\xdc\xc4\xed\xe3\x51\xdc\xee\xd2\xa5\x5c\x4e\x6e"
+ "\xf6\xed\x60\x5a\xeb\xf3\x02\x19\x2a\x95\xe9\x46\xff\x37\x1b\xf0"
+ "\x1d\x10\x4a\x8f\x4f\x3a\x6e\xf5\xfc\x02\x6d\x09\x7d\xea\x69\x7b"
+ "\x13\xb0\xb6\x80\x5c\x15\x20\xa8\x4d\x15\x56\x11\x72\x49\xdb\x48"
+ "\x54\x40\x66\xd5\xcd\x17\x3a\x26\x95\xf6\xd7\xf2\x59\xa3\xda\xbb"
+ "\x26\xd0\xe5\x46\xbf\xee\x0e\x7d\xf1\xe0\x11\x02\x4d\xd3\xdc\xe2"
+ "\x3f\xc2\x51\x7e\xc7\x90\x33\x3c\x1c\xa0\x4c\x69\xcc\x1e\xc7\xac"
+ "\x17\xe0\xe5\xf4\x8c\x05\x64\x34\xfe\x84\x70\xd7\x6b\xed\xab\xf5"
+ "\x88\x9d\x3e\x4c\x5a\x9e\xd4\x74\xfd\xdd\x91\xd5\xd4\xcb\xbf\xf8"
+ "\xb7\x56\xb5\xe9\x22\xa6\x6d\x7a\x44\x05\x41\xbf\xdb\x61\x28\xc6"
+ "\x99\x49\x87\x3d\x28\x77\xf8\x83\x23\x7e\xa9\xa7\xee\x20\xdb\x6d"
+ "\x21\x50\xb7\xc9\x52\x57\x53\xa3\xcf\xdf\xd0\xf9\xb9\x62\x96\x89"
+ "\xf5\x5c\xa9\x8a\x11\x95\x01\x25\xc9\x81\x15\x76\xae\xf0\xc7\xc5"
+ "\x50\xae\x6f\xb5\xd2\x8a\x8e\x9a\xd4\x30\x55\xc6\xe9\x2c\x81\x6e"
+ "\x95\xf6\x45\x89\x55\x28\x34\x7b\xe5\x72\x9a\x2a\xe2\x98\x09\x35"
+ "\xe0\xe9\x75\x94\xe9\x34\x95\xb9\x13\x6e\xd5\xa1\x62\x5a\x1c\x94"
+ "\x28\xed\x84\x46\x76\x6d\x10\x37\x71\xa3\x31\x46\x64\xe4\x59\x44"
+ "\x17\x70\x1c\x23\xc9\x7e\xf6\xab\x8a\x24\xae\x25\xe2\xb2\x5f\x33"
+ "\xe4\xd7\xd3\x34\x2a\x49\x22\x16\x15\x9b\x90\x40\xda\x99\xd5\xaf",
+ .expected_ss =
+ "\xe2\xce\x0e\x4b\x64\xf3\x84\x62\x38\xfd\xe3\x6f\x69\x40\x22\xb0"
+ "\x73\x27\x03\x12\x82\xa4\x6e\x03\x57\xec\x3d\xa0\xc1\x4f\x4b\x09"
+ "\xa1\xd4\xe0\x1a\x5d\x91\x2e\x08\xad\x57\xfa\xcc\x55\x90\x5f\xa0"
+ "\x52\x27\x62\x8d\xe5\x2d\xa1\x5f\xf0\x30\x43\x77\x4e\x3f\x02\x58"
+ "\xcb\xa0\x51\xae\x1d\x24\xf9\x0a\xd1\x36\x0b\x95\x0f\x07\xd9\xf7"
+ "\xe2\x36\x14\x2f\xf0\x11\xc2\xc9\xaf\x66\x4e\x0d\xb4\x60\x01\x4e"
+ "\xa8\x49\xc6\xec\x5f\xb2\xbc\x05\x48\x91\x4e\xe1\xc3\x99\x9f\xeb"
+ "\x4a\xc1\xde\x05\x9a\x65\x39\x7d\x2f\x89\x85\xb2\xcf\xec\x25\x27"
+ "\x5f\x1c\x11\x63\xcf\x7b\x86\x98\x39\xae\xc2\x16\x8f\x79\xd1\x20"
+ "\xd0\xb4\xa0\xba\x44\xd8\xf5\x3a\x0a\x08\x4c\xd1\xb9\xdd\x0a\x5b"
+ "\x9e\x62\xf3\x52\x0c\x84\x12\x43\x9b\xd7\xdf\x86\x71\x03\xdd\x04"
+ "\x98\x55\x0c\x7b\xe2\xe8\x03\x17\x25\x84\xd9\xbd\xe1\xce\x64\xbe"
+ "\xca\x55\xd4\x5b\xef\x61\x5b\x68\x4b\x80\x37\x40\xae\x28\x87\x81"
+ "\x55\x34\x96\x50\x21\x47\x49\xc0\xda\x26\x46\xb8\xe8\xcc\x5a\x27"
+ "\x9c\x9d\x0a\x3d\xcc\x4c\x63\x27\x81\x82\x2e\xf4\xa8\x91\x37\x3e"
+ "\xa7\x34\x6a\x0f\x60\x44\xdd\x2e\xdc\xf9\x19\xf2\x2e\x81\x05\x51"
+ "\x16\xbc\xc0\x85\xa5\xd5\x08\x09\x1f\xcd\xed\xa4\xc5\xdb\x16\x43"
+ "\xb5\x7a\x71\x66\x19\x2e\xef\x13\xbc\x40\x39\x0a\x00\x45\x7e\x61"
+ "\xe9\x68\x60\x83\x00\x70\xd1\x71\xd3\xa2\x61\x3e\x00\x46\x93\x0d"
+ "\xbf\xe6\xa2\x07\xe6\x40\x1a\xf4\x57\xc6\x67\x39\xd8\xd7\x6b\xc5"
+ "\xa5\xd8\x38\x78\x12\xb4\x97\x12\xbe\x97\x13\xef\xe4\x74\x0c\xe0"
+ "\x75\x89\x64\xf4\xe8\x85\xda\x84\x7b\x1d\xfe\xdd\x21\xba\xda\x01"
+ "\x52\xdc\x59\xe5\x47\x50\x7e\x15\x20\xd0\x43\x37\x6e\x48\x39\x00"
+ "\xee\xd9\x54\x6d\x00\x65\xc9\x4b\x85\xa2\x8a\x40\x55\xd0\x63\x0c"
+ "\xb5\x7a\x0d\x37\x67\x27\x73\x18\x7f\x5a\xf5\x0e\x22\xb9\xb0\x3f"
+ "\xda\xf1\xec\x7c\x24\x01\x49\xa9\x09\x0e\x0f\xc4\xa9\xef\xc8\x2b"
+ "\x13\xd1\x0a\x6f\xf8\x92\x4b\x1d\xdd\x6c\x9c\x35\xde\x75\x46\x32"
+ "\xe6\xfb\xda\x58\xba\x81\x08\xca\xa9\xb6\x69\x71\x96\x2a\x1f\x2e"
+ "\x25\xe0\x37\xfe\xee\x4d\x27\xaa\x04\xda\x95\xbb\x93\xcf\x8f\xa2"
+ "\x1d\x67\x35\xe3\x51\x8f\x87\x3b\xa9\x62\x05\xee\x44\xb7\x2e\xd0"
+ "\x07\x63\x32\xf5\xcd\x64\x18\x20\xcf\x22\x42\x28\x22\x1a\xa8\xbb"
+ "\x74\x8a\x6f\x2a\xea\x8a\x48\x0a\xad\xd7\xed\xba\xa3\x89\x37\x01",
+ .secret_size = 528,
+ .b_public_size = 512,
+ .expected_a_public_size = 512,
+ .expected_ss_size = 512,
+ },
+ {
+ .secret =
+#ifdef __LITTLE_ENDIAN
+ "\x01\x00" /* type */
+ "\x10\x00" /* len */
+ "\x00\x00\x00\x00" /* key_size */
+ "\x00\x00\x00\x00" /* p_size */
+ "\x00\x00\x00\x00", /* g_size */
+#else
+ "\x00\x01" /* type */
+ "\x00\x10" /* len */
+ "\x00\x00\x00\x00" /* key_size */
+ "\x00\x00\x00\x00" /* p_size */
+ "\x00\x00\x00\x00", /* g_size */
+#endif
+ .b_secret =
+#ifdef __LITTLE_ENDIAN
+ "\x01\x00" /* type */
+ "\x10\x02" /* len */
+ "\x00\x02\x00\x00" /* key_size */
+ "\x00\x00\x00\x00" /* p_size */
+ "\x00\x00\x00\x00" /* g_size */
+#else
+ "\x00\x01" /* type */
+ "\x02\x10" /* len */
+ "\x00\x00\x02\x00" /* key_size */
+ "\x00\x00\x00\x00" /* p_size */
+ "\x00\x00\x00\x00" /* g_size */
+#endif
+ /* xa */
+ "\x1a\x48\xf3\x6c\x61\x03\x42\x43\xd7\x42\x3b\xfa\xdb\x55\x6f\xa2"
+ "\xe1\x79\x52\x0b\x47\xc5\x03\x60\x2f\x26\xb9\x1a\x14\x15\x1a\xd9"
+ "\xe0\xbb\xa7\x82\x63\x41\xec\x26\x55\x00\xab\xe5\x21\x9d\x31\x14"
+ "\x0e\xe2\xc2\xb2\xb8\x37\xe6\xc3\x5a\xab\xae\x25\xdb\x71\x1e\xed"
+ "\xe8\x75\x9a\x04\xa7\x92\x2a\x99\x7e\xc0\x5b\x64\x75\x7f\xe5\xb5"
+ "\xdb\x6c\x95\x4f\xe9\xdc\x39\x76\x79\xb0\xf7\x00\x30\x8e\x86\xe7"
+ "\x36\xd1\xd2\x0c\x68\x7b\x94\xe9\x91\x85\x08\x86\xbc\x64\x87\xd2"
+ "\xf5\x5b\xaf\x03\xf6\x5f\x28\x25\xf1\xa3\x20\x5c\x1b\xb5\x26\x45"
+ "\x9a\x47\xab\xd6\xad\x49\xab\x92\x8e\x62\x6f\x48\x31\xea\xf6\x76"
+ "\xff\xa2\xb6\x28\x78\xef\x59\xc3\x71\x5d\xa8\xd9\x70\x89\xcc\xe2"
+ "\x63\x58\x5e\x3a\xa2\xa2\x88\xbf\x77\x20\x84\x33\x65\x64\x4e\x73"
+ "\xe5\x08\xd5\x89\x23\xd6\x07\xac\x29\x65\x2e\x02\xa8\x35\x96\x48"
+ "\xe7\x5d\x43\x6a\x42\xcc\xda\x98\xc4\x75\x90\x2e\xf6\xc4\xbf\xd4"
+ "\xbc\x31\x14\x0d\x54\x30\x11\xb2\xc9\xcf\xbb\xba\xbc\xc6\xf2\xcf"
+ "\xfe\x4a\x9d\xf3\xec\x78\x5d\x5d\xb4\x99\xd0\x67\x0f\x5a\x21\x1c"
+ "\x7b\x95\x2b\xcf\x49\x44\x94\x05\x1a\x21\x81\x25\x7f\xe3\x8a\x2a"
+ "\xdd\x88\xac\x44\x94\x23\x20\x3b\x75\xf6\x2a\x8a\x45\xf8\xb5\x1f"
+ "\xb9\x8b\xeb\xab\x9b\x38\x23\x26\xf1\x0f\x34\x47\x4f\x7f\xe1\x9e"
+ "\x84\x84\x78\xe5\xe3\x49\xeb\xcc\x2f\x02\x85\xa4\x18\x91\xde\x1a"
+ "\x60\x54\x33\x81\xd5\xae\xdb\x23\x9c\x4d\xa4\xdb\x22\x5b\xdf\xf4"
+ "\x8e\x05\x2b\x60\xba\xe8\x75\xfc\x34\x99\xcf\x35\xe1\x06\xba\xdc"
+ "\x79\x2a\x5e\xec\x1c\xbe\x79\x33\x63\x1c\xe7\x5f\x1e\x30\xd6\x1b"
+ "\xdb\x11\xb8\xea\x63\xff\xfe\x1a\x3c\x24\xf4\x78\x9c\xcc\x5d\x9a"
+ "\xc9\x2d\xc4\x9a\xd4\xa7\x65\x84\x98\xdb\x66\x76\xf0\x34\x31\x9f"
+ "\xce\xb5\xfb\x28\x07\xde\x1e\x0d\x9b\x01\x64\xeb\x2a\x37\x2f\x20"
+ "\xa5\x95\x72\x2b\x54\x51\x59\x91\xea\x50\x54\x0f\x2e\xb0\x1d\xf6"
+ "\xb9\x46\x43\xf9\xd0\x13\x21\x20\x47\x61\x1a\x1c\x30\xc6\x9e\x75"
+ "\x22\xe4\xf2\xb1\xab\x01\xdc\x5b\x3c\x1e\xa2\x6d\xc0\xb9\x9a\x2a"
+ "\x84\x61\xea\x85\x63\xa0\x77\xd0\xeb\x20\x68\xd5\x95\x6a\x1b\x8f"
+ "\x1f\x9a\xba\x44\x49\x8c\x77\xa6\xd9\xa0\x14\xf8\x7d\x9b\x4e\xfa"
+ "\xdc\x4f\x1c\x4d\x60\x50\x26\x7f\xd6\xc1\x91\x2b\xa6\x37\x5d\x94"
+ "\x69\xb2\x47\x59\xd6\xc3\x59\xbb\xd6\x9b\x71\x52\x85\x7a\xcb\x2d",
+ .b_public =
+ "\x1f\xff\xd6\xc4\x59\xf3\x4a\x9e\x81\x74\x4d\x27\xa7\xc6\x6b\x35"
+ "\xd8\xf5\xb3\x24\x97\x82\xe7\x2e\xf3\x21\x91\x23\x2f\x3d\x57\x7f"
+ "\x15\x8c\x84\x71\xe7\x25\x35\xe8\x07\x14\x06\x4c\x83\xdc\x55\x4a"
+ "\xf8\x45\xc5\xe9\xfa\x6e\xae\x6e\xcf\x4d\x11\x91\x26\x16\x6f\x86"
+ "\x89\x78\xaa\xb4\x25\x54\xb2\x74\x07\xe5\x26\x26\x0c\xad\xa4\x57"
+ "\x59\x61\x66\x71\x43\x22\xff\x49\x51\xa4\x76\x0e\x55\x7b\x60\x45"
+ "\x4f\xaf\xbd\x9c\xec\x64\x3f\x80\x0b\x0c\x31\x41\xf0\xfe\x2c\xb7"
+ "\x0a\xbe\xa5\x71\x08\x0d\x8d\x1e\x8a\x77\x9a\xd2\x90\x31\x96\xd0"
+ "\x3b\x31\xdc\xc6\x18\x59\x43\xa1\x19\x5a\x84\x68\x29\xad\x5e\x58"
+ "\xa2\x50\x3e\x83\xf5\x7a\xbd\x88\x17\x60\x89\x98\x9c\x19\x89\x27"
+ "\x89\xfc\x33\x87\x42\xd5\xde\x19\x14\xf2\x95\x82\x10\x87\xad\x82"
+ "\xdd\x6b\x51\x2d\x8d\x0e\x81\x4b\xde\xb3\x35\x6c\x0f\x4b\x56\x45"
+ "\x48\x87\xe9\x5a\xf9\x70\x10\x30\x8e\xa1\xbb\xa4\x70\xbf\xa0\xab"
+ "\x10\x31\x3c\x2c\xdc\xc4\xed\xe3\x51\xdc\xee\xd2\xa5\x5c\x4e\x6e"
+ "\xf6\xed\x60\x5a\xeb\xf3\x02\x19\x2a\x95\xe9\x46\xff\x37\x1b\xf0"
+ "\x1d\x10\x4a\x8f\x4f\x3a\x6e\xf5\xfc\x02\x6d\x09\x7d\xea\x69\x7b"
+ "\x13\xb0\xb6\x80\x5c\x15\x20\xa8\x4d\x15\x56\x11\x72\x49\xdb\x48"
+ "\x54\x40\x66\xd5\xcd\x17\x3a\x26\x95\xf6\xd7\xf2\x59\xa3\xda\xbb"
+ "\x26\xd0\xe5\x46\xbf\xee\x0e\x7d\xf1\xe0\x11\x02\x4d\xd3\xdc\xe2"
+ "\x3f\xc2\x51\x7e\xc7\x90\x33\x3c\x1c\xa0\x4c\x69\xcc\x1e\xc7\xac"
+ "\x17\xe0\xe5\xf4\x8c\x05\x64\x34\xfe\x84\x70\xd7\x6b\xed\xab\xf5"
+ "\x88\x9d\x3e\x4c\x5a\x9e\xd4\x74\xfd\xdd\x91\xd5\xd4\xcb\xbf\xf8"
+ "\xb7\x56\xb5\xe9\x22\xa6\x6d\x7a\x44\x05\x41\xbf\xdb\x61\x28\xc6"
+ "\x99\x49\x87\x3d\x28\x77\xf8\x83\x23\x7e\xa9\xa7\xee\x20\xdb\x6d"
+ "\x21\x50\xb7\xc9\x52\x57\x53\xa3\xcf\xdf\xd0\xf9\xb9\x62\x96\x89"
+ "\xf5\x5c\xa9\x8a\x11\x95\x01\x25\xc9\x81\x15\x76\xae\xf0\xc7\xc5"
+ "\x50\xae\x6f\xb5\xd2\x8a\x8e\x9a\xd4\x30\x55\xc6\xe9\x2c\x81\x6e"
+ "\x95\xf6\x45\x89\x55\x28\x34\x7b\xe5\x72\x9a\x2a\xe2\x98\x09\x35"
+ "\xe0\xe9\x75\x94\xe9\x34\x95\xb9\x13\x6e\xd5\xa1\x62\x5a\x1c\x94"
+ "\x28\xed\x84\x46\x76\x6d\x10\x37\x71\xa3\x31\x46\x64\xe4\x59\x44"
+ "\x17\x70\x1c\x23\xc9\x7e\xf6\xab\x8a\x24\xae\x25\xe2\xb2\x5f\x33"
+ "\xe4\xd7\xd3\x34\x2a\x49\x22\x16\x15\x9b\x90\x40\xda\x99\xd5\xaf",
+ .secret_size = 16,
+ .b_secret_size = 528,
+ .b_public_size = 512,
+ .expected_a_public_size = 512,
+ .expected_ss_size = 512,
+ .genkey = true,
+ },
+};
+
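The secret blobs in these vectors follow the kernel's packed DH key encoding: a struct kpp_secret header (type, len) followed by key_size, p_size and g_size, then the raw private key; for the ffdheXYZ(dh) templates p_size and g_size stay zero because the group is implied by the template name. A minimal sketch of producing such a blob at run time with the helpers from include/crypto/dh.h follows — set_dh_key() is a hypothetical name, not part of this patch:

#include <crypto/dh.h>
#include <crypto/kpp.h>
#include <linux/slab.h>

/* Encode a raw private key xa into the packed format shown above,
 * then hand it to the kpp transform. */
static int set_dh_key(struct crypto_kpp *tfm, const u8 *xa,
                      unsigned int xa_len)
{
        struct dh params = {
                .key      = xa,
                .key_size = xa_len,
                /* p_size/g_size stay 0: ffdheXYZ(dh) fixes the group */
        };
        unsigned int len = crypto_dh_key_len(&params);
        void *buf = kmalloc(len, GFP_KERNEL);
        int err;

        if (!buf)
                return -ENOMEM;
        err = crypto_dh_encode_key(buf, len, &params);
        if (!err)
                err = crypto_kpp_set_secret(tfm, buf, len);
        kfree_sensitive(buf);
        return err;
}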
+static const struct kpp_testvec ffdhe6144_dh_tv_template[] __maybe_unused = {
+ {
+ .secret =
+#ifdef __LITTLE_ENDIAN
+ "\x01\x00" /* type */
+ "\x10\x03" /* len */
+ "\x00\x03\x00\x00" /* key_size */
+ "\x00\x00\x00\x00" /* p_size */
+ "\x00\x00\x00\x00" /* g_size */
+#else
+ "\x00\x01" /* type */
+ "\x03\x10" /* len */
+ "\x00\x00\x03\x00" /* key_size */
+ "\x00\x00\x00\x00" /* p_size */
+ "\x00\x00\x00\x00" /* g_size */
+#endif
+ /* xa */
+ "\x63\x3e\x6f\xe0\xfe\x9f\x4a\x01\x62\x77\xce\xf1\xc7\xcc\x49\x4d"
+ "\x92\x53\x56\xe3\x39\x15\x81\xb2\xcd\xdc\xaf\x5e\xbf\x31\x1f\x69"
+ "\xce\x41\x35\x24\xaa\x46\x53\xb5\xb7\x3f\x2b\xad\x95\x14\xfb\xe4"
+ "\x9a\x61\xcd\x0f\x1f\x02\xee\xa4\x79\x2c\x9d\x1a\x7c\x62\x82\x39"
+ "\xdd\x43\xcc\x58\x9f\x62\x47\x56\x1d\x0f\xc2\x67\xbc\x24\xd0\xf9"
+ "\x0a\x50\x1b\x10\xe7\xbb\xd1\xc2\x01\xbb\xc4\x4c\xda\x12\x60\x0e"
+ "\x95\x2b\xde\x09\xd6\x67\xe1\xbc\x4c\xb9\x67\xdf\xd0\x1f\x97\xb4"
+ "\xde\xcb\x6b\x78\x83\x51\x74\x33\x01\x7f\xf6\x0a\x95\x69\x93\x00"
+ "\x2a\xc3\x75\x8e\xef\xbe\x53\x11\x6d\xc4\xd0\x9f\x6d\x63\x48\xc1"
+ "\x91\x1f\x7d\x88\xa7\x90\x78\xd1\x7e\x52\x42\x10\x01\xb4\x27\x95"
+ "\x91\x43\xcc\x82\x91\x86\x62\xa0\x9d\xef\x65\x6e\x67\xcf\x19\x11"
+ "\x35\x37\x5e\x94\x97\x83\xa6\x83\x1c\x7e\x8a\x3e\x32\xb0\xce\xff"
+ "\x20\xdc\x7b\x6e\x18\xd9\x6b\x27\x31\xfc\xc3\xef\x47\x8d\xbe\x34"
+ "\x2b\xc7\x60\x74\x3c\x93\xb3\x8e\x54\x77\x4e\x73\xe6\x40\x72\x35"
+ "\xb0\xf0\x06\x53\x43\xbe\xd0\xc3\x87\xcc\x38\x96\xa9\x10\xa0\xd6"
+ "\x17\xed\xa5\x6a\xf4\xf6\xaa\x77\x40\xed\x7d\x2e\x58\x0f\x5b\x04"
+ "\x5a\x41\x12\x95\x22\xcb\xa3\xce\x8b\x6d\x6d\x89\xec\x7c\x1d\x25"
+ "\x27\x52\x50\xa0\x5b\x93\x8c\x5d\x3f\x56\xb9\xa6\x5e\xe5\xf7\x9b"
+ "\xc7\x9a\x4a\x2e\x79\xb5\xca\x29\x58\x52\xa0\x63\xe4\x9d\xeb\x4c"
+ "\x4c\xa8\x37\x0b\xe9\xa0\x18\xf1\x86\xf6\x4d\x32\xfb\x9e\x4f\xb3"
+ "\x7b\x5d\x58\x78\x70\xbd\x56\xac\x99\x75\x25\x71\x66\x76\x4e\x5e"
+ "\x67\x4f\xb1\x17\xa7\x8b\x55\x12\x87\x01\x4e\xd1\x66\xef\xd0\x70"
+ "\xaf\x14\x34\xee\x2a\x76\x49\x25\xa6\x2e\x43\x37\x75\x7d\x1a\xad"
+ "\x08\xd5\x01\x85\x9c\xe1\x20\xd8\x38\x5c\x57\xa5\xed\x9d\x46\x3a"
+ "\xb7\x46\x60\x29\x8b\xc4\x21\x50\x0a\x30\x9c\x57\x42\xe4\x35\xf8"
+ "\x12\x5c\x4f\xa2\x20\xc2\xc9\x43\xe3\x6d\x20\xbc\xdf\xb8\x37\x33"
+ "\x45\x43\x06\x4e\x08\x6f\x8a\xcd\x61\xc3\x1b\x05\x28\x82\xbe\xf0"
+ "\x48\x33\xe5\x93\xc9\x1a\x61\x16\x67\x03\x9d\x47\x9d\x74\xeb\xae"
+ "\x13\xf2\xb4\x1b\x09\x11\xf5\x15\xcb\x28\xfd\x50\xe0\xbc\x58\x36"
+ "\x38\x91\x2c\x07\x27\x1f\x49\x68\xf4\xce\xad\xf7\xba\xec\x5d\x3d"
+ "\xfd\x27\xe2\xcf\xf4\x56\xfe\x08\xa6\x11\x61\xcb\x6c\x9f\xf9\x3c"
+ "\x57\x0b\x8b\xaa\x00\x16\x18\xba\x1f\xe8\x4f\x01\xe2\x79\x2a\x0b"
+ "\xc1\xbd\x52\xef\xe6\xf7\x5a\x66\xfe\x07\x3b\x50\x6b\xbb\xcb\x39"
+ "\x3c\x94\xf6\x21\x0d\x68\x69\xa4\xed\x2e\xb5\x85\x03\x11\x38\x79"
+ "\xec\xb5\x22\x23\xdf\x9e\xad\xb4\xbe\xd7\xc7\xdf\xea\x30\x23\x8a"
+ "\xb7\x21\x0a\x9d\xbd\x99\x13\x7d\x5f\x7e\xaf\x28\x54\x3f\xca\x5e"
+ "\xf4\xfc\x05\x0d\x65\x67\xd8\xf6\x8e\x90\x9d\x0d\xcf\x62\x82\xd6"
+ "\x9f\x02\xf8\xca\xfa\x42\x24\x7f\x4d\xb7\xfc\x92\xa6\x4a\x51\xc4"
+ "\xd8\xae\x19\x87\xc6\xa3\x83\xbe\x7b\x6d\xc3\xf5\xb8\xad\x4a\x05"
+ "\x78\x84\x3a\x15\x2e\x40\xbe\x79\xa9\xc0\x12\xa1\x48\x39\xc3\xdb"
+ "\x47\x4f\x7d\xea\x6d\xc7\xfa\x2c\x4e\xe9\xa5\x85\x81\xea\x6c\xcd"
+ "\x8a\xe5\x74\x17\x76\x31\x31\x75\x96\x83\xca\x81\xbb\x5c\xa9\x79"
+ "\x2c\xbd\x09\xfe\xe4\x86\x0d\x8c\x76\x9c\xbc\xe8\x93\xe4\xd0\xe4"
+ "\x0f\xf8\xff\x24\x7e\x66\x61\x69\xfb\xe4\x46\x08\x94\x99\xa5\x53"
+ "\xd7\xe4\x29\x72\x86\x86\xe8\x1d\x37\xfa\xcb\xd0\x8d\x51\xd0\xbf"
+ "\x81\xcf\x55\xb9\xc5\x78\x8c\x74\xa0\x16\x3a\xd2\x19\x94\x29\x6a"
+ "\x5e\xec\xd3\x20\xa0\xb2\xfd\xce\xd4\x14\xa3\x39\x10\xa9\xf4\x4e"
+ "\xba\x21\x09\x5c\xe6\x61\x43\x51\xae\xc4\x71\xd7\x21\xef\x98\x39",
+ .b_public =
+ "\x30\x31\xbe\x43\xd0\x14\x22\x6b\x4b\x8c\x9a\xca\xc6\xdd\xe5\x99"
+ "\xce\xb8\x30\x23\xb6\xa8\x8c\x4d\xfa\xef\xad\xa6\x6a\x21\x50\xa6"
+ "\x45\x2d\x19\x2a\x29\x81\xc5\xac\xb4\xa8\x5f\x6d\x5b\xc8\x5f\x12"
+ "\x35\x21\xfb\x37\xaa\x0c\x79\xeb\xd4\x83\x01\xda\xa3\xf3\x51\x6e"
+ "\x17\xf9\xef\x3f\xbd\x2f\xd2\x43\x82\x12\x48\xeb\x61\x4c\x8e\xf2"
+ "\x6c\x76\xf9\x6d\x42\x2a\xcb\x10\x13\x3b\xf6\x9b\xcd\x46\x1e\xa2"
+ "\xa7\x2c\x08\x56\xd2\x42\xf5\x03\xf0\x3e\xef\xa2\xa2\xf2\x4c\xf2"
+ "\xdb\x4f\xeb\x40\x15\x53\x27\xf7\xd4\x8e\x58\x23\xf5\x2c\x88\x04"
+ "\x1e\xb1\xb6\xe3\xd6\x9c\x49\x08\xa1\x4b\xb8\x33\xe4\x75\x85\xa1"
+ "\x86\x97\xce\x1d\xe9\x9f\xe2\xd8\xf2\x7e\xad\xdc\x8a\x4d\xbd\x06"
+ "\x52\x00\x9a\x2c\x69\xdd\x02\x0c\x69\x5a\xf9\x1d\xfd\xdc\xfb\x82"
+ "\xb2\xe5\xf3\x24\xba\xd1\x09\x76\x90\xb5\x7a\x92\xa6\x6b\x97\xc0"
+ "\xce\x13\x9b\x4b\xbc\x30\x91\xb2\x13\x8b\x57\x6c\x8b\x66\x6e\x58"
+ "\x3e\x91\x50\xc7\x6c\xe1\x18\xec\xbf\x69\xcd\xcb\xa0\xbc\x0d\x05"
+ "\xc4\xf8\x45\x92\xe0\x05\xd3\x08\xb3\x30\x19\xc8\x80\xf8\x17\x9f"
+ "\x1e\x6a\x49\x8e\x43\xef\x7a\x49\xa5\x93\xd9\xed\xd1\x07\x03\xe4"
+ "\xa3\x55\xeb\x1e\x2f\x69\xd7\x40\x8f\x6e\x1c\xb6\x94\xfb\xba\x4e"
+ "\x46\xd0\x38\x71\x00\x88\x93\x6a\x55\xfc\x16\x95\x1f\xb1\xf6\x2f"
+ "\x26\x45\x50\x54\x30\x62\x62\xe8\x80\xe5\x24\x0b\xe4\x15\x6b\x32"
+ "\x16\xc2\x30\x9b\x56\xb4\xc9\x5e\x50\xb4\x27\x82\x86\x01\xda\x68"
+ "\x44\x4b\x15\x81\x31\x13\x52\xd8\x08\xbc\xae\xf3\xa5\x94\x1c\x81"
+ "\xe8\x42\xd6\x42\xd6\xff\x99\x58\x0f\x61\x3e\x82\x9e\x2d\x13\x03"
+ "\x54\x02\x74\xf4\x6b\x43\x43\xce\x54\x44\x36\x3f\x55\xfa\xb2\x56"
+ "\xdc\xac\xb5\x65\x89\xbe\x36\xd2\x58\x65\x79\x4c\xf3\xe2\x01\xf1"
+ "\x69\x96\x29\x20\x5d\xee\xf5\x8a\x8b\x9f\x72\xf7\x27\x02\xde\x3b"
+ "\xc7\x52\x19\xdc\x8e\x22\x36\x09\x14\x59\x07\xbb\x1e\x49\x69\x4f"
+ "\x00\x7b\x9a\x5d\x23\xe9\xbe\x0d\x52\x90\xa3\x0d\xde\xe7\x80\x57"
+ "\x53\x69\x39\xe6\xf8\x33\xeb\x92\x0d\x9e\x04\x8b\x16\x16\x16\x1c"
+ "\xa9\xe6\xe3\x0e\x0a\xc6\xf6\x61\xd1\x44\x2b\x3e\x5e\x02\xfe\xaa"
+ "\xe3\xf3\x8f\xf9\xc8\x20\x37\xad\xbc\x95\xb8\xc5\xe7\x95\xda\xfb"
+ "\x80\x5b\xf6\x40\x28\xae\xc1\x4c\x09\xde\xff\x1e\xbf\x51\xd2\xfe"
+ "\x08\xdc\xb0\x48\x21\xf5\x4c\x43\xdc\x7b\x69\x83\xc8\x69\x5c\xc4"
+ "\xa9\x98\x76\x4b\xc4\x4a\xac\x1d\xa5\x52\xe3\x35\x43\xdd\x30\xd4"
+ "\xa0\x51\x9c\xc2\x62\x4c\x7e\xa5\xfb\xd3\x2c\x8a\x09\x7f\x53\xa3"
+ "\xcd\xca\x58\x1b\x4c\xaf\xba\x21\x8b\x88\x1d\xc0\xe9\x0a\x17\x30"
+ "\x33\xd6\xa2\xa5\x49\x50\x61\x3b\xff\x37\x71\x66\xef\x61\xbc\xb2"
+ "\x53\x82\xe5\x70\xef\x32\xff\x9d\x97\xe0\x82\xe0\xbb\x49\xc2\x29"
+ "\x58\x89\xdd\xe9\x62\x52\xfb\xba\x22\xa6\xd9\x16\xfa\x55\xb3\x06"
+ "\xed\x6d\x70\x6e\xdc\x47\x7c\x67\x1a\xcc\x27\x98\xd4\xd7\xe6\xf0"
+ "\xf8\x9f\x51\x3e\xf0\xee\xad\xb6\x78\x69\x71\xb5\xcb\x09\xa3\xa6"
+ "\x3f\x29\x24\x46\xe0\x65\xbc\x9f\x6c\xe9\xf9\x49\x49\x96\x75\xe5"
+ "\xe1\xff\x82\x70\xf4\x7e\xff\x8f\xec\x47\x98\x6d\x5b\x88\x60\xee"
+ "\x43\xb1\xe2\x14\xc1\x49\x95\x74\x46\xd3\x3f\x73\xb2\xe9\x88\xe0"
+ "\xd3\xb1\xc4\x2c\xef\xee\xdd\x6c\xc5\xa1\x29\xef\x86\xd2\x36\x8a"
+ "\x2f\x7c\x9d\x28\x0a\x6d\xc9\x5a\xdb\xd4\x04\x06\x36\x96\x09\x03"
+ "\x71\x5d\x38\x67\xa2\x08\x2a\x04\xe7\xd6\x51\x5a\x19\x9d\xe7\xf1"
+ "\x5d\x6f\xe2\xff\x48\x37\xb7\x8b\xb1\x14\xb4\x96\xcd\xf0\xa7\xbd"
+ "\xef\x20\xff\x0a\x8d\x08\xb7\x15\x98\x5a\x13\xd2\xda\x2a\x27\x75",
+ .expected_a_public =
+ "\x45\x96\x5a\xb7\x78\x5c\xa4\x4d\x39\xb2\x5f\xc8\xc2\xaa\x1a\xf4"
+ "\xa6\x68\xf6\x6f\x7e\xa8\x4a\x5b\x0e\xba\x0a\x99\x85\xf9\x63\xd4"
+ "\x58\x21\x6d\xa8\x3c\xf4\x05\x10\xb0\x0d\x6f\x1c\xa0\x17\x85\xae"
+ "\x68\xbf\xcc\x00\xc8\x86\x1b\x24\x31\xc9\x49\x23\x91\xe0\x71\x29"
+ "\x06\x39\x39\x93\x49\x9c\x75\x18\x1a\x8b\x61\x73\x1c\x7f\x37\xd5"
+ "\xf1\xab\x20\x5e\x62\x25\xeb\x58\xd5\xfa\xc9\x7f\xad\x57\xd5\xcc"
+ "\x0d\xc1\x7a\x2b\x33\x2a\x76\x84\x33\x26\x97\xcf\x47\x9d\x72\x2a"
+ "\xc9\x39\xde\xa8\x42\x27\x2d\xdc\xee\x00\x60\xd2\x4f\x13\xe0\xde"
+ "\xd5\xc7\xf6\x7d\x8b\x2a\x43\x49\x40\x99\xc2\x61\x84\x8e\x57\x09"
+ "\x7c\xcc\x19\x46\xbd\x4c\xd2\x7c\x7d\x02\x4d\x88\xdf\x58\x24\x80"
+ "\xeb\x19\x3b\x2a\x13\x2b\x19\x85\x3c\xd8\x31\x03\x00\xa4\xd4\x57"
+ "\x23\x2c\x24\x37\xb3\x62\xea\x35\x29\xd0\x2c\xac\xfd\xbd\xdf\x3d"
+ "\xa6\xce\xfa\x0d\x5b\xb6\x15\x8b\xe3\x58\xe9\xad\x99\x87\x29\x51"
+ "\x8d\x97\xd7\xa9\x55\xf0\x72\x6e\x4e\x58\xcb\x2b\x4d\xbd\xd0\x48"
+ "\x7d\x14\x86\xdb\x3f\xa2\x5f\x6e\x35\x4a\xe1\x70\xb1\x53\x72\xb7"
+ "\xbc\xe9\x3d\x1b\x33\xc0\x54\x6f\x43\x55\x76\x85\x7f\x9b\xa5\xb3"
+ "\xc1\x1d\xd3\xfe\xe2\xd5\x96\x3d\xdd\x92\x04\xb1\xad\x75\xdb\x13"
+ "\x4e\x49\xfc\x35\x34\xc5\xda\x13\x98\xb8\x12\xbe\xda\x90\x55\x7c"
+ "\x11\x6c\xbe\x2b\x8c\x51\x29\x23\xc1\x51\xbc\x0c\x1c\xe2\x20\xfc"
+ "\xfe\xf2\xaa\x71\x9b\x21\xdf\x25\x1f\x68\x21\x7e\xe1\xc9\x87\xa0"
+ "\x20\xf6\x8d\x4f\x27\x8c\x3c\x0f\x9d\xf4\x69\x25\xaa\x49\xab\x94"
+ "\x22\x5a\x92\x3a\xba\xb4\xc2\x8c\x5a\xaa\x04\xbf\x46\xc5\xaa\x93"
+ "\xab\x0d\xe9\x54\x6c\x3a\x64\xa6\xa2\x21\x66\xee\x1c\x10\x21\x84"
+ "\xf2\x9e\xcc\x57\xac\xc2\x25\x62\xad\xbb\x59\xef\x25\x61\x6c\x81"
+ "\x38\x8a\xdc\x8c\xeb\x7b\x18\x1d\xaf\xa9\xc5\x9a\xf4\x49\x26\x8a"
+ "\x25\xc4\x3e\x31\x95\x28\xef\xf7\x72\xe9\xc5\xaa\x59\x72\x2b\x67"
+ "\x47\xe8\x6b\x51\x05\x24\xb8\x18\xb3\x34\x0f\x8c\x2b\x80\xba\x61"
+ "\x1c\xbe\x9e\x9a\x7c\xe3\x60\x5e\x49\x02\xff\x50\x8a\x64\x28\x64"
+ "\x46\x7b\x83\x14\x72\x6e\x59\x9b\x56\x09\xb4\xf0\xde\x52\xc3\xf3"
+ "\x58\x17\x6a\xae\xb1\x0f\xf4\x39\xcc\xd8\xce\x4d\xe1\x51\x17\x88"
+ "\xe4\x98\xd9\xd1\xa9\x55\xbc\xbf\x7e\xc4\x51\x96\xdb\x44\x1d\xcd"
+ "\x8d\x74\xad\xa7\x8f\x87\x83\x75\xfc\x36\xb7\xd2\xd4\x89\x16\x97"
+ "\xe4\xc6\x2a\xe9\x65\xc8\xca\x1c\xbd\x86\xaf\x57\x80\xf7\xdd\x42"
+ "\xc0\x3b\x3f\x87\x51\x02\x2f\xf8\xd8\x68\x0f\x3d\x95\x2d\xf1\x67"
+ "\x09\xa6\x5d\x0b\x7e\x01\xb4\xb2\x32\x01\xa8\xd0\x58\x0d\xe6\xa2"
+ "\xd8\x4b\x22\x10\x7d\x11\xf3\xc2\x4e\xb8\x43\x8e\x31\x79\x59\xe2"
+ "\xc4\x96\x29\x17\x40\x06\x0d\xdf\xdf\xc3\x02\x30\x2a\xd1\x8e\xf2"
+ "\xee\x2d\xd2\x12\x63\x5a\x1d\x3c\xba\x4a\xc4\x56\x90\xc6\x12\x0b"
+ "\xe0\x04\x3f\x35\x59\x8e\x40\x75\xf4\x4c\x10\x61\xb9\x30\x89\x7c"
+ "\x8d\x0e\x25\xb7\x5a\x6b\x97\x05\xc6\x37\x80\x6e\x94\x56\xa8\x5f"
+ "\x03\x94\x59\xc8\xc5\x3e\xdc\x23\xe5\x68\x4f\xd7\xbb\x6d\x7e\xc1"
+ "\x8d\xf9\xcc\x3f\x38\xad\x77\xb3\x18\x61\xed\x04\xc0\x71\xa7\x96"
+ "\xb1\xaf\x1d\x69\x78\xda\x6d\x89\x8b\x50\x75\x99\x44\xb3\xb2\x75"
+ "\xd1\xc8\x14\x40\xa1\x0a\xbf\xc4\x45\xc4\xee\x12\x90\x76\x26\x64"
+ "\xb7\x73\x2e\x0b\x0c\xfa\xc3\x55\x29\x24\x1b\x7a\x00\x27\x07\x26"
+ "\x36\xf0\x38\x1a\xe3\xb7\xc4\x8d\x1c\x9c\xa9\xc0\xc1\x45\x91\x9e"
+ "\x86\xdd\x82\x94\x45\xfa\xcd\x5a\x19\x12\x7d\xef\xda\x17\xad\x21"
+ "\x17\x89\x8b\x45\xa7\xf5\xed\x51\x9e\x58\x13\xdc\x84\xa4\xe6\x37",
+ .expected_ss =
+ "\x9a\x9c\x1c\xb7\x73\x2f\xf2\x12\xed\x59\x01\xbb\x75\xf7\xf5\xe4"
+ "\xa0\xa8\xbc\x3f\x3f\xb6\xf7\x74\x6e\xc4\xba\x6d\x6c\x4d\x93\x31"
+ "\x2b\xa7\xa4\xb3\x47\x8f\x77\x04\xb5\xa5\xab\xca\x6b\x5a\xe2\x86"
+ "\x02\x60\xca\xb4\xd7\x5e\xe0\x0f\x73\xdd\xa2\x38\x7c\xae\x0f\x5a"
+ "\x1a\xd7\xfd\xb6\xc8\x6f\xdd\xe0\x98\xd5\x07\xea\x1f\x2a\xbb\x9e"
+ "\xef\x01\x24\x04\xee\xf5\x89\xb1\x12\x26\x54\x95\xef\xcb\x84\xe9"
+ "\xae\x05\xef\x63\x25\x15\x65\x79\x79\x79\x91\xc3\x76\x72\xb4\x85"
+ "\x86\xd9\xd3\x03\xb0\xff\x04\x96\x05\x3c\xde\xbf\x47\x34\x76\x70"
+ "\x17\xd2\x24\x83\xb9\xbb\xcf\x70\x7c\xb8\xc6\x7b\x4e\x01\x86\x36"
+ "\xc7\xc5\xe5\x8b\x7c\x69\x74\x9a\xfe\x1f\x58\x85\x0f\x00\xf8\x4e"
+ "\xf1\x56\xdc\xd1\x11\x28\x2c\xcf\x6c\xb9\xc9\x57\x17\x2e\x19\x19"
+ "\x55\xb3\x4c\xd8\xfb\xe7\x6f\x70\x63\xf9\x53\x45\xdd\xd5\x62\x95"
+ "\xd3\x7d\x7e\xa0\x00\x1a\x62\x9f\x96\x0a\x5d\x0a\x25\x02\xbb\xff"
+ "\x5a\xe8\x9e\x5a\x66\x08\x93\xbc\x92\xaf\xd2\x28\x04\x97\xc1\x54"
+ "\xfe\xcc\x0a\x25\xa2\xf4\x1d\x5a\x9a\xb1\x3e\x9c\xba\x78\xe2\xcf"
+ "\x71\x70\xe3\x40\xea\xba\x69\x9b\x03\xdd\x99\x26\x09\x84\x9d\x69"
+ "\x4d\x3d\x0b\xe9\x3f\x51\xcd\x05\xe5\x00\xaf\x2c\xd3\xf6\xc0\x68"
+ "\xb5\x23\x53\x33\x14\xbd\x39\x1c\xbd\x1b\xe6\x72\x90\xcc\xc2\x86"
+ "\x1a\x42\x83\x55\xb3\xed\x0b\x62\x6d\x0e\xbb\x9e\x2a\x42\x32\x05"
+ "\x3f\xf2\x2c\xc8\x9f\x3c\xd2\xb1\x0b\xb6\x4c\xa0\x22\x36\xee\xb9"
+ "\x55\x23\x3e\x80\xc7\x28\x7c\x39\x11\xd3\x4a\x96\x2e\xef\x52\x34"
+ "\xf2\xda\xb1\xc6\xf5\x02\x10\xbf\x56\x6b\x50\x56\xcd\x2c\xfe\xe1"
+ "\x94\x14\x19\x24\x6e\x9a\xdf\x0c\xb8\xe2\xb8\xd5\xa3\xc1\x22\x8e"
+ "\x84\x92\x00\x16\xf1\x3f\x83\xf6\x36\x31\xa5\x38\xc6\xcf\xf8\x9b"
+ "\x03\xc7\x6f\xb9\xa1\x04\xdf\x20\x0f\x0b\x0f\x70\xff\x57\x36\x7f"
+ "\xb3\x6b\xcb\x8f\x48\xf7\xb2\xdb\x85\x05\xd1\xfe\x34\x05\xf6\x57"
+ "\xb4\x5b\xcc\x3f\x0e\xba\x36\x59\xb0\xfd\x4d\xf6\xf4\x5e\xd2\x65"
+ "\x1d\x98\x87\xb4\x5e\xff\x29\xaa\x84\x9b\x44\x0f\x06\x36\x61\xbd"
+ "\xdb\x51\xda\x56\xc2\xd6\x19\xe2\x57\x4f\xd0\x29\x71\xc8\xe4\xd6"
+ "\xfb\x8c\xd0\xfc\x4f\x25\x09\xa6\xfc\x67\xe2\xb8\xac\xd3\x88\x8f"
+ "\x1f\xf6\xa1\xe3\x45\xa6\x34\xe3\xb1\x6b\xb7\x37\x0e\x06\xc7\x63"
+ "\xde\xac\x3b\xac\x07\x91\x64\xcc\x12\x10\x46\x85\x14\x0b\x6b\x03"
+ "\xba\x4a\x85\xae\xc5\x8c\xa5\x9d\x36\x38\x33\xca\x42\x9c\x4b\x0c"
+ "\x46\xe1\x77\xe9\x1f\x80\xfe\xb7\x1d\x5a\xf4\xc6\x11\x26\x78\xea"
+ "\x81\x25\x77\x47\xed\x8b\x59\xc2\x6b\x49\xff\x83\x56\xec\xa5\xf0"
+ "\xe0\x8b\x15\xd4\x99\x40\x2a\x65\x2a\x98\xf4\x71\x35\x63\x84\x08"
+ "\x4d\xcd\x71\x85\x55\xbc\xa4\x1c\x90\x93\x03\x41\xde\xed\x78\x62"
+ "\x07\x30\x50\xac\x60\x21\x06\xc3\xab\xa4\x04\xc0\xc2\x32\x07\xc4"
+ "\x1f\x2f\xec\xe2\x32\xbf\xbe\x5e\x50\x5b\x2a\x19\x71\x44\x37\x76"
+ "\x8b\xbc\xdb\x73\x98\x65\x78\xc9\x33\x97\x7e\xdc\x60\xa8\x87\xf2"
+ "\xb5\x96\x55\x7f\x44\x07\xcb\x3b\xf3\xd7\x82\xfd\x77\x21\x82\x21"
+ "\x1a\x8b\xa2\xf5\x1f\x66\xd0\x57\x00\x4f\xa9\xa5\x33\xb8\x69\x91"
+ "\xe8\x2e\xf7\x73\x47\x89\x30\x9b\xb1\xfd\xe1\x5d\x11\xfd\x84\xd9"
+ "\xa2\x91\x1f\x8a\xa7\x7a\x77\x8e\x3b\x10\x1d\x0a\x59\x50\x34\xb0"
+ "\xc3\x90\x9f\x56\xb7\x43\xeb\x51\x99\x2b\x8e\x6d\x7b\x58\xe7\xc0"
+ "\x7f\x3d\xa0\x27\x50\xf2\x6e\xc8\x1e\x7f\x84\xb3\xe1\xf7\x09\x85"
+ "\xd2\x9b\x56\x6b\xba\xa5\x19\x2e\xec\xd8\x5c\xf5\x4e\x43\x36\x2e"
+ "\x89\x85\x41\x7f\x9c\x91\x2e\x62\xc3\x41\xcf\x0e\xa1\x7f\xeb\x50",
+ .secret_size = 784,
+ .b_public_size = 768,
+ .expected_a_public_size = 768,
+ .expected_ss_size = 768,
+ },
+ {
+ .secret =
+#ifdef __LITTLE_ENDIAN
+ "\x01\x00" /* type */
+ "\x10\x00" /* len */
+ "\x00\x00\x00\x00" /* key_size */
+ "\x00\x00\x00\x00" /* p_size */
+ "\x00\x00\x00\x00", /* g_size */
+#else
+ "\x00\x01" /* type */
+ "\x00\x10" /* len */
+ "\x00\x00\x00\x00" /* key_size */
+ "\x00\x00\x00\x00" /* p_size */
+ "\x00\x00\x00\x00", /* g_size */
+#endif
+ .b_secret =
+#ifdef __LITTLE_ENDIAN
+ "\x01\x00" /* type */
+ "\x10\x03" /* len */
+ "\x00\x03\x00\x00" /* key_size */
+ "\x00\x00\x00\x00" /* p_size */
+ "\x00\x00\x00\x00" /* g_size */
+#else
+ "\x00\x01" /* type */
+ "\x03\x10" /* len */
+ "\x00\x00\x03\x00" /* key_size */
+ "\x00\x00\x00\x00" /* p_size */
+ "\x00\x00\x00\x00" /* g_size */
+#endif
+ /* xa */
+ "\x63\x3e\x6f\xe0\xfe\x9f\x4a\x01\x62\x77\xce\xf1\xc7\xcc\x49\x4d"
+ "\x92\x53\x56\xe3\x39\x15\x81\xb2\xcd\xdc\xaf\x5e\xbf\x31\x1f\x69"
+ "\xce\x41\x35\x24\xaa\x46\x53\xb5\xb7\x3f\x2b\xad\x95\x14\xfb\xe4"
+ "\x9a\x61\xcd\x0f\x1f\x02\xee\xa4\x79\x2c\x9d\x1a\x7c\x62\x82\x39"
+ "\xdd\x43\xcc\x58\x9f\x62\x47\x56\x1d\x0f\xc2\x67\xbc\x24\xd0\xf9"
+ "\x0a\x50\x1b\x10\xe7\xbb\xd1\xc2\x01\xbb\xc4\x4c\xda\x12\x60\x0e"
+ "\x95\x2b\xde\x09\xd6\x67\xe1\xbc\x4c\xb9\x67\xdf\xd0\x1f\x97\xb4"
+ "\xde\xcb\x6b\x78\x83\x51\x74\x33\x01\x7f\xf6\x0a\x95\x69\x93\x00"
+ "\x2a\xc3\x75\x8e\xef\xbe\x53\x11\x6d\xc4\xd0\x9f\x6d\x63\x48\xc1"
+ "\x91\x1f\x7d\x88\xa7\x90\x78\xd1\x7e\x52\x42\x10\x01\xb4\x27\x95"
+ "\x91\x43\xcc\x82\x91\x86\x62\xa0\x9d\xef\x65\x6e\x67\xcf\x19\x11"
+ "\x35\x37\x5e\x94\x97\x83\xa6\x83\x1c\x7e\x8a\x3e\x32\xb0\xce\xff"
+ "\x20\xdc\x7b\x6e\x18\xd9\x6b\x27\x31\xfc\xc3\xef\x47\x8d\xbe\x34"
+ "\x2b\xc7\x60\x74\x3c\x93\xb3\x8e\x54\x77\x4e\x73\xe6\x40\x72\x35"
+ "\xb0\xf0\x06\x53\x43\xbe\xd0\xc3\x87\xcc\x38\x96\xa9\x10\xa0\xd6"
+ "\x17\xed\xa5\x6a\xf4\xf6\xaa\x77\x40\xed\x7d\x2e\x58\x0f\x5b\x04"
+ "\x5a\x41\x12\x95\x22\xcb\xa3\xce\x8b\x6d\x6d\x89\xec\x7c\x1d\x25"
+ "\x27\x52\x50\xa0\x5b\x93\x8c\x5d\x3f\x56\xb9\xa6\x5e\xe5\xf7\x9b"
+ "\xc7\x9a\x4a\x2e\x79\xb5\xca\x29\x58\x52\xa0\x63\xe4\x9d\xeb\x4c"
+ "\x4c\xa8\x37\x0b\xe9\xa0\x18\xf1\x86\xf6\x4d\x32\xfb\x9e\x4f\xb3"
+ "\x7b\x5d\x58\x78\x70\xbd\x56\xac\x99\x75\x25\x71\x66\x76\x4e\x5e"
+ "\x67\x4f\xb1\x17\xa7\x8b\x55\x12\x87\x01\x4e\xd1\x66\xef\xd0\x70"
+ "\xaf\x14\x34\xee\x2a\x76\x49\x25\xa6\x2e\x43\x37\x75\x7d\x1a\xad"
+ "\x08\xd5\x01\x85\x9c\xe1\x20\xd8\x38\x5c\x57\xa5\xed\x9d\x46\x3a"
+ "\xb7\x46\x60\x29\x8b\xc4\x21\x50\x0a\x30\x9c\x57\x42\xe4\x35\xf8"
+ "\x12\x5c\x4f\xa2\x20\xc2\xc9\x43\xe3\x6d\x20\xbc\xdf\xb8\x37\x33"
+ "\x45\x43\x06\x4e\x08\x6f\x8a\xcd\x61\xc3\x1b\x05\x28\x82\xbe\xf0"
+ "\x48\x33\xe5\x93\xc9\x1a\x61\x16\x67\x03\x9d\x47\x9d\x74\xeb\xae"
+ "\x13\xf2\xb4\x1b\x09\x11\xf5\x15\xcb\x28\xfd\x50\xe0\xbc\x58\x36"
+ "\x38\x91\x2c\x07\x27\x1f\x49\x68\xf4\xce\xad\xf7\xba\xec\x5d\x3d"
+ "\xfd\x27\xe2\xcf\xf4\x56\xfe\x08\xa6\x11\x61\xcb\x6c\x9f\xf9\x3c"
+ "\x57\x0b\x8b\xaa\x00\x16\x18\xba\x1f\xe8\x4f\x01\xe2\x79\x2a\x0b"
+ "\xc1\xbd\x52\xef\xe6\xf7\x5a\x66\xfe\x07\x3b\x50\x6b\xbb\xcb\x39"
+ "\x3c\x94\xf6\x21\x0d\x68\x69\xa4\xed\x2e\xb5\x85\x03\x11\x38\x79"
+ "\xec\xb5\x22\x23\xdf\x9e\xad\xb4\xbe\xd7\xc7\xdf\xea\x30\x23\x8a"
+ "\xb7\x21\x0a\x9d\xbd\x99\x13\x7d\x5f\x7e\xaf\x28\x54\x3f\xca\x5e"
+ "\xf4\xfc\x05\x0d\x65\x67\xd8\xf6\x8e\x90\x9d\x0d\xcf\x62\x82\xd6"
+ "\x9f\x02\xf8\xca\xfa\x42\x24\x7f\x4d\xb7\xfc\x92\xa6\x4a\x51\xc4"
+ "\xd8\xae\x19\x87\xc6\xa3\x83\xbe\x7b\x6d\xc3\xf5\xb8\xad\x4a\x05"
+ "\x78\x84\x3a\x15\x2e\x40\xbe\x79\xa9\xc0\x12\xa1\x48\x39\xc3\xdb"
+ "\x47\x4f\x7d\xea\x6d\xc7\xfa\x2c\x4e\xe9\xa5\x85\x81\xea\x6c\xcd"
+ "\x8a\xe5\x74\x17\x76\x31\x31\x75\x96\x83\xca\x81\xbb\x5c\xa9\x79"
+ "\x2c\xbd\x09\xfe\xe4\x86\x0d\x8c\x76\x9c\xbc\xe8\x93\xe4\xd0\xe4"
+ "\x0f\xf8\xff\x24\x7e\x66\x61\x69\xfb\xe4\x46\x08\x94\x99\xa5\x53"
+ "\xd7\xe4\x29\x72\x86\x86\xe8\x1d\x37\xfa\xcb\xd0\x8d\x51\xd0\xbf"
+ "\x81\xcf\x55\xb9\xc5\x78\x8c\x74\xa0\x16\x3a\xd2\x19\x94\x29\x6a"
+ "\x5e\xec\xd3\x20\xa0\xb2\xfd\xce\xd4\x14\xa3\x39\x10\xa9\xf4\x4e"
+ "\xba\x21\x09\x5c\xe6\x61\x43\x51\xae\xc4\x71\xd7\x21\xef\x98\x39",
+ .b_public =
+ "\x45\x96\x5a\xb7\x78\x5c\xa4\x4d\x39\xb2\x5f\xc8\xc2\xaa\x1a\xf4"
+ "\xa6\x68\xf6\x6f\x7e\xa8\x4a\x5b\x0e\xba\x0a\x99\x85\xf9\x63\xd4"
+ "\x58\x21\x6d\xa8\x3c\xf4\x05\x10\xb0\x0d\x6f\x1c\xa0\x17\x85\xae"
+ "\x68\xbf\xcc\x00\xc8\x86\x1b\x24\x31\xc9\x49\x23\x91\xe0\x71\x29"
+ "\x06\x39\x39\x93\x49\x9c\x75\x18\x1a\x8b\x61\x73\x1c\x7f\x37\xd5"
+ "\xf1\xab\x20\x5e\x62\x25\xeb\x58\xd5\xfa\xc9\x7f\xad\x57\xd5\xcc"
+ "\x0d\xc1\x7a\x2b\x33\x2a\x76\x84\x33\x26\x97\xcf\x47\x9d\x72\x2a"
+ "\xc9\x39\xde\xa8\x42\x27\x2d\xdc\xee\x00\x60\xd2\x4f\x13\xe0\xde"
+ "\xd5\xc7\xf6\x7d\x8b\x2a\x43\x49\x40\x99\xc2\x61\x84\x8e\x57\x09"
+ "\x7c\xcc\x19\x46\xbd\x4c\xd2\x7c\x7d\x02\x4d\x88\xdf\x58\x24\x80"
+ "\xeb\x19\x3b\x2a\x13\x2b\x19\x85\x3c\xd8\x31\x03\x00\xa4\xd4\x57"
+ "\x23\x2c\x24\x37\xb3\x62\xea\x35\x29\xd0\x2c\xac\xfd\xbd\xdf\x3d"
+ "\xa6\xce\xfa\x0d\x5b\xb6\x15\x8b\xe3\x58\xe9\xad\x99\x87\x29\x51"
+ "\x8d\x97\xd7\xa9\x55\xf0\x72\x6e\x4e\x58\xcb\x2b\x4d\xbd\xd0\x48"
+ "\x7d\x14\x86\xdb\x3f\xa2\x5f\x6e\x35\x4a\xe1\x70\xb1\x53\x72\xb7"
+ "\xbc\xe9\x3d\x1b\x33\xc0\x54\x6f\x43\x55\x76\x85\x7f\x9b\xa5\xb3"
+ "\xc1\x1d\xd3\xfe\xe2\xd5\x96\x3d\xdd\x92\x04\xb1\xad\x75\xdb\x13"
+ "\x4e\x49\xfc\x35\x34\xc5\xda\x13\x98\xb8\x12\xbe\xda\x90\x55\x7c"
+ "\x11\x6c\xbe\x2b\x8c\x51\x29\x23\xc1\x51\xbc\x0c\x1c\xe2\x20\xfc"
+ "\xfe\xf2\xaa\x71\x9b\x21\xdf\x25\x1f\x68\x21\x7e\xe1\xc9\x87\xa0"
+ "\x20\xf6\x8d\x4f\x27\x8c\x3c\x0f\x9d\xf4\x69\x25\xaa\x49\xab\x94"
+ "\x22\x5a\x92\x3a\xba\xb4\xc2\x8c\x5a\xaa\x04\xbf\x46\xc5\xaa\x93"
+ "\xab\x0d\xe9\x54\x6c\x3a\x64\xa6\xa2\x21\x66\xee\x1c\x10\x21\x84"
+ "\xf2\x9e\xcc\x57\xac\xc2\x25\x62\xad\xbb\x59\xef\x25\x61\x6c\x81"
+ "\x38\x8a\xdc\x8c\xeb\x7b\x18\x1d\xaf\xa9\xc5\x9a\xf4\x49\x26\x8a"
+ "\x25\xc4\x3e\x31\x95\x28\xef\xf7\x72\xe9\xc5\xaa\x59\x72\x2b\x67"
+ "\x47\xe8\x6b\x51\x05\x24\xb8\x18\xb3\x34\x0f\x8c\x2b\x80\xba\x61"
+ "\x1c\xbe\x9e\x9a\x7c\xe3\x60\x5e\x49\x02\xff\x50\x8a\x64\x28\x64"
+ "\x46\x7b\x83\x14\x72\x6e\x59\x9b\x56\x09\xb4\xf0\xde\x52\xc3\xf3"
+ "\x58\x17\x6a\xae\xb1\x0f\xf4\x39\xcc\xd8\xce\x4d\xe1\x51\x17\x88"
+ "\xe4\x98\xd9\xd1\xa9\x55\xbc\xbf\x7e\xc4\x51\x96\xdb\x44\x1d\xcd"
+ "\x8d\x74\xad\xa7\x8f\x87\x83\x75\xfc\x36\xb7\xd2\xd4\x89\x16\x97"
+ "\xe4\xc6\x2a\xe9\x65\xc8\xca\x1c\xbd\x86\xaf\x57\x80\xf7\xdd\x42"
+ "\xc0\x3b\x3f\x87\x51\x02\x2f\xf8\xd8\x68\x0f\x3d\x95\x2d\xf1\x67"
+ "\x09\xa6\x5d\x0b\x7e\x01\xb4\xb2\x32\x01\xa8\xd0\x58\x0d\xe6\xa2"
+ "\xd8\x4b\x22\x10\x7d\x11\xf3\xc2\x4e\xb8\x43\x8e\x31\x79\x59\xe2"
+ "\xc4\x96\x29\x17\x40\x06\x0d\xdf\xdf\xc3\x02\x30\x2a\xd1\x8e\xf2"
+ "\xee\x2d\xd2\x12\x63\x5a\x1d\x3c\xba\x4a\xc4\x56\x90\xc6\x12\x0b"
+ "\xe0\x04\x3f\x35\x59\x8e\x40\x75\xf4\x4c\x10\x61\xb9\x30\x89\x7c"
+ "\x8d\x0e\x25\xb7\x5a\x6b\x97\x05\xc6\x37\x80\x6e\x94\x56\xa8\x5f"
+ "\x03\x94\x59\xc8\xc5\x3e\xdc\x23\xe5\x68\x4f\xd7\xbb\x6d\x7e\xc1"
+ "\x8d\xf9\xcc\x3f\x38\xad\x77\xb3\x18\x61\xed\x04\xc0\x71\xa7\x96"
+ "\xb1\xaf\x1d\x69\x78\xda\x6d\x89\x8b\x50\x75\x99\x44\xb3\xb2\x75"
+ "\xd1\xc8\x14\x40\xa1\x0a\xbf\xc4\x45\xc4\xee\x12\x90\x76\x26\x64"
+ "\xb7\x73\x2e\x0b\x0c\xfa\xc3\x55\x29\x24\x1b\x7a\x00\x27\x07\x26"
+ "\x36\xf0\x38\x1a\xe3\xb7\xc4\x8d\x1c\x9c\xa9\xc0\xc1\x45\x91\x9e"
+ "\x86\xdd\x82\x94\x45\xfa\xcd\x5a\x19\x12\x7d\xef\xda\x17\xad\x21"
+ "\x17\x89\x8b\x45\xa7\xf5\xed\x51\x9e\x58\x13\xdc\x84\xa4\xe6\x37",
+ .secret_size = 16,
+ .b_secret_size = 784,
+ .b_public_size = 768,
+ .expected_a_public_size = 768,
+ .expected_ss_size = 768,
+ .genkey = true,
+ },
+};
+
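Each template pairs a fixed-key vector with a .genkey = true one: there, party A's .secret is just the bare 16-byte header with key_size zero, which asks the implementation to generate its own private key, while party B keeps a fixed b_secret/b_public so the exchange remains checkable. A sketch of that header-only secret, assuming the same dh_helper encoding as above:

/* An all-zero struct dh encodes to exactly the 16-byte header used by
 * the .secret_size = 16 vectors (sizeof(struct kpp_secret) + 3 ints). */
static int use_generated_key(struct crypto_kpp *tfm)
{
        struct dh params = {};
        char buf[16];
        int err;

        err = crypto_dh_encode_key(buf, sizeof(buf), &params);
        if (err)
                return err;
        return crypto_kpp_set_secret(tfm, buf, sizeof(buf));
}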
+static const struct kpp_testvec ffdhe8192_dh_tv_template[] __maybe_unused = {
+ {
+ .secret =
+#ifdef __LITTLE_ENDIAN
+ "\x01\x00" /* type */
+ "\x10\x04" /* len */
+ "\x00\x04\x00\x00" /* key_size */
+ "\x00\x00\x00\x00" /* p_size */
+ "\x00\x00\x00\x00" /* g_size */
+#else
+ "\x00\x01" /* type */
+ "\x04\x10" /* len */
+ "\x00\x00\x04\x00" /* key_size */
+ "\x00\x00\x00\x00" /* p_size */
+ "\x00\x00\x00\x00" /* g_size */
+#endif
+ /* xa */
+ "\x76\x6e\xeb\xf9\xeb\x76\xae\x37\xcb\x19\x49\x8b\xeb\xaf\xb0\x4b"
+ "\x6d\xe9\x15\xad\xda\xf2\xef\x58\xe9\xd6\xdd\x4c\xb3\x56\xd0\x3b"
+ "\x00\xb0\x65\xed\xae\xe0\x2e\xdf\x8f\x45\x3f\x3c\x5d\x2f\xfa\x96"
+ "\x36\x33\xb2\x01\x8b\x0f\xe8\x46\x15\x6d\x60\x5b\xec\x32\xc3\x3b"
+ "\x06\xf3\xb4\x1b\x9a\xef\x3c\x03\x0e\xcc\xce\x1d\x24\xa0\xc9\x08"
+ "\x65\xf9\x45\xe5\xd2\x43\x08\x88\x58\xd6\x46\xe7\xbb\x25\xac\xed"
+ "\x3b\xac\x6f\x5e\xfb\xd6\x19\xa6\x20\x3a\x1d\x0c\xe8\x00\x72\x54"
+ "\xd7\xd9\xc9\x26\x49\x18\xc6\xb8\xbc\xdd\xf3\xce\xf3\x7b\x69\x04"
+ "\x5c\x6f\x11\xdb\x44\x42\x72\xb6\xb7\x84\x17\x86\x47\x3f\xc5\xa1"
+ "\xd8\x86\xef\xe2\x27\x49\x2b\x8f\x3e\x91\x12\xd9\x45\x96\xf7\xe6"
+ "\x77\x76\x36\x58\x71\x9a\xb1\xdb\xcf\x24\x9e\x7e\xad\xce\x45\xba"
+ "\xb5\xec\x8e\xb9\xd6\x7b\x3d\x76\xa4\x85\xad\xd8\x49\x9b\x80\x9d"
+ "\x7f\x9f\x85\x09\x9e\x86\x5b\x6b\xf3\x8d\x39\x5e\x6f\xe4\x30\xc8"
+ "\xa5\xf3\xdf\x68\x73\x6b\x2e\x9a\xcb\xac\x0a\x0d\x44\xc1\xaf\xb2"
+ "\x11\x1b\x7c\x43\x08\x44\x43\xe2\x4e\xfd\x93\x30\x99\x09\x12\xbb"
+ "\xf6\x31\x34\xa5\x3d\x45\x98\xee\xd7\x2a\x1a\x89\xf5\x37\x92\x33"
+ "\xa0\xdd\xf5\xfb\x1f\x90\x42\x55\x5a\x0b\x82\xff\xf0\x96\x92\x15"
+ "\x65\x5a\x55\x96\xca\x1b\xd5\xe5\xb5\x94\xde\x2e\xa6\x03\x57\x9e"
+ "\x15\xe4\x32\x2b\x1f\xb2\x22\x21\xe9\xa0\x05\xd3\x65\x6c\x11\x66"
+ "\x25\x38\xbb\xa3\x6c\xc2\x0b\x2b\xd0\x7a\x20\x26\x29\x37\x5d\x5f"
+ "\xd8\xff\x2a\xcd\x46\x6c\xd6\x6e\xe5\x77\x1a\xe6\x33\xf1\x8e\xc8"
+ "\x10\x30\x11\x00\x27\xf9\x7d\x0e\x28\x43\xa7\x67\x38\x7f\x16\xda"
+ "\xd0\x01\x8e\xa4\xe8\x6f\xcd\x23\xaf\x77\x52\x34\xad\x7e\xc3\xed"
+ "\x2d\x10\x0a\x33\xdc\xcf\x1b\x88\x0f\xcc\x48\x7f\x42\xf0\x9e\x13"
+ "\x1f\xf5\xd1\xe9\x90\x87\xbd\xfa\x5f\x1d\x77\x55\xcb\xc3\x05\xaf"
+ "\x71\xd0\xe0\xab\x46\x31\xd7\xea\x89\x54\x2d\x39\xaf\xf6\x4f\x74"
+ "\xaf\x46\x58\x89\x78\x95\x2e\xe6\x90\xb7\xaa\x00\x73\x9f\xed\xb9"
+ "\x00\xd6\xf6\x6d\x26\x59\xcd\x56\xdb\xf7\x3d\x5f\xeb\x6e\x46\x33"
+ "\xb1\x23\xed\x9f\x8d\x58\xdc\xb4\x28\x3b\x90\x09\xc4\x61\x02\x1f"
+ "\xf8\x62\xf2\x6e\xc1\x94\x71\x66\x93\x11\xdf\xaa\x3e\xd7\xb5\xe5"
+ "\xc1\x78\xe9\x14\xcd\x55\x16\x51\xdf\x8d\xd0\x94\x8c\x43\xe9\xb8"
+ "\x1d\x42\x7f\x76\xbc\x6f\x87\x42\x88\xde\xd7\x52\x78\x00\x4f\x18"
+ "\x02\xe7\x7b\xe2\x8a\xc3\xd1\x43\xa5\xac\xda\xb0\x8d\x19\x96\xd4"
+ "\x81\xe0\x75\xe9\xca\x41\x7e\x1f\x93\x0b\x26\x24\xb3\xaa\xdd\x10"
+ "\x20\xd3\xf2\x9f\x3f\xdf\x65\xde\x67\x79\xdc\x76\x9f\x3c\x72\x75"
+ "\x65\x8a\x30\xcc\xd2\xcc\x06\xb1\xab\x62\x86\x78\x5d\xb8\xce\x72"
+ "\xb3\x12\xc7\x9f\x07\xd0\x6b\x98\x82\x9b\x6c\xbb\x15\xe5\xcc\xf4"
+ "\xc8\xf4\x60\x81\xdc\xd3\x09\x1b\x5e\xd4\xf3\x55\xcf\x1c\x16\x83"
+ "\x61\xb4\x2e\xcc\x08\x67\x58\xfd\x46\x64\xbc\x29\x4b\xdd\xda\xec"
+ "\xdc\xc6\xa9\xa5\x73\xfb\xf8\xf3\xaf\x89\xa8\x9e\x25\x14\xfa\xac"
+ "\xeb\x1c\x7c\x80\x96\x66\x4d\x41\x67\x9b\x07\x4f\x0a\x97\x17\x1c"
+ "\x4d\x61\xc7\x2e\x6f\x36\x98\x29\x50\x39\x6d\xe7\x70\xda\xf0\xc8"
+ "\x05\x80\x7b\x32\xff\xfd\x12\xde\x61\x0d\xf9\x4c\x21\xf1\x56\x72"
+ "\x3d\x61\x46\xc0\x2d\x07\xd1\x6c\xd3\xbe\x9a\x21\x83\x85\xf7\xed"
+ "\x53\x95\x44\x40\x8f\x75\x12\x18\xc2\x9a\xfd\x5e\xce\x66\xa6\x7f"
+ "\x57\xc0\xd7\x73\x76\xb3\x13\xda\x2e\x58\xc6\x27\x40\xb2\x2d\xef"
+ "\x7d\x72\xb4\xa8\x75\x6f\xcc\x5f\x42\x3e\x2c\x90\x36\x59\xa0\x34"
+ "\xaa\xce\xbc\x04\x4c\xe6\x56\xc2\xcd\xa6\x1c\x59\x04\x56\x53\xcf"
+ "\x6d\xd7\xf0\xb1\x4f\x91\xfa\x84\xcf\x4b\x8d\x50\x4c\xf8\x2a\x31"
+ "\x5f\xe3\xba\x79\xb4\xcc\x59\x64\xe3\x7a\xfa\xf6\x06\x9d\x04\xbb"
+ "\xce\x61\xbf\x9e\x59\x0a\x09\x51\x6a\xbb\x0b\x80\xe0\x91\xc1\x51"
+ "\x04\x58\x67\x67\x4b\x42\x4f\x95\x68\x75\xe2\x1f\x9c\x14\x70\xfd"
+ "\x3a\x8a\xce\x8b\x04\xa1\x89\xe7\xb4\xbf\x70\xfe\xf3\x0c\x48\x04"
+ "\x3a\xd2\x85\x68\x03\xe7\xfa\xec\x5b\x55\xb7\x95\xfd\x5b\x19\x35"
+ "\xad\xcb\x4a\x63\x03\x44\x64\x2a\x48\x59\x9a\x26\x43\x96\x8c\xe6"
+ "\xbd\xb7\x90\xd4\x5f\x8d\x08\x28\xa8\xc5\x89\x70\xb9\x6e\xd3\x3b"
+ "\x76\x0e\x37\x98\x15\x27\xca\xc9\xb0\xe0\xfd\xf3\xc6\xdf\x69\xce"
+ "\xe1\x5f\x6a\x3e\x5c\x86\xe2\x58\x41\x11\xf0\x7e\x56\xec\xe4\xc9"
+ "\x0d\x87\x91\xfb\xb9\xc8\x0d\x34\xab\xb0\xc6\xf2\xa6\x00\x7b\x18"
+ "\x92\xf4\x43\x7f\x01\x85\x2e\xef\x8c\x72\x50\x10\xdb\xf1\x37\x62"
+ "\x16\x85\x71\x01\xa8\x2b\xf0\x13\xd3\x7c\x0b\xaf\xf1\xf3\xd1\xee"
+ "\x90\x41\x5f\x7d\x5b\xa9\x83\x4b\xfa\x80\x59\x50\x73\xe1\xc4\xf9"
+ "\x5e\x4b\xde\xd9\xf5\x22\x68\x5e\x65\xd9\x37\xe4\x1a\x08\x0e\xb1"
+ "\x28\x2f\x40\x9e\x37\xa8\x12\x56\xb7\xb8\x64\x94\x68\x94\xff\x9f",
+ .b_public =
+ "\x26\xa8\x3a\x97\xe0\x52\x76\x07\x26\xa7\xbb\x21\xfd\xe5\x69\xde"
+ "\xe6\xe0\xb5\xa0\xf1\xaa\x51\x2b\x56\x1c\x3c\x6c\xe5\x9f\x8f\x75"
+ "\x71\x04\x86\xf6\x43\x2f\x20\x7f\x45\x4f\x5c\xb9\xf3\x90\xbe\xa9"
+ "\xa0\xd7\xe8\x03\x0e\xfe\x99\x9b\x8a\x1c\xbe\xa7\x63\xe8\x2b\x45"
+ "\xd4\x2c\x65\x25\x4c\x33\xda\xc5\x85\x77\x5d\x62\xea\x93\xe4\x45"
+ "\x59\xff\xa1\xd2\xf1\x73\x11\xed\x02\x64\x8a\x1a\xfb\xe1\x88\xa6"
+ "\x50\x6f\xff\x87\x12\xbb\xfc\x10\xcf\x19\x41\xb0\x35\x44\x7d\x51"
+ "\xe9\xc0\x77\xf2\x73\x21\x2e\x62\xbf\x65\xa5\xd1\x3b\xb1\x3e\x19"
+ "\x75\x4b\xb7\x8e\x03\xc3\xdf\xc8\xb2\xe6\xec\x2d\x7d\xa5\x6a\xba"
+ "\x93\x47\x50\xeb\x6e\xdb\x88\x05\x45\xad\x03\x8c\xf7\x9a\xe1\xc9"
+ "\x1e\x16\x96\x37\xa5\x3e\xe9\xb9\xa8\xdc\xb9\xa9\xf6\xa1\x3d\xed"
+ "\xbe\x12\x29\x8a\x3d\x3d\x90\xfc\x94\xfe\x66\x28\x1c\x1b\xa4\x89"
+ "\x47\x66\x4f\xac\x14\x00\x22\x2d\x5c\x03\xea\x71\x4d\x19\x7d\xd6"
+ "\x58\x39\x4c\x3d\x06\x2b\x30\xa6\xdc\x2c\x8d\xd1\xde\x79\x77\xfa"
+ "\x9c\x6b\x72\x11\x8a\x7f\x7d\x37\x28\x2a\x88\xbf\x0a\xdb\xac\x3b"
+ "\xc5\xa5\xd5\x7e\x25\xec\xa6\x7f\x5b\x53\x75\x83\x49\xd4\x77\xcc"
+ "\x7d\x7e\xd3\x3d\x30\x2c\x98\x3f\x18\x9a\x11\x8a\x37\xda\x99\x0f"
+ "\x3b\x06\xe1\x87\xd5\xe9\x4e\xe0\x9c\x0e\x39\x34\xe2\xdd\xf6\x58"
+ "\x60\x63\xa6\xea\xe8\xc0\xb4\xde\xdf\xa0\xbc\x21\xc3\x2d\xf4\xa4"
+ "\xc8\x6f\x62\x6c\x0f\x71\x88\xf9\xda\x2d\x30\xd5\x95\xe1\xfc\x6d"
+ "\x88\xc5\xc3\x95\x51\x83\xde\x41\x46\x6f\x7e\x1b\x10\x48\xad\x2b"
+ "\x82\x88\xa2\x6f\x57\x4d\x4a\xbd\x90\xc8\x06\x8f\x52\x5d\x6e\xee"
+ "\x09\xe6\xa3\xcb\x30\x9c\x14\xf6\xac\x66\x9b\x81\x0a\x75\x42\x6b"
+ "\xab\x27\xec\x76\xfb\x8d\xc5\xbf\x0e\x93\x81\x7b\x81\xd4\x85\xa6"
+ "\x90\x5a\xa6\xa2\x8b\xa9\xb7\x34\xe6\x15\x36\x93\x8b\xe2\x99\xc7"
+ "\xad\x66\x7e\xd6\x89\xa9\xc8\x15\xcb\xc5\xeb\x06\x85\xd4\x2f\x6e"
+ "\x9b\x95\x7a\x06\x6c\xfa\x31\x1d\xc4\xe5\x7d\xfb\x10\x35\x88\xc2"
+ "\xbe\x1c\x16\x5d\xc2\xf4\x0d\xf3\xc9\x94\xb2\x7e\xa7\xbd\x9c\x03"
+ "\x32\xaf\x8b\x1a\xc8\xcc\x82\xd8\x87\x96\x6e\x3d\xcc\x93\xd2\x43"
+ "\x73\xf9\xde\xec\x49\x49\xf4\x56\x2a\xc8\x6e\x32\x70\x48\xf8\x70"
+ "\xa3\x96\x31\xf4\xf2\x08\xc5\x12\xd2\xeb\xb6\xea\xa3\x07\x05\x61"
+ "\x74\xa3\x04\x2f\x17\x82\x40\x5e\x4c\xd1\x51\xb8\x10\x5b\xc8\x9f"
+ "\x87\x73\x80\x0d\x6f\xc6\xb9\xf6\x7c\x31\x0a\xcc\xd9\x03\x0f\x7a"
+ "\x47\x69\xb1\x55\xab\xe9\xb5\x75\x62\x9e\x95\xbe\x7b\xa9\x53\x6e"
+ "\x28\x73\xdc\xb3\xa4\x8a\x1c\x91\xf5\x8a\xf9\x32\x2b\xbd\xa5\xdc"
+ "\x07\xb5\xaf\x49\xdb\x9c\x35\xc9\x69\xde\xac\xb1\xd0\x86\xcb\x31"
+ "\x0b\xc4\x4f\x63\x4e\x70\xa7\x80\xe3\xbc\x0b\x73\x0e\xf2\x8c\x87"
+ "\x88\x7b\xa9\x6d\xde\x8a\x73\x14\xb9\x80\x55\x03\x2b\x29\x64\x6a"
+ "\xda\x48\x0e\x78\x07\x40\x48\x46\x58\xa9\x4e\x68\x1d\xd1\xc1\xc8"
+ "\x3b\x35\x53\x61\xd5\xe3\x0d\x4c\x42\x74\x10\x67\x85\x9f\x66\x2a"
+ "\xf7\x2b\x7b\x77\x8b\x6e\xda\x2c\xc1\x5a\x20\x34\x3f\xf5\x8b\x6f"
+ "\xe4\x61\xf5\x58\xab\x72\x1a\xf1\x8d\x28\xcc\xa5\x30\x68\xb5\x50"
+ "\x7b\x81\x43\x89\x8e\xa9\xac\x63\x3a\x4a\x78\x7b\xd2\x45\xe6\xe0"
+ "\xdc\x5d\xf2\x1a\x2b\x54\x50\xa5\x9d\xf6\xe7\x9f\x25\xaf\x56\x6a"
+ "\x84\x2a\x75\xa3\x9a\xc7\xfa\x94\xec\x83\xab\xa5\xaa\xe1\xf9\x89"
+ "\x29\xa9\xf6\x53\x24\x24\xae\x4a\xe8\xbc\xe8\x9e\x5c\xd7\x54\x7c"
+ "\x65\x20\x97\x28\x94\x76\xf9\x9e\x81\xcf\x98\x6a\x3a\x7b\xec\xf3"
+ "\x09\x60\x2e\x43\x18\xb5\xf6\x8c\x44\x0f\xf2\x0a\x17\x5b\xac\x98"
+ "\x30\xab\x6e\xd5\xb3\xef\x25\x68\x50\xb6\xe1\xc0\xe4\x5a\x63\x43"
+ "\xea\xca\xda\x23\xc1\xc2\xe9\x30\xec\xb3\x9f\xbf\x1f\x09\x76\xaf"
+ "\x65\xbc\xb5\xab\x30\xac\x0b\x05\xef\x5c\xa3\x65\x77\x33\x1c\xc5"
+ "\xdf\xc9\x39\xab\xca\xf4\x3b\x88\x25\x6d\x50\x87\xb1\x79\xc2\x23"
+ "\x9d\xb5\x21\x01\xaa\xa3\xb7\x61\xa3\x48\x91\x72\x3d\x54\x85\x86"
+ "\x91\x81\x35\x78\xbf\x8f\x27\x57\xcb\x9b\x34\xab\x63\x40\xf1\xbc"
+ "\x23\x5a\x26\x6a\xba\x57\xe2\x8f\x2a\xdc\x82\xe0\x3b\x7f\xec\xd3"
+ "\xd8\x9d\xd3\x13\x54\x70\x64\xc3\xfd\xbf\xa3\x46\xa7\x53\x42\x7f"
+ "\xc1\xbd\x7b\xb3\x13\x47\x2a\x45\x1e\x76\x2c\x0d\x6d\x46\x26\x24"
+ "\xa8\xc7\x00\x2b\x10\x7f\x2a\x6c\xfc\x68\x4e\x6e\x85\x53\x00\xaf"
+ "\xd5\xfb\x59\x64\xc7\x9b\x24\xd1\x05\xdc\x34\x53\x6d\x27\xa9\x79"
+ "\xff\xd7\x5e\x7a\x40\x81\x8e\xc3\xf2\x38\xc9\x8d\x87\xb5\x38\xda"
+ "\x43\x64\x1b\x59\x62\x88\xc1\x6e\x85\x84\x33\xcd\x6d\x7b\x62\x1d"
+ "\x60\xf9\x98\xf7\xd1\xb1\xd4\xbe\x56\x6e\xa8\x6f\xff\xe7\x8b\x60"
+ "\x53\x80\xc7\x7c\xe0\x78\x89\xa9\xab\x42\x8f\x8e\x4d\x92\xac\xa7"
+ "\xfd\x47\x11\xc7\xdb\x7c\x77\xfb\xa4\x1d\x70\xaf\x56\x14\x52\xb0",
+ .expected_a_public =
+ "\xa1\x6c\x9e\xda\x45\x4d\xf6\x59\x04\x00\xc1\xc6\x8b\x12\x3b\xcd"
+ "\x07\xe4\x3e\xec\xac\x9b\xfc\xf7\x6d\x73\x39\x9e\x52\xf8\xbe\x33"
+ "\xe2\xca\xea\x99\x76\xc7\xc9\x94\x5c\xf3\x1b\xea\x6b\x66\x4b\x51"
+ "\x90\xf6\x4f\x75\xd5\x85\xf4\x28\xfd\x74\xa5\x57\xb1\x71\x0c\xb6"
+ "\xb6\x95\x70\x2d\xfa\x4b\x56\xe0\x56\x10\x21\xe5\x60\xa6\x18\xa4"
+ "\x78\x8c\x07\xc0\x2b\x59\x9c\x84\x5b\xe9\xb9\x74\xbf\xbc\x65\x48"
+ "\x27\x82\x40\x53\x46\x32\xa2\x92\x91\x9d\xf6\xd1\x07\x0e\x1d\x07"
+ "\x1b\x41\x04\xb1\xd4\xce\xae\x6e\x46\xf1\x72\x50\x7f\xff\xa8\xa2"
+ "\xbc\x3a\xc1\xbb\x28\xd7\x7d\xcd\x7a\x22\x01\xaf\x57\xb0\xa9\x02"
+ "\xd4\x8a\x92\xd5\xe6\x8e\x6f\x11\x39\xfe\x36\x87\x89\x42\x25\x42"
+ "\xd9\xbe\x67\x15\xe1\x82\x8a\x5e\x98\xc2\xd5\xde\x9e\x13\x1a\xe7"
+ "\xf9\x9f\x8e\x2d\x49\xdc\x4d\x98\x8c\xdd\xfd\x24\x7c\x46\xa9\x69"
+ "\x3b\x31\xb3\x12\xce\x54\xf6\x65\x75\x40\xc2\xf1\x04\x92\xe3\x83"
+ "\xeb\x02\x3d\x79\xc0\xf9\x7c\x28\xb3\x97\x03\xf7\x61\x1c\xce\x95"
+ "\x1a\xa0\xb3\x77\x1b\xc1\x9f\xf8\xf6\x3f\x4d\x0a\xfb\xfa\x64\x1c"
+ "\xcb\x37\x5b\xc3\x28\x60\x9f\xd1\xf2\xc4\xee\x77\xaa\x1f\xe9\xa2"
+ "\x89\x4c\xc6\xb7\xb3\xe4\xa5\xed\xa7\xe8\xac\x90\xdc\xc3\xfb\x56"
+ "\x9c\xda\x2c\x1d\x1a\x9a\x8c\x82\x92\xee\xdc\xa0\xa4\x01\x6e\x7f"
+ "\xc7\x0e\xc2\x73\x7d\xa6\xac\x12\x01\xc0\xc0\xc8\x7c\x84\x86\xc7"
+ "\xa5\x94\xe5\x33\x84\x71\x6e\x36\xe3\x3b\x81\x30\xe0\xc8\x51\x52"
+ "\x2b\x9e\x68\xa2\x6e\x09\x95\x8c\x7f\x78\x82\xbd\x53\x26\xe7\x95"
+ "\xe0\x03\xda\xc0\xc3\x6e\xcf\xdc\xb3\x14\xfc\xe9\x5b\x9b\x70\x6c"
+ "\x93\x04\xab\x13\xf7\x17\x6d\xee\xad\x32\x48\xe9\xa0\x94\x1b\x14"
+ "\x64\x4f\xa1\xb3\x8d\x6a\xca\x28\xfe\x4a\xf4\xf0\xc5\xb7\xf9\x8a"
+ "\x8e\xff\xfe\x57\x6f\x20\xdb\x04\xab\x02\x31\x22\x42\xfd\xbd\x77"
+ "\xea\xce\xe8\xc7\x5d\xe0\x8e\xd6\x66\xd0\xe4\x04\x2f\x5f\x71\xc7"
+ "\x61\x2d\xa5\x3f\x2f\x46\xf2\xd8\x5b\x25\x82\xf0\x52\x88\xc0\x59"
+ "\xd3\xa3\x90\x17\xc2\x04\x13\xc3\x13\x69\x4f\x17\xb1\xb3\x46\x4f"
+ "\xa7\xe6\x8b\x5e\x3e\x95\x0e\xf5\x42\x17\x7f\x4d\x1f\x1b\x7d\x65"
+ "\x86\xc5\xc8\xae\xae\xd8\x4f\xe7\x89\x41\x69\xfd\x06\xce\x5d\xed"
+ "\x44\x55\xad\x51\x98\x15\x78\x8d\x68\xfc\x93\x72\x9d\x22\xe5\x1d"
+ "\x21\xc3\xbe\x3a\x44\x34\xc0\xa3\x1f\xca\xdf\x45\xd0\x5c\xcd\xb7"
+ "\x72\xeb\xae\x7a\xad\x3f\x05\xa0\xe3\x6e\x5a\xd8\x52\xa7\xf1\x1e"
+ "\xb4\xf2\xcf\xe7\xdf\xa7\xf2\x22\x00\xb2\xc4\x17\x3d\x2c\x15\x04"
+ "\x71\x28\x69\x5c\x69\x21\xc8\xf1\x9b\xd8\xc7\xbc\x27\xa3\x85\xe9"
+ "\x53\x77\xd3\x65\xc3\x86\xdd\xb3\x76\x13\xfb\xa1\xd4\xee\x9d\xe4"
+ "\x51\x3f\x83\x59\xe4\x47\xa8\xa6\x0d\x68\xd5\xf6\xf4\xca\x31\xcd"
+ "\x30\x48\x34\x90\x11\x8e\x87\xe9\xea\xc9\xd0\xc3\xba\x28\xf9\xc0"
+ "\xc9\x8e\x23\xe5\xc2\xee\xf2\x47\x9c\x41\x1c\x10\x33\x27\x23\x49"
+ "\xe5\x0d\x18\xbe\x19\xc1\xba\x6c\xdc\xb7\xa1\xe7\xc5\x0d\x6f\xf0"
+ "\x8c\x62\x6e\x0d\x14\xef\xef\xf2\x8e\x01\xd2\x76\xf5\xc1\xe1\x92"
+ "\x3c\xb3\x76\xcd\xd8\xdd\x9b\xe0\x8e\xdc\x24\x34\x13\x65\x0f\x11"
+ "\xaf\x99\x7a\x2f\xe6\x1f\x7d\x17\x3e\x8a\x68\x9a\x37\xc8\x8d\x3e"
+ "\xa3\xfe\xfe\x57\x22\xe6\x0e\x50\xb5\x98\x0b\x71\xd8\x01\xa2\x8d"
+ "\x51\x96\x50\xc2\x41\x31\xd8\x23\x98\xfc\xd1\x9d\x7e\x27\xbb\x69"
+ "\x78\xe0\x87\xf7\xe4\xdd\x58\x13\x9d\xec\x00\xe4\xb9\x70\xa2\x94"
+ "\x5d\x52\x4e\xf2\x5c\xd1\xbc\xfd\xee\x9b\xb9\xe5\xc4\xc0\xa8\x77"
+ "\x67\xa4\xd1\x95\x34\xe4\x6d\x5f\x25\x02\x8d\x65\xdd\x11\x63\x55"
+ "\x04\x01\x21\x60\xc1\x5c\xef\x77\x33\x01\x1c\xa2\x11\x2b\xdd\x2b"
+ "\x74\x99\x23\x38\x05\x1b\x7e\x2e\x01\x52\xfe\x9c\x23\xde\x3e\x1a"
+ "\x72\xf4\xff\x7b\x02\xaa\x08\xcf\xe0\x5b\x83\xbe\x85\x5a\xe8\x9d"
+ "\x11\x3e\xff\x2f\xc6\x97\x67\x36\x6c\x0f\x81\x9c\x26\x29\xb1\x0f"
+ "\xbb\x53\xbd\xf4\xec\x2a\x84\x41\x28\x3b\x86\x40\x95\x69\x55\x5f"
+ "\x30\xee\xda\x1e\x6c\x4b\x25\xd6\x2f\x2c\x0e\x3c\x1a\x26\xa0\x3e"
+ "\xef\x09\xc6\x2b\xe5\xa1\x0c\x03\xa8\xf5\x39\x70\x31\xc4\x32\x79"
+ "\xd1\xd9\xc2\xcc\x32\x4a\xf1\x2f\x57\x5a\xcc\xe5\xc3\xc5\xd5\x4e"
+ "\x86\x56\xca\x64\xdb\xab\x61\x85\x8f\xf9\x20\x02\x40\x66\x76\x9e"
+ "\x5e\xd4\xac\xf0\x47\xa6\x50\x5f\xc2\xaf\x55\x9b\xa3\xc9\x8b\xf8"
+ "\x42\xd5\xcf\x1a\x95\x22\xd9\xd1\x0b\x92\x51\xca\xde\x46\x02\x0d"
+ "\x8b\xee\xd9\xa0\x04\x74\xf5\x0e\xb0\x3a\x62\xec\x3c\x91\x29\x33"
+ "\xa7\x78\x22\x92\xac\x27\xe6\x2d\x6f\x56\x8a\x5d\x72\xc2\xf1\x5c"
+ "\x54\x11\x97\x24\x61\xcb\x0c\x52\xd4\x57\x56\x22\x86\xf0\x19\x27"
+ "\x76\x30\x04\xf4\x39\x7b\x1a\x5a\x04\x0d\xec\x59\x9a\x31\x4c\x40"
+ "\x19\x6d\x3c\x41\x1b\x0c\xca\xeb\x25\x39\x6c\x96\xf8\x55\xd0\xec",
+ .expected_ss =
+ "\xf9\x55\x4f\x48\x38\x74\xb7\x46\xa3\xc4\x2e\x88\xf0\x34\xab\x1d"
+ "\xcd\xa5\x58\xa7\x95\x88\x36\x62\x6f\x8a\xbd\xf2\xfb\x6f\x3e\xb9"
+ "\x91\x65\x58\xef\x70\x2f\xd5\xc2\x97\x70\xcb\xce\x8b\x78\x1c\xe0"
+ "\xb9\xfa\x77\x34\xd2\x4a\x19\x58\x11\xfd\x93\x84\x40\xc0\x8c\x19"
+ "\x8b\x98\x50\x83\xba\xfb\xe2\xad\x8b\x81\x84\x63\x90\x41\x4b\xf8"
+ "\xe8\x78\x86\x04\x09\x8d\x84\xd1\x43\xfd\xa3\x58\x21\x2a\x3b\xb1"
+ "\xa2\x5b\x48\x74\x3c\xa9\x16\x34\x28\xf0\x8e\xde\xe2\xcf\x8e\x68"
+ "\x53\xab\x65\x06\xb7\x86\xb1\x08\x4f\x73\x97\x00\x10\x95\xd1\x84"
+ "\x72\xcf\x14\xdb\xff\xa7\x80\xd8\xe5\xf2\x2c\x89\x37\xb0\x81\x2c"
+ "\xf5\xd6\x7d\x1b\xb0\xe2\x8e\x87\x32\x3d\x37\x6a\x79\xaa\xe7\x08"
+ "\xc9\x67\x55\x5f\x1c\xae\xa6\xf5\xef\x79\x3a\xaf\x3f\x82\x14\xe2"
+ "\xf3\x69\x91\xed\xb7\x9e\xc9\xde\xd0\x29\x70\xd9\xeb\x0f\xf5\xc7"
+ "\xf6\x7c\xa7\x7f\xec\xed\xe1\xbd\x13\xe1\x43\xe4\x42\x30\xe3\x5f"
+ "\xe0\xf3\x15\x55\x2f\x7a\x42\x17\x67\xcb\xc2\x4f\xd0\x85\xfc\x6c"
+ "\xec\xe8\xfc\x25\x78\x4b\xe4\x0f\xd4\x3d\x78\x28\xd3\x53\x79\xcb"
+ "\x2c\x82\x67\x9a\xdc\x32\x55\xd2\xda\xae\xd8\x61\xce\xd6\x59\x0b"
+ "\xc5\x44\xeb\x08\x81\x8c\x65\xb2\xb7\xa6\xff\xf7\xbf\x99\xc6\x8a"
+ "\xbe\xde\xc2\x17\x56\x05\x6e\xd2\xf1\x1e\xa2\x04\xeb\x02\x74\xaa"
+ "\x04\xfc\xf0\x6b\xd4\xfc\xf0\x7a\x5f\xfe\xe2\x74\x7f\xeb\x9b\x6a"
+ "\x8a\x09\x96\x5d\xe1\x91\xb6\x9e\x37\xd7\x63\xd7\xb3\x5c\xb5\xa3"
+ "\x5f\x62\x00\xdf\xc5\xbf\x85\xba\xa7\xa9\xb6\x1f\x76\x78\x65\x01"
+ "\xfe\x1d\x6c\xfe\x15\x9e\xf4\xb1\xbc\x8d\xad\x3c\xec\x69\x27\x57"
+ "\xa4\x89\x77\x46\xe1\x49\xc7\x22\xde\x79\xe0\xf7\x3a\xa1\x59\x8b"
+ "\x59\x71\xcc\xd6\x18\x24\xc1\x8a\x2f\xe3\xdf\xdd\x6c\xf7\x62\xaa"
+ "\x15\xaa\x39\x37\x3b\xaf\x7d\x6e\x88\xeb\x19\xa8\xa0\x26\xd3\xaa"
+ "\x2d\xcc\x5f\x56\x99\x86\xa9\xed\x4d\x02\x31\x40\x97\x70\x83\xa7"
+ "\x08\x98\x7e\x49\x46\xd9\x75\xb5\x7a\x6a\x40\x69\xa0\x6d\xb2\x18"
+ "\xc0\xad\x88\x05\x02\x95\x6f\xf7\x8f\xcb\xa2\xe4\x7b\xab\x4a\x0f"
+ "\x9a\x1b\xef\xcc\xd1\x6a\x5d\x1e\x6a\x2a\x8b\x5b\x80\xbc\x5f\x38"
+ "\xdd\xaf\xad\x44\x15\xb4\xaf\x26\x1c\x1a\x4d\xa7\x4b\xec\x88\x33"
+ "\x24\x42\xb5\x0c\x9c\x56\xd4\xba\xa7\xb9\x65\xd5\x76\xb2\xbc\x16"
+ "\x8e\xfa\x0c\x7a\xc0\xa2\x2c\x5a\x39\x56\x7d\xe6\xf8\xa9\xf4\x49"
+ "\xd0\x50\xf2\x5e\x4b\x0a\x43\xe4\x9a\xbb\xea\x35\x28\x99\x84\x83"
+ "\xec\xc1\xa0\x68\x15\x9a\x2b\x01\x04\x48\x09\x11\x1b\xb6\xa4\xd8"
+ "\x03\xad\xb6\x4c\x9e\x1d\x90\xae\x88\x0f\x75\x95\x25\xa0\x27\x13"
+ "\xb7\x4f\xe2\x3e\xd5\x59\x1a\x7c\xde\x95\x14\x28\xd1\xde\x84\xe4"
+ "\x07\x7c\x5b\x06\xd6\xe6\x9c\x8a\xbe\xd2\xb4\x62\xd1\x67\x8a\x9c"
+ "\xac\x4f\xfa\x70\xd6\xc8\xc0\xeb\x5e\xf6\x3e\xdc\x48\x8e\xce\x3f"
+ "\x92\x3e\x60\x77\x63\x60\x6b\x76\x04\xa5\xba\xc9\xab\x92\x4e\x0d"
+ "\xdc\xca\x82\x44\x5f\x3a\x42\xeb\x01\xe7\xe0\x33\xb3\x32\xaf\x4b"
+ "\x81\x35\x2d\xb6\x57\x15\xfe\x52\xc7\x54\x2e\x41\x3b\x22\x6b\x12"
+ "\x72\xdb\x5c\x66\xd0\xb6\xb4\xfe\x90\xc0\x20\x34\x95\xf9\xe4\xc7"
+ "\x7e\x71\x89\x4f\x6f\xfb\x2a\xf3\xdf\x3f\xe3\xcf\x0e\x1a\xd9\xf2"
+ "\xc1\x02\x67\x5d\xdc\xf1\x7d\xe8\xcf\x64\x77\x4d\x12\x03\x77\x2c"
+ "\xfb\xe1\x59\xf7\x2c\x96\x9c\xaf\x46\x9c\xc7\x67\xcf\xee\x94\x50"
+ "\xc7\xa1\x23\xe6\x9f\x4d\x73\x92\xad\xf9\x4a\xce\xdb\x44\xd5\xe3"
+ "\x17\x05\x37\xdb\x9c\x6c\xc5\x7e\xb7\xd4\x11\x4a\x8c\x51\x03\xaa"
+ "\x73\x4b\x16\xd9\x79\xf5\xf1\x67\x20\x9b\x25\xe5\x41\x52\x59\x06"
+ "\x8b\xf2\x23\x2f\x6e\xea\xf3\x24\x0a\x94\xbb\xb8\x7e\xd9\x23\x4a"
+ "\x9f\x1f\xe1\x13\xb5\xfe\x85\x2f\x4c\xbe\x6a\x66\x02\x1d\x90\xd2"
+ "\x01\x25\x8a\xfd\x78\x3a\x28\xb8\x18\xc1\x38\x16\x21\x6b\xb4\xf9"
+ "\x64\x0f\xf1\x73\xc4\x5c\xd1\x41\xf2\xfe\xe7\x26\xad\x79\x12\x75"
+ "\x49\x48\xdb\x21\x71\x35\xf7\xb7\x46\x5a\xa1\x81\x25\x47\x31\xea"
+ "\x1d\x76\xbb\x32\x5a\x90\xb0\x42\x1a\x47\xe8\x0c\x82\x92\x43\x1c"
+ "\x0b\xdd\xe5\x25\xce\xd3\x06\xcc\x59\x5a\xc9\xa0\x01\xac\x29\x12"
+ "\x31\x2e\x3d\x1a\xed\x3b\xf3\xa7\xef\x52\xc2\x0d\x18\x1f\x03\x28"
+ "\xc9\x2b\x38\x61\xa4\x01\xc9\x3c\x11\x08\x14\xd4\xe5\x31\xe9\x3c"
+ "\x1d\xad\xf8\x76\xc4\x84\x9f\xea\x16\x61\x3d\x6d\xa3\x32\x31\xcd"
+ "\x1c\xca\xb8\x74\xc2\x45\xf3\x01\x9c\x7a\xaf\xfd\xe7\x1e\x5a\x18"
+ "\xb1\x9d\xbb\x7a\x2d\x34\x40\x17\x49\xad\x1f\xeb\x2d\xa2\x26\xb8"
+ "\x16\x28\x4b\x72\xdd\xd0\x8d\x85\x4c\xdd\xf8\x57\x48\xd5\x1d\xfb"
+ "\xbd\xec\x11\x5d\x1e\x9c\x26\x81\xbf\xf1\x16\x12\x32\xc3\xf3\x07"
+ "\x0e\x6e\x7f\x17\xec\xfb\xf4\x5d\xe2\xb1\xca\x97\xca\x46\x20\x2d"
+ "\x09\x85\x19\x25\x89\xa8\x9b\x51\x74\xae\xc9\x1b\x4c\xb6\x80\x62",
+ .secret_size = 1040,
+ .b_public_size = 1024,
+ .expected_a_public_size = 1024,
+ .expected_ss_size = 1024,
+ },
+ {
+ .secret =
+#ifdef __LITTLE_ENDIAN
+ "\x01\x00" /* type */
+ "\x10\x00" /* len */
+ "\x00\x00\x00\x00" /* key_size */
+ "\x00\x00\x00\x00" /* p_size */
+ "\x00\x00\x00\x00", /* g_size */
+#else
+ "\x00\x01" /* type */
+ "\x00\x10" /* len */
+ "\x00\x00\x00\x00" /* key_size */
+ "\x00\x00\x00\x00" /* p_size */
+ "\x00\x00\x00\x00", /* g_size */
+#endif
+ .b_secret =
+#ifdef __LITTLE_ENDIAN
+ "\x01\x00" /* type */
+ "\x10\x04" /* len */
+ "\x00\x04\x00\x00" /* key_size */
+ "\x00\x00\x00\x00" /* p_size */
+ "\x00\x00\x00\x00" /* g_size */
+#else
+ "\x00\x01" /* type */
+ "\x04\x10" /* len */
+ "\x00\x00\x04\x00" /* key_size */
+ "\x00\x00\x00\x00" /* p_size */
+ "\x00\x00\x00\x00" /* g_size */
+#endif
+ /* xa */
+ "\x76\x6e\xeb\xf9\xeb\x76\xae\x37\xcb\x19\x49\x8b\xeb\xaf\xb0\x4b"
+ "\x6d\xe9\x15\xad\xda\xf2\xef\x58\xe9\xd6\xdd\x4c\xb3\x56\xd0\x3b"
+ "\x00\xb0\x65\xed\xae\xe0\x2e\xdf\x8f\x45\x3f\x3c\x5d\x2f\xfa\x96"
+ "\x36\x33\xb2\x01\x8b\x0f\xe8\x46\x15\x6d\x60\x5b\xec\x32\xc3\x3b"
+ "\x06\xf3\xb4\x1b\x9a\xef\x3c\x03\x0e\xcc\xce\x1d\x24\xa0\xc9\x08"
+ "\x65\xf9\x45\xe5\xd2\x43\x08\x88\x58\xd6\x46\xe7\xbb\x25\xac\xed"
+ "\x3b\xac\x6f\x5e\xfb\xd6\x19\xa6\x20\x3a\x1d\x0c\xe8\x00\x72\x54"
+ "\xd7\xd9\xc9\x26\x49\x18\xc6\xb8\xbc\xdd\xf3\xce\xf3\x7b\x69\x04"
+ "\x5c\x6f\x11\xdb\x44\x42\x72\xb6\xb7\x84\x17\x86\x47\x3f\xc5\xa1"
+ "\xd8\x86\xef\xe2\x27\x49\x2b\x8f\x3e\x91\x12\xd9\x45\x96\xf7\xe6"
+ "\x77\x76\x36\x58\x71\x9a\xb1\xdb\xcf\x24\x9e\x7e\xad\xce\x45\xba"
+ "\xb5\xec\x8e\xb9\xd6\x7b\x3d\x76\xa4\x85\xad\xd8\x49\x9b\x80\x9d"
+ "\x7f\x9f\x85\x09\x9e\x86\x5b\x6b\xf3\x8d\x39\x5e\x6f\xe4\x30\xc8"
+ "\xa5\xf3\xdf\x68\x73\x6b\x2e\x9a\xcb\xac\x0a\x0d\x44\xc1\xaf\xb2"
+ "\x11\x1b\x7c\x43\x08\x44\x43\xe2\x4e\xfd\x93\x30\x99\x09\x12\xbb"
+ "\xf6\x31\x34\xa5\x3d\x45\x98\xee\xd7\x2a\x1a\x89\xf5\x37\x92\x33"
+ "\xa0\xdd\xf5\xfb\x1f\x90\x42\x55\x5a\x0b\x82\xff\xf0\x96\x92\x15"
+ "\x65\x5a\x55\x96\xca\x1b\xd5\xe5\xb5\x94\xde\x2e\xa6\x03\x57\x9e"
+ "\x15\xe4\x32\x2b\x1f\xb2\x22\x21\xe9\xa0\x05\xd3\x65\x6c\x11\x66"
+ "\x25\x38\xbb\xa3\x6c\xc2\x0b\x2b\xd0\x7a\x20\x26\x29\x37\x5d\x5f"
+ "\xd8\xff\x2a\xcd\x46\x6c\xd6\x6e\xe5\x77\x1a\xe6\x33\xf1\x8e\xc8"
+ "\x10\x30\x11\x00\x27\xf9\x7d\x0e\x28\x43\xa7\x67\x38\x7f\x16\xda"
+ "\xd0\x01\x8e\xa4\xe8\x6f\xcd\x23\xaf\x77\x52\x34\xad\x7e\xc3\xed"
+ "\x2d\x10\x0a\x33\xdc\xcf\x1b\x88\x0f\xcc\x48\x7f\x42\xf0\x9e\x13"
+ "\x1f\xf5\xd1\xe9\x90\x87\xbd\xfa\x5f\x1d\x77\x55\xcb\xc3\x05\xaf"
+ "\x71\xd0\xe0\xab\x46\x31\xd7\xea\x89\x54\x2d\x39\xaf\xf6\x4f\x74"
+ "\xaf\x46\x58\x89\x78\x95\x2e\xe6\x90\xb7\xaa\x00\x73\x9f\xed\xb9"
+ "\x00\xd6\xf6\x6d\x26\x59\xcd\x56\xdb\xf7\x3d\x5f\xeb\x6e\x46\x33"
+ "\xb1\x23\xed\x9f\x8d\x58\xdc\xb4\x28\x3b\x90\x09\xc4\x61\x02\x1f"
+ "\xf8\x62\xf2\x6e\xc1\x94\x71\x66\x93\x11\xdf\xaa\x3e\xd7\xb5\xe5"
+ "\xc1\x78\xe9\x14\xcd\x55\x16\x51\xdf\x8d\xd0\x94\x8c\x43\xe9\xb8"
+ "\x1d\x42\x7f\x76\xbc\x6f\x87\x42\x88\xde\xd7\x52\x78\x00\x4f\x18"
+ "\x02\xe7\x7b\xe2\x8a\xc3\xd1\x43\xa5\xac\xda\xb0\x8d\x19\x96\xd4"
+ "\x81\xe0\x75\xe9\xca\x41\x7e\x1f\x93\x0b\x26\x24\xb3\xaa\xdd\x10"
+ "\x20\xd3\xf2\x9f\x3f\xdf\x65\xde\x67\x79\xdc\x76\x9f\x3c\x72\x75"
+ "\x65\x8a\x30\xcc\xd2\xcc\x06\xb1\xab\x62\x86\x78\x5d\xb8\xce\x72"
+ "\xb3\x12\xc7\x9f\x07\xd0\x6b\x98\x82\x9b\x6c\xbb\x15\xe5\xcc\xf4"
+ "\xc8\xf4\x60\x81\xdc\xd3\x09\x1b\x5e\xd4\xf3\x55\xcf\x1c\x16\x83"
+ "\x61\xb4\x2e\xcc\x08\x67\x58\xfd\x46\x64\xbc\x29\x4b\xdd\xda\xec"
+ "\xdc\xc6\xa9\xa5\x73\xfb\xf8\xf3\xaf\x89\xa8\x9e\x25\x14\xfa\xac"
+ "\xeb\x1c\x7c\x80\x96\x66\x4d\x41\x67\x9b\x07\x4f\x0a\x97\x17\x1c"
+ "\x4d\x61\xc7\x2e\x6f\x36\x98\x29\x50\x39\x6d\xe7\x70\xda\xf0\xc8"
+ "\x05\x80\x7b\x32\xff\xfd\x12\xde\x61\x0d\xf9\x4c\x21\xf1\x56\x72"
+ "\x3d\x61\x46\xc0\x2d\x07\xd1\x6c\xd3\xbe\x9a\x21\x83\x85\xf7\xed"
+ "\x53\x95\x44\x40\x8f\x75\x12\x18\xc2\x9a\xfd\x5e\xce\x66\xa6\x7f"
+ "\x57\xc0\xd7\x73\x76\xb3\x13\xda\x2e\x58\xc6\x27\x40\xb2\x2d\xef"
+ "\x7d\x72\xb4\xa8\x75\x6f\xcc\x5f\x42\x3e\x2c\x90\x36\x59\xa0\x34"
+ "\xaa\xce\xbc\x04\x4c\xe6\x56\xc2\xcd\xa6\x1c\x59\x04\x56\x53\xcf"
+ "\x6d\xd7\xf0\xb1\x4f\x91\xfa\x84\xcf\x4b\x8d\x50\x4c\xf8\x2a\x31"
+ "\x5f\xe3\xba\x79\xb4\xcc\x59\x64\xe3\x7a\xfa\xf6\x06\x9d\x04\xbb"
+ "\xce\x61\xbf\x9e\x59\x0a\x09\x51\x6a\xbb\x0b\x80\xe0\x91\xc1\x51"
+ "\x04\x58\x67\x67\x4b\x42\x4f\x95\x68\x75\xe2\x1f\x9c\x14\x70\xfd"
+ "\x3a\x8a\xce\x8b\x04\xa1\x89\xe7\xb4\xbf\x70\xfe\xf3\x0c\x48\x04"
+ "\x3a\xd2\x85\x68\x03\xe7\xfa\xec\x5b\x55\xb7\x95\xfd\x5b\x19\x35"
+ "\xad\xcb\x4a\x63\x03\x44\x64\x2a\x48\x59\x9a\x26\x43\x96\x8c\xe6"
+ "\xbd\xb7\x90\xd4\x5f\x8d\x08\x28\xa8\xc5\x89\x70\xb9\x6e\xd3\x3b"
+ "\x76\x0e\x37\x98\x15\x27\xca\xc9\xb0\xe0\xfd\xf3\xc6\xdf\x69\xce"
+ "\xe1\x5f\x6a\x3e\x5c\x86\xe2\x58\x41\x11\xf0\x7e\x56\xec\xe4\xc9"
+ "\x0d\x87\x91\xfb\xb9\xc8\x0d\x34\xab\xb0\xc6\xf2\xa6\x00\x7b\x18"
+ "\x92\xf4\x43\x7f\x01\x85\x2e\xef\x8c\x72\x50\x10\xdb\xf1\x37\x62"
+ "\x16\x85\x71\x01\xa8\x2b\xf0\x13\xd3\x7c\x0b\xaf\xf1\xf3\xd1\xee"
+ "\x90\x41\x5f\x7d\x5b\xa9\x83\x4b\xfa\x80\x59\x50\x73\xe1\xc4\xf9"
+ "\x5e\x4b\xde\xd9\xf5\x22\x68\x5e\x65\xd9\x37\xe4\x1a\x08\x0e\xb1"
+ "\x28\x2f\x40\x9e\x37\xa8\x12\x56\xb7\xb8\x64\x94\x68\x94\xff\x9f",
+ .b_public =
+ "\xa1\x6c\x9e\xda\x45\x4d\xf6\x59\x04\x00\xc1\xc6\x8b\x12\x3b\xcd"
+ "\x07\xe4\x3e\xec\xac\x9b\xfc\xf7\x6d\x73\x39\x9e\x52\xf8\xbe\x33"
+ "\xe2\xca\xea\x99\x76\xc7\xc9\x94\x5c\xf3\x1b\xea\x6b\x66\x4b\x51"
+ "\x90\xf6\x4f\x75\xd5\x85\xf4\x28\xfd\x74\xa5\x57\xb1\x71\x0c\xb6"
+ "\xb6\x95\x70\x2d\xfa\x4b\x56\xe0\x56\x10\x21\xe5\x60\xa6\x18\xa4"
+ "\x78\x8c\x07\xc0\x2b\x59\x9c\x84\x5b\xe9\xb9\x74\xbf\xbc\x65\x48"
+ "\x27\x82\x40\x53\x46\x32\xa2\x92\x91\x9d\xf6\xd1\x07\x0e\x1d\x07"
+ "\x1b\x41\x04\xb1\xd4\xce\xae\x6e\x46\xf1\x72\x50\x7f\xff\xa8\xa2"
+ "\xbc\x3a\xc1\xbb\x28\xd7\x7d\xcd\x7a\x22\x01\xaf\x57\xb0\xa9\x02"
+ "\xd4\x8a\x92\xd5\xe6\x8e\x6f\x11\x39\xfe\x36\x87\x89\x42\x25\x42"
+ "\xd9\xbe\x67\x15\xe1\x82\x8a\x5e\x98\xc2\xd5\xde\x9e\x13\x1a\xe7"
+ "\xf9\x9f\x8e\x2d\x49\xdc\x4d\x98\x8c\xdd\xfd\x24\x7c\x46\xa9\x69"
+ "\x3b\x31\xb3\x12\xce\x54\xf6\x65\x75\x40\xc2\xf1\x04\x92\xe3\x83"
+ "\xeb\x02\x3d\x79\xc0\xf9\x7c\x28\xb3\x97\x03\xf7\x61\x1c\xce\x95"
+ "\x1a\xa0\xb3\x77\x1b\xc1\x9f\xf8\xf6\x3f\x4d\x0a\xfb\xfa\x64\x1c"
+ "\xcb\x37\x5b\xc3\x28\x60\x9f\xd1\xf2\xc4\xee\x77\xaa\x1f\xe9\xa2"
+ "\x89\x4c\xc6\xb7\xb3\xe4\xa5\xed\xa7\xe8\xac\x90\xdc\xc3\xfb\x56"
+ "\x9c\xda\x2c\x1d\x1a\x9a\x8c\x82\x92\xee\xdc\xa0\xa4\x01\x6e\x7f"
+ "\xc7\x0e\xc2\x73\x7d\xa6\xac\x12\x01\xc0\xc0\xc8\x7c\x84\x86\xc7"
+ "\xa5\x94\xe5\x33\x84\x71\x6e\x36\xe3\x3b\x81\x30\xe0\xc8\x51\x52"
+ "\x2b\x9e\x68\xa2\x6e\x09\x95\x8c\x7f\x78\x82\xbd\x53\x26\xe7\x95"
+ "\xe0\x03\xda\xc0\xc3\x6e\xcf\xdc\xb3\x14\xfc\xe9\x5b\x9b\x70\x6c"
+ "\x93\x04\xab\x13\xf7\x17\x6d\xee\xad\x32\x48\xe9\xa0\x94\x1b\x14"
+ "\x64\x4f\xa1\xb3\x8d\x6a\xca\x28\xfe\x4a\xf4\xf0\xc5\xb7\xf9\x8a"
+ "\x8e\xff\xfe\x57\x6f\x20\xdb\x04\xab\x02\x31\x22\x42\xfd\xbd\x77"
+ "\xea\xce\xe8\xc7\x5d\xe0\x8e\xd6\x66\xd0\xe4\x04\x2f\x5f\x71\xc7"
+ "\x61\x2d\xa5\x3f\x2f\x46\xf2\xd8\x5b\x25\x82\xf0\x52\x88\xc0\x59"
+ "\xd3\xa3\x90\x17\xc2\x04\x13\xc3\x13\x69\x4f\x17\xb1\xb3\x46\x4f"
+ "\xa7\xe6\x8b\x5e\x3e\x95\x0e\xf5\x42\x17\x7f\x4d\x1f\x1b\x7d\x65"
+ "\x86\xc5\xc8\xae\xae\xd8\x4f\xe7\x89\x41\x69\xfd\x06\xce\x5d\xed"
+ "\x44\x55\xad\x51\x98\x15\x78\x8d\x68\xfc\x93\x72\x9d\x22\xe5\x1d"
+ "\x21\xc3\xbe\x3a\x44\x34\xc0\xa3\x1f\xca\xdf\x45\xd0\x5c\xcd\xb7"
+ "\x72\xeb\xae\x7a\xad\x3f\x05\xa0\xe3\x6e\x5a\xd8\x52\xa7\xf1\x1e"
+ "\xb4\xf2\xcf\xe7\xdf\xa7\xf2\x22\x00\xb2\xc4\x17\x3d\x2c\x15\x04"
+ "\x71\x28\x69\x5c\x69\x21\xc8\xf1\x9b\xd8\xc7\xbc\x27\xa3\x85\xe9"
+ "\x53\x77\xd3\x65\xc3\x86\xdd\xb3\x76\x13\xfb\xa1\xd4\xee\x9d\xe4"
+ "\x51\x3f\x83\x59\xe4\x47\xa8\xa6\x0d\x68\xd5\xf6\xf4\xca\x31\xcd"
+ "\x30\x48\x34\x90\x11\x8e\x87\xe9\xea\xc9\xd0\xc3\xba\x28\xf9\xc0"
+ "\xc9\x8e\x23\xe5\xc2\xee\xf2\x47\x9c\x41\x1c\x10\x33\x27\x23\x49"
+ "\xe5\x0d\x18\xbe\x19\xc1\xba\x6c\xdc\xb7\xa1\xe7\xc5\x0d\x6f\xf0"
+ "\x8c\x62\x6e\x0d\x14\xef\xef\xf2\x8e\x01\xd2\x76\xf5\xc1\xe1\x92"
+ "\x3c\xb3\x76\xcd\xd8\xdd\x9b\xe0\x8e\xdc\x24\x34\x13\x65\x0f\x11"
+ "\xaf\x99\x7a\x2f\xe6\x1f\x7d\x17\x3e\x8a\x68\x9a\x37\xc8\x8d\x3e"
+ "\xa3\xfe\xfe\x57\x22\xe6\x0e\x50\xb5\x98\x0b\x71\xd8\x01\xa2\x8d"
+ "\x51\x96\x50\xc2\x41\x31\xd8\x23\x98\xfc\xd1\x9d\x7e\x27\xbb\x69"
+ "\x78\xe0\x87\xf7\xe4\xdd\x58\x13\x9d\xec\x00\xe4\xb9\x70\xa2\x94"
+ "\x5d\x52\x4e\xf2\x5c\xd1\xbc\xfd\xee\x9b\xb9\xe5\xc4\xc0\xa8\x77"
+ "\x67\xa4\xd1\x95\x34\xe4\x6d\x5f\x25\x02\x8d\x65\xdd\x11\x63\x55"
+ "\x04\x01\x21\x60\xc1\x5c\xef\x77\x33\x01\x1c\xa2\x11\x2b\xdd\x2b"
+ "\x74\x99\x23\x38\x05\x1b\x7e\x2e\x01\x52\xfe\x9c\x23\xde\x3e\x1a"
+ "\x72\xf4\xff\x7b\x02\xaa\x08\xcf\xe0\x5b\x83\xbe\x85\x5a\xe8\x9d"
+ "\x11\x3e\xff\x2f\xc6\x97\x67\x36\x6c\x0f\x81\x9c\x26\x29\xb1\x0f"
+ "\xbb\x53\xbd\xf4\xec\x2a\x84\x41\x28\x3b\x86\x40\x95\x69\x55\x5f"
+ "\x30\xee\xda\x1e\x6c\x4b\x25\xd6\x2f\x2c\x0e\x3c\x1a\x26\xa0\x3e"
+ "\xef\x09\xc6\x2b\xe5\xa1\x0c\x03\xa8\xf5\x39\x70\x31\xc4\x32\x79"
+ "\xd1\xd9\xc2\xcc\x32\x4a\xf1\x2f\x57\x5a\xcc\xe5\xc3\xc5\xd5\x4e"
+ "\x86\x56\xca\x64\xdb\xab\x61\x85\x8f\xf9\x20\x02\x40\x66\x76\x9e"
+ "\x5e\xd4\xac\xf0\x47\xa6\x50\x5f\xc2\xaf\x55\x9b\xa3\xc9\x8b\xf8"
+ "\x42\xd5\xcf\x1a\x95\x22\xd9\xd1\x0b\x92\x51\xca\xde\x46\x02\x0d"
+ "\x8b\xee\xd9\xa0\x04\x74\xf5\x0e\xb0\x3a\x62\xec\x3c\x91\x29\x33"
+ "\xa7\x78\x22\x92\xac\x27\xe6\x2d\x6f\x56\x8a\x5d\x72\xc2\xf1\x5c"
+ "\x54\x11\x97\x24\x61\xcb\x0c\x52\xd4\x57\x56\x22\x86\xf0\x19\x27"
+ "\x76\x30\x04\xf4\x39\x7b\x1a\x5a\x04\x0d\xec\x59\x9a\x31\x4c\x40"
+ "\x19\x6d\x3c\x41\x1b\x0c\xca\xeb\x25\x39\x6c\x96\xf8\x55\xd0\xec",
+ .secret_size = 16,
+ .b_secret_size = 1040,
+ .b_public_size = 1024,
+ .expected_a_public_size = 1024,
+ .expected_ss_size = 1024,
+ .genkey = true,
+ },
+};
+
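These arrays are tagged __maybe_unused because testmgr.h is always compiled while the vectors are referenced from alg_test_descs[] in testmgr.c only when the matching Kconfig option is enabled; the wiring presumably looks along these lines (sketch — the actual hunk is outside this excerpt):

{
        .alg = "ffdhe8192(dh)",
        .test = alg_test_kpp,
        .fips_allowed = 1,
        .suite = {
                .kpp = __VECS(ffdhe8192_dh_tv_template)
        }
},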
static const struct kpp_testvec curve25519_tv_template[] = {
{
.secret = (u8[32]){ 0x77, 0x07, 0x6d, 0x0a, 0x73, 0x18, 0xa5, 0x7d,
@@ -5713,6 +7140,7 @@ static const struct hash_testvec hmac_sha1_tv_template[] = {
.psize = 28,
.digest = "\xef\xfc\xdf\x6a\xe5\xeb\x2f\xa2\xd2\x74"
"\x16\xd5\xf1\x84\xdf\x9c\x25\x9a\x7c\x79",
+ .fips_skip = 1,
}, {
.key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa",
.ksize = 20,
@@ -5802,6 +7230,7 @@ static const struct hash_testvec hmac_sha224_tv_template[] = {
"\x45\x69\x0f\x3a\x7e\x9e\x6d\x0f"
"\x8b\xbe\xa2\xa3\x9e\x61\x48\x00"
"\x8f\xd0\x5e\x44",
+ .fips_skip = 1,
}, {
.key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
@@ -5945,6 +7374,7 @@ static const struct hash_testvec hmac_sha256_tv_template[] = {
"\x6a\x04\x24\x26\x08\x95\x75\xc7"
"\x5a\x00\x3f\x08\x9d\x27\x39\x83"
"\x9d\xec\x58\xb9\x64\xec\x38\x43",
+ .fips_skip = 1,
}, {
.key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
@@ -6443,6 +7873,7 @@ static const struct hash_testvec hmac_sha384_tv_template[] = {
"\xe4\x2e\xc3\x73\x63\x22\x44\x5e"
"\x8e\x22\x40\xca\x5e\x69\xe2\xc7"
"\x8b\x32\x39\xec\xfa\xb2\x16\x49",
+ .fips_skip = 1,
}, {
.key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
@@ -6543,6 +7974,7 @@ static const struct hash_testvec hmac_sha512_tv_template[] = {
"\x6d\x03\x4f\x65\xf8\xf0\xe6\xfd"
"\xca\xea\xb1\xa3\x4d\x4a\x6b\x4b"
"\x63\x6e\x07\x0a\x38\xbc\xe7\x37",
+ .fips_skip = 1,
}, {
.key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
@@ -6638,6 +8070,7 @@ static const struct hash_testvec hmac_sha3_224_tv_template[] = {
"\x1b\x79\x86\x34\xad\x38\x68\x11"
"\xc2\xcf\xc8\x5b\xfa\xf5\xd5\x2b"
"\xba\xce\x5e\x66",
+ .fips_skip = 1,
}, {
.key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
@@ -6725,6 +8158,7 @@ static const struct hash_testvec hmac_sha3_256_tv_template[] = {
"\x35\x96\xbb\xb0\xda\x73\xb8\x87"
"\xc9\x17\x1f\x93\x09\x5b\x29\x4a"
"\xe8\x57\xfb\xe2\x64\x5e\x1b\xa5",
+ .fips_skip = 1,
}, {
.key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
@@ -6816,6 +8250,7 @@ static const struct hash_testvec hmac_sha3_384_tv_template[] = {
"\x3c\xa1\x35\x08\xa9\x32\x43\xce"
"\x48\xc0\x45\xdc\x00\x7f\x26\xa2"
"\x1b\x3f\x5e\x0e\x9d\xf4\xc2\x0a",
+ .fips_skip = 1,
}, {
.key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
@@ -6915,6 +8350,7 @@ static const struct hash_testvec hmac_sha3_512_tv_template[] = {
"\xee\x7a\x0c\x31\xd0\x22\xa9\x5e"
"\x1f\xc9\x2b\xa9\xd7\x7d\xf8\x83"
"\x96\x02\x75\xbe\xb4\xe6\x20\x24",
+ .fips_skip = 1,
}, {
.key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
diff --git a/crypto/xts.c b/crypto/xts.c
index 6c12f30dbdd6..63c85b9e64e0 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -466,3 +466,4 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XTS block cipher mode");
MODULE_ALIAS_CRYPTO("xts");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
+MODULE_SOFTDEP("pre: ecb");
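The new softdep makes module loaders pull in ecb before xts: the xts template instantiates an inner "ecb(<cipher>)" transform, roughly via the usual name-composition pattern (illustrative; the call site itself is not part of this diff):

        char name[CRYPTO_MAX_ALG_NAME];

        if (snprintf(name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
                     cipher_name) >= CRYPTO_MAX_ALG_NAME)
                return -ENAMETOOLONG;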
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 9704963f9d50..a087156a5818 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -401,7 +401,7 @@ config HW_RANDOM_MESON
config HW_RANDOM_CAVIUM
tristate "Cavium ThunderX Random Number Generator support"
- depends on HW_RANDOM && PCI && ARM64
+ depends on HW_RANDOM && PCI && ARCH_THUNDER
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index ecb71c4317a5..b8effe77d80f 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -13,13 +13,16 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/hw_random.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#define TRNG_CR 0x00
#define TRNG_MR 0x04
#define TRNG_ISR 0x1c
+#define TRNG_ISR_DATRDY BIT(0)
#define TRNG_ODATA 0x50
#define TRNG_KEY 0x524e4700 /* RNG */
@@ -34,37 +37,79 @@ struct atmel_trng {
struct clk *clk;
void __iomem *base;
struct hwrng rng;
+ bool has_half_rate;
};
+static bool atmel_trng_wait_ready(struct atmel_trng *trng, bool wait)
+{
+ int ready;
+
+ ready = readl(trng->base + TRNG_ISR) & TRNG_ISR_DATRDY;
+ if (!ready && wait)
+ readl_poll_timeout(trng->base + TRNG_ISR, ready,
+ ready & TRNG_ISR_DATRDY, 1000, 20000);
+
+ return !!ready;
+}
+
static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
bool wait)
{
struct atmel_trng *trng = container_of(rng, struct atmel_trng, rng);
u32 *data = buf;
+ int ret;
- /* data ready? */
- if (readl(trng->base + TRNG_ISR) & 1) {
- *data = readl(trng->base + TRNG_ODATA);
- /*
- ensure data ready is only set again AFTER the next data
- word is ready in case it got set between checking ISR
- and reading ODATA, so we don't risk re-reading the
- same word
- */
- readl(trng->base + TRNG_ISR);
- return 4;
- } else
- return 0;
+ ret = pm_runtime_get_sync((struct device *)trng->rng.priv);
+ if (ret < 0) {
+ pm_runtime_put_sync((struct device *)trng->rng.priv);
+ return ret;
+ }
+
+ ret = atmel_trng_wait_ready(trng, wait);
+ if (!ret)
+ goto out;
+
+ *data = readl(trng->base + TRNG_ODATA);
+ /*
+ * ensure data ready is only set again AFTER the next data word is ready
+ * in case it got set between checking ISR and reading ODATA, so we
+ * don't risk re-reading the same word
+ */
+ readl(trng->base + TRNG_ISR);
+ ret = 4;
+
+out:
+ pm_runtime_mark_last_busy((struct device *)trng->rng.priv);
+ pm_runtime_put_sync_autosuspend((struct device *)trng->rng.priv);
+ return ret;
}
-static void atmel_trng_enable(struct atmel_trng *trng)
+static int atmel_trng_init(struct atmel_trng *trng)
{
+ unsigned long rate;
+ int ret;
+
+ ret = clk_prepare_enable(trng->clk);
+ if (ret)
+ return ret;
+
+ if (trng->has_half_rate) {
+ rate = clk_get_rate(trng->clk);
+
+ /* if peripheral clk is above 100MHz, set HALFR */
+ if (rate > 100000000)
+ writel(TRNG_HALFR, trng->base + TRNG_MR);
+ }
+
writel(TRNG_KEY | 1, trng->base + TRNG_CR);
+
+ return 0;
}
-static void atmel_trng_disable(struct atmel_trng *trng)
+static void atmel_trng_cleanup(struct atmel_trng *trng)
{
writel(TRNG_KEY, trng->base + TRNG_CR);
+ clk_disable_unprepare(trng->clk);
}
static int atmel_trng_probe(struct platform_device *pdev)
@@ -88,32 +133,31 @@ static int atmel_trng_probe(struct platform_device *pdev)
if (!data)
return -ENODEV;
- if (data->has_half_rate) {
- unsigned long rate = clk_get_rate(trng->clk);
-
- /* if peripheral clk is above 100MHz, set HALFR */
- if (rate > 100000000)
- writel(TRNG_HALFR, trng->base + TRNG_MR);
- }
-
- ret = clk_prepare_enable(trng->clk);
- if (ret)
- return ret;
-
- atmel_trng_enable(trng);
+ trng->has_half_rate = data->has_half_rate;
trng->rng.name = pdev->name;
trng->rng.read = atmel_trng_read;
+ trng->rng.priv = (unsigned long)&pdev->dev;
+ platform_set_drvdata(pdev, trng);
- ret = devm_hwrng_register(&pdev->dev, &trng->rng);
+#ifndef CONFIG_PM
+ ret = atmel_trng_init(trng);
if (ret)
- goto err_register;
+ return ret;
+#endif
- platform_set_drvdata(pdev, trng);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 100);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
- return 0;
+ ret = devm_hwrng_register(&pdev->dev, &trng->rng);
+ if (ret) {
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+#ifndef CONFIG_PM
+ atmel_trng_cleanup(trng);
+#endif
+ }
-err_register:
- clk_disable_unprepare(trng->clk);
return ret;
}
@@ -121,43 +165,35 @@ static int atmel_trng_remove(struct platform_device *pdev)
{
struct atmel_trng *trng = platform_get_drvdata(pdev);
-
- atmel_trng_disable(trng);
- clk_disable_unprepare(trng->clk);
+ atmel_trng_cleanup(trng);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
return 0;
}
-#ifdef CONFIG_PM
-static int atmel_trng_suspend(struct device *dev)
+static int __maybe_unused atmel_trng_runtime_suspend(struct device *dev)
{
struct atmel_trng *trng = dev_get_drvdata(dev);
- atmel_trng_disable(trng);
- clk_disable_unprepare(trng->clk);
+ atmel_trng_cleanup(trng);
return 0;
}
-static int atmel_trng_resume(struct device *dev)
+static int __maybe_unused atmel_trng_runtime_resume(struct device *dev)
{
struct atmel_trng *trng = dev_get_drvdata(dev);
- int ret;
- ret = clk_prepare_enable(trng->clk);
- if (ret)
- return ret;
-
- atmel_trng_enable(trng);
-
- return 0;
+ return atmel_trng_init(trng);
}
-static const struct dev_pm_ops atmel_trng_pm_ops = {
- .suspend = atmel_trng_suspend,
- .resume = atmel_trng_resume,
+static const struct dev_pm_ops __maybe_unused atmel_trng_pm_ops = {
+ SET_RUNTIME_PM_OPS(atmel_trng_runtime_suspend,
+ atmel_trng_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
-#endif /* CONFIG_PM */
static const struct atmel_trng_data at91sam9g45_config = {
.has_half_rate = false,
@@ -185,9 +221,7 @@ static struct platform_driver atmel_trng_driver = {
.remove = atmel_trng_remove,
.driver = {
.name = "atmel-trng",
-#ifdef CONFIG_PM
- .pm = &atmel_trng_pm_ops,
-#endif /* CONFIG_PM */
+ .pm = pm_ptr(&atmel_trng_pm_ops),
.of_match_table = atmel_trng_dt_ids,
},
};
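The read path above wraps every hardware access in a runtime-PM get/put pair with autosuspend. A minimal sketch of that pattern, assuming a driver that stores its struct device in private data; my_priv and my_read are hypothetical names:

#include <linux/device.h>
#include <linux/pm_runtime.h>

struct my_priv {
	struct device *dev;
};

static int my_read(struct my_priv *priv)
{
	int ret;

	ret = pm_runtime_get_sync(priv->dev);	/* resume the device */
	if (ret < 0) {
		/* get_sync bumps the usage count even on failure */
		pm_runtime_put_sync(priv->dev);
		return ret;
	}

	/* ... talk to the now-powered hardware ... */
	ret = 0;

	pm_runtime_mark_last_busy(priv->dev);	/* restart autosuspend timer */
	pm_runtime_put_sync_autosuspend(priv->dev);
	return ret;
}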
diff --git a/drivers/char/hw_random/cavium-rng-vf.c b/drivers/char/hw_random/cavium-rng-vf.c
index 6f66919652bf..7c55f4cf4a8b 100644
--- a/drivers/char/hw_random/cavium-rng-vf.c
+++ b/drivers/char/hw_random/cavium-rng-vf.c
@@ -179,7 +179,7 @@ static int cavium_map_pf_regs(struct cavium_rng *rng)
pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
PCI_DEVID_CAVIUM_RNG_PF, NULL);
if (!pdev) {
- dev_err(&pdev->dev, "Cannot find RNG PF device\n");
+ pr_err("Cannot find RNG PF device\n");
return -EIO;
}
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index cfb085de876b..16f227b995e8 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -32,7 +32,7 @@ static struct hwrng *current_rng;
/* the current rng has been explicitly chosen by user via sysfs */
static int cur_rng_set_by_user;
static struct task_struct *hwrng_fill;
-/* list of registered rngs, sorted decending by quality */
+/* list of registered rngs */
static LIST_HEAD(rng_list);
/* Protects rng_list and current_rng */
static DEFINE_MUTEX(rng_mutex);
@@ -45,14 +45,14 @@ static unsigned short default_quality; /* = 0; default to "off" */
module_param(current_quality, ushort, 0644);
MODULE_PARM_DESC(current_quality,
- "current hwrng entropy estimation per 1024 bits of input");
+ "current hwrng entropy estimation per 1024 bits of input -- obsolete, use rng_quality instead");
module_param(default_quality, ushort, 0644);
MODULE_PARM_DESC(default_quality,
"default entropy content of hwrng per 1024 bits of input");
static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
-static void start_khwrngd(void);
+static void hwrng_manage_rngd(struct hwrng *rng);
static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
int wait);
@@ -65,13 +65,12 @@ static size_t rng_buffer_size(void)
static void add_early_randomness(struct hwrng *rng)
{
int bytes_read;
- size_t size = min_t(size_t, 16, rng_buffer_size());
mutex_lock(&reading_mutex);
- bytes_read = rng_get_data(rng, rng_buffer, size, 0);
+ bytes_read = rng_get_data(rng, rng_fillbuf, 32, 0);
mutex_unlock(&reading_mutex);
if (bytes_read > 0)
- add_device_randomness(rng_buffer, bytes_read);
+ add_device_randomness(rng_fillbuf, bytes_read);
}
static inline void cleanup_rng(struct kref *kref)
@@ -162,14 +161,13 @@ static int hwrng_init(struct hwrng *rng)
reinit_completion(&rng->cleanup_done);
skip_init:
- current_quality = rng->quality ? : default_quality;
- if (current_quality > 1024)
- current_quality = 1024;
+ if (!rng->quality)
+ rng->quality = default_quality;
+ if (rng->quality > 1024)
+ rng->quality = 1024;
+ current_quality = rng->quality; /* obsolete */
- if (current_quality == 0 && hwrng_fill)
- kthread_stop(hwrng_fill);
- if (current_quality > 0 && !hwrng_fill)
- start_khwrngd();
+ hwrng_manage_rngd(rng);
return 0;
}
@@ -299,24 +297,28 @@ static struct miscdevice rng_miscdev = {
static int enable_best_rng(void)
{
+ struct hwrng *rng, *new_rng = NULL;
int ret = -ENODEV;
BUG_ON(!mutex_is_locked(&rng_mutex));
- /* rng_list is sorted by quality, use the best (=first) one */
- if (!list_empty(&rng_list)) {
- struct hwrng *new_rng;
-
- new_rng = list_entry(rng_list.next, struct hwrng, list);
- ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
- if (!ret)
- cur_rng_set_by_user = 0;
- } else {
+ /* no rng to use? */
+ if (list_empty(&rng_list)) {
drop_current_rng();
cur_rng_set_by_user = 0;
- ret = 0;
+ return 0;
+ }
+
+ /* use the rng which offers the best quality */
+ list_for_each_entry(rng, &rng_list, list) {
+ if (!new_rng || rng->quality > new_rng->quality)
+ new_rng = rng;
}
+ ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
+ if (!ret)
+ cur_rng_set_by_user = 0;
+
return ret;
}
@@ -337,8 +339,9 @@ static ssize_t rng_current_store(struct device *dev,
} else {
list_for_each_entry(rng, &rng_list, list) {
if (sysfs_streq(rng->name, buf)) {
- cur_rng_set_by_user = 1;
err = set_current_rng(rng);
+ if (!err)
+ cur_rng_set_by_user = 1;
break;
}
}
@@ -400,14 +403,76 @@ static ssize_t rng_selected_show(struct device *dev,
return sysfs_emit(buf, "%d\n", cur_rng_set_by_user);
}
+static ssize_t rng_quality_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t ret;
+ struct hwrng *rng;
+
+ rng = get_current_rng();
+ if (IS_ERR(rng))
+ return PTR_ERR(rng);
+
+ if (!rng) /* no need to put_rng */
+ return -ENODEV;
+
+ ret = sysfs_emit(buf, "%hu\n", rng->quality);
+ put_rng(rng);
+
+ return ret;
+}
+
+static ssize_t rng_quality_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ u16 quality;
+ int ret = -EINVAL;
+
+ if (len < 2)
+ return -EINVAL;
+
+ ret = mutex_lock_interruptible(&rng_mutex);
+ if (ret)
+ return -ERESTARTSYS;
+
+ ret = kstrtou16(buf, 0, &quality);
+ if (ret || quality > 1024) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!current_rng) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ current_rng->quality = quality;
+ current_quality = quality; /* obsolete */
+
+ /* the best available RNG may have changed */
+ ret = enable_best_rng();
+
+ /* start/stop rngd if necessary */
+ if (current_rng)
+ hwrng_manage_rngd(current_rng);
+
+out:
+ mutex_unlock(&rng_mutex);
+ return ret ? ret : len;
+}
+
static DEVICE_ATTR_RW(rng_current);
static DEVICE_ATTR_RO(rng_available);
static DEVICE_ATTR_RO(rng_selected);
+static DEVICE_ATTR_RW(rng_quality);
static struct attribute *rng_dev_attrs[] = {
&dev_attr_rng_current.attr,
&dev_attr_rng_available.attr,
&dev_attr_rng_selected.attr,
+ &dev_attr_rng_quality.attr,
NULL
};
@@ -425,9 +490,11 @@ static int __init register_miscdev(void)
static int hwrng_fillfn(void *unused)
{
+ size_t entropy, entropy_credit = 0; /* in 1/1024 of a bit */
long rc;
while (!kthread_should_stop()) {
+ unsigned short quality;
struct hwrng *rng;
rng = get_current_rng();
@@ -436,27 +503,49 @@ static int hwrng_fillfn(void *unused)
mutex_lock(&reading_mutex);
rc = rng_get_data(rng, rng_fillbuf,
rng_buffer_size(), 1);
+ if (current_quality != rng->quality)
+ rng->quality = current_quality; /* obsolete */
+ quality = rng->quality;
mutex_unlock(&reading_mutex);
put_rng(rng);
+
+ if (!quality)
+ break;
+
if (rc <= 0) {
pr_warn("hwrng: no data available\n");
msleep_interruptible(10000);
continue;
}
+
+		/*
+		 * If we cannot credit at least one bit of entropy,
+		 * keep track of the remainder for the next iteration.
+		 */
+ entropy = rc * quality * 8 + entropy_credit;
+ if ((entropy >> 10) == 0)
+ entropy_credit = entropy;
+
/* Outside lock, sure, but y'know: randomness. */
add_hwgenerator_randomness((void *)rng_fillbuf, rc,
- rc * current_quality * 8 >> 10);
+ entropy >> 10);
}
hwrng_fill = NULL;
return 0;
}
-static void start_khwrngd(void)
+static void hwrng_manage_rngd(struct hwrng *rng)
{
- hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
- if (IS_ERR(hwrng_fill)) {
- pr_err("hwrng_fill thread creation failed\n");
- hwrng_fill = NULL;
+ if (WARN_ON(!mutex_is_locked(&rng_mutex)))
+ return;
+
+ if (rng->quality == 0 && hwrng_fill)
+ kthread_stop(hwrng_fill);
+ if (rng->quality > 0 && !hwrng_fill) {
+ hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
+ if (IS_ERR(hwrng_fill)) {
+ pr_err("hwrng_fill thread creation failed\n");
+ hwrng_fill = NULL;
+ }
}
}
@@ -464,7 +553,6 @@ int hwrng_register(struct hwrng *rng)
{
int err = -EINVAL;
struct hwrng *tmp;
- struct list_head *rng_list_ptr;
bool is_new_current = false;
if (!rng->name || (!rng->data_read && !rng->read))
@@ -478,18 +566,11 @@ int hwrng_register(struct hwrng *rng)
if (strcmp(tmp->name, rng->name) == 0)
goto out_unlock;
}
+ list_add_tail(&rng->list, &rng_list);
init_completion(&rng->cleanup_done);
complete(&rng->cleanup_done);
- /* rng_list is sorted by decreasing quality */
- list_for_each(rng_list_ptr, &rng_list) {
- tmp = list_entry(rng_list_ptr, struct hwrng, list);
- if (tmp->quality < rng->quality)
- break;
- }
- list_add_tail(&rng->list, rng_list_ptr);
-
if (!current_rng ||
(!cur_rng_set_by_user && rng->quality > current_rng->quality)) {
/*
@@ -639,7 +720,7 @@ static void __exit hwrng_modexit(void)
unregister_miscdev();
}
-module_init(hwrng_modinit);
+fs_initcall(hwrng_modinit); /* depends on misc_register() */
module_exit(hwrng_modexit);
MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
index 67947a19aa22..e8f9621e7954 100644
--- a/drivers/char/hw_random/nomadik-rng.c
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -65,14 +65,14 @@ static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id)
out_release:
amba_release_regions(dev);
out_clk:
- clk_disable(rng_clk);
+ clk_disable_unprepare(rng_clk);
return ret;
}
static void nmk_rng_remove(struct amba_device *dev)
{
amba_release_regions(dev);
- clk_disable(rng_clk);
+ clk_disable_unprepare(rng_clk);
}
static const struct amba_id nmk_rng_ids[] = {
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 4f705674f94f..7b2d138bc83e 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -808,6 +808,16 @@ config CRYPTO_DEV_ZYNQMP_AES
accelerator. Select this if you want to use the ZynqMP module
for AES algorithms.
+config CRYPTO_DEV_ZYNQMP_SHA3
+ tristate "Support for Xilinx ZynqMP SHA3 hardware accelerator"
+ depends on ZYNQMP_FIRMWARE || COMPILE_TEST
+ select CRYPTO_SHA3
+ help
+	  Xilinx ZynqMP has a SHA3 engine used for secure hash calculation.
+	  This driver interfaces with the SHA3 hardware engine.
+	  Select this if you want to use the ZynqMP module
+	  for SHA3 hash computation.
+
source "drivers/crypto/chelsio/Kconfig"
source "drivers/crypto/virtio/Kconfig"
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 1fe5120eb966..0a4fff23d272 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -47,7 +47,7 @@ obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/
obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/
obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/
-obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_AES) += xilinx/
+obj-y += xilinx/
obj-y += hisilicon/
obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/
obj-y += keembay/
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index 54ae8d16e493..35e3cadccac2 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -11,6 +11,7 @@
* You could find a link for the datasheet in Documentation/arm/sunxi.rst
*/
+#include <linux/bottom_half.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
@@ -283,7 +284,9 @@ static int sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
flow = rctx->flow;
err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
+ local_bh_disable();
crypto_finalize_skcipher_request(engine, breq, err);
+ local_bh_enable();
return 0;
}
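This local_bh_disable()/local_bh_enable() bracket recurs in several engine drivers below: crypto_finalize_*_request() runs completion callbacks that expect bottom halves to be off (they normally fire from softirq context), while the crypto-engine worker calls back in plain thread context. A minimal sketch of the callback shape, assuming a skcipher engine driver; my_engine_do_one is a hypothetical name:

#include <linux/bottom_half.h>
#include <crypto/engine.h>
#include <crypto/skcipher.h>

static int my_engine_do_one(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *req =
		container_of(areq, struct skcipher_request, base);
	int err;

	err = 0;	/* ... run the request on the hardware ... */

	/* completion may run crypto callbacks that assume softirq
	 * context, so disable BHs around the finalize call */
	local_bh_disable();
	crypto_finalize_skcipher_request(engine, req, err);
	local_bh_enable();
	return 0;
}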
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
index 88194718a806..859b7522faaa 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
@@ -9,6 +9,7 @@
*
* You could find the datasheet in Documentation/arm/sunxi.rst
*/
+#include <linux/bottom_half.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
@@ -414,6 +415,8 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
theend:
kfree(buf);
kfree(result);
+ local_bh_disable();
crypto_finalize_hash_request(engine, breq, err);
+ local_bh_enable();
return 0;
}
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
index 9ef1c85c4aaa..554e400d41ca 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -11,6 +11,7 @@
* You could find a link for the datasheet in Documentation/arm/sunxi.rst
*/
+#include <linux/bottom_half.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
@@ -274,7 +275,9 @@ static int sun8i_ss_handle_cipher_request(struct crypto_engine *engine, void *ar
struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
err = sun8i_ss_cipher(breq);
+ local_bh_disable();
crypto_finalize_skcipher_request(engine, breq, err);
+ local_bh_enable();
return 0;
}
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
index 80e89066dbd1..319fe3279a71 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
@@ -30,6 +30,8 @@
static const struct ss_variant ss_a80_variant = {
.alg_cipher = { SS_ALG_AES, SS_ALG_DES, SS_ALG_3DES,
},
+ .alg_hash = { SS_ID_NOTSUPP, SS_ID_NOTSUPP, SS_ID_NOTSUPP, SS_ID_NOTSUPP,
+ },
.op_mode = { SS_OP_ECB, SS_OP_CBC,
},
.ss_clks = {
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
index 3c073eb3db03..1a71ed49d233 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
@@ -9,6 +9,7 @@
*
* You could find the datasheet in Documentation/arm/sunxi.rst
*/
+#include <linux/bottom_half.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
@@ -442,6 +443,8 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
theend:
kfree(pad);
kfree(result);
+ local_bh_disable();
crypto_finalize_hash_request(engine, breq, err);
+ local_bh_enable();
return 0;
}
diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
index c6865cbd334b..e79514fce731 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
@@ -265,7 +265,9 @@ static int meson_handle_cipher_request(struct crypto_engine *engine,
struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
err = meson_cipher(breq);
+ local_bh_disable();
crypto_finalize_skcipher_request(engine, breq, err);
+ local_bh_enable();
return 0;
}
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index fe0558403191..f72c6b3e4ad8 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -2509,6 +2509,7 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
/* keep only major version number */
switch (dd->hw_version & 0xff0) {
+ case 0x700:
case 0x500:
dd->caps.has_dualbuff = 1;
dd->caps.has_cfb64 = 1;
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 1b13f601fd95..d1628112dacc 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -2508,6 +2508,7 @@ static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
/* keep only major version number */
switch (dd->hw_version & 0xff0) {
+ case 0x700:
case 0x510:
dd->caps.has_dma = 1;
dd->caps.has_dualbuff = 1;
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index e30786ec9f2d..9fd7b8e439d2 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -1130,6 +1130,7 @@ static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd)
/* keep only major version number */
switch (dd->hw_version & 0xf00) {
+ case 0x800:
case 0x700:
dd->caps.has_dma = 1;
dd->caps.has_cfb_3keys = 1;
diff --git a/drivers/crypto/cavium/nitrox/nitrox_mbx.c b/drivers/crypto/cavium/nitrox/nitrox_mbx.c
index 2e9c0d214363..9e7308e39b30 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_mbx.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_mbx.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/bitmap.h>
#include <linux/workqueue.h>
#include "nitrox_csr.h"
@@ -120,6 +121,7 @@ static void pf2vf_resp_handler(struct work_struct *work)
void nitrox_pf2vf_mbox_handler(struct nitrox_device *ndev)
{
+ DECLARE_BITMAP(csr, BITS_PER_TYPE(u64));
struct nitrox_vfdev *vfdev;
struct pf2vf_work *pfwork;
u64 value, reg_addr;
@@ -129,7 +131,8 @@ void nitrox_pf2vf_mbox_handler(struct nitrox_device *ndev)
/* loop for VF(0..63) */
reg_addr = NPS_PKT_MBOX_INT_LO;
value = nitrox_read_csr(ndev, reg_addr);
- for_each_set_bit(i, (const unsigned long *)&value, BITS_PER_LONG) {
+ bitmap_from_u64(csr, value);
+ for_each_set_bit(i, csr, BITS_PER_TYPE(csr)) {
/* get the vfno from ring */
vfno = RING_TO_VFNO(i, ndev->iov.max_vf_queues);
vfdev = ndev->iov.vfdev + vfno;
@@ -151,7 +154,8 @@ void nitrox_pf2vf_mbox_handler(struct nitrox_device *ndev)
/* loop for VF(64..127) */
reg_addr = NPS_PKT_MBOX_INT_HI;
value = nitrox_read_csr(ndev, reg_addr);
- for_each_set_bit(i, (const unsigned long *)&value, BITS_PER_LONG) {
+ bitmap_from_u64(csr, value);
+ for_each_set_bit(i, csr, BITS_PER_TYPE(csr)) {
/* get the vfno from ring */
vfno = RING_TO_VFNO(i + 64, ndev->iov.max_vf_queues);
vfdev = ndev->iov.vfdev + vfno;
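Casting the address of a u64 to unsigned long * and iterating BITS_PER_LONG bits only visits the low 32 bits on a 32-bit kernel; copying the value into a real bitmap first is width- and endian-safe. A sketch of the pattern in isolation, assuming kernel context; walk_u64_bits is a hypothetical helper:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/printk.h>

static void walk_u64_bits(u64 value)
{
	DECLARE_BITMAP(bits, BITS_PER_TYPE(u64));
	unsigned int i;

	bitmap_from_u64(bits, value);	/* copy into real bitmap storage */
	for_each_set_bit(i, bits, BITS_PER_TYPE(u64))
		pr_info("bit %u is set\n", i);
}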
diff --git a/drivers/crypto/cavium/nitrox/nitrox_req.h b/drivers/crypto/cavium/nitrox/nitrox_req.h
index ed174883c8e3..6bf088bcdd11 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_req.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_req.h
@@ -440,7 +440,7 @@ struct aqmq_command_s {
/**
* struct ctx_hdr - Book keeping data about the crypto context
* @pool: Pool used to allocate crypto context
- * @dma: Base DMA address of the cypto context
+ * @dma: Base DMA address of the crypto context
* @ctx_dma: Actual usable crypto context for NITROX
*/
struct ctx_hdr {
diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c
index 812b4ac9afd6..dc5b7bf7e1fd 100644
--- a/drivers/crypto/cavium/zip/zip_main.c
+++ b/drivers/crypto/cavium/zip/zip_main.c
@@ -55,6 +55,11 @@ static const struct pci_device_id zip_id_table[] = {
{ 0, }
};
+static void zip_debugfs_init(void);
+static void zip_debugfs_exit(void);
+static int zip_register_compression_device(void);
+static void zip_unregister_compression_device(void);
+
void zip_reg_write(u64 val, u64 __iomem *addr)
{
writeq(val, addr);
@@ -235,6 +240,15 @@ static int zip_init_hw(struct zip_device *zip)
return 0;
}
+static void zip_reset(struct zip_device *zip)
+{
+ union zip_cmd_ctl cmd_ctl;
+
+ cmd_ctl.u_reg64 = 0x0ull;
+ cmd_ctl.s.reset = 1; /* Forces ZIP cores to do reset */
+ zip_reg_write(cmd_ctl.u_reg64, (zip->reg_base + ZIP_CMD_CTL));
+}
+
static int zip_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct device *dev = &pdev->dev;
@@ -282,8 +296,21 @@ static int zip_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_release_regions;
+ /* Register with the Kernel Crypto Interface */
+ err = zip_register_compression_device();
+ if (err < 0) {
+ zip_err("ZIP: Kernel Crypto Registration failed\n");
+ goto err_register;
+ }
+
+ /* comp-decomp statistics are handled with debugfs interface */
+ zip_debugfs_init();
+
return 0;
+err_register:
+ zip_reset(zip);
+
err_release_regions:
if (zip->reg_base)
iounmap(zip->reg_base);
@@ -305,16 +332,17 @@ err_free_device:
static void zip_remove(struct pci_dev *pdev)
{
struct zip_device *zip = pci_get_drvdata(pdev);
- union zip_cmd_ctl cmd_ctl;
int q = 0;
if (!zip)
return;
+ zip_debugfs_exit();
+
+ zip_unregister_compression_device();
+
if (zip->reg_base) {
- cmd_ctl.u_reg64 = 0x0ull;
- cmd_ctl.s.reset = 1; /* Forces ZIP cores to do reset */
- zip_reg_write(cmd_ctl.u_reg64, (zip->reg_base + ZIP_CMD_CTL));
+ zip_reset(zip);
iounmap(zip->reg_base);
}
@@ -585,7 +613,7 @@ DEFINE_SHOW_ATTRIBUTE(zip_regs);
/* Root directory for thunderx_zip debugfs entry */
static struct dentry *zip_debugfs_root;
-static void __init zip_debugfs_init(void)
+static void zip_debugfs_init(void)
{
if (!debugfs_initialized())
return;
@@ -604,7 +632,7 @@ static void __init zip_debugfs_init(void)
}
-static void __exit zip_debugfs_exit(void)
+static void zip_debugfs_exit(void)
{
debugfs_remove_recursive(zip_debugfs_root);
}
@@ -615,48 +643,7 @@ static void __exit zip_debugfs_exit(void) { }
#endif
/* debugfs - end */
-static int __init zip_init_module(void)
-{
- int ret;
-
- zip_msg("%s\n", DRV_NAME);
-
- ret = pci_register_driver(&zip_driver);
- if (ret < 0) {
- zip_err("ZIP: pci_register_driver() failed\n");
- return ret;
- }
-
- /* Register with the Kernel Crypto Interface */
- ret = zip_register_compression_device();
- if (ret < 0) {
- zip_err("ZIP: Kernel Crypto Registration failed\n");
- goto err_pci_unregister;
- }
-
- /* comp-decomp statistics are handled with debugfs interface */
- zip_debugfs_init();
-
- return ret;
-
-err_pci_unregister:
- pci_unregister_driver(&zip_driver);
- return ret;
-}
-
-static void __exit zip_cleanup_module(void)
-{
- zip_debugfs_exit();
-
- /* Unregister from the kernel crypto interface */
- zip_unregister_compression_device();
-
- /* Unregister this driver for pci zip devices */
- pci_unregister_driver(&zip_driver);
-}
-
-module_init(zip_init_module);
-module_exit(zip_cleanup_module);
+module_pci_driver(zip_driver);
MODULE_AUTHOR("Cavium Inc");
MODULE_DESCRIPTION("Cavium Inc ThunderX ZIP Driver");
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
index e6dcd8cedd53..bed331953ff9 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -69,7 +69,6 @@ static int ccp_aes_crypt(struct skcipher_request *req, bool encrypt)
struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
struct scatterlist *iv_sg = NULL;
unsigned int iv_len = 0;
- int ret;
if (!ctx->u.aes.key_len)
return -EINVAL;
@@ -104,9 +103,7 @@ static int ccp_aes_crypt(struct skcipher_request *req, bool encrypt)
rctx->cmd.u.aes.src_len = req->cryptlen;
rctx->cmd.u.aes.dst = req->dst;
- ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
-
- return ret;
+ return ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
}
static int ccp_aes_encrypt(struct skcipher_request *req)
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index d718db224be4..7d4b4ad1db1f 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -632,6 +632,20 @@ static int ccp_terminate_all(struct dma_chan *dma_chan)
return 0;
}
+static void ccp_dma_release(struct ccp_device *ccp)
+{
+ struct ccp_dma_chan *chan;
+ struct dma_chan *dma_chan;
+ unsigned int i;
+
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ chan = ccp->ccp_dma_chan + i;
+ dma_chan = &chan->dma_chan;
+ tasklet_kill(&chan->cleanup_tasklet);
+ list_del_rcu(&dma_chan->device_node);
+ }
+}
+
int ccp_dmaengine_register(struct ccp_device *ccp)
{
struct ccp_dma_chan *chan;
@@ -736,6 +750,7 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
return 0;
err_reg:
+ ccp_dma_release(ccp);
kmem_cache_destroy(ccp->dma_desc_cache);
err_cache:
@@ -752,6 +767,7 @@ void ccp_dmaengine_unregister(struct ccp_device *ccp)
return;
dma_async_device_unregister(dma_dev);
+ ccp_dma_release(ccp);
kmem_cache_destroy(ccp->dma_desc_cache);
kmem_cache_destroy(ccp->dma_cmd_cache);
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 8fd774a10edc..6ab93dfd478a 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -413,7 +413,7 @@ static int __sev_platform_init_locked(int *error)
{
struct psp_device *psp = psp_master;
struct sev_device *sev;
- int rc, psp_ret;
+ int rc, psp_ret = -1;
int (*init_function)(int *error);
if (!psp || !psp->sev_data)
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index a5e041d9d2cf..11e0278c8631 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -258,6 +258,13 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
{
int ret = 0;
+ if (!nbytes) {
+ *mapped_nents = 0;
+ *lbytes = 0;
+ *nents = 0;
+ return 0;
+ }
+
*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
if (*nents > max_sg_nents) {
*nents = 0;
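The early return makes a zero-byte request look like a successful empty mapping instead of reaching dma_map_sg() with a zero count, which is invalid. A reduced sketch of the guard in a simplified, hypothetical wrapper:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int map_req(struct device *dev, struct scatterlist *sg,
		   unsigned int nbytes, int nents)
{
	if (!nbytes)
		return 0;	/* nothing to map: report an empty mapping */

	/* dma_map_sg() returns 0 on failure, mapped entry count on success */
	return dma_map_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
}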
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index 78833491f534..309da6334a0a 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -257,8 +257,8 @@ static void cc_cipher_exit(struct crypto_tfm *tfm)
&ctx_p->user.key_dma_addr);
/* Free key buffer in context */
- kfree_sensitive(ctx_p->user.key);
dev_dbg(dev, "Free key buffer in context. key=@%p\n", ctx_p->user.key);
+ kfree_sensitive(ctx_p->user.key);
}
struct tdes_keys {
diff --git a/drivers/crypto/gemini/sl3516-ce-cipher.c b/drivers/crypto/gemini/sl3516-ce-cipher.c
index c1c2b1d86663..14d0d83d388d 100644
--- a/drivers/crypto/gemini/sl3516-ce-cipher.c
+++ b/drivers/crypto/gemini/sl3516-ce-cipher.c
@@ -23,8 +23,8 @@ static bool sl3516_ce_need_fallback(struct skcipher_request *areq)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
struct sl3516_ce_dev *ce = op->ce;
- struct scatterlist *in_sg = areq->src;
- struct scatterlist *out_sg = areq->dst;
+ struct scatterlist *in_sg;
+ struct scatterlist *out_sg;
struct scatterlist *sg;
if (areq->cryptlen == 0 || areq->cryptlen % 16) {
@@ -264,7 +264,9 @@ static int sl3516_ce_handle_cipher_request(struct crypto_engine *engine, void *a
struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
err = sl3516_ce_cipher(breq);
+ local_bh_disable();
crypto_finalize_skcipher_request(engine, breq, err);
+ local_bh_enable();
return 0;
}
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index c5b84a5ea350..453390044181 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -3840,7 +3840,7 @@ static void qm_clear_queues(struct hisi_qm *qm)
for (i = 0; i < qm->qp_num; i++) {
qp = &qm->qp_array[i];
- if (qp->is_resetting)
+ if (qp->is_in_kernel && qp->is_resetting)
memset(qp->qdma.va, 0, qp->qdma.size);
}
@@ -4295,7 +4295,7 @@ static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num)
static int qm_vf_read_qos(struct hisi_qm *qm)
{
int cnt = 0;
- int ret;
+ int ret = -EINVAL;
/* reset mailbox qos val */
qm->mb_qos = 0;
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 6a45bd23b363..a91635c348b5 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -42,6 +42,8 @@
#define SEC_DE_OFFSET_V3 9
#define SEC_SCENE_OFFSET_V3 5
#define SEC_CKEY_OFFSET_V3 13
+#define SEC_CTR_CNT_OFFSET 25
+#define SEC_CTR_CNT_ROLLOVER 2
#define SEC_SRC_SGL_OFFSET_V3 11
#define SEC_DST_SGL_OFFSET_V3 14
#define SEC_CALG_OFFSET_V3 4
@@ -63,6 +65,7 @@
#define SEC_AUTH_CIPHER 0x1
#define SEC_MAX_MAC_LEN 64
#define SEC_MAX_AAD_LEN 65535
+#define SEC_MAX_CCM_AAD_LEN 65279
#define SEC_TOTAL_MAC_SZ (SEC_MAX_MAC_LEN * QM_Q_DEPTH)
#define SEC_PBUF_SZ 512
@@ -237,7 +240,7 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp)
if (unlikely(type != type_supported)) {
atomic64_inc(&dfx->err_bd_cnt);
- pr_err("err bd type [%d]\n", type);
+ pr_err("err bd type [%u]\n", type);
return;
}
@@ -641,13 +644,15 @@ static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm)
struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
c_ctx->fallback = false;
+
+	/* Currently, only XTS mode needs a fallback tfm when using a 192-bit key */
if (likely(strncmp(alg, "xts", SEC_XTS_NAME_SZ)))
return 0;
c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(c_ctx->fbtfm)) {
- pr_err("failed to alloc fallback tfm!\n");
+ pr_err("failed to alloc xts mode fallback tfm!\n");
return PTR_ERR(c_ctx->fbtfm);
}
@@ -808,7 +813,7 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
}
memcpy(c_ctx->c_key, key, keylen);
- if (c_ctx->fallback) {
+ if (c_ctx->fallback && c_ctx->fbtfm) {
ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen);
if (ret) {
dev_err(dev, "failed to set fallback skcipher key!\n");
@@ -1300,6 +1305,10 @@ static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
cipher = SEC_CIPHER_DEC;
sec_sqe3->c_icv_key |= cpu_to_le16(cipher);
+	/* Set the CTR counter mode to 128-bit rollover */
+ sec_sqe3->auth_mac_key = cpu_to_le32((u32)SEC_CTR_CNT_ROLLOVER <<
+ SEC_CTR_CNT_OFFSET);
+
if (req->use_pbuf) {
bd_param |= SEC_PBUF << SEC_SRC_SGL_OFFSET_V3;
bd_param |= SEC_PBUF << SEC_DST_SGL_OFFSET_V3;
@@ -1614,7 +1623,7 @@ static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir,
sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE1);
sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3;
} else {
- sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE1);
+ sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE2);
sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3;
}
sqe3->a_len_key = cpu_to_le32(c_req->c_len + aq->assoclen);
@@ -2032,13 +2041,12 @@ static int sec_skcipher_soft_crypto(struct sec_ctx *ctx,
struct skcipher_request *sreq, bool encrypt)
{
struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, c_ctx->fbtfm);
struct device *dev = ctx->dev;
int ret;
- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, c_ctx->fbtfm);
-
if (!c_ctx->fbtfm) {
- dev_err(dev, "failed to check fallback tfm\n");
+ dev_err_ratelimited(dev, "the soft tfm isn't supported in the current system.\n");
return -EINVAL;
}
@@ -2219,6 +2227,10 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
}
if (c_mode == SEC_CMODE_CCM) {
+ if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN)) {
+ dev_err_ratelimited(dev, "CCM input aad parameter is too long!\n");
+ return -EINVAL;
+ }
ret = aead_iv_demension_check(req);
if (ret) {
dev_err(dev, "aead input iv param error!\n");
@@ -2256,7 +2268,6 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
if (ctx->sec->qm.ver == QM_HW_V2) {
if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt &&
req->cryptlen <= authsize))) {
- dev_err(dev, "Kunpeng920 not support 0 length!\n");
ctx->a_ctx.fallback = true;
return -EINVAL;
}
@@ -2284,9 +2295,10 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx,
struct aead_request *aead_req,
bool encrypt)
{
- struct aead_request *subreq = aead_request_ctx(aead_req);
struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
struct device *dev = ctx->dev;
+ struct aead_request *subreq;
+ int ret;
/* Kunpeng920 aead mode not support input 0 size */
if (!a_ctx->fallback_aead_tfm) {
@@ -2294,6 +2306,10 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx,
return -EINVAL;
}
+ subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL);
+ if (!subreq)
+ return -ENOMEM;
+
aead_request_set_tfm(subreq, a_ctx->fallback_aead_tfm);
aead_request_set_callback(subreq, aead_req->base.flags,
aead_req->base.complete, aead_req->base.data);
@@ -2301,8 +2317,13 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx,
aead_req->cryptlen, aead_req->iv);
aead_request_set_ad(subreq, aead_req->assoclen);
- return encrypt ? crypto_aead_encrypt(subreq) :
- crypto_aead_decrypt(subreq);
+ if (encrypt)
+ ret = crypto_aead_encrypt(subreq);
+ else
+ ret = crypto_aead_decrypt(subreq);
+ aead_request_free(subreq);
+
+ return ret;
}
static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
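The change above replaces an on-stack subrequest with a heap-allocated one sized for the fallback tfm. A sketch of that software-fallback AEAD invocation in isolation; soft_aead and its parameters are hypothetical names:

#include <crypto/aead.h>

static int soft_aead(struct crypto_aead *fb_tfm, struct aead_request *orig,
		     bool encrypt)
{
	struct aead_request *sub;
	int ret;

	/* allocate a request sized for the fallback tfm's reqsize */
	sub = aead_request_alloc(fb_tfm, GFP_KERNEL);
	if (!sub)
		return -ENOMEM;

	aead_request_set_callback(sub, orig->base.flags,
				  orig->base.complete, orig->base.data);
	aead_request_set_crypt(sub, orig->src, orig->dst,
			       orig->cryptlen, orig->iv);
	aead_request_set_ad(sub, orig->assoclen);

	ret = encrypt ? crypto_aead_encrypt(sub) : crypto_aead_decrypt(sub);
	aead_request_free(sub);
	return ret;
}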
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h
index 9f71c358a6d3..5e039b50e9d4 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.h
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h
@@ -354,8 +354,10 @@ struct sec_sqe3 {
* akey_len: 9~14 bits
* a_alg: 15~20 bits
* key_sel: 21~24 bits
- * updata_key: 25 bits
- * reserved: 26~31 bits
+ * ctr_count_mode/sm4_xts: 25~26 bits
+ * sva_prefetch: 27 bits
+ * key_wrap_num: 28~30 bits
+ * update_key: 31 bits
*/
__le32 auth_mac_key;
__le32 salt;
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 26d3ab1d308b..0b9906ff69e3 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -90,6 +90,10 @@
SEC_USER1_WB_DATA_SSV)
#define SEC_USER1_SMMU_SVA (SEC_USER1_SMMU_NORMAL | SEC_USER1_SVA_SET)
#define SEC_USER1_SMMU_MASK (~SEC_USER1_SVA_SET)
+#define SEC_INTERFACE_USER_CTRL0_REG_V3 0x302220
+#define SEC_INTERFACE_USER_CTRL1_REG_V3 0x302224
+#define SEC_USER1_SMMU_NORMAL_V3 (BIT(23) | BIT(17) | BIT(11) | BIT(5))
+#define SEC_USER1_SMMU_MASK_V3 0xFF79E79E
#define SEC_CORE_INT_STATUS_M_ECC BIT(2)
#define SEC_PREFETCH_CFG 0x301130
@@ -335,6 +339,41 @@ static void sec_set_endian(struct hisi_qm *qm)
writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
}
+static void sec_engine_sva_config(struct hisi_qm *qm)
+{
+ u32 reg;
+
+ if (qm->ver > QM_HW_V2) {
+ reg = readl_relaxed(qm->io_base +
+ SEC_INTERFACE_USER_CTRL0_REG_V3);
+ reg |= SEC_USER0_SMMU_NORMAL;
+ writel_relaxed(reg, qm->io_base +
+ SEC_INTERFACE_USER_CTRL0_REG_V3);
+
+ reg = readl_relaxed(qm->io_base +
+ SEC_INTERFACE_USER_CTRL1_REG_V3);
+ reg &= SEC_USER1_SMMU_MASK_V3;
+ reg |= SEC_USER1_SMMU_NORMAL_V3;
+ writel_relaxed(reg, qm->io_base +
+ SEC_INTERFACE_USER_CTRL1_REG_V3);
+ } else {
+ reg = readl_relaxed(qm->io_base +
+ SEC_INTERFACE_USER_CTRL0_REG);
+ reg |= SEC_USER0_SMMU_NORMAL;
+ writel_relaxed(reg, qm->io_base +
+ SEC_INTERFACE_USER_CTRL0_REG);
+ reg = readl_relaxed(qm->io_base +
+ SEC_INTERFACE_USER_CTRL1_REG);
+ reg &= SEC_USER1_SMMU_MASK;
+ if (qm->use_sva)
+ reg |= SEC_USER1_SMMU_SVA;
+ else
+ reg |= SEC_USER1_SMMU_NORMAL;
+ writel_relaxed(reg, qm->io_base +
+ SEC_INTERFACE_USER_CTRL1_REG);
+ }
+}
+
static void sec_open_sva_prefetch(struct hisi_qm *qm)
{
u32 val;
@@ -426,26 +465,18 @@ static int sec_engine_init(struct hisi_qm *qm)
reg |= (0x1 << SEC_TRNG_EN_SHIFT);
writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
- reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL0_REG);
- reg |= SEC_USER0_SMMU_NORMAL;
- writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL0_REG);
-
- reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL1_REG);
- reg &= SEC_USER1_SMMU_MASK;
- if (qm->use_sva && qm->ver == QM_HW_V2)
- reg |= SEC_USER1_SMMU_SVA;
- else
- reg |= SEC_USER1_SMMU_NORMAL;
- writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL1_REG);
+ sec_engine_sva_config(qm);
writel(SEC_SINGLE_PORT_MAX_TRANS,
qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS);
writel(SEC_SAA_ENABLE, qm->io_base + SEC_SAA_EN_REG);
- /* Enable sm4 extra mode, as ctr/ecb */
- writel_relaxed(SEC_BD_ERR_CHK_EN0,
- qm->io_base + SEC_BD_ERR_CHK_EN_REG0);
+ /* HW V2 enable sm4 extra mode, as ctr/ecb */
+ if (qm->ver < QM_HW_V3)
+ writel_relaxed(SEC_BD_ERR_CHK_EN0,
+ qm->io_base + SEC_BD_ERR_CHK_EN_REG0);
+
/* Enable sm4 xts mode multiple iv */
writel_relaxed(SEC_BD_ERR_CHK_EN1,
qm->io_base + SEC_BD_ERR_CHK_EN_REG1);
diff --git a/drivers/crypto/marvell/Kconfig b/drivers/crypto/marvell/Kconfig
index 9125199f1702..a48591af12d0 100644
--- a/drivers/crypto/marvell/Kconfig
+++ b/drivers/crypto/marvell/Kconfig
@@ -47,6 +47,7 @@ config CRYPTO_DEV_OCTEONTX2_CPT
select CRYPTO_SKCIPHER
select CRYPTO_HASH
select CRYPTO_AEAD
+ select NET_DEVLINK
help
This driver allows you to utilize the Marvell Cryptographic
Accelerator Unit(CPT) found in OcteonTX2 series of processors.
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
index ccbef01888d4..01c48ddc4eeb 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
@@ -1639,11 +1639,8 @@ static void swap_func(void *lptr, void *rptr, int size)
{
struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;
- struct cpt_device_desc desc;
- desc = *ldesc;
- *ldesc = *rdesc;
- *rdesc = desc;
+ swap(*ldesc, *rdesc);
}
int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
index b681bd2dc6ad..36d72e35ebeb 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
@@ -204,7 +204,6 @@ static int alloc_command_queues(struct otx_cptvf *cptvf,
/* per queue initialization */
for (i = 0; i < cptvf->num_queues; i++) {
- c_size = 0;
rem_q_size = q_size;
first = NULL;
last = NULL;
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
index fb56824cb0a6..5012b7e669f0 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
@@ -157,5 +157,6 @@ struct otx2_cptlfs_info;
int otx2_cpt_attach_rscrs_msg(struct otx2_cptlfs_info *lfs);
int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs);
int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs);
+int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox);
#endif /* __OTX2_CPT_COMMON_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
index 9074876d38e5..a317319696ef 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
@@ -202,3 +202,17 @@ int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs)
}
return ret;
}
+
+int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox)
+{
+ int err;
+
+ if (!otx2_mbox_nonempty(mbox, 0))
+ return 0;
+ otx2_mbox_msg_send(mbox, 0);
+ err = otx2_mbox_wait_for_rsp(mbox, 0);
+ if (err)
+ return err;
+
+ return otx2_mbox_check_rsp_msgs(mbox, 0);
+}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
index b691b6c1d5c4..4fcaf61a70e3 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
@@ -26,12 +26,22 @@
*/
#define OTX2_CPT_INST_QLEN_MSGS ((OTX2_CPT_SIZE_DIV40 - 1) * 40)
+/*
+ * LDWB is getting incorrectly used when IQB_LDWB = 1 and the CPT instruction
+ * queue has fewer than 320 free entries. So, increase the HW instruction queue
+ * size by 320 and give SW/NIX RX 320 fewer entries as a workaround.
+ */
+#define OTX2_CPT_INST_QLEN_EXTRA_BYTES (320 * OTX2_CPT_INST_SIZE)
+#define OTX2_CPT_EXTRA_SIZE_DIV40 (320/40)
+
/* CPT instruction queue length in bytes */
-#define OTX2_CPT_INST_QLEN_BYTES (OTX2_CPT_SIZE_DIV40 * 40 * \
- OTX2_CPT_INST_SIZE)
+#define OTX2_CPT_INST_QLEN_BYTES \
+ ((OTX2_CPT_SIZE_DIV40 * 40 * OTX2_CPT_INST_SIZE) + \
+ OTX2_CPT_INST_QLEN_EXTRA_BYTES)
/* CPT instruction group queue length in bytes */
-#define OTX2_CPT_INST_GRP_QLEN_BYTES (OTX2_CPT_SIZE_DIV40 * 16)
+#define OTX2_CPT_INST_GRP_QLEN_BYTES \
+ ((OTX2_CPT_SIZE_DIV40 + OTX2_CPT_EXTRA_SIZE_DIV40) * 16)
/* CPT FC length in bytes */
#define OTX2_CPT_Q_FC_LEN 128
@@ -179,7 +189,8 @@ static inline void otx2_cptlf_do_set_iqueue_size(struct otx2_cptlf_info *lf)
{
union otx2_cptx_lf_q_size lf_q_size = { .u = 0x0 };
- lf_q_size.s.size_div40 = OTX2_CPT_SIZE_DIV40;
+ lf_q_size.s.size_div40 = OTX2_CPT_SIZE_DIV40 +
+ OTX2_CPT_EXTRA_SIZE_DIV40;
otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
OTX2_CPT_LF_Q_SIZE, lf_q_size.u);
}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
index 05b2d9c650e1..936174b012e8 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
@@ -46,6 +46,7 @@ struct otx2_cptpf_dev {
struct workqueue_struct *flr_wq;
struct cptpf_flr_work *flr_work;
+ struct mutex lock; /* serialize mailbox access */
unsigned long cap_flag;
u8 pf_id; /* RVU PF number */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
index 1720a5bb7016..a402ccfac557 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
@@ -140,10 +140,13 @@ static void cptpf_flr_wq_handler(struct work_struct *work)
vf = flr_work - pf->flr_work;
+ mutex_lock(&pf->lock);
req = otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
sizeof(struct msg_rsp));
- if (!req)
+ if (!req) {
+ mutex_unlock(&pf->lock);
return;
+ }
req->sig = OTX2_MBOX_REQ_SIG;
req->id = MBOX_MSG_VF_FLR;
@@ -151,16 +154,19 @@ static void cptpf_flr_wq_handler(struct work_struct *work)
req->pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;
otx2_cpt_send_mbox_msg(mbox, pf->pdev);
+ if (!otx2_cpt_sync_mbox_msg(&pf->afpf_mbox)) {
- if (vf >= 64) {
- reg = 1;
- vf = vf - 64;
+ if (vf >= 64) {
+ reg = 1;
+ vf = vf - 64;
+ }
+ /* Clear transaction pending register */
+ otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
+ otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
}
- /* Clear transaction pending register */
- otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
- RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
- otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
- RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
+ mutex_unlock(&pf->lock);
}
static irqreturn_t cptpf_vf_flr_intr(int __always_unused irq, void *arg)
@@ -468,6 +474,7 @@ static int cptpf_afpf_mbox_init(struct otx2_cptpf_dev *cptpf)
goto error;
INIT_WORK(&cptpf->afpf_mbox_work, otx2_cptpf_afpf_mbox_handler);
+ mutex_init(&cptpf->lock);
return 0;
error:
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
index 186f1c1190c1..dee0aa60b698 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
@@ -18,9 +18,12 @@ static int forward_to_af(struct otx2_cptpf_dev *cptpf,
struct mbox_msghdr *msg;
int ret;
+ mutex_lock(&cptpf->lock);
msg = otx2_mbox_alloc_msg(&cptpf->afpf_mbox, 0, size);
- if (msg == NULL)
+ if (msg == NULL) {
+ mutex_unlock(&cptpf->lock);
return -ENOMEM;
+ }
memcpy((uint8_t *)msg + sizeof(struct mbox_msghdr),
(uint8_t *)req + sizeof(struct mbox_msghdr), size);
@@ -29,15 +32,19 @@ static int forward_to_af(struct otx2_cptpf_dev *cptpf,
msg->sig = req->sig;
msg->ver = req->ver;
- otx2_mbox_msg_send(&cptpf->afpf_mbox, 0);
- ret = otx2_mbox_wait_for_rsp(&cptpf->afpf_mbox, 0);
+ ret = otx2_cpt_sync_mbox_msg(&cptpf->afpf_mbox);
+	/* Error code -EIO indicates there is a communication failure
+	 * to the AF. The rest of the error codes indicate that the AF processed
+ * VF messages and set the error codes in response messages
+ * (if any) so simply forward responses to VF.
+ */
if (ret == -EIO) {
- dev_err(&cptpf->pdev->dev, "RVU MBOX timeout.\n");
+ dev_warn(&cptpf->pdev->dev,
+ "AF not responding to VF%d messages\n", vf->vf_id);
+ mutex_unlock(&cptpf->lock);
return ret;
- } else if (ret) {
- dev_err(&cptpf->pdev->dev, "RVU MBOX error: %d.\n", ret);
- return -EFAULT;
}
+ mutex_unlock(&cptpf->lock);
return 0;
}
@@ -204,6 +211,10 @@ void otx2_cptpf_vfpf_mbox_handler(struct work_struct *work)
if (err == -ENOMEM || err == -EIO)
break;
offset = msg->next_msgoff;
+		/* Write barrier required for VF responses which are handled by
+		 * the PF driver and not forwarded to the AF.
+ */
+ smp_wmb();
}
/* Send mbox responses to VF */
if (mdev->num_msgs)
@@ -350,6 +361,8 @@ void otx2_cptpf_afpf_mbox_handler(struct work_struct *work)
process_afpf_mbox_msg(cptpf, msg);
offset = msg->next_msgoff;
+ /* Sync VF response ready to be sent */
+ smp_wmb();
mdev->msgs_acked++;
}
otx2_mbox_reset(afpf_mbox, 0);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
index 1b4d425bbf0e..9cba2f714c7e 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -1076,6 +1076,39 @@ static void delete_engine_grps(struct pci_dev *pdev,
delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
}
+#define PCI_DEVID_CN10K_RNM 0xA098
+#define RNM_ENTROPY_STATUS 0x8
+
+static void rnm_to_cpt_errata_fixup(struct device *dev)
+{
+ struct pci_dev *pdev;
+ void __iomem *base;
+ int timeout = 5000;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RNM, NULL);
+ if (!pdev)
+ return;
+
+ base = pci_ioremap_bar(pdev, 0);
+ if (!base)
+ goto put_pdev;
+
+ while ((readq(base + RNM_ENTROPY_STATUS) & 0x7F) != 0x40) {
+ cpu_relax();
+ udelay(1);
+ timeout--;
+ if (!timeout) {
+ dev_warn(dev, "RNM is not producing entropy\n");
+ break;
+ }
+ }
+
+ iounmap(base);
+
+put_pdev:
+ pci_dev_put(pdev);
+}
+
int otx2_cpt_get_eng_grp(struct otx2_cpt_eng_grps *eng_grps, int eng_type)
{
@@ -1111,6 +1144,7 @@ int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
struct pci_dev *pdev = cptpf->pdev;
struct fw_info_t fw_info;
+ u64 reg_val;
int ret = 0;
mutex_lock(&eng_grps->lock);
@@ -1189,9 +1223,17 @@ int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
if (is_dev_otx2(pdev))
goto unlock;
+
+ /*
+ * Ensure RNM_ENTROPY_STATUS[NORMAL_CNT] = 0x40 before writing
+ * CPT_AF_CTL[RNM_REQ_EN] = 1 as a workaround for HW errata.
+ */
+ rnm_to_cpt_errata_fixup(&pdev->dev);
+
/*
* Configure engine group mask to allow context prefetching
- * for the groups.
+	 * for the groups and enable random number requests so that
+	 * CPT can request random numbers from the RNM.
*/
otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL,
OTX2_CPT_ALL_ENG_GRPS_MASK << 3 | BIT_ULL(16),
@@ -1203,6 +1245,18 @@ int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
*/
otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTX_FLUSH_TIMER,
CTX_FLUSH_TIMER_CNT, BLKADDR_CPT0);
+
+ /*
+	 * Set CPT_AF_DIAG[FLT_DIS] as a workaround for a HW errata: when
+	 * CPT_AF_DIAG[FLT_DIS] = 0 and a CPT engine access to LLC/DRAM
+	 * encounters a fault/poison, a rare case may result in
+	 * unpredictable data being delivered to a CPT engine.
+ */
+ otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG, &reg_val,
+ BLKADDR_CPT0);
+ otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG,
+ reg_val | BIT_ULL(24), BLKADDR_CPT0);
+
mutex_unlock(&eng_grps->lock);
return 0;
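rnm_to_cpt_errata_fixup() above open-codes the entropy-ready poll; linux/iopoll.h offers readq_poll_timeout(), which can express the same wait more compactly. A sketch of that alternative under the same register semantics (NORMAL_CNT in the low seven bits reaching 0x40) — not what the patch does, shown only as the idiomatic form; wait_rnm_ready is a hypothetical name:

#include <linux/iopoll.h>

#define RNM_ENTROPY_STATUS	0x8

static int wait_rnm_ready(void __iomem *base)
{
	u64 status;

	/* poll every 1 us, time out after 5 ms */
	return readq_poll_timeout(base + RNM_ENTROPY_STATUS, status,
				  (status & 0x7F) == 0x40, 1, 5000);
}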
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
index 2748a3327e39..f8f8542ce3e4 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
@@ -1634,16 +1634,13 @@ static inline int cpt_register_algs(void)
{
int i, err = 0;
- if (!IS_ENABLED(CONFIG_DM_CRYPT)) {
- for (i = 0; i < ARRAY_SIZE(otx2_cpt_skciphers); i++)
- otx2_cpt_skciphers[i].base.cra_flags &=
- ~CRYPTO_ALG_DEAD;
-
- err = crypto_register_skciphers(otx2_cpt_skciphers,
- ARRAY_SIZE(otx2_cpt_skciphers));
- if (err)
- return err;
- }
+ for (i = 0; i < ARRAY_SIZE(otx2_cpt_skciphers); i++)
+ otx2_cpt_skciphers[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;
+
+ err = crypto_register_skciphers(otx2_cpt_skciphers,
+ ARRAY_SIZE(otx2_cpt_skciphers));
+ if (err)
+ return err;
for (i = 0; i < ARRAY_SIZE(otx2_cpt_aeads); i++)
otx2_cpt_aeads[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index d19e5ffb5104..d6f9e2fe863d 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -331,7 +331,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
}
- for_each_sg(req->src, src, sg_nents(src), i) {
+ for_each_sg(req->src, src, sg_nents(req->src), i) {
src_buf = sg_virt(src);
len = sg_dma_len(src);
tlen += len;
diff --git a/drivers/crypto/nx/nx-common-pseries.c b/drivers/crypto/nx/nx-common-pseries.c
index 4e304f6081e4..7584a34ba88c 100644
--- a/drivers/crypto/nx/nx-common-pseries.c
+++ b/drivers/crypto/nx/nx-common-pseries.c
@@ -962,7 +962,7 @@ static struct attribute *nx842_sysfs_entries[] = {
NULL,
};
-static struct attribute_group nx842_attribute_group = {
+static const struct attribute_group nx842_attribute_group = {
.name = NULL, /* put in device directory */
.attrs = nx842_sysfs_entries,
};
@@ -992,7 +992,7 @@ static struct attribute *nxcop_caps_sysfs_entries[] = {
NULL,
};
-static struct attribute_group nxcop_caps_attr_group = {
+static const struct attribute_group nxcop_caps_attr_group = {
.name = "nx_gzip_caps",
.attrs = nxcop_caps_sysfs_entries,
};
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index a196bb8b1701..581211a92628 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -1093,7 +1093,7 @@ static struct attribute *omap_aes_attrs[] = {
NULL,
};
-static struct attribute_group omap_aes_attr_group = {
+static const struct attribute_group omap_aes_attr_group = {
.attrs = omap_aes_attrs,
};
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index f6bf53c00b61..4b37dc69a50c 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -2045,7 +2045,7 @@ static struct attribute *omap_sham_attrs[] = {
NULL,
};
-static struct attribute_group omap_sham_attr_group = {
+static const struct attribute_group omap_sham_attr_group = {
.attrs = omap_sham_attrs,
};
diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
index 6d10edc40aca..fb5970a68484 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -6,6 +6,7 @@
#include <adf_common_drv.h>
#include <adf_gen4_hw_data.h>
#include <adf_gen4_pfvf.h>
+#include <adf_gen4_pm.h>
#include "adf_4xxx_hw_data.h"
#include "icp_qat_hw.h"
@@ -52,7 +53,7 @@ static const char *const dev_cfg_services[] = {
static int get_service_enabled(struct adf_accel_dev *accel_dev)
{
char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
- u32 ret;
+ int ret;
ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
ADF_SERVICES_ENABLED, services);
@@ -229,7 +230,7 @@ static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
void __iomem *csr = misc_bar->virt_addr;
/* Enable all in errsou3 except VFLR notification on host */
- ADF_CSR_WR(csr, ADF_4XXX_ERRMSK3, ADF_4XXX_VFLNOTIFY);
+ ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_VFLNOTIFY);
}
static void adf_enable_ints(struct adf_accel_dev *accel_dev)
@@ -256,19 +257,19 @@ static int adf_init_device(struct adf_accel_dev *accel_dev)
addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
/* Temporarily mask PM interrupt */
- csr = ADF_CSR_RD(addr, ADF_4XXX_ERRMSK2);
- csr |= ADF_4XXX_PM_SOU;
- ADF_CSR_WR(addr, ADF_4XXX_ERRMSK2, csr);
+ csr = ADF_CSR_RD(addr, ADF_GEN4_ERRMSK2);
+ csr |= ADF_GEN4_PM_SOU;
+ ADF_CSR_WR(addr, ADF_GEN4_ERRMSK2, csr);
/* Set DRV_ACTIVE bit to power up the device */
- ADF_CSR_WR(addr, ADF_4XXX_PM_INTERRUPT, ADF_4XXX_PM_DRV_ACTIVE);
+ ADF_CSR_WR(addr, ADF_GEN4_PM_INTERRUPT, ADF_GEN4_PM_DRV_ACTIVE);
/* Poll status register to make sure the device is powered up */
ret = read_poll_timeout(ADF_CSR_RD, status,
- status & ADF_4XXX_PM_INIT_STATE,
- ADF_4XXX_PM_POLL_DELAY_US,
- ADF_4XXX_PM_POLL_TIMEOUT_US, true, addr,
- ADF_4XXX_PM_STATUS);
+ status & ADF_GEN4_PM_INIT_STATE,
+ ADF_GEN4_PM_POLL_DELAY_US,
+ ADF_GEN4_PM_POLL_TIMEOUT_US, true, addr,
+ ADF_GEN4_PM_STATUS);
if (ret)
dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n");
@@ -354,6 +355,8 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
hw_data->disable_iov = adf_disable_sriov;
hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
+ hw_data->enable_pm = adf_gen4_enable_pm;
+ hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops);
diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h
index 12e4fb9b40ce..1034752845ca 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h
+++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h
@@ -39,20 +39,6 @@
#define ADF_4XXX_NUM_RINGS_PER_BANK 2
#define ADF_4XXX_NUM_BANKS_PER_VF 4
-/* Error source registers */
-#define ADF_4XXX_ERRSOU0 (0x41A200)
-#define ADF_4XXX_ERRSOU1 (0x41A204)
-#define ADF_4XXX_ERRSOU2 (0x41A208)
-#define ADF_4XXX_ERRSOU3 (0x41A20C)
-
-/* Error source mask registers */
-#define ADF_4XXX_ERRMSK0 (0x41A210)
-#define ADF_4XXX_ERRMSK1 (0x41A214)
-#define ADF_4XXX_ERRMSK2 (0x41A218)
-#define ADF_4XXX_ERRMSK3 (0x41A21C)
-
-#define ADF_4XXX_VFLNOTIFY BIT(7)
-
/* Arbiter configuration */
#define ADF_4XXX_ARB_CONFIG (BIT(31) | BIT(6) | BIT(0))
#define ADF_4XXX_ARB_OFFSET (0x0)
@@ -63,16 +49,6 @@
#define ADF_4XXX_ADMINMSGLR_OFFSET (0x500578)
#define ADF_4XXX_MAILBOX_BASE_OFFSET (0x600970)
-/* Power management */
-#define ADF_4XXX_PM_POLL_DELAY_US 20
-#define ADF_4XXX_PM_POLL_TIMEOUT_US USEC_PER_SEC
-#define ADF_4XXX_PM_STATUS (0x50A00C)
-#define ADF_4XXX_PM_INTERRUPT (0x50A028)
-#define ADF_4XXX_PM_DRV_ACTIVE BIT(20)
-#define ADF_4XXX_PM_INIT_STATE BIT(21)
-/* Power management source in ERRSOU2 and ERRMSK2 */
-#define ADF_4XXX_PM_SOU BIT(18)
-
/* Firmware Binaries */
#define ADF_4XXX_FW "qat_4xxx.bin"
#define ADF_4XXX_MMP "qat_4xxx_mmp.bin"
diff --git a/drivers/crypto/qat/qat_4xxx/adf_drv.c b/drivers/crypto/qat/qat_4xxx/adf_drv.c
index a6c78b9c730b..fa4c350c1bf9 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_drv.c
+++ b/drivers/crypto/qat/qat_4xxx/adf_drv.c
@@ -75,6 +75,13 @@ static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
if (ret)
goto err;
+ /* Temporarily set the number of crypto instances to zero to avoid
+ * registering the crypto algorithms.
+	 * This will be removed once the algorithms support the
+	 * CRYPTO_TFM_REQ_MAY_BACKLOG flag.
+ */
+ instances = 0;
+
for (i = 0; i < instances; i++) {
val = i;
bank = i * 2;
diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile
index 7e191a42a5c7..f25a6c8edfc7 100644
--- a/drivers/crypto/qat/qat_common/Makefile
+++ b/drivers/crypto/qat/qat_common/Makefile
@@ -12,6 +12,7 @@ intel_qat-objs := adf_cfg.o \
adf_hw_arbiter.o \
adf_gen2_hw_data.o \
adf_gen4_hw_data.o \
+ adf_gen4_pm.o \
qat_crypto.o \
qat_algs.o \
qat_asym_algs.o \
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index 2d4cd7c7cf33..a03c6cf72331 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -184,6 +184,8 @@ struct adf_hw_device_data {
void (*exit_arb)(struct adf_accel_dev *accel_dev);
const u32 *(*get_arb_mapping)(void);
int (*init_device)(struct adf_accel_dev *accel_dev);
+ int (*enable_pm)(struct adf_accel_dev *accel_dev);
+ bool (*handle_pm_interrupt)(struct adf_accel_dev *accel_dev);
void (*disable_iov)(struct adf_accel_dev *accel_dev);
void (*configure_iov_threads)(struct adf_accel_dev *accel_dev,
bool enable);
diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c
index 498eb6f690e3..3b6184c35081 100644
--- a/drivers/crypto/qat/qat_common/adf_admin.c
+++ b/drivers/crypto/qat/qat_common/adf_admin.c
@@ -251,6 +251,43 @@ int adf_send_admin_init(struct adf_accel_dev *accel_dev)
}
EXPORT_SYMBOL_GPL(adf_send_admin_init);
+/**
+ * adf_init_admin_pm() - Send the PM init admin message to the FW
+ * @accel_dev: Pointer to acceleration device.
+ * @idle_delay: QAT HW idle time before power gating is initiated.
+ * 000 - 64us
+ * 001 - 128us
+ * 010 - 256us
+ * 011 - 512us
+ * 100 - 1ms
+ * 101 - 2ms
+ * 110 - 4ms
+ * 111 - 8ms
+ *
+ * Sends the admin init message for the power management state
+ * configuration to the FW.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct icp_qat_fw_init_admin_resp resp = {0};
+ struct icp_qat_fw_init_admin_req req = {0};
+ u32 ae_mask = hw_data->admin_ae_mask;
+
+ if (!accel_dev->admin) {
+ dev_err(&GET_DEV(accel_dev), "adf_admin is not available\n");
+ return -EFAULT;
+ }
+
+ req.cmd_id = ICP_QAT_FW_PM_STATE_CONFIG;
+ req.idle_filter = idle_delay;
+
+ return adf_send_admin(accel_dev, &req, &resp, ae_mask);
+}
+EXPORT_SYMBOL_GPL(adf_init_admin_pm);
+
int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
{
struct adf_admin_comms *admin;
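The idle_delay encoding documented above is a 3-bit power-of-two multiplier on a 64 us base. A minimal sketch of that mapping plus a typical call; the helper qat_pm_idle_us is hypothetical and not part of the patch:

	/* Hypothetical helper: decode the 3-bit idle_delay field (0..7) */
	static unsigned int qat_pm_idle_us(u32 idle_delay)
	{
		return 64U << (idle_delay & 0x7);	/* 0 -> 64us, ..., 7 -> 8192us (~8ms) */
	}

	/* Typical caller, mirroring adf_gen4_enable_pm() further below: */
	ret = adf_init_admin_pm(accel_dev, ADF_GEN4_PM_DEFAULT_IDLE_FILTER);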
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index 76f4f96ec5eb..e8c9b77c0d66 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -102,6 +102,7 @@ void adf_exit_aer(void);
int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
int adf_send_admin_init(struct adf_accel_dev *accel_dev);
+int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay);
int adf_init_arb(struct adf_accel_dev *accel_dev);
void adf_exit_arb(struct adf_accel_dev *accel_dev);
void adf_update_ring_arb(struct adf_etr_ring_data *ring);
@@ -188,6 +189,9 @@ int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
void *addr_ptr, u32 mem_size, char *obj_name);
int qat_uclo_set_cfg_ae_mask(struct icp_qat_fw_loader_handle *handle,
unsigned int cfg_ae_mask);
+int adf_init_misc_wq(void);
+void adf_exit_misc_wq(void);
+bool adf_misc_wq_queue_work(struct work_struct *work);
#if defined(CONFIG_PCI_IOV)
int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
void adf_disable_sriov(struct adf_accel_dev *accel_dev);
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index 6f64aa693146..e8ac932bbaab 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -419,6 +419,9 @@ static int __init adf_register_ctl_device_driver(void)
if (adf_chr_drv_create())
goto err_chr_dev;
+ if (adf_init_misc_wq())
+ goto err_misc_wq;
+
if (adf_init_aer())
goto err_aer;
@@ -440,6 +443,8 @@ err_vf_wq:
err_pf_wq:
adf_exit_aer();
err_aer:
+ adf_exit_misc_wq();
+err_misc_wq:
adf_chr_drv_destroy();
err_chr_dev:
mutex_destroy(&adf_ctl_lock);
@@ -449,6 +454,7 @@ err_chr_dev:
static void __exit adf_unregister_ctl_device_driver(void)
{
adf_chr_drv_destroy();
+ adf_exit_misc_wq();
adf_exit_aer();
adf_exit_vf_wq();
adf_exit_pf_wq();
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
index f0f71ca44ca3..43b8f864806b 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
@@ -122,6 +122,20 @@ do { \
#define ADF_WQM_CSR_RPRESETSTS_STATUS BIT(0)
#define ADF_WQM_CSR_RPRESETSTS(bank) (ADF_WQM_CSR_RPRESETCTL(bank) + 4)
+/* Error source registers */
+#define ADF_GEN4_ERRSOU0 (0x41A200)
+#define ADF_GEN4_ERRSOU1 (0x41A204)
+#define ADF_GEN4_ERRSOU2 (0x41A208)
+#define ADF_GEN4_ERRSOU3 (0x41A20C)
+
+/* Error source mask registers */
+#define ADF_GEN4_ERRMSK0 (0x41A210)
+#define ADF_GEN4_ERRMSK1 (0x41A214)
+#define ADF_GEN4_ERRMSK2 (0x41A218)
+#define ADF_GEN4_ERRMSK3 (0x41A21C)
+
+#define ADF_GEN4_VFLNOTIFY BIT(7)
+
void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number);
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c b/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c
index 8efbedf63bc8..d80d493a7756 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c
+++ b/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c
@@ -9,15 +9,12 @@
#include "adf_pfvf_pf_proto.h"
#include "adf_pfvf_utils.h"
-#define ADF_4XXX_MAX_NUM_VFS 16
-
#define ADF_4XXX_PF2VM_OFFSET(i) (0x40B010 + ((i) * 0x20))
#define ADF_4XXX_VM2PF_OFFSET(i) (0x40B014 + ((i) * 0x20))
/* VF2PF interrupt source registers */
-#define ADF_4XXX_VM2PF_SOU(i) (0x41A180 + ((i) * 4))
-#define ADF_4XXX_VM2PF_MSK(i) (0x41A1C0 + ((i) * 4))
-#define ADF_4XXX_VM2PF_INT_EN_MSK BIT(0)
+#define ADF_4XXX_VM2PF_SOU 0x41A180
+#define ADF_4XXX_VM2PF_MSK 0x41A1C0
#define ADF_PFVF_GEN4_MSGTYPE_SHIFT 2
#define ADF_PFVF_GEN4_MSGTYPE_MASK 0x3F
@@ -41,51 +38,30 @@ static u32 adf_gen4_pf_get_vf2pf_offset(u32 i)
static u32 adf_gen4_get_vf2pf_sources(void __iomem *pmisc_addr)
{
- int i;
u32 sou, mask;
- int num_csrs = ADF_4XXX_MAX_NUM_VFS;
- u32 vf_mask = 0;
- for (i = 0; i < num_csrs; i++) {
- sou = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_SOU(i));
- mask = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK(i));
- sou &= ~mask;
- vf_mask |= sou << i;
- }
+ sou = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_SOU);
+ mask = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK);
- return vf_mask;
+ return sou & ~mask;
}
static void adf_gen4_enable_vf2pf_interrupts(void __iomem *pmisc_addr,
u32 vf_mask)
{
- int num_csrs = ADF_4XXX_MAX_NUM_VFS;
- unsigned long mask = vf_mask;
unsigned int val;
- int i;
-
- for_each_set_bit(i, &mask, num_csrs) {
- unsigned int offset = ADF_4XXX_VM2PF_MSK(i);
- val = ADF_CSR_RD(pmisc_addr, offset) & ~ADF_4XXX_VM2PF_INT_EN_MSK;
- ADF_CSR_WR(pmisc_addr, offset, val);
- }
+ val = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK) & ~vf_mask;
+ ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, val);
}
static void adf_gen4_disable_vf2pf_interrupts(void __iomem *pmisc_addr,
u32 vf_mask)
{
- int num_csrs = ADF_4XXX_MAX_NUM_VFS;
- unsigned long mask = vf_mask;
unsigned int val;
- int i;
-
- for_each_set_bit(i, &mask, num_csrs) {
- unsigned int offset = ADF_4XXX_VM2PF_MSK(i);
- val = ADF_CSR_RD(pmisc_addr, offset) | ADF_4XXX_VM2PF_INT_EN_MSK;
- ADF_CSR_WR(pmisc_addr, offset, val);
- }
+ val = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK) | vf_mask;
+ ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, val);
}
static int adf_gen4_pfvf_send(struct adf_accel_dev *accel_dev,
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_pm.c b/drivers/crypto/qat/qat_common/adf_gen4_pm.c
new file mode 100644
index 000000000000..7037c0892a8a
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_gen4_pm.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2022 Intel Corporation */
+#include <linux/bitfield.h>
+#include <linux/iopoll.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_gen4_pm.h"
+#include "adf_cfg_strings.h"
+#include "icp_qat_fw_init_admin.h"
+#include "adf_gen4_hw_data.h"
+#include "adf_cfg.h"
+
+enum qat_pm_host_msg {
+ PM_NO_CHANGE = 0,
+ PM_SET_MIN,
+};
+
+struct adf_gen4_pm_data {
+ struct work_struct pm_irq_work;
+ struct adf_accel_dev *accel_dev;
+ u32 pm_int_sts;
+};
+
+static int send_host_msg(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
+ u32 msg;
+
+ msg = ADF_CSR_RD(pmisc, ADF_GEN4_PM_HOST_MSG);
+ if (msg & ADF_GEN4_PM_MSG_PENDING)
+ return -EBUSY;
+
+ /* Send HOST_MSG */
+ msg = FIELD_PREP(ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK, PM_SET_MIN);
+ msg |= ADF_GEN4_PM_MSG_PENDING;
+ ADF_CSR_WR(pmisc, ADF_GEN4_PM_HOST_MSG, msg);
+
+ /* Poll status register to make sure the HOST_MSG has been processed */
+ return read_poll_timeout(ADF_CSR_RD, msg,
+ !(msg & ADF_GEN4_PM_MSG_PENDING),
+ ADF_GEN4_PM_MSG_POLL_DELAY_US,
+ ADF_GEN4_PM_POLL_TIMEOUT_US, true, pmisc,
+ ADF_GEN4_PM_HOST_MSG);
+}
+
+static void pm_bh_handler(struct work_struct *work)
+{
+ struct adf_gen4_pm_data *pm_data =
+ container_of(work, struct adf_gen4_pm_data, pm_irq_work);
+ struct adf_accel_dev *accel_dev = pm_data->accel_dev;
+ void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
+ u32 pm_int_sts = pm_data->pm_int_sts;
+ u32 val;
+
+ /* PM Idle interrupt */
+ if (pm_int_sts & ADF_GEN4_PM_IDLE_STS) {
+ /* Issue host message to FW */
+ if (send_host_msg(accel_dev))
+ dev_warn_ratelimited(&GET_DEV(accel_dev),
+ "Failed to send host msg to FW\n");
+ }
+
+ /* Clear interrupt status */
+ ADF_CSR_WR(pmisc, ADF_GEN4_PM_INTERRUPT, pm_int_sts);
+
+ /* Reenable PM interrupt */
+ val = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
+ val &= ~ADF_GEN4_PM_SOU;
+ ADF_CSR_WR(pmisc, ADF_GEN4_ERRMSK2, val);
+
+ kfree(pm_data);
+}
+
+bool adf_gen4_handle_pm_interrupt(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
+ struct adf_gen4_pm_data *pm_data = NULL;
+ u32 errsou2;
+ u32 errmsk2;
+ u32 val;
+
+ /* Only handle the interrupt triggered by PM */
+ errmsk2 = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
+ if (errmsk2 & ADF_GEN4_PM_SOU)
+ return false;
+
+ errsou2 = ADF_CSR_RD(pmisc, ADF_GEN4_ERRSOU2);
+ if (!(errsou2 & ADF_GEN4_PM_SOU))
+ return false;
+
+ /* Disable interrupt */
+ val = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
+ val |= ADF_GEN4_PM_SOU;
+ ADF_CSR_WR(pmisc, ADF_GEN4_ERRMSK2, val);
+
+ val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_INTERRUPT);
+
+ pm_data = kzalloc(sizeof(*pm_data), GFP_ATOMIC);
+ if (!pm_data)
+ return false;
+
+ pm_data->pm_int_sts = val;
+ pm_data->accel_dev = accel_dev;
+
+ INIT_WORK(&pm_data->pm_irq_work, pm_bh_handler);
+ adf_misc_wq_queue_work(&pm_data->pm_irq_work);
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_handle_pm_interrupt);
+
+int adf_gen4_enable_pm(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
+ int ret;
+ u32 val;
+
+ ret = adf_init_admin_pm(accel_dev, ADF_GEN4_PM_DEFAULT_IDLE_FILTER);
+ if (ret)
+ return ret;
+
+ /* Enable default PM interrupts: IDLE, THROTTLE */
+ val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_INTERRUPT);
+ val |= ADF_GEN4_PM_INT_EN_DEFAULT;
+
+ /* Clear interrupt status */
+ val |= ADF_GEN4_PM_INT_STS_MASK;
+ ADF_CSR_WR(pmisc, ADF_GEN4_PM_INTERRUPT, val);
+
+ /* Unmask PM Interrupt */
+ val = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
+ val &= ~ADF_GEN4_PM_SOU;
+ ADF_CSR_WR(pmisc, ADF_GEN4_ERRMSK2, val);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_enable_pm);
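send_host_msg() above packs the payload with FIELD_PREP() and reuses bit 0 as a busy flag. A stand-alone sketch of that packing, using only the masks defined in adf_gen4_pm.h below:

	#include <linux/bitfield.h>

	/* PM_SET_MIN (1) goes into bits 28:1; bit 0 marks the message pending */
	u32 msg = FIELD_PREP(ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK, PM_SET_MIN) |
		  ADF_GEN4_PM_MSG_PENDING;			/* msg == 0x3 */

	/* The receiving side would recover the payload with: */
	u32 payload = FIELD_GET(ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK, msg);	/* PM_SET_MIN */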
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_pm.h b/drivers/crypto/qat/qat_common/adf_gen4_pm.h
new file mode 100644
index 000000000000..f8f8a9ee29e5
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_gen4_pm.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef ADF_GEN4_PM_H
+#define ADF_GEN4_PM_H
+
+#include "adf_accel_devices.h"
+
+/* Power management registers */
+#define ADF_GEN4_PM_HOST_MSG (0x50A01C)
+
+/* Power management */
+#define ADF_GEN4_PM_POLL_DELAY_US 20
+#define ADF_GEN4_PM_POLL_TIMEOUT_US USEC_PER_SEC
+#define ADF_GEN4_PM_MSG_POLL_DELAY_US (10 * USEC_PER_MSEC)
+#define ADF_GEN4_PM_STATUS (0x50A00C)
+#define ADF_GEN4_PM_INTERRUPT (0x50A028)
+
+/* Power management source in ERRSOU2 and ERRMSK2 */
+#define ADF_GEN4_PM_SOU BIT(18)
+
+#define ADF_GEN4_PM_IDLE_INT_EN BIT(18)
+#define ADF_GEN4_PM_THROTTLE_INT_EN BIT(19)
+#define ADF_GEN4_PM_DRV_ACTIVE BIT(20)
+#define ADF_GEN4_PM_INIT_STATE BIT(21)
+#define ADF_GEN4_PM_INT_EN_DEFAULT (ADF_GEN4_PM_IDLE_INT_EN | \
+ ADF_GEN4_PM_THROTTLE_INT_EN)
+
+#define ADF_GEN4_PM_THR_STS BIT(0)
+#define ADF_GEN4_PM_IDLE_STS BIT(1)
+#define ADF_GEN4_PM_FW_INT_STS BIT(2)
+#define ADF_GEN4_PM_INT_STS_MASK (ADF_GEN4_PM_THR_STS | \
+ ADF_GEN4_PM_IDLE_STS | \
+ ADF_GEN4_PM_FW_INT_STS)
+
+#define ADF_GEN4_PM_MSG_PENDING BIT(0)
+#define ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK GENMASK(28, 1)
+
+#define ADF_GEN4_PM_DEFAULT_IDLE_FILTER (0x0)
+#define ADF_GEN4_PM_MAX_IDLE_FILTER (0x7)
+
+int adf_gen4_enable_pm(struct adf_accel_dev *accel_dev);
+bool adf_gen4_handle_pm_interrupt(struct adf_accel_dev *accel_dev);
+
+#endif
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
index 2edc63c6b6ca..c2c718f1b489 100644
--- a/drivers/crypto/qat/qat_common/adf_init.c
+++ b/drivers/crypto/qat/qat_common/adf_init.c
@@ -181,6 +181,12 @@ int adf_dev_start(struct adf_accel_dev *accel_dev)
if (hw_data->set_ssm_wdtimer)
hw_data->set_ssm_wdtimer(accel_dev);
+ /* Enable Power Management */
+ if (hw_data->enable_pm && hw_data->enable_pm(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "Failed to configure Power Management\n");
+ return -EFAULT;
+ }
+
list_for_each(list_itr, &service_table) {
service = list_entry(list_itr, struct service_hndl, list);
if (service->event_hld(accel_dev, ADF_EVENT_START)) {
diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c
index 4ca482aa69f7..a35149f8bf1e 100644
--- a/drivers/crypto/qat/qat_common/adf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_isr.c
@@ -16,6 +16,7 @@
#include "adf_transport_internal.h"
#define ADF_MAX_NUM_VFS 32
+static struct workqueue_struct *adf_misc_wq;
static int adf_enable_msix(struct adf_accel_dev *accel_dev)
{
@@ -123,6 +124,17 @@ static bool adf_handle_vf2pf_int(struct adf_accel_dev *accel_dev)
}
#endif /* CONFIG_PCI_IOV */
+static bool adf_handle_pm_int(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+
+ if (hw_data->handle_pm_interrupt &&
+ hw_data->handle_pm_interrupt(accel_dev))
+ return true;
+
+ return false;
+}
+
static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
{
struct adf_accel_dev *accel_dev = dev_ptr;
@@ -133,6 +145,9 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
return IRQ_HANDLED;
#endif /* CONFIG_PCI_IOV */
+ if (adf_handle_pm_int(accel_dev))
+ return IRQ_HANDLED;
+
dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n",
accel_dev->accel_id);
@@ -341,3 +356,30 @@ err_out:
return ret;
}
EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);
+
+/**
+ * adf_init_misc_wq() - Init misc workqueue
+ *
+ * Allocates the general-purpose workqueue 'qat_misc_wq'.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int __init adf_init_misc_wq(void)
+{
+ adf_misc_wq = alloc_workqueue("qat_misc_wq", WQ_MEM_RECLAIM, 0);
+
+ return !adf_misc_wq ? -ENOMEM : 0;
+}
+
+void adf_exit_misc_wq(void)
+{
+ if (adf_misc_wq)
+ destroy_workqueue(adf_misc_wq);
+
+ adf_misc_wq = NULL;
+}
+
+bool adf_misc_wq_queue_work(struct work_struct *work)
+{
+ return queue_work(adf_misc_wq, work);
+}
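adf_misc_wq_queue_work() is a thin wrapper around queue_work() on the module-wide 'qat_misc_wq'; the PM bottom half above is its first user. A sketch of the intended pattern from atomic context (struct my_work and my_handler are hypothetical):

	struct my_work {
		struct work_struct work;
		struct adf_accel_dev *accel_dev;
	};

	static void my_handler(struct work_struct *work)
	{
		struct my_work *w = container_of(work, struct my_work, work);

		/* runs later in process context on qat_misc_wq */
		kfree(w);
	}

	/* e.g. from an ISR: */
	struct my_work *w = kzalloc(sizeof(*w), GFP_ATOMIC);

	if (w) {
		w->accel_dev = accel_dev;
		INIT_WORK(&w->work, my_handler);
		adf_misc_wq_queue_work(&w->work);
	}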
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c b/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c
index 14b222691c9c..1141258db4b6 100644
--- a/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c
+++ b/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c
@@ -96,7 +96,7 @@ int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
int adf_vf2pf_get_capabilities(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- struct capabilities_v3 cap_msg = { { 0 }, };
+ struct capabilities_v3 cap_msg = { 0 };
unsigned int len = sizeof(cap_msg);
if (accel_dev->vf.pf_compat_ver < ADF_PFVF_COMPAT_CAPABILITIES)
@@ -141,7 +141,7 @@ int adf_vf2pf_get_capabilities(struct adf_accel_dev *accel_dev)
int adf_vf2pf_get_ring_to_svc(struct adf_accel_dev *accel_dev)
{
- struct ring_to_svc_map_v1 rts_map_msg = { { 0 }, };
+ struct ring_to_svc_map_v1 rts_map_msg = { 0 };
unsigned int len = sizeof(rts_map_msg);
if (accel_dev->vf.pf_compat_ver < ADF_PFVF_COMPAT_RING_TO_SVC_MAP)
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h
index afe59a7684ac..56cb827f93ea 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h
@@ -16,6 +16,7 @@ enum icp_qat_fw_init_admin_cmd_id {
ICP_QAT_FW_HEARTBEAT_SYNC = 7,
ICP_QAT_FW_HEARTBEAT_GET = 8,
ICP_QAT_FW_COMP_CAPABILITY_GET = 9,
+ ICP_QAT_FW_PM_STATE_CONFIG = 128,
};
enum icp_qat_fw_init_admin_resp_status {
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c
index 7234c4940fae..67c9588e89df 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.c
+++ b/drivers/crypto/qat/qat_common/qat_crypto.c
@@ -161,6 +161,13 @@ int qat_crypto_dev_config(struct adf_accel_dev *accel_dev)
if (ret)
goto err;
+ /* Temporarily set the number of crypto instances to zero to avoid
+ * registering the crypto algorithms.
+	 * This will be removed once the algorithms support the
+	 * CRYPTO_TFM_REQ_MAY_BACKLOG flag.
+ */
+ instances = 0;
+
for (i = 0; i < instances; i++) {
val = i;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
index 2026cc6be8f0..6356402a2c9e 100644
--- a/drivers/crypto/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/qat/qat_common/qat_uclo.c
@@ -387,7 +387,9 @@ static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
page = image->page;
for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
- if (!test_bit(ae, (unsigned long *)&uof_image->ae_assigned))
+ unsigned long ae_assigned = uof_image->ae_assigned;
+
+ if (!test_bit(ae, &ae_assigned))
continue;
if (!test_bit(ae, &cfg_ae_mask))
@@ -664,8 +666,9 @@ static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
continue;
for (i = 0; i < obj_handle->uimage_num; i++) {
- if (!test_bit(ae, (unsigned long *)
- &obj_handle->ae_uimage[i].img_ptr->ae_assigned))
+ unsigned long ae_assigned = obj_handle->ae_uimage[i].img_ptr->ae_assigned;
+
+ if (!test_bit(ae, &ae_assigned))
continue;
mflag = 1;
if (qat_uclo_init_ae_data(obj_handle, ae, i))
diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
index 1cece1a7d3f0..5bbf0d2722e1 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
@@ -506,7 +506,6 @@ struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
.exit = rk_ablk_exit_tfm,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
.setkey = rk_tdes_setkey,
.encrypt = rk_des3_ede_ecb_encrypt,
.decrypt = rk_des3_ede_ecb_decrypt,
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 97277b7150cb..5a57c9afd8c8 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1264,7 +1264,7 @@ static int ux500_cryp_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
dev_dbg(dev, "[%s]", __func__);
- device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_ATOMIC);
+ device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_KERNEL);
if (!device_data) {
ret = -ENOMEM;
goto out;
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 51a6e1a42434..5157c118d642 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -1658,7 +1658,7 @@ static int ux500_hash_probe(struct platform_device *pdev)
struct hash_device_data *device_data;
struct device *dev = &pdev->dev;
- device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_ATOMIC);
+ device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_KERNEL);
if (!device_data) {
ret = -ENOMEM;
goto out;
diff --git a/drivers/crypto/vmx/Kconfig b/drivers/crypto/vmx/Kconfig
index c85fab7ef0bd..b2c28b87f14b 100644
--- a/drivers/crypto/vmx/Kconfig
+++ b/drivers/crypto/vmx/Kconfig
@@ -2,7 +2,11 @@
config CRYPTO_DEV_VMX_ENCRYPT
tristate "Encryption acceleration support on P8 CPU"
depends on CRYPTO_DEV_VMX
+ select CRYPTO_AES
+ select CRYPTO_CBC
+ select CRYPTO_CTR
select CRYPTO_GHASH
+ select CRYPTO_XTS
default m
help
Support for VMX cryptographic acceleration instructions on Power8 CPU.
diff --git a/drivers/crypto/xilinx/Makefile b/drivers/crypto/xilinx/Makefile
index 534e32daf76a..730feff5b5f2 100644
--- a/drivers/crypto/xilinx/Makefile
+++ b/drivers/crypto/xilinx/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_AES) += zynqmp-aes-gcm.o
+obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_SHA3) += zynqmp-sha.o
diff --git a/drivers/crypto/xilinx/zynqmp-sha.c b/drivers/crypto/xilinx/zynqmp-sha.c
new file mode 100644
index 000000000000..43ff170ff1c2
--- /dev/null
+++ b/drivers/crypto/xilinx/zynqmp-sha.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx ZynqMP SHA Driver.
+ * Copyright (c) 2022 Xilinx Inc.
+ */
+#include <linux/cacheflush.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha3.h>
+#include <linux/crypto.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#define ZYNQMP_DMA_BIT_MASK 32U
+#define ZYNQMP_DMA_ALLOC_FIXED_SIZE 0x1000U
+
+enum zynqmp_sha_op {
+ ZYNQMP_SHA3_INIT = 1,
+ ZYNQMP_SHA3_UPDATE = 2,
+ ZYNQMP_SHA3_FINAL = 4,
+};
+
+struct zynqmp_sha_drv_ctx {
+ struct shash_alg sha3_384;
+ struct device *dev;
+};
+
+struct zynqmp_sha_tfm_ctx {
+ struct device *dev;
+ struct crypto_shash *fbk_tfm;
+};
+
+struct zynqmp_sha_desc_ctx {
+ struct shash_desc fbk_req;
+};
+
+static dma_addr_t update_dma_addr, final_dma_addr;
+static char *ubuf, *fbuf;
+
+static int zynqmp_sha_init_tfm(struct crypto_shash *hash)
+{
+ const char *fallback_driver_name = crypto_shash_alg_name(hash);
+ struct zynqmp_sha_tfm_ctx *tfm_ctx = crypto_shash_ctx(hash);
+ struct shash_alg *alg = crypto_shash_alg(hash);
+ struct crypto_shash *fallback_tfm;
+ struct zynqmp_sha_drv_ctx *drv_ctx;
+
+ drv_ctx = container_of(alg, struct zynqmp_sha_drv_ctx, sha3_384);
+ tfm_ctx->dev = drv_ctx->dev;
+
+ /* Allocate a fallback and abort if it failed. */
+ fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(fallback_tfm))
+ return PTR_ERR(fallback_tfm);
+
+ tfm_ctx->fbk_tfm = fallback_tfm;
+ hash->descsize += crypto_shash_descsize(tfm_ctx->fbk_tfm);
+
+ return 0;
+}
+
+static void zynqmp_sha_exit_tfm(struct crypto_shash *hash)
+{
+ struct zynqmp_sha_tfm_ctx *tfm_ctx = crypto_shash_ctx(hash);
+
+ if (tfm_ctx->fbk_tfm) {
+ crypto_free_shash(tfm_ctx->fbk_tfm);
+ tfm_ctx->fbk_tfm = NULL;
+ }
+
+ memzero_explicit(tfm_ctx, sizeof(struct zynqmp_sha_tfm_ctx));
+}
+
+static int zynqmp_sha_init(struct shash_desc *desc)
+{
+ struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
+ struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+
+ dctx->fbk_req.tfm = tctx->fbk_tfm;
+ return crypto_shash_init(&dctx->fbk_req);
+}
+
+static int zynqmp_sha_update(struct shash_desc *desc, const u8 *data, unsigned int length)
+{
+ struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ return crypto_shash_update(&dctx->fbk_req, data, length);
+}
+
+static int zynqmp_sha_final(struct shash_desc *desc, u8 *out)
+{
+ struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ return crypto_shash_final(&dctx->fbk_req, out);
+}
+
+static int zynqmp_sha_finup(struct shash_desc *desc, const u8 *data, unsigned int length, u8 *out)
+{
+ struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ return crypto_shash_finup(&dctx->fbk_req, data, length, out);
+}
+
+static int zynqmp_sha_import(struct shash_desc *desc, const void *in)
+{
+ struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
+ struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+
+ dctx->fbk_req.tfm = tctx->fbk_tfm;
+ return crypto_shash_import(&dctx->fbk_req, in);
+}
+
+static int zynqmp_sha_export(struct shash_desc *desc, void *out)
+{
+ struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ return crypto_shash_export(&dctx->fbk_req, out);
+}
+
+static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out)
+{
+ unsigned int remaining_len = len;
+ int update_size;
+ int ret;
+
+ ret = zynqmp_pm_sha_hash(0, 0, ZYNQMP_SHA3_INIT);
+ if (ret)
+ return ret;
+
+ while (remaining_len != 0) {
+ memzero_explicit(ubuf, ZYNQMP_DMA_ALLOC_FIXED_SIZE);
+ if (remaining_len >= ZYNQMP_DMA_ALLOC_FIXED_SIZE) {
+ update_size = ZYNQMP_DMA_ALLOC_FIXED_SIZE;
+ remaining_len -= ZYNQMP_DMA_ALLOC_FIXED_SIZE;
+ } else {
+ update_size = remaining_len;
+ remaining_len = 0;
+ }
+ memcpy(ubuf, data, update_size);
+ flush_icache_range((unsigned long)ubuf, (unsigned long)ubuf + update_size);
+ ret = zynqmp_pm_sha_hash(update_dma_addr, update_size, ZYNQMP_SHA3_UPDATE);
+ if (ret)
+ return ret;
+
+ data += update_size;
+ }
+
+ ret = zynqmp_pm_sha_hash(final_dma_addr, SHA3_384_DIGEST_SIZE, ZYNQMP_SHA3_FINAL);
+ memcpy(out, fbuf, SHA3_384_DIGEST_SIZE);
+ memzero_explicit(fbuf, SHA3_384_DIGEST_SIZE);
+
+ return ret;
+}
+
+static struct zynqmp_sha_drv_ctx sha3_drv_ctx = {
+ .sha3_384 = {
+ .init = zynqmp_sha_init,
+ .update = zynqmp_sha_update,
+ .final = zynqmp_sha_final,
+ .finup = zynqmp_sha_finup,
+ .digest = zynqmp_sha_digest,
+ .export = zynqmp_sha_export,
+ .import = zynqmp_sha_import,
+ .init_tfm = zynqmp_sha_init_tfm,
+ .exit_tfm = zynqmp_sha_exit_tfm,
+ .descsize = sizeof(struct zynqmp_sha_desc_ctx),
+ .statesize = sizeof(struct sha3_state),
+ .digestsize = SHA3_384_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha3-384",
+ .cra_driver_name = "zynqmp-sha3-384",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA3_384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct zynqmp_sha_tfm_ctx),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ }
+ }
+};
+
+static int zynqmp_sha_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int err;
+ u32 v;
+
+ /* Verify the hardware is present */
+ err = zynqmp_pm_get_api_version(&v);
+ if (err)
+ return err;
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(ZYNQMP_DMA_BIT_MASK));
+ if (err < 0) {
+ dev_err(dev, "No usable DMA configuration\n");
+ return err;
+ }
+
+ err = crypto_register_shash(&sha3_drv_ctx.sha3_384);
+ if (err < 0) {
+ dev_err(dev, "Failed to register shash alg.\n");
+ return err;
+ }
+
+ sha3_drv_ctx.dev = dev;
+ platform_set_drvdata(pdev, &sha3_drv_ctx);
+
+ ubuf = dma_alloc_coherent(dev, ZYNQMP_DMA_ALLOC_FIXED_SIZE, &update_dma_addr, GFP_KERNEL);
+ if (!ubuf) {
+ err = -ENOMEM;
+ goto err_shash;
+ }
+
+ fbuf = dma_alloc_coherent(dev, SHA3_384_DIGEST_SIZE, &final_dma_addr, GFP_KERNEL);
+ if (!fbuf) {
+ err = -ENOMEM;
+ goto err_mem;
+ }
+
+ return 0;
+
+err_mem:
+ dma_free_coherent(sha3_drv_ctx.dev, ZYNQMP_DMA_ALLOC_FIXED_SIZE, ubuf, update_dma_addr);
+
+err_shash:
+ crypto_unregister_shash(&sha3_drv_ctx.sha3_384);
+
+ return err;
+}
+
+static int zynqmp_sha_remove(struct platform_device *pdev)
+{
+ sha3_drv_ctx.dev = platform_get_drvdata(pdev);
+
+ dma_free_coherent(sha3_drv_ctx.dev, ZYNQMP_DMA_ALLOC_FIXED_SIZE, ubuf, update_dma_addr);
+ dma_free_coherent(sha3_drv_ctx.dev, SHA3_384_DIGEST_SIZE, fbuf, final_dma_addr);
+ crypto_unregister_shash(&sha3_drv_ctx.sha3_384);
+
+ return 0;
+}
+
+static struct platform_driver zynqmp_sha_driver = {
+ .probe = zynqmp_sha_probe,
+ .remove = zynqmp_sha_remove,
+ .driver = {
+ .name = "zynqmp-sha3-384",
+ },
+};
+
+module_platform_driver(zynqmp_sha_driver);
+MODULE_DESCRIPTION("ZynqMP SHA3 hardware acceleration support.");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Harsha <harsha.harsha@xilinx.com>");
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 450c5f6a1cbf..5e5b0bb2e4e0 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -1121,6 +1121,32 @@ int zynqmp_pm_aes_engine(const u64 address, u32 *out)
EXPORT_SYMBOL_GPL(zynqmp_pm_aes_engine);
/**
+ * zynqmp_pm_sha_hash - Access the SHA engine to calculate the hash
+ * @address: Address of the data / address of the output buffer where
+ *           the hash should be stored.
+ * @size: Size of the data.
+ * @flags:
+ *	BIT(0) - initialize the csudma driver and SHA3 (the address
+ *		 and size inputs can be NULL here).
+ *	BIT(1) - call the Sha3_Update API; may be issued multiple
+ *		 times when the data is not contiguous.
+ *	BIT(2) - produce the final hash of all updated data. The
+ *		 48-byte hash is written at the provided address.
+ *
+ * Return: Status, either success or an error code.
+ */
+int zynqmp_pm_sha_hash(const u64 address, const u32 size, const u32 flags)
+{
+ u32 lower_addr = lower_32_bits(address);
+ u32 upper_addr = upper_32_bits(address);
+
+ return zynqmp_pm_invoke_fn(PM_SECURE_SHA, upper_addr, lower_addr,
+ size, flags, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_sha_hash);
+
+/**
* zynqmp_pm_register_notifier() - PM API for register a subsystem
* to be notified about specific
* event/error.
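The three flag bits form an INIT/UPDATE/FINAL sequence; zynqmp_sha_digest() in the driver above is the canonical caller. Reduced to its firmware calls, and assuming dma_addr/out_addr point at DMA-coherent buffers, the flow is:

	/* BIT(0): initialize the engine; address and size are ignored */
	ret = zynqmp_pm_sha_hash(0, 0, ZYNQMP_SHA3_INIT);
	/* BIT(1): feed one chunk; repeat for non-contiguous data */
	ret = ret ?: zynqmp_pm_sha_hash(dma_addr, chunk_len, ZYNQMP_SHA3_UPDATE);
	/* BIT(2): write the 48-byte digest to out_addr */
	ret = ret ?: zynqmp_pm_sha_hash(out_addr, SHA3_384_DIGEST_SIZE, ZYNQMP_SHA3_FINAL);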
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
index a73a8017e0ee..a79201a9a6f0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
@@ -605,6 +605,7 @@ static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
} else if (!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK)) {
/* Registers that can be accessed from PF */
switch (offset) {
+ case CPT_AF_DIAG:
case CPT_AF_CTL:
case CPT_AF_PF_FUNC:
case CPT_AF_BLK_RST:
diff --git a/include/asm-generic/xor.h b/include/asm-generic/xor.h
index b62a2a56a4d4..44509d48fca2 100644
--- a/include/asm-generic/xor.h
+++ b/include/asm-generic/xor.h
@@ -8,7 +8,8 @@
#include <linux/prefetch.h>
static void
-xor_8regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+xor_8regs_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -27,8 +28,9 @@ xor_8regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
}
static void
-xor_8regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
+xor_8regs_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -48,8 +50,10 @@ xor_8regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_8regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
+xor_8regs_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -70,8 +74,11 @@ xor_8regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_8regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
+xor_8regs_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -93,7 +100,8 @@ xor_8regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_32regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+xor_32regs_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -129,8 +137,9 @@ xor_32regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
}
static void
-xor_32regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
+xor_32regs_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -175,8 +184,10 @@ xor_32regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_32regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
+xor_32regs_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -230,8 +241,11 @@ xor_32regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_32regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
+xor_32regs_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -294,7 +308,8 @@ xor_32regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_8regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+xor_8regs_p_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
prefetchw(p1);
@@ -320,8 +335,9 @@ xor_8regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
}
static void
-xor_8regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
+xor_8regs_p_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
prefetchw(p1);
@@ -350,8 +366,10 @@ xor_8regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_8regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
+xor_8regs_p_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
@@ -384,8 +402,11 @@ xor_8regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_8regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
+xor_8regs_p_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
@@ -421,7 +442,8 @@ xor_8regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_32regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+xor_32regs_p_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
@@ -466,8 +488,9 @@ xor_32regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
}
static void
-xor_32regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
+xor_32regs_p_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
@@ -523,8 +546,10 @@ xor_32regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_32regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
+xor_32regs_p_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
@@ -591,8 +616,11 @@ xor_32regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_32regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
+xor_32regs_p_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index f76ec723ceae..f50c5d1725da 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -13,6 +13,8 @@
#include <linux/list.h>
#include <linux/types.h>
+#include <asm/unaligned.h>
+
/*
* Maximum values for blocksize and alignmask, used to allocate
* static buffers that are big enough for any combination of
@@ -154,9 +156,11 @@ static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
(size % sizeof(unsigned long)) == 0) {
unsigned long *d = (unsigned long *)dst;
unsigned long *s = (unsigned long *)src;
+ unsigned long l;
while (size > 0) {
- *d++ ^= *s++;
+ l = get_unaligned(d) ^ get_unaligned(s++);
+ put_unaligned(l, d++);
size -= sizeof(unsigned long);
}
} else {
@@ -173,9 +177,11 @@ static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
unsigned long *d = (unsigned long *)dst;
unsigned long *s1 = (unsigned long *)src1;
unsigned long *s2 = (unsigned long *)src2;
+ unsigned long l;
while (size > 0) {
- *d++ = *s1++ ^ *s2++;
+ l = get_unaligned(s1++) ^ get_unaligned(s2++);
+ put_unaligned(l, d++);
size -= sizeof(unsigned long);
}
} else {
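The switch to get_unaligned()/put_unaligned() keeps the word-at-a-time loop safe for pointers that pass the size checks but are not naturally aligned; the observable behaviour is still a plain byte-wise XOR. A quick sanity sketch:

	u8 a[8] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	static const u8 b[8] = { 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f };

	crypto_xor(a, b, sizeof(a));	/* a[i] == 0xf0 for every i */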
diff --git a/include/crypto/dh.h b/include/crypto/dh.h
index d71e9858ab86..7b863e911cb4 100644
--- a/include/crypto/dh.h
+++ b/include/crypto/dh.h
@@ -24,21 +24,17 @@
*
* @key: Private DH key
* @p: Diffie-Hellman parameter P
- * @q: Diffie-Hellman parameter Q
* @g: Diffie-Hellman generator G
* @key_size: Size of the private DH key
* @p_size: Size of DH parameter P
- * @q_size: Size of DH parameter Q
* @g_size: Size of DH generator G
*/
struct dh {
- void *key;
- void *p;
- void *q;
- void *g;
+ const void *key;
+ const void *p;
+ const void *g;
unsigned int key_size;
unsigned int p_size;
- unsigned int q_size;
unsigned int g_size;
};
@@ -83,4 +79,20 @@ int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params);
*/
int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params);
+/**
+ * __crypto_dh_decode_key() - decode a private key without parameter checks
+ * @buf: Buffer holding a packed key that should be decoded
+ * @len: Length of the packed private key buffer
+ * @params: Buffer allocated by the caller that is filled with the
+ * unpacked DH private key.
+ *
+ * Internal function providing the same services as the exported
+ * crypto_dh_decode_key(), but without the basic parameter
+ * checks conducted by the latter.
+ *
+ * Return: -EINVAL if buffer has insufficient size, 0 on success
+ */
+int __crypto_dh_decode_key(const char *buf, unsigned int len,
+ struct dh *params);
+
#endif
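With q/q_size gone, a caller packs only the key, P and G. A sketch of encoding a secret for a 'dh' kpp transform, assuming caller-supplied big-endian buffers priv/prime/gen and an already-allocated tfm:

	struct dh params = {
		.key      = priv,	.key_size = priv_len,
		.p        = prime,	.p_size   = prime_len,
		.g        = gen,	.g_size   = gen_len,
	};
	unsigned int len = crypto_dh_key_len(&params);
	char *buf = kmalloc(len, GFP_KERNEL);
	int err;

	if (!buf)
		return -ENOMEM;
	err = crypto_dh_encode_key(buf, len, &params);
	if (!err)
		err = crypto_kpp_set_secret(tfm, buf, len);
	kfree_sensitive(buf);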
diff --git a/include/crypto/internal/kpp.h b/include/crypto/internal/kpp.h
index 659b642efada..9cb0662ebe87 100644
--- a/include/crypto/internal/kpp.h
+++ b/include/crypto/internal/kpp.h
@@ -10,6 +10,38 @@
#include <crypto/kpp.h>
#include <crypto/algapi.h>
+/**
+ * struct kpp_instance - KPP template instance
+ * @free: Callback getting invoked upon instance destruction. Must be set.
+ * @s: Internal. Generic crypto core instance state, properly laid out
+ * to alias with @alg as needed.
+ * @alg: The &struct kpp_alg implementation provided by the instance.
+ */
+struct kpp_instance {
+ void (*free)(struct kpp_instance *inst);
+ union {
+ struct {
+ char head[offsetof(struct kpp_alg, base)];
+ struct crypto_instance base;
+ } s;
+ struct kpp_alg alg;
+ };
+};
+
+/**
+ * struct crypto_kpp_spawn - KPP algorithm spawn
+ * @base: Internal. Generic crypto core spawn state.
+ *
+ * Template instances can get hold of some inner KPP algorithm by
+ * binding a &struct crypto_kpp_spawn via
+ * crypto_grab_kpp(). Transforms may subsequently get instantiated
+ * from the referenced inner &struct kpp_alg by means of
+ * crypto_spawn_kpp().
+ */
+struct crypto_kpp_spawn {
+ struct crypto_spawn base;
+};
+
/*
* Transform internal helpers.
*/
@@ -33,6 +65,62 @@ static inline const char *kpp_alg_name(struct crypto_kpp *tfm)
return crypto_kpp_tfm(tfm)->__crt_alg->cra_name;
}
+/*
+ * Template instance internal helpers.
+ */
+/**
+ * kpp_crypto_instance() - Cast a &struct kpp_instance to the corresponding
+ * generic &struct crypto_instance.
+ * @inst: Pointer to the &struct kpp_instance to be cast.
+ * Return: A pointer to the &struct crypto_instance embedded in @inst.
+ */
+static inline struct crypto_instance *kpp_crypto_instance(
+ struct kpp_instance *inst)
+{
+ return &inst->s.base;
+}
+
+/**
+ * kpp_instance() - Cast a generic &struct crypto_instance to the corresponding
+ * &struct kpp_instance.
+ * @inst: Pointer to the &struct crypto_instance to be cast.
+ * Return: A pointer to the &struct kpp_instance @inst is embedded in.
+ */
+static inline struct kpp_instance *kpp_instance(struct crypto_instance *inst)
+{
+ return container_of(inst, struct kpp_instance, s.base);
+}
+
+/**
+ * kpp_alg_instance() - Get the &struct kpp_instance a given KPP transform has
+ * been instantiated from.
+ * @kpp: The KPP transform instantiated from some &struct kpp_instance.
+ * Return: The &struct kpp_instance associated with @kpp.
+ */
+static inline struct kpp_instance *kpp_alg_instance(struct crypto_kpp *kpp)
+{
+ return kpp_instance(crypto_tfm_alg_instance(&kpp->base));
+}
+
+/**
+ * kpp_instance_ctx() - Get a pointer to a &struct kpp_instance's implementation
+ * specific context data.
+ * @inst: The &struct kpp_instance whose context data to access.
+ *
+ * A KPP template implementation may allocate extra memory beyond the
+ * end of a &struct kpp_instance instantiated from &crypto_template.create().
+ * This function provides a means to obtain a pointer to this area.
+ *
+ * Return: A pointer to the implementation specific context data.
+ */
+static inline void *kpp_instance_ctx(struct kpp_instance *inst)
+{
+ return crypto_instance_ctx(kpp_crypto_instance(inst));
+}
+
+/*
+ * KPP algorithm (un)registration functions.
+ */
/**
* crypto_register_kpp() -- Register key-agreement protocol primitives algorithm
*
@@ -56,4 +144,74 @@ int crypto_register_kpp(struct kpp_alg *alg);
*/
void crypto_unregister_kpp(struct kpp_alg *alg);
+/**
+ * kpp_register_instance() - Register a KPP template instance.
+ * @tmpl: The instantiating template.
+ * @inst: The KPP template instance to be registered.
+ * Return: %0 on success, negative error code otherwise.
+ */
+int kpp_register_instance(struct crypto_template *tmpl,
+ struct kpp_instance *inst);
+
+/*
+ * KPP spawn related functions.
+ */
+/**
+ * crypto_grab_kpp() - Look up a KPP algorithm and bind a spawn to it.
+ * @spawn: The KPP spawn to bind.
+ * @inst: The template instance owning @spawn.
+ * @name: The KPP algorithm name to look up.
+ * @type: The type bitset to pass on to the lookup.
+ * @mask: The mask bitmask to pass on to the lookup.
+ * Return: %0 on success, a negative error code otherwise.
+ */
+int crypto_grab_kpp(struct crypto_kpp_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask);
+
+/**
+ * crypto_drop_kpp() - Release a spawn previously bound via crypto_grab_kpp().
+ * @spawn: The spawn to release.
+ */
+static inline void crypto_drop_kpp(struct crypto_kpp_spawn *spawn)
+{
+ crypto_drop_spawn(&spawn->base);
+}
+
+/**
+ * crypto_spawn_kpp_alg() - Get the algorithm a KPP spawn has been bound to.
+ * @spawn: The spawn to get the referenced &struct kpp_alg for.
+ *
+ * This function as well as the returned result are safe to use only
+ * after @spawn has been successfully bound via crypto_grab_kpp() and
+ * until the template instance owning @spawn has either been
+ * registered successfully or the spawn has been released again via
+ * crypto_drop_spawn().
+ *
+ * Return: A pointer to the &struct kpp_alg referenced from the spawn.
+ */
+static inline struct kpp_alg *crypto_spawn_kpp_alg(
+ struct crypto_kpp_spawn *spawn)
+{
+ return container_of(spawn->base.alg, struct kpp_alg, base);
+}
+
+/**
+ * crypto_spawn_kpp() - Create a transform from a KPP spawn.
+ * @spawn: The spawn previously bound to some &struct kpp_alg via
+ * crypto_grab_kpp().
+ *
+ * Once a &struct crypto_kpp_spawn has been successfully bound to a
+ * &struct kpp_alg via crypto_grab_kpp(), transforms for the latter
+ * may get instantiated from the former by means of this function.
+ *
+ * Return: A pointer to the freshly created KPP transform on success
+ * or an ``ERR_PTR()`` otherwise.
+ */
+static inline struct crypto_kpp *crypto_spawn_kpp(
+ struct crypto_kpp_spawn *spawn)
+{
+ return crypto_spawn_tfm2(&spawn->base);
+}
+
#endif
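Taken together these helpers support the pattern the new ffdheXYZ(dh) templates rely on: grab the inner KPP in ->create(), keep the spawn in the instance context, and register. A compressed, hypothetical ->create() sketch (demo_kpp_free and the inst->alg fill-in are elided):

	static int demo_kpp_create(struct crypto_template *tmpl, struct rtattr **tb)
	{
		struct crypto_kpp_spawn *spawn;
		struct kpp_instance *inst;
		struct kpp_alg *alg;
		u32 mask;
		int err;

		err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_KPP, &mask);
		if (err)
			return err;

		inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
		if (!inst)
			return -ENOMEM;
		spawn = kpp_instance_ctx(inst);

		err = crypto_grab_kpp(spawn, kpp_crypto_instance(inst),
				      crypto_attr_alg_name(tb[1]), 0, mask);
		if (err)
			goto err_free;

		alg = crypto_spawn_kpp_alg(spawn);
		/* ... fill inst->alg (names, ops, ctxsize) from *alg ... */
		inst->free = demo_kpp_free;	/* hypothetical destructor */

		err = kpp_register_instance(tmpl, inst);
		if (err)
			goto err_drop;
		return 0;

	err_drop:
		crypto_drop_kpp(spawn);
	err_free:
		kfree(inst);
		return err;
	}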
diff --git a/include/crypto/sm3.h b/include/crypto/sm3.h
index 42ea21289ba9..1f021ad0533f 100644
--- a/include/crypto/sm3.h
+++ b/include/crypto/sm3.h
@@ -1,5 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Common values for SM3 algorithm
+ *
+ * Copyright (C) 2017 ARM Limited or its affiliates.
+ * Copyright (C) 2017 Gilad Ben-Yossef <gilad@benyossef.com>
+ * Copyright (C) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
*/
#ifndef _CRYPTO_SM3_H
@@ -30,13 +35,30 @@ struct sm3_state {
u8 buffer[SM3_BLOCK_SIZE];
};
-struct shash_desc;
+/*
+ * Stand-alone implementation of the SM3 algorithm. It is designed to
+ * have as few dependencies as possible so it can be used in the
+ * kexec_file purgatory. In other cases you should generally use the
+ * hash APIs from include/crypto/hash.h, especially when hashing large
+ * amounts of data, as those APIs may be hw-accelerated.
+ *
+ * For details see lib/crypto/sm3.c
+ */
-extern int crypto_sm3_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
+static inline void sm3_init(struct sm3_state *sctx)
+{
+ sctx->state[0] = SM3_IVA;
+ sctx->state[1] = SM3_IVB;
+ sctx->state[2] = SM3_IVC;
+ sctx->state[3] = SM3_IVD;
+ sctx->state[4] = SM3_IVE;
+ sctx->state[5] = SM3_IVF;
+ sctx->state[6] = SM3_IVG;
+ sctx->state[7] = SM3_IVH;
+ sctx->count = 0;
+}
-extern int crypto_sm3_final(struct shash_desc *desc, u8 *out);
+void sm3_update(struct sm3_state *sctx, const u8 *data, unsigned int len);
+void sm3_final(struct sm3_state *sctx, u8 *out);
-extern int crypto_sm3_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *hash);
#endif
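The resulting library API has the usual init/update/final shape. A minimal sketch hashing a buffer with the declarations above:

	#include <crypto/sm3.h>

	u8 digest[SM3_DIGEST_SIZE];
	struct sm3_state sctx;

	sm3_init(&sctx);
	sm3_update(&sctx, data, len);	/* may be called repeatedly */
	sm3_final(&sctx, digest);	/* writes the 32-byte digest */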
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 855869e1fd32..2324ab6f1846 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -133,6 +133,15 @@
#define CRYPTO_ALG_ALLOCATES_MEMORY 0x00010000
/*
+ * Mark an algorithm as a service implementation only usable by a
+ * template and never by a normal user of the kernel crypto API.
+ * This is intended to be used by algorithms that are themselves
+ * not FIPS-approved but may instead be used to implement parts of
+ * a FIPS-approved algorithm (e.g., dh vs. ffdhe2048(dh)).
+ */
+#define CRYPTO_ALG_FIPS_INTERNAL 0x00020000
+
+/*
* Transform masks and values (for crt_flags).
*/
#define CRYPTO_TFM_NEED_KEY 0x00000001
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
index 907cb01890cf..f6783f58c64a 100644
--- a/include/linux/firmware/xlnx-zynqmp.h
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -93,6 +93,7 @@ enum pm_api_id {
PM_FPGA_LOAD = 22,
PM_FPGA_GET_STATUS = 23,
PM_GET_CHIPID = 24,
+ PM_SECURE_SHA = 26,
PM_PINCTRL_REQUEST = 28,
PM_PINCTRL_RELEASE = 29,
PM_PINCTRL_GET_FUNCTION = 30,
@@ -427,6 +428,7 @@ int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
const u32 qos,
const enum zynqmp_pm_request_ack ack);
int zynqmp_pm_aes_engine(const u64 address, u32 *out);
+int zynqmp_pm_sha_hash(const u64 address, const u32 size, const u32 flags);
int zynqmp_pm_fpga_load(const u64 address, const u32 size, const u32 flags);
int zynqmp_pm_fpga_get_status(u32 *value);
int zynqmp_pm_write_ggs(u32 index, u32 value);
@@ -601,6 +603,12 @@ static inline int zynqmp_pm_aes_engine(const u64 address, u32 *out)
return -ENODEV;
}
+static inline int zynqmp_pm_sha_hash(const u64 address, const u32 size,
+ const u32 flags)
+{
+ return -ENODEV;
+}
+
static inline int zynqmp_pm_fpga_load(const u64 address, const u32 size,
const u32 flags)
{
diff --git a/include/linux/raid/xor.h b/include/linux/raid/xor.h
index 2a9fee8ddae3..51b811b62322 100644
--- a/include/linux/raid/xor.h
+++ b/include/linux/raid/xor.h
@@ -11,13 +11,20 @@ struct xor_block_template {
struct xor_block_template *next;
const char *name;
int speed;
- void (*do_2)(unsigned long, unsigned long *, unsigned long *);
- void (*do_3)(unsigned long, unsigned long *, unsigned long *,
- unsigned long *);
- void (*do_4)(unsigned long, unsigned long *, unsigned long *,
- unsigned long *, unsigned long *);
- void (*do_5)(unsigned long, unsigned long *, unsigned long *,
- unsigned long *, unsigned long *, unsigned long *);
+ void (*do_2)(unsigned long, unsigned long * __restrict,
+ const unsigned long * __restrict);
+ void (*do_3)(unsigned long, unsigned long * __restrict,
+ const unsigned long * __restrict,
+ const unsigned long * __restrict);
+ void (*do_4)(unsigned long, unsigned long * __restrict,
+ const unsigned long * __restrict,
+ const unsigned long * __restrict,
+ const unsigned long * __restrict);
+ void (*do_5)(unsigned long, unsigned long * __restrict,
+ const unsigned long * __restrict,
+ const unsigned long * __restrict,
+ const unsigned long * __restrict,
+ const unsigned long * __restrict);
};
#endif
diff --git a/kernel/padata.c b/kernel/padata.c
index 18d3a5c699d8..e5819bb8bd1d 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -181,7 +181,7 @@ int padata_do_parallel(struct padata_shell *ps,
goto out;
if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) {
- if (!cpumask_weight(pd->cpumask.cbcpu))
+ if (cpumask_empty(pd->cpumask.cbcpu))
goto out;
/* Select an alternate fallback CPU and notify the caller. */
diff --git a/lib/crc32.c b/lib/crc32.c
index 2a68dfd3b96c..5649847d0a8d 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -194,13 +194,11 @@ u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len)
#else
u32 __pure __weak crc32_le(u32 crc, unsigned char const *p, size_t len)
{
- return crc32_le_generic(crc, p, len,
- (const u32 (*)[256])crc32table_le, CRC32_POLY_LE);
+ return crc32_le_generic(crc, p, len, crc32table_le, CRC32_POLY_LE);
}
u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len)
{
- return crc32_le_generic(crc, p, len,
- (const u32 (*)[256])crc32ctable_le, CRC32C_POLY_LE);
+ return crc32_le_generic(crc, p, len, crc32ctable_le, CRC32C_POLY_LE);
}
#endif
EXPORT_SYMBOL(crc32_le);
@@ -208,6 +206,7 @@ EXPORT_SYMBOL(__crc32c_le);
u32 __pure crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le);
u32 __pure __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le);
+u32 __pure crc32_be_base(u32, unsigned char const *, size_t) __alias(crc32_be);
/*
* This multiplies the polynomials x and y modulo the given modulus.
@@ -332,15 +331,14 @@ static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p,
}
#if CRC_BE_BITS == 1
-u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
+u32 __pure __weak crc32_be(u32 crc, unsigned char const *p, size_t len)
{
return crc32_be_generic(crc, p, len, NULL, CRC32_POLY_BE);
}
#else
-u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
+u32 __pure __weak crc32_be(u32 crc, unsigned char const *p, size_t len)
{
- return crc32_be_generic(crc, p, len,
- (const u32 (*)[256])crc32table_be, CRC32_POLY_BE);
+ return crc32_be_generic(crc, p, len, crc32table_be, CRC32_POLY_BE);
}
#endif
EXPORT_SYMBOL(crc32_be);
diff --git a/lib/crc32test.c b/lib/crc32test.c
index 61ddce2cff77..9b4af79412c4 100644
--- a/lib/crc32test.c
+++ b/lib/crc32test.c
@@ -675,7 +675,7 @@ static int __init crc32c_test(void)
/* pre-warm the cache */
for (i = 0; i < 100; i++) {
- bytes += 2*test[i].length;
+ bytes += test[i].length;
crc ^= __crc32c_le(test[i].crc, test_buf +
test[i].start, test[i].length);
diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
index e8e525650cf2..379a66d7f504 100644
--- a/lib/crypto/Kconfig
+++ b/lib/crypto/Kconfig
@@ -123,6 +123,9 @@ config CRYPTO_LIB_CHACHA20POLY1305
config CRYPTO_LIB_SHA256
tristate
+config CRYPTO_LIB_SM3
+ tristate
+
config CRYPTO_LIB_SM4
tristate
diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile
index ed43a41f2dcc..6c872d05d1e6 100644
--- a/lib/crypto/Makefile
+++ b/lib/crypto/Makefile
@@ -37,6 +37,9 @@ libpoly1305-y += poly1305.o
obj-$(CONFIG_CRYPTO_LIB_SHA256) += libsha256.o
libsha256-y := sha256.o
+obj-$(CONFIG_CRYPTO_LIB_SM3) += libsm3.o
+libsm3-y := sm3.o
+
obj-$(CONFIG_CRYPTO_LIB_SM4) += libsm4.o
libsm4-y := sm4.o
diff --git a/lib/crypto/sm3.c b/lib/crypto/sm3.c
new file mode 100644
index 000000000000..d473e358a873
--- /dev/null
+++ b/lib/crypto/sm3.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SM3 secure hash, as specified by OSCCA GM/T 0004-2012 SM3 and described
+ * at https://datatracker.ietf.org/doc/html/draft-sca-cfrg-sm3-02
+ *
+ * Copyright (C) 2017 ARM Limited or its affiliates.
+ * Copyright (C) 2017 Gilad Ben-Yossef <gilad@benyossef.com>
+ * Copyright (C) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+ */
+
+#include <linux/module.h>
+#include <asm/unaligned.h>
+#include <crypto/sm3.h>
+
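+/*
+ * Precomputed round constants: K[i] = rol32(T, i % 32), where T is
+ * 0x79cc4519 for rounds 0..15 and 0x7a879d8a for rounds 16..63.
+ */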
+static const u32 ____cacheline_aligned K[64] = {
+ 0x79cc4519, 0xf3988a32, 0xe7311465, 0xce6228cb,
+ 0x9cc45197, 0x3988a32f, 0x7311465e, 0xe6228cbc,
+ 0xcc451979, 0x988a32f3, 0x311465e7, 0x6228cbce,
+ 0xc451979c, 0x88a32f39, 0x11465e73, 0x228cbce6,
+ 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c,
+ 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce,
+ 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec,
+ 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5,
+ 0x7a879d8a, 0xf50f3b14, 0xea1e7629, 0xd43cec53,
+ 0xa879d8a7, 0x50f3b14f, 0xa1e7629e, 0x43cec53d,
+ 0x879d8a7a, 0x0f3b14f5, 0x1e7629ea, 0x3cec53d4,
+ 0x79d8a7a8, 0xf3b14f50, 0xe7629ea1, 0xcec53d43,
+ 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c,
+ 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce,
+ 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec,
+ 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5
+};
+
+/*
+ * Transform the message X, which consists of 16 32-bit words. See
+ * GM/T 0004-2012 for details.
+ */
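+/*
+ * One compression round: ss1/ss2 are the intermediate mixing words,
+ * (w1 ^ w2) feeds the FF boolean branch and w1 alone the GG branch,
+ * while b and f take fixed rotations and h passes through P0.
+ */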
+#define R(i, a, b, c, d, e, f, g, h, t, w1, w2) \
+ do { \
+ ss1 = rol32((rol32((a), 12) + (e) + (t)), 7); \
+ ss2 = ss1 ^ rol32((a), 12); \
+ d += FF ## i(a, b, c) + ss2 + ((w1) ^ (w2)); \
+ h += GG ## i(e, f, g) + ss1 + (w1); \
+ b = rol32((b), 9); \
+ f = rol32((f), 19); \
+ h = P0((h)); \
+ } while (0)
+
+#define R1(a, b, c, d, e, f, g, h, t, w1, w2) \
+ R(1, a, b, c, d, e, f, g, h, t, w1, w2)
+#define R2(a, b, c, d, e, f, g, h, t, w1, w2) \
+ R(2, a, b, c, d, e, f, g, h, t, w1, w2)
+
+#define FF1(x, y, z) (x ^ y ^ z)
+#define FF2(x, y, z) ((x & y) | (x & z) | (y & z))
+
+#define GG1(x, y, z) FF1(x, y, z)
+#define GG2(x, y, z) ((x & y) | (~x & z))
+
+/* P0 is the final permutation in each round; P1 drives the message expansion. */
+#define P0(x) ((x) ^ rol32((x), 9) ^ rol32((x), 17))
+#define P1(x) ((x) ^ rol32((x), 15) ^ rol32((x), 23))
+#define I(i) (W[i] = get_unaligned_be32(data + i * 4))
+#define W1(i) (W[i & 0x0f])
+#define W2(i) (W[i & 0x0f] = \
+ P1(W[i & 0x0f] \
+ ^ W[(i-9) & 0x0f] \
+ ^ rol32(W[(i-3) & 0x0f], 15)) \
+ ^ rol32(W[(i-13) & 0x0f], 7) \
+ ^ W[(i-6) & 0x0f])
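+/*
+ * I() loads a big-endian message word, W1() reads from the sliding
+ * 16-word window, and W2() expands word i in place, so only sixteen
+ * u32s of schedule storage are ever live.
+ */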
+
+static void sm3_transform(struct sm3_state *sctx, u8 const *data, u32 W[16])
+{
+ u32 a, b, c, d, e, f, g, h, ss1, ss2;
+
+ a = sctx->state[0];
+ b = sctx->state[1];
+ c = sctx->state[2];
+ d = sctx->state[3];
+ e = sctx->state[4];
+ f = sctx->state[5];
+ g = sctx->state[6];
+ h = sctx->state[7];
+
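+ /*
+  * Four rounds per group: the a..d and e..h roles rotate through the
+  * argument lists, so no explicit variable shuffling is needed.
+  */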
+ R1(a, b, c, d, e, f, g, h, K[0], I(0), I(4));
+ R1(d, a, b, c, h, e, f, g, K[1], I(1), I(5));
+ R1(c, d, a, b, g, h, e, f, K[2], I(2), I(6));
+ R1(b, c, d, a, f, g, h, e, K[3], I(3), I(7));
+ R1(a, b, c, d, e, f, g, h, K[4], W1(4), I(8));
+ R1(d, a, b, c, h, e, f, g, K[5], W1(5), I(9));
+ R1(c, d, a, b, g, h, e, f, K[6], W1(6), I(10));
+ R1(b, c, d, a, f, g, h, e, K[7], W1(7), I(11));
+ R1(a, b, c, d, e, f, g, h, K[8], W1(8), I(12));
+ R1(d, a, b, c, h, e, f, g, K[9], W1(9), I(13));
+ R1(c, d, a, b, g, h, e, f, K[10], W1(10), I(14));
+ R1(b, c, d, a, f, g, h, e, K[11], W1(11), I(15));
+ R1(a, b, c, d, e, f, g, h, K[12], W1(12), W2(16));
+ R1(d, a, b, c, h, e, f, g, K[13], W1(13), W2(17));
+ R1(c, d, a, b, g, h, e, f, K[14], W1(14), W2(18));
+ R1(b, c, d, a, f, g, h, e, K[15], W1(15), W2(19));
+
+ R2(a, b, c, d, e, f, g, h, K[16], W1(16), W2(20));
+ R2(d, a, b, c, h, e, f, g, K[17], W1(17), W2(21));
+ R2(c, d, a, b, g, h, e, f, K[18], W1(18), W2(22));
+ R2(b, c, d, a, f, g, h, e, K[19], W1(19), W2(23));
+ R2(a, b, c, d, e, f, g, h, K[20], W1(20), W2(24));
+ R2(d, a, b, c, h, e, f, g, K[21], W1(21), W2(25));
+ R2(c, d, a, b, g, h, e, f, K[22], W1(22), W2(26));
+ R2(b, c, d, a, f, g, h, e, K[23], W1(23), W2(27));
+ R2(a, b, c, d, e, f, g, h, K[24], W1(24), W2(28));
+ R2(d, a, b, c, h, e, f, g, K[25], W1(25), W2(29));
+ R2(c, d, a, b, g, h, e, f, K[26], W1(26), W2(30));
+ R2(b, c, d, a, f, g, h, e, K[27], W1(27), W2(31));
+ R2(a, b, c, d, e, f, g, h, K[28], W1(28), W2(32));
+ R2(d, a, b, c, h, e, f, g, K[29], W1(29), W2(33));
+ R2(c, d, a, b, g, h, e, f, K[30], W1(30), W2(34));
+ R2(b, c, d, a, f, g, h, e, K[31], W1(31), W2(35));
+
+ R2(a, b, c, d, e, f, g, h, K[32], W1(32), W2(36));
+ R2(d, a, b, c, h, e, f, g, K[33], W1(33), W2(37));
+ R2(c, d, a, b, g, h, e, f, K[34], W1(34), W2(38));
+ R2(b, c, d, a, f, g, h, e, K[35], W1(35), W2(39));
+ R2(a, b, c, d, e, f, g, h, K[36], W1(36), W2(40));
+ R2(d, a, b, c, h, e, f, g, K[37], W1(37), W2(41));
+ R2(c, d, a, b, g, h, e, f, K[38], W1(38), W2(42));
+ R2(b, c, d, a, f, g, h, e, K[39], W1(39), W2(43));
+ R2(a, b, c, d, e, f, g, h, K[40], W1(40), W2(44));
+ R2(d, a, b, c, h, e, f, g, K[41], W1(41), W2(45));
+ R2(c, d, a, b, g, h, e, f, K[42], W1(42), W2(46));
+ R2(b, c, d, a, f, g, h, e, K[43], W1(43), W2(47));
+ R2(a, b, c, d, e, f, g, h, K[44], W1(44), W2(48));
+ R2(d, a, b, c, h, e, f, g, K[45], W1(45), W2(49));
+ R2(c, d, a, b, g, h, e, f, K[46], W1(46), W2(50));
+ R2(b, c, d, a, f, g, h, e, K[47], W1(47), W2(51));
+
+ R2(a, b, c, d, e, f, g, h, K[48], W1(48), W2(52));
+ R2(d, a, b, c, h, e, f, g, K[49], W1(49), W2(53));
+ R2(c, d, a, b, g, h, e, f, K[50], W1(50), W2(54));
+ R2(b, c, d, a, f, g, h, e, K[51], W1(51), W2(55));
+ R2(a, b, c, d, e, f, g, h, K[52], W1(52), W2(56));
+ R2(d, a, b, c, h, e, f, g, K[53], W1(53), W2(57));
+ R2(c, d, a, b, g, h, e, f, K[54], W1(54), W2(58));
+ R2(b, c, d, a, f, g, h, e, K[55], W1(55), W2(59));
+ R2(a, b, c, d, e, f, g, h, K[56], W1(56), W2(60));
+ R2(d, a, b, c, h, e, f, g, K[57], W1(57), W2(61));
+ R2(c, d, a, b, g, h, e, f, K[58], W1(58), W2(62));
+ R2(b, c, d, a, f, g, h, e, K[59], W1(59), W2(63));
+ R2(a, b, c, d, e, f, g, h, K[60], W1(60), W2(64));
+ R2(d, a, b, c, h, e, f, g, K[61], W1(61), W2(65));
+ R2(c, d, a, b, g, h, e, f, K[62], W1(62), W2(66));
+ R2(b, c, d, a, f, g, h, e, K[63], W1(63), W2(67));
+
+ sctx->state[0] ^= a;
+ sctx->state[1] ^= b;
+ sctx->state[2] ^= c;
+ sctx->state[3] ^= d;
+ sctx->state[4] ^= e;
+ sctx->state[5] ^= f;
+ sctx->state[6] ^= g;
+ sctx->state[7] ^= h;
+}
+#undef R
+#undef R1
+#undef R2
+#undef I
+#undef W1
+#undef W2
+
+static inline void sm3_block(struct sm3_state *sctx,
+ u8 const *data, int blocks, u32 W[16])
+{
+ while (blocks--) {
+ sm3_transform(sctx, data, W);
+ data += SM3_BLOCK_SIZE;
+ }
+}
+
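+/*
+ * Buffer partial input until a full SM3_BLOCK_SIZE chunk is available,
+ * hash whole blocks straight from the caller's buffer, and keep any
+ * tail bytes for the next call.
+ */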
+void sm3_update(struct sm3_state *sctx, const u8 *data, unsigned int len)
+{
+ unsigned int partial = sctx->count % SM3_BLOCK_SIZE;
+ u32 W[16];
+
+ sctx->count += len;
+
+ if ((partial + len) >= SM3_BLOCK_SIZE) {
+ int blocks;
+
+ if (partial) {
+ int p = SM3_BLOCK_SIZE - partial;
+
+ memcpy(sctx->buffer + partial, data, p);
+ data += p;
+ len -= p;
+
+ sm3_block(sctx, sctx->buffer, 1, W);
+ }
+
+ blocks = len / SM3_BLOCK_SIZE;
+ len %= SM3_BLOCK_SIZE;
+
+ if (blocks) {
+ sm3_block(sctx, data, blocks, W);
+ data += blocks * SM3_BLOCK_SIZE;
+ }
+
+ memzero_explicit(W, sizeof(W));
+
+ partial = 0;
+ }
+ if (len)
+ memcpy(sctx->buffer + partial, data, len);
+}
+EXPORT_SYMBOL_GPL(sm3_update);
+
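+/*
+ * Pad with 0x80 and zeroes, spilling into an extra block when the
+ * 64-bit bit count no longer fits, then append the big-endian length
+ * and emit the digest.
+ */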
+void sm3_final(struct sm3_state *sctx, u8 *out)
+{
+ const int bit_offset = SM3_BLOCK_SIZE - sizeof(u64);
+ __be64 *bits = (__be64 *)(sctx->buffer + bit_offset);
+ __be32 *digest = (__be32 *)out;
+ unsigned int partial = sctx->count % SM3_BLOCK_SIZE;
+ u32 W[16];
+ int i;
+
+ sctx->buffer[partial++] = 0x80;
+ if (partial > bit_offset) {
+ memset(sctx->buffer + partial, 0, SM3_BLOCK_SIZE - partial);
+ partial = 0;
+
+ sm3_block(sctx, sctx->buffer, 1, W);
+ }
+
+ memset(sctx->buffer + partial, 0, bit_offset - partial);
+ *bits = cpu_to_be64(sctx->count << 3);
+ sm3_block(sctx, sctx->buffer, 1, W);
+
+ for (i = 0; i < 8; i++)
+ put_unaligned_be32(sctx->state[i], digest++);
+
+ /* Zeroize sensitive information. */
+ memzero_explicit(W, sizeof(W));
+ memzero_explicit(sctx, sizeof(*sctx));
+}
+EXPORT_SYMBOL_GPL(sm3_final);
+
+MODULE_DESCRIPTION("Generic SM3 library");
+MODULE_LICENSE("GPL v2");
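
Note: a hedged usage sketch of the new library interface. sm3_init() and SM3_DIGEST_SIZE are assumed to come from <crypto/sm3.h> as companions of the functions above:

	#include <crypto/sm3.h>

	static void sm3_digest_example(const u8 *msg, unsigned int len,
				       u8 out[SM3_DIGEST_SIZE])
	{
		struct sm3_state sctx;

		sm3_init(&sctx);		/* load the SM3 IV */
		sm3_update(&sctx, msg, len);	/* absorb the message */
		sm3_final(&sctx, out);		/* pad, finalize, wipe state */
	}
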
diff --git a/lib/mpi/mpi-bit.c b/lib/mpi/mpi-bit.c
index 142b680835df..070ba784c9f1 100644
--- a/lib/mpi/mpi-bit.c
+++ b/lib/mpi/mpi-bit.c
@@ -242,6 +242,7 @@ void mpi_rshift(MPI x, MPI a, unsigned int n)
}
MPN_NORMALIZE(x->d, x->nlimbs);
}
+EXPORT_SYMBOL_GPL(mpi_rshift);
/****************
* Shift A by COUNT limbs to the left
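
Note: exporting mpi_rshift() makes the right-shift primitive usable from modular code. A minimal caller sketch (allocation size and error handling elided, names illustrative):

	#include <linux/mpi.h>

	/* halve the MPI value a, result in x */
	MPI x = mpi_alloc(0);

	if (x)
		mpi_rshift(x, a, 1);	/* x = a >> 1 */
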
diff --git a/security/keys/dh.c b/security/keys/dh.c
index 4573fc15617d..b339760a31dd 100644
--- a/security/keys/dh.c
+++ b/security/keys/dh.c
@@ -15,7 +15,7 @@
#include <keys/user-type.h>
#include "internal.h"
-static ssize_t dh_data_from_key(key_serial_t keyid, void **data)
+static ssize_t dh_data_from_key(key_serial_t keyid, const void **data)
{
struct key *key;
key_ref_t key_ref;