author     Linus Torvalds  2020-10-13 08:50:16 -0700
committer  Linus Torvalds  2020-10-13 08:50:16 -0700
commit     39a5101f989e8d2be557136704d53990f9b402c8 (patch)
tree       b9c16c6f32508939111fb6d0159d7450713a5f33 /crypto
parent     865c50e1d279671728c2936cb7680eb89355eeea (diff)
parent     3093e7c16e12d729c325adb3c53dde7308cefbd8 (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "API:
   - Allow DRBG testing through user-space af_alg
   - Add tcrypt speed testing support for keyed hashes
   - Add type-safe init/exit hooks for ahash

  Algorithms:
   - Mark arc4 as obsolete and pending for future removal
   - Mark anubis, khazad, seed and tea as obsolete
   - Improve boot-time xor benchmark
   - Add OSCCA SM2 asymmetric cipher algorithm and use it for integrity

  Drivers:
   - Fixes and enhancements for XTS in caam
   - Add support for XIP8001B hwrng in xiphera-trng
   - Add RNG and hash support in sun8i-ce/sun8i-ss
   - Allow imx-rngc to be used by kernel entropy pool
   - Use crypto engine in omap-sham
   - Add support for Ingenic X1830 with ingenic"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (205 commits)
  X.509: Fix modular build of public_key_sm2
  crypto: xor - Remove unused variable count in do_xor_speed
  X.509: fix error return value on the failed path
  crypto: bcm - Verify GCM/CCM key length in setkey
  crypto: qat - drop input parameter from adf_enable_aer()
  crypto: qat - fix function parameters descriptions
  crypto: atmel-tdes - use semicolons rather than commas to separate statements
  crypto: drivers - use semicolons rather than commas to separate statements
  hwrng: mxc-rnga - use semicolons rather than commas to separate statements
  hwrng: iproc-rng200 - use semicolons rather than commas to separate statements
  hwrng: stm32 - use semicolons rather than commas to separate statements
  crypto: xor - use ktime for template benchmarking
  crypto: xor - defer load time benchmark to a later time
  crypto: hisilicon/zip - fix the uninitalized 'curr_qm_qp_num'
  crypto: hisilicon/zip - fix the return value when device is busy
  crypto: hisilicon/zip - fix zero length input in GZIP decompress
  crypto: hisilicon/zip - fix the uncleared debug registers
  lib/mpi: Fix unused variable warnings
  crypto: x86/poly1305 - Remove assignments with no effect
  hwrng: npcm - modify readl to readb
  ...
Diffstat (limited to 'crypto')
-rw-r--r--  crypto/Kconfig                               |  40
-rw-r--r--  crypto/Makefile                              |   8
-rw-r--r--  crypto/af_alg.c                              |  14
-rw-r--r--  crypto/ahash.c                               |  54
-rw-r--r--  crypto/algif_aead.c                          |   9
-rw-r--r--  crypto/algif_rng.c                           | 175
-rw-r--r--  crypto/algif_skcipher.c                      |   2
-rw-r--r--  crypto/arc4.c                                |  11
-rw-r--r--  crypto/asymmetric_keys/public_key.c          |  63
-rw-r--r--  crypto/asymmetric_keys/x509_cert_parser.c    |  27
-rw-r--r--  crypto/asymmetric_keys/x509_public_key.c     |   3
-rw-r--r--  crypto/cbc.c                                 | 144
-rw-r--r--  crypto/crc32c_generic.c                      |   4
-rw-r--r--  crypto/crct10dif_generic.c                   |   2
-rw-r--r--  crypto/crypto_engine.c                       |   3
-rw-r--r--  crypto/ecrdsa.c                              |   1
-rw-r--r--  crypto/internal.h                            |  16
-rw-r--r--  crypto/jitterentropy-kcapi.c                 |   2
-rw-r--r--  crypto/proc.c                                |   4
-rw-r--r--  crypto/rsa-pkcs1pad.c                        |   1
-rw-r--r--  crypto/sm2.c                                 | 481
-rw-r--r--  crypto/sm2signature.asn1                     |   4
-rw-r--r--  crypto/sm3_generic.c                         |   7
-rw-r--r--  crypto/tcrypt.c                              |  18
-rw-r--r--  crypto/tcrypt.h                              |  29
-rw-r--r--  crypto/testmgr.c                             |  25
-rw-r--r--  crypto/testmgr.h                             |  59
-rw-r--r--  crypto/xor.c                                 |  69
28 files changed, 1122 insertions, 153 deletions
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 1b57419fa2e7..094ef56ab7b4 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -260,6 +260,23 @@ config CRYPTO_ECRDSA
standard algorithms (called GOST algorithms). Only signature verification
is implemented.
+config CRYPTO_SM2
+ tristate "SM2 algorithm"
+ select CRYPTO_SM3
+ select CRYPTO_AKCIPHER
+ select CRYPTO_MANAGER
+ select MPILIB
+ select ASN1
+ help
+ Generic implementation of the SM2 public key algorithm. It was
+	  published by State Encryption Management Bureau, China,
+ as specified by OSCCA GM/T 0003.1-2012 -- 0003.5-2012.
+
+ References:
+ https://tools.ietf.org/html/draft-shen-sm2-ecdsa-02
+ http://www.oscca.gov.cn/sca/xxgk/2010-12/17/content_1002386.shtml
+ http://www.gmbz.org.cn/main/bzlb.html
+
config CRYPTO_CURVE25519
tristate "Curve25519 algorithm"
select CRYPTO_KPP
@@ -1185,6 +1202,7 @@ config CRYPTO_AES_PPC_SPE
config CRYPTO_ANUBIS
tristate "Anubis cipher algorithm"
+ depends on CRYPTO_USER_API_ENABLE_OBSOLETE
select CRYPTO_ALGAPI
help
Anubis cipher algorithm.
@@ -1199,6 +1217,7 @@ config CRYPTO_ANUBIS
config CRYPTO_ARC4
tristate "ARC4 cipher algorithm"
+ depends on CRYPTO_USER_API_ENABLE_OBSOLETE
select CRYPTO_SKCIPHER
select CRYPTO_LIB_ARC4
help
@@ -1423,6 +1442,7 @@ config CRYPTO_FCRYPT
config CRYPTO_KHAZAD
tristate "Khazad cipher algorithm"
+ depends on CRYPTO_USER_API_ENABLE_OBSOLETE
select CRYPTO_ALGAPI
help
Khazad cipher algorithm.
@@ -1486,6 +1506,7 @@ config CRYPTO_CHACHA_MIPS
config CRYPTO_SEED
tristate "SEED cipher algorithm"
+ depends on CRYPTO_USER_API_ENABLE_OBSOLETE
select CRYPTO_ALGAPI
help
SEED cipher algorithm (RFC4269).
@@ -1612,6 +1633,7 @@ config CRYPTO_SM4
config CRYPTO_TEA
tristate "TEA, XTEA and XETA cipher algorithms"
+ depends on CRYPTO_USER_API_ENABLE_OBSOLETE
select CRYPTO_ALGAPI
help
TEA cipher algorithm.
@@ -1870,6 +1892,15 @@ config CRYPTO_USER_API_RNG
This option enables the user-spaces interface for random
number generator algorithms.
+config CRYPTO_USER_API_RNG_CAVP
+ bool "Enable CAVP testing of DRBG"
+ depends on CRYPTO_USER_API_RNG && CRYPTO_DRBG
+ help
+ This option enables extra API for CAVP testing via the user-space
+ interface: resetting of DRBG entropy, and providing Additional Data.
+ This should only be enabled for CAVP testing. You should say
+ no unless you know what this is.
+
config CRYPTO_USER_API_AEAD
tristate "User-space interface for AEAD cipher algorithms"
depends on NET
@@ -1881,6 +1912,15 @@ config CRYPTO_USER_API_AEAD
This option enables the user-spaces interface for AEAD
cipher algorithms.
+config CRYPTO_USER_API_ENABLE_OBSOLETE
+ bool "Enable obsolete cryptographic algorithms for userspace"
+ depends on CRYPTO_USER_API
+ default y
+ help
+ Allow obsolete cryptographic algorithms to be selected that have
+ already been phased out from internal use by the kernel, and are
+ only useful for userspace clients that still rely on them.
+
config CRYPTO_STATS
bool "Crypto usage statistics for User-space"
depends on CRYPTO_USER
diff --git a/crypto/Makefile b/crypto/Makefile
index 4ca12b6044f7..b279483fba50 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -42,6 +42,14 @@ rsa_generic-y += rsa_helper.o
rsa_generic-y += rsa-pkcs1pad.o
obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o
+$(obj)/sm2signature.asn1.o: $(obj)/sm2signature.asn1.c $(obj)/sm2signature.asn1.h
+$(obj)/sm2.o: $(obj)/sm2signature.asn1.h
+
+sm2_generic-y += sm2signature.asn1.o
+sm2_generic-y += sm2.o
+
+obj-$(CONFIG_CRYPTO_SM2) += sm2_generic.o
+
crypto_acompress-y := acompress.o
crypto_acompress-y += scompress.o
obj-$(CONFIG_CRYPTO_ACOMP2) += crypto_acompress.o
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 8be8bec07cdd..d11db80d24cd 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -254,6 +254,14 @@ static int alg_setsockopt(struct socket *sock, int level, int optname,
if (!type->setauthsize)
goto unlock;
err = type->setauthsize(ask->private, optlen);
+ break;
+ case ALG_SET_DRBG_ENTROPY:
+ if (sock->state == SS_CONNECTED)
+ goto unlock;
+ if (!type->setentropy)
+ goto unlock;
+
+ err = type->setentropy(ask->private, optval, optlen);
}
unlock:
@@ -286,6 +294,11 @@ int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern)
security_sock_graft(sk2, newsock);
security_sk_clone(sk, sk2);
+ /*
+ * newsock->ops assigned here to allow type->accept call to override
+ * them when required.
+ */
+ newsock->ops = type->ops;
err = type->accept(ask->private, sk2);
nokey = err == -ENOKEY;
@@ -304,7 +317,6 @@ int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern)
alg_sk(sk2)->parent = sk;
alg_sk(sk2)->type = type;
- newsock->ops = type->ops;
newsock->state = SS_CONNECTED;
if (nokey)
diff --git a/crypto/ahash.c b/crypto/ahash.c
index d9d65d1cc669..c2ca631a111f 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -10,7 +10,6 @@
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
-#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -46,10 +45,7 @@ static int hash_walk_next(struct crypto_hash_walk *walk)
unsigned int nbytes = min(walk->entrylen,
((unsigned int)(PAGE_SIZE)) - offset);
- if (walk->flags & CRYPTO_ALG_ASYNC)
- walk->data = kmap(walk->pg);
- else
- walk->data = kmap_atomic(walk->pg);
+ walk->data = kmap_atomic(walk->pg);
walk->data += offset;
if (offset & alignmask) {
@@ -99,16 +95,8 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
}
}
- if (walk->flags & CRYPTO_ALG_ASYNC)
- kunmap(walk->pg);
- else {
- kunmap_atomic(walk->data);
- /*
- * The may sleep test only makes sense for sync users.
- * Async users don't need to sleep here anyway.
- */
- crypto_yield(walk->flags);
- }
+ kunmap_atomic(walk->data);
+ crypto_yield(walk->flags);
if (err)
return err;
@@ -140,33 +128,12 @@ int crypto_hash_walk_first(struct ahash_request *req,
walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
walk->sg = req->src;
- walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
+ walk->flags = req->base.flags;
return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
-int crypto_ahash_walk_first(struct ahash_request *req,
- struct crypto_hash_walk *walk)
-{
- walk->total = req->nbytes;
-
- if (!walk->total) {
- walk->entrylen = 0;
- return 0;
- }
-
- walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
- walk->sg = req->src;
- walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
- walk->flags |= CRYPTO_ALG_ASYNC;
-
- BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);
-
- return hash_walk_new_entry(walk);
-}
-EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);
-
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
@@ -477,6 +444,14 @@ static int ahash_def_finup(struct ahash_request *req)
return ahash_def_finup_finish1(req, err);
}
+static void crypto_ahash_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
+ struct ahash_alg *alg = crypto_ahash_alg(hash);
+
+ alg->exit_tfm(hash);
+}
+
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
@@ -500,7 +475,10 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
ahash_set_needkey(hash);
}
- return 0;
+ if (alg->exit_tfm)
+ tfm->exit = crypto_ahash_exit_tfm;
+
+ return alg->init_tfm ? alg->init_tfm(hash) : 0;
}
static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
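The ahash.c hunks above add type-safe per-tfm hooks: crypto_ahash_init_tfm() now installs crypto_ahash_exit_tfm() as the tfm exit handler whenever an ahash_alg provides exit_tfm, and init_tfm/exit_tfm take a struct crypto_ahash * instead of the raw struct crypto_tfm *. A minimal sketch of an algorithm wired to the new hooks is below; the "demo" names, context layout and buffer size are hypothetical and only illustrate the hook signatures.

/* Hedged sketch -- hypothetical driver, not part of this patch. */
#include <crypto/internal/hash.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>

struct demo_ahash_ctx {
	void *state;				/* per-tfm resource owned by the driver */
};

static int demo_ahash_init_tfm(struct crypto_ahash *tfm)
{
	struct demo_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ctx->state = kzalloc(64, GFP_KERNEL);	/* acquire per-tfm state */
	return ctx->state ? 0 : -ENOMEM;
}

static void demo_ahash_exit_tfm(struct crypto_ahash *tfm)
{
	struct demo_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	kfree(ctx->state);			/* runs via crypto_ahash_exit_tfm() */
}

static struct ahash_alg demo_ahash_alg = {
	/* .init/.update/.final/.digest and .halg.digestsize omitted for brevity */
	.init_tfm	= demo_ahash_init_tfm,
	.exit_tfm	= demo_ahash_exit_tfm,
	.halg.base	= {
		.cra_name	 = "demo",
		.cra_driver_name = "demo-generic",
		.cra_ctxsize	 = sizeof(struct demo_ahash_ctx),
		.cra_module	 = THIS_MODULE,
	},
};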
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 21efa786f09c..42493b4d8ce4 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -78,7 +78,7 @@ static int crypto_aead_copy_sgl(struct crypto_sync_skcipher *null_tfm,
SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);
skcipher_request_set_sync_tfm(skreq, null_tfm);
- skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_SLEEP,
NULL, NULL);
skcipher_request_set_crypt(skreq, src, dst, len, NULL);
@@ -120,7 +120,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
/*
* Make sure sufficient data is present -- note, the same check is
- * is also present in sendmsg/sendpage. The checks in sendpage/sendmsg
+ * also present in sendmsg/sendpage. The checks in sendpage/sendmsg
* shall provide an information to the data sender that something is
* wrong, but they are irrelevant to maintain the kernel integrity.
* We need this check here too in case user space decides to not honor
@@ -291,19 +291,20 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
areq->outlen = outlen;
aead_request_set_callback(&areq->cra_u.aead_req,
- CRYPTO_TFM_REQ_MAY_BACKLOG,
+ CRYPTO_TFM_REQ_MAY_SLEEP,
af_alg_async_cb, areq);
err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) :
crypto_aead_decrypt(&areq->cra_u.aead_req);
/* AIO operation in progress */
- if (err == -EINPROGRESS || err == -EBUSY)
+ if (err == -EINPROGRESS)
return -EIOCBQUEUED;
sock_put(sk);
} else {
/* Synchronous operation */
aead_request_set_callback(&areq->cra_u.aead_req,
+ CRYPTO_TFM_REQ_MAY_SLEEP |
CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &ctx->wait);
err = crypto_wait_req(ctx->enc ?
diff --git a/crypto/algif_rng.c b/crypto/algif_rng.c
index 6300e0566dc5..407408c43730 100644
--- a/crypto/algif_rng.c
+++ b/crypto/algif_rng.c
@@ -38,6 +38,7 @@
* DAMAGE.
*/
+#include <linux/capability.h>
#include <linux/module.h>
#include <crypto/rng.h>
#include <linux/random.h>
@@ -53,15 +54,26 @@ struct rng_ctx {
#define MAXSIZE 128
unsigned int len;
struct crypto_rng *drng;
+ u8 *addtl;
+ size_t addtl_len;
};
-static int rng_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
- int flags)
+struct rng_parent_ctx {
+ struct crypto_rng *drng;
+ u8 *entropy;
+};
+
+static void rng_reset_addtl(struct rng_ctx *ctx)
{
- struct sock *sk = sock->sk;
- struct alg_sock *ask = alg_sk(sk);
- struct rng_ctx *ctx = ask->private;
- int err;
+ kfree_sensitive(ctx->addtl);
+ ctx->addtl = NULL;
+ ctx->addtl_len = 0;
+}
+
+static int _rng_recvmsg(struct crypto_rng *drng, struct msghdr *msg, size_t len,
+ u8 *addtl, size_t addtl_len)
+{
+ int err = 0;
int genlen = 0;
u8 result[MAXSIZE];
@@ -82,7 +94,7 @@ static int rng_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
* seeding as they automatically seed. The X9.31 DRNG will return
* an error if it was not seeded properly.
*/
- genlen = crypto_rng_get_bytes(ctx->drng, result, len);
+ genlen = crypto_rng_generate(drng, addtl, addtl_len, result, len);
if (genlen < 0)
return genlen;
@@ -92,6 +104,63 @@ static int rng_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
return err ? err : len;
}
+static int rng_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ int flags)
+{
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+ struct rng_ctx *ctx = ask->private;
+
+ return _rng_recvmsg(ctx->drng, msg, len, NULL, 0);
+}
+
+static int rng_test_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ int flags)
+{
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+ struct rng_ctx *ctx = ask->private;
+ int ret;
+
+ lock_sock(sock->sk);
+ ret = _rng_recvmsg(ctx->drng, msg, len, ctx->addtl, ctx->addtl_len);
+ rng_reset_addtl(ctx);
+ release_sock(sock->sk);
+
+ return ret;
+}
+
+static int rng_test_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+{
+ int err;
+ struct alg_sock *ask = alg_sk(sock->sk);
+ struct rng_ctx *ctx = ask->private;
+
+ lock_sock(sock->sk);
+ if (len > MAXSIZE) {
+ err = -EMSGSIZE;
+ goto unlock;
+ }
+
+ rng_reset_addtl(ctx);
+ ctx->addtl = kmalloc(len, GFP_KERNEL);
+ if (!ctx->addtl) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ err = memcpy_from_msg(ctx->addtl, msg, len);
+ if (err) {
+ rng_reset_addtl(ctx);
+ goto unlock;
+ }
+ ctx->addtl_len = len;
+
+unlock:
+ release_sock(sock->sk);
+ return err ? err : len;
+}
+
static struct proto_ops algif_rng_ops = {
.family = PF_ALG,
@@ -111,14 +180,53 @@ static struct proto_ops algif_rng_ops = {
.recvmsg = rng_recvmsg,
};
+static struct proto_ops __maybe_unused algif_rng_test_ops = {
+ .family = PF_ALG,
+
+ .connect = sock_no_connect,
+ .socketpair = sock_no_socketpair,
+ .getname = sock_no_getname,
+ .ioctl = sock_no_ioctl,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .mmap = sock_no_mmap,
+ .bind = sock_no_bind,
+ .accept = sock_no_accept,
+ .sendpage = sock_no_sendpage,
+
+ .release = af_alg_release,
+ .recvmsg = rng_test_recvmsg,
+ .sendmsg = rng_test_sendmsg,
+};
+
static void *rng_bind(const char *name, u32 type, u32 mask)
{
- return crypto_alloc_rng(name, type, mask);
+ struct rng_parent_ctx *pctx;
+ struct crypto_rng *rng;
+
+ pctx = kzalloc(sizeof(*pctx), GFP_KERNEL);
+ if (!pctx)
+ return ERR_PTR(-ENOMEM);
+
+ rng = crypto_alloc_rng(name, type, mask);
+ if (IS_ERR(rng)) {
+ kfree(pctx);
+ return ERR_CAST(rng);
+ }
+
+ pctx->drng = rng;
+ return pctx;
}
static void rng_release(void *private)
{
- crypto_free_rng(private);
+ struct rng_parent_ctx *pctx = private;
+
+ if (unlikely(!pctx))
+ return;
+ crypto_free_rng(pctx->drng);
+ kfree_sensitive(pctx->entropy);
+ kfree_sensitive(pctx);
}
static void rng_sock_destruct(struct sock *sk)
@@ -126,6 +234,7 @@ static void rng_sock_destruct(struct sock *sk)
struct alg_sock *ask = alg_sk(sk);
struct rng_ctx *ctx = ask->private;
+ rng_reset_addtl(ctx);
sock_kfree_s(sk, ctx, ctx->len);
af_alg_release_parent(sk);
}
@@ -133,6 +242,7 @@ static void rng_sock_destruct(struct sock *sk)
static int rng_accept_parent(void *private, struct sock *sk)
{
struct rng_ctx *ctx;
+ struct rng_parent_ctx *pctx = private;
struct alg_sock *ask = alg_sk(sk);
unsigned int len = sizeof(*ctx);
@@ -141,6 +251,8 @@ static int rng_accept_parent(void *private, struct sock *sk)
return -ENOMEM;
ctx->len = len;
+ ctx->addtl = NULL;
+ ctx->addtl_len = 0;
/*
* No seeding done at that point -- if multiple accepts are
@@ -148,20 +260,58 @@ static int rng_accept_parent(void *private, struct sock *sk)
* state of the RNG.
*/
- ctx->drng = private;
+ ctx->drng = pctx->drng;
ask->private = ctx;
sk->sk_destruct = rng_sock_destruct;
+ /*
+ * Non NULL pctx->entropy means that CAVP test has been initiated on
+ * this socket, replace proto_ops algif_rng_ops with algif_rng_test_ops.
+ */
+ if (IS_ENABLED(CONFIG_CRYPTO_USER_API_RNG_CAVP) && pctx->entropy)
+ sk->sk_socket->ops = &algif_rng_test_ops;
+
return 0;
}
static int rng_setkey(void *private, const u8 *seed, unsigned int seedlen)
{
+ struct rng_parent_ctx *pctx = private;
/*
* Check whether seedlen is of sufficient size is done in RNG
* implementations.
*/
- return crypto_rng_reset(private, seed, seedlen);
+ return crypto_rng_reset(pctx->drng, seed, seedlen);
+}
+
+static int __maybe_unused rng_setentropy(void *private, sockptr_t entropy,
+ unsigned int len)
+{
+ struct rng_parent_ctx *pctx = private;
+ u8 *kentropy = NULL;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (pctx->entropy)
+ return -EINVAL;
+
+ if (len > MAXSIZE)
+ return -EMSGSIZE;
+
+ if (len) {
+ kentropy = memdup_sockptr(entropy, len);
+ if (IS_ERR(kentropy))
+ return PTR_ERR(kentropy);
+ }
+
+ crypto_rng_alg(pctx->drng)->set_ent(pctx->drng, kentropy, len);
+ /*
+ * Since rng doesn't perform any memory management for the entropy
+ * buffer, save kentropy pointer to pctx now to free it after use.
+ */
+ pctx->entropy = kentropy;
+ return 0;
}
static const struct af_alg_type algif_type_rng = {
@@ -169,6 +319,9 @@ static const struct af_alg_type algif_type_rng = {
.release = rng_release,
.accept = rng_accept_parent,
.setkey = rng_setkey,
+#ifdef CONFIG_CRYPTO_USER_API_RNG_CAVP
+ .setentropy = rng_setentropy,
+#endif
.ops = &algif_rng_ops,
.name = "rng",
.owner = THIS_MODULE
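The algif_rng.c changes above add the CAVP test path: setting ALG_SET_DRBG_ENTROPY on the parent socket (CAP_SYS_ADMIN only) makes rng_accept_parent() switch the op socket to algif_rng_test_ops, so additional data can be fed in with sendmsg() and the DRBG output read back. A minimal user-space sketch follows; it assumes a kernel with CONFIG_CRYPTO_USER_API_RNG_CAVP=y and a <linux/if_alg.h> that already carries ALG_SET_DRBG_ENTROPY -- the DRBG name, buffer sizes and all-zero test values are arbitrary, and error handling is omitted.

/* Hedged user-space sketch of the CAVP DRBG flow over AF_ALG. */
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif
#ifndef ALG_SET_DRBG_ENTROPY
#define ALG_SET_DRBG_ENTROPY 6		/* value from the updated uapi header */
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "rng",
		.salg_name   = "drbg_nopr_hmac_sha256",	/* example DRBG */
	};
	unsigned char entropy[48] = { 0 };	/* CAVP test entropy, not random */
	unsigned char addtl[16]   = { 0 };	/* additional data for generate */
	unsigned char out[128];			/* <= MAXSIZE (128) per request */
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));

	/* Must happen before accept(): a non-NULL entropy buffer is what
	 * flips the child socket over to the rng_test_* ops. */
	setsockopt(tfmfd, SOL_ALG, ALG_SET_DRBG_ENTROPY, entropy,
		   sizeof(entropy));

	opfd = accept(tfmfd, NULL, 0);
	send(opfd, addtl, sizeof(addtl), 0);	/* consumed by rng_test_sendmsg() */
	read(opfd, out, sizeof(out));		/* generate with additional data */

	close(opfd);
	close(tfmfd);
	return 0;
}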
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 478f3b8f5bd5..ee8890ee8f33 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -123,7 +123,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);
/* AIO operation in progress */
- if (err == -EINPROGRESS || err == -EBUSY)
+ if (err == -EINPROGRESS)
return -EIOCBQUEUED;
sock_put(sk);
diff --git a/crypto/arc4.c b/crypto/arc4.c
index aa79571dbd49..3254dcc34368 100644
--- a/crypto/arc4.c
+++ b/crypto/arc4.c
@@ -11,7 +11,9 @@
#include <crypto/arc4.h>
#include <crypto/internal/skcipher.h>
#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/sched.h>
static int crypto_arc4_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
@@ -39,6 +41,14 @@ static int crypto_arc4_crypt(struct skcipher_request *req)
return err;
}
+static int crypto_arc4_init(struct crypto_skcipher *tfm)
+{
+ pr_warn_ratelimited("\"%s\" (%ld) uses obsolete ecb(arc4) skcipher\n",
+ current->comm, (unsigned long)current->pid);
+
+ return 0;
+}
+
static struct skcipher_alg arc4_alg = {
/*
* For legacy reasons, this is named "ecb(arc4)", not "arc4".
@@ -55,6 +65,7 @@ static struct skcipher_alg arc4_alg = {
.setkey = crypto_arc4_setkey,
.encrypt = crypto_arc4_crypt,
.decrypt = crypto_arc4_crypt,
+ .init = crypto_arc4_init,
};
static int __init arc4_init(void)
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index d8410ffd7f12..8892908ad58c 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -17,6 +17,8 @@
#include <keys/asymmetric-subtype.h>
#include <crypto/public_key.h>
#include <crypto/akcipher.h>
+#include <crypto/sm2.h>
+#include <crypto/sm3_base.h>
MODULE_DESCRIPTION("In-software asymmetric public-key subtype");
MODULE_AUTHOR("Red Hat, Inc.");
@@ -246,6 +248,61 @@ error_free_tfm:
return ret;
}
+#if IS_REACHABLE(CONFIG_CRYPTO_SM2)
+static int cert_sig_digest_update(const struct public_key_signature *sig,
+ struct crypto_akcipher *tfm_pkey)
+{
+ struct crypto_shash *tfm;
+ struct shash_desc *desc;
+ size_t desc_size;
+ unsigned char dgst[SM3_DIGEST_SIZE];
+ int ret;
+
+ BUG_ON(!sig->data);
+
+ ret = sm2_compute_z_digest(tfm_pkey, SM2_DEFAULT_USERID,
+ SM2_DEFAULT_USERID_LEN, dgst);
+ if (ret)
+ return ret;
+
+ tfm = crypto_alloc_shash(sig->hash_algo, 0, 0);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
+ desc = kzalloc(desc_size, GFP_KERNEL);
+ if (!desc) {
+ ret = -ENOMEM;
+ goto error_free_tfm;
+ }
+
+ desc->tfm = tfm;
+
+ ret = crypto_shash_init(desc);
+ if (ret < 0)
+ goto error_free_desc;
+
+ ret = crypto_shash_update(desc, dgst, SM3_DIGEST_SIZE);
+ if (ret < 0)
+ goto error_free_desc;
+
+ ret = crypto_shash_finup(desc, sig->data, sig->data_size, sig->digest);
+
+error_free_desc:
+ kfree(desc);
+error_free_tfm:
+ crypto_free_shash(tfm);
+ return ret;
+}
+#else
+static inline int cert_sig_digest_update(
+ const struct public_key_signature *sig,
+ struct crypto_akcipher *tfm_pkey)
+{
+ return -ENOTSUPP;
+}
+#endif /* ! IS_REACHABLE(CONFIG_CRYPTO_SM2) */
+
/*
* Verify a signature using a public key.
*/
@@ -299,6 +356,12 @@ int public_key_verify_signature(const struct public_key *pkey,
if (ret)
goto error_free_key;
+ if (strcmp(sig->pkey_algo, "sm2") == 0 && sig->data_size) {
+ ret = cert_sig_digest_update(sig, tfm);
+ if (ret)
+ goto error_free_key;
+ }
+
sg_init_table(src_sg, 2);
sg_set_buf(&src_sg[0], sig->s, sig->s_size);
sg_set_buf(&src_sg[1], sig->digest, sig->digest_size);
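For SM2 certificates, cert_sig_digest_update() above rebuilds the value that was actually signed: not SM3 of the TBSCertificate alone, but SM3 over the signer's identity digest Z_A followed by the message. Restating the code (and the draft referenced in the Kconfig help) in symbols, the digest placed in sig->digest is

	Z_A = \mathrm{SM3}(\mathrm{ENTL}_A \| \mathrm{ID}_A \| a \| b \| x_G \| y_G \| x_A \| y_A)
	e   = \mathrm{SM3}(Z_A \| M)

where ID_A is SM2_DEFAULT_USERID and ENTL_A its bit length, a and b are the curve coefficients, (x_G, y_G) the base point, (x_A, y_A) the public key taken from the certificate, and M the sig->data/sig->data_size pair that x509_get_sig_params() now fills in (see the x509_public_key.c hunk further down).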
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index 26ec20ef4899..52c9b455fc7d 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -234,6 +234,10 @@ int x509_note_pkey_algo(void *context, size_t hdrlen,
case OID_gost2012Signature512:
ctx->cert->sig->hash_algo = "streebog512";
goto ecrdsa;
+
+ case OID_SM2_with_SM3:
+ ctx->cert->sig->hash_algo = "sm3";
+ goto sm2;
}
rsa_pkcs1:
@@ -246,6 +250,11 @@ ecrdsa:
ctx->cert->sig->encoding = "raw";
ctx->algo_oid = ctx->last_oid;
return 0;
+sm2:
+ ctx->cert->sig->pkey_algo = "sm2";
+ ctx->cert->sig->encoding = "raw";
+ ctx->algo_oid = ctx->last_oid;
+ return 0;
}
/*
@@ -266,7 +275,8 @@ int x509_note_signature(void *context, size_t hdrlen,
}
if (strcmp(ctx->cert->sig->pkey_algo, "rsa") == 0 ||
- strcmp(ctx->cert->sig->pkey_algo, "ecrdsa") == 0) {
+ strcmp(ctx->cert->sig->pkey_algo, "ecrdsa") == 0 ||
+ strcmp(ctx->cert->sig->pkey_algo, "sm2") == 0) {
/* Discard the BIT STRING metadata */
if (vlen < 1 || *(const u8 *)value != 0)
return -EBADMSG;
@@ -451,13 +461,20 @@ int x509_extract_key_data(void *context, size_t hdrlen,
struct x509_parse_context *ctx = context;
ctx->key_algo = ctx->last_oid;
- if (ctx->last_oid == OID_rsaEncryption)
+ switch (ctx->last_oid) {
+ case OID_rsaEncryption:
ctx->cert->pub->pkey_algo = "rsa";
- else if (ctx->last_oid == OID_gost2012PKey256 ||
- ctx->last_oid == OID_gost2012PKey512)
+ break;
+ case OID_gost2012PKey256:
+ case OID_gost2012PKey512:
ctx->cert->pub->pkey_algo = "ecrdsa";
- else
+ break;
+ case OID_id_ecPublicKey:
+ ctx->cert->pub->pkey_algo = "sm2";
+ break;
+ default:
return -ENOPKG;
+ }
/* Discard the BIT STRING metadata */
if (vlen < 1 || *(const u8 *)value != 0)
diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
index d964cc82b69c..ae450eb8be14 100644
--- a/crypto/asymmetric_keys/x509_public_key.c
+++ b/crypto/asymmetric_keys/x509_public_key.c
@@ -30,6 +30,9 @@ int x509_get_sig_params(struct x509_certificate *cert)
pr_devel("==>%s()\n", __func__);
+ sig->data = cert->tbs;
+ sig->data_size = cert->tbs_size;
+
if (!cert->pub->pkey_algo)
cert->unsupported_key = true;
diff --git a/crypto/cbc.c b/crypto/cbc.c
index e6f6273a7d39..0d9509dff891 100644
--- a/crypto/cbc.c
+++ b/crypto/cbc.c
@@ -6,7 +6,6 @@
*/
#include <crypto/algapi.h>
-#include <crypto/cbc.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -14,34 +13,157 @@
#include <linux/log2.h>
#include <linux/module.h>
-static inline void crypto_cbc_encrypt_one(struct crypto_skcipher *tfm,
- const u8 *src, u8 *dst)
+static int crypto_cbc_encrypt_segment(struct skcipher_walk *walk,
+ struct crypto_skcipher *skcipher)
{
- crypto_cipher_encrypt_one(skcipher_cipher_simple(tfm), dst, src);
+ unsigned int bsize = crypto_skcipher_blocksize(skcipher);
+ void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
+ unsigned int nbytes = walk->nbytes;
+ u8 *src = walk->src.virt.addr;
+ u8 *dst = walk->dst.virt.addr;
+ struct crypto_cipher *cipher;
+ struct crypto_tfm *tfm;
+ u8 *iv = walk->iv;
+
+ cipher = skcipher_cipher_simple(skcipher);
+ tfm = crypto_cipher_tfm(cipher);
+ fn = crypto_cipher_alg(cipher)->cia_encrypt;
+
+ do {
+ crypto_xor(iv, src, bsize);
+ fn(tfm, dst, iv);
+ memcpy(iv, dst, bsize);
+
+ src += bsize;
+ dst += bsize;
+ } while ((nbytes -= bsize) >= bsize);
+
+ return nbytes;
+}
+
+static int crypto_cbc_encrypt_inplace(struct skcipher_walk *walk,
+ struct crypto_skcipher *skcipher)
+{
+ unsigned int bsize = crypto_skcipher_blocksize(skcipher);
+ void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
+ unsigned int nbytes = walk->nbytes;
+ u8 *src = walk->src.virt.addr;
+ struct crypto_cipher *cipher;
+ struct crypto_tfm *tfm;
+ u8 *iv = walk->iv;
+
+ cipher = skcipher_cipher_simple(skcipher);
+ tfm = crypto_cipher_tfm(cipher);
+ fn = crypto_cipher_alg(cipher)->cia_encrypt;
+
+ do {
+ crypto_xor(src, iv, bsize);
+ fn(tfm, src, src);
+ iv = src;
+
+ src += bsize;
+ } while ((nbytes -= bsize) >= bsize);
+
+ memcpy(walk->iv, iv, bsize);
+
+ return nbytes;
}
static int crypto_cbc_encrypt(struct skcipher_request *req)
{
- return crypto_cbc_encrypt_walk(req, crypto_cbc_encrypt_one);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct skcipher_walk walk;
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ while (walk.nbytes) {
+ if (walk.src.virt.addr == walk.dst.virt.addr)
+ err = crypto_cbc_encrypt_inplace(&walk, skcipher);
+ else
+ err = crypto_cbc_encrypt_segment(&walk, skcipher);
+ err = skcipher_walk_done(&walk, err);
+ }
+
+ return err;
+}
+
+static int crypto_cbc_decrypt_segment(struct skcipher_walk *walk,
+ struct crypto_skcipher *skcipher)
+{
+ unsigned int bsize = crypto_skcipher_blocksize(skcipher);
+ void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
+ unsigned int nbytes = walk->nbytes;
+ u8 *src = walk->src.virt.addr;
+ u8 *dst = walk->dst.virt.addr;
+ struct crypto_cipher *cipher;
+ struct crypto_tfm *tfm;
+ u8 *iv = walk->iv;
+
+ cipher = skcipher_cipher_simple(skcipher);
+ tfm = crypto_cipher_tfm(cipher);
+ fn = crypto_cipher_alg(cipher)->cia_decrypt;
+
+ do {
+ fn(tfm, dst, src);
+ crypto_xor(dst, iv, bsize);
+ iv = src;
+
+ src += bsize;
+ dst += bsize;
+ } while ((nbytes -= bsize) >= bsize);
+
+ memcpy(walk->iv, iv, bsize);
+
+ return nbytes;
}
-static inline void crypto_cbc_decrypt_one(struct crypto_skcipher *tfm,
- const u8 *src, u8 *dst)
+static int crypto_cbc_decrypt_inplace(struct skcipher_walk *walk,
+ struct crypto_skcipher *skcipher)
{
- crypto_cipher_decrypt_one(skcipher_cipher_simple(tfm), dst, src);
+ unsigned int bsize = crypto_skcipher_blocksize(skcipher);
+ void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
+ unsigned int nbytes = walk->nbytes;
+ u8 *src = walk->src.virt.addr;
+ u8 last_iv[MAX_CIPHER_BLOCKSIZE];
+ struct crypto_cipher *cipher;
+ struct crypto_tfm *tfm;
+
+ cipher = skcipher_cipher_simple(skcipher);
+ tfm = crypto_cipher_tfm(cipher);
+ fn = crypto_cipher_alg(cipher)->cia_decrypt;
+
+ /* Start of the last block. */
+ src += nbytes - (nbytes & (bsize - 1)) - bsize;
+ memcpy(last_iv, src, bsize);
+
+ for (;;) {
+ fn(tfm, src, src);
+ if ((nbytes -= bsize) < bsize)
+ break;
+ crypto_xor(src, src - bsize, bsize);
+ src -= bsize;
+ }
+
+ crypto_xor(src, walk->iv, bsize);
+ memcpy(walk->iv, last_iv, bsize);
+
+ return nbytes;
}
static int crypto_cbc_decrypt(struct skcipher_request *req)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
struct skcipher_walk walk;
int err;
err = skcipher_walk_virt(&walk, req, false);
while (walk.nbytes) {
- err = crypto_cbc_decrypt_blocks(&walk, tfm,
- crypto_cbc_decrypt_one);
+ if (walk.src.virt.addr == walk.dst.virt.addr)
+ err = crypto_cbc_decrypt_inplace(&walk, skcipher);
+ else
+ err = crypto_cbc_decrypt_segment(&walk, skcipher);
err = skcipher_walk_done(&walk, err);
}
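The cbc.c rewrite open-codes the standard CBC recurrences instead of going through the crypto_cbc_* helpers from <crypto/cbc.h>. As a restatement only, with E_K/D_K the underlying block cipher and C_0 = IV:

	C_i = E_K(P_i \oplus C_{i-1})		\text{(crypto_cbc_encrypt_segment/inplace)}
	P_i = D_K(C_i) \oplus C_{i-1}		\text{(crypto_cbc_decrypt_segment/inplace)}

Encryption chains forwards even in place, since each output block immediately becomes the next chaining value. In-place decryption of block i needs the original C_{i-1}, which a forward pass would already have overwritten, so crypto_cbc_decrypt_inplace() stashes the final ciphertext block in last_iv and walks backwards, XORing each decrypted block with the still-intact preceding ciphertext before finishing with walk->iv.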
diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c
index 7fa9b0788685..768614738541 100644
--- a/crypto/crc32c_generic.c
+++ b/crypto/crc32c_generic.c
@@ -15,7 +15,7 @@
* pages = {},
* month = {June},
*}
- * Used by the iSCSI driver, possibly others, and derived from the
+ * Used by the iSCSI driver, possibly others, and derived from
* the iscsi-crc.c module of the linux-iscsi driver at
* http://linux-iscsi.sourceforge.net.
*
@@ -50,7 +50,7 @@ struct chksum_desc_ctx {
};
/*
- * Steps through buffer one byte at at time, calculates reflected
+ * Steps through buffer one byte at a time, calculates reflected
* crc using table.
*/
diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c
index d90c0070710e..e843982073bb 100644
--- a/crypto/crct10dif_generic.c
+++ b/crypto/crct10dif_generic.c
@@ -35,7 +35,7 @@ struct chksum_desc_ctx {
};
/*
- * Steps through buffer one byte at at time, calculates reflected
+ * Steps through buffer one byte at a time, calculates reflected
* crc using table.
*/
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index 198a8eb1cd56..cff21f4e03e3 100644
--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c
@@ -9,6 +9,7 @@
#include <linux/err.h>
#include <linux/delay.h>
+#include <linux/device.h>
#include <crypto/engine.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"
@@ -465,7 +466,7 @@ EXPORT_SYMBOL_GPL(crypto_engine_stop);
* crypto-engine queue.
* @dev: the device attached with one hardware engine
* @retry_support: whether hardware has support for retry mechanism
- * @cbk_do_batch: pointer to a callback function to be invoked when executing a
+ * @cbk_do_batch: pointer to a callback function to be invoked when executing
* a batch of requests.
* This has the form:
* callback(struct crypto_engine *engine)
diff --git a/crypto/ecrdsa.c b/crypto/ecrdsa.c
index 887ec21aee49..6a3fd09057d0 100644
--- a/crypto/ecrdsa.c
+++ b/crypto/ecrdsa.c
@@ -22,6 +22,7 @@
#include <crypto/internal/akcipher.h>
#include <crypto/akcipher.h>
#include <linux/oid_registry.h>
+#include <linux/scatterlist.h>
#include "ecrdsa_params.asn1.h"
#include "ecrdsa_pub_key.asn1.h"
#include "ecc.h"
diff --git a/crypto/internal.h b/crypto/internal.h
index 1b92a5a61852..976ec9dfc76d 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -10,16 +10,14 @@
#include <crypto/algapi.h>
#include <linux/completion.h>
-#include <linux/mm.h>
-#include <linux/highmem.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
-#include <linux/kernel.h>
#include <linux/notifier.h>
+#include <linux/numa.h>
+#include <linux/refcount.h>
#include <linux/rwsem.h>
-#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/types.h>
struct crypto_instance;
struct crypto_template;
@@ -140,5 +138,11 @@ static inline void crypto_notify(unsigned long val, void *v)
blocking_notifier_call_chain(&crypto_chain, val, v);
}
+static inline void crypto_yield(u32 flags)
+{
+ if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
+ cond_resched();
+}
+
#endif /* _CRYPTO_INTERNAL_H */
diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c
index eb7d1dd506bf..e8a4165a1874 100644
--- a/crypto/jitterentropy-kcapi.c
+++ b/crypto/jitterentropy-kcapi.c
@@ -37,11 +37,11 @@
* DAMAGE.
*/
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fips.h>
#include <linux/time.h>
-#include <linux/crypto.h>
#include <crypto/internal/rng.h>
#include "jitterentropy.h"
diff --git a/crypto/proc.c b/crypto/proc.c
index 08d8c2bc7e62..12fccb9c5205 100644
--- a/crypto/proc.c
+++ b/crypto/proc.c
@@ -36,7 +36,7 @@ static void c_stop(struct seq_file *m, void *p)
static int c_show(struct seq_file *m, void *p)
{
struct crypto_alg *alg = list_entry(p, struct crypto_alg, cra_list);
-
+
seq_printf(m, "name : %s\n", alg->cra_name);
seq_printf(m, "driver : %s\n", alg->cra_driver_name);
seq_printf(m, "module : %s\n", module_name(alg->cra_module));
@@ -59,7 +59,7 @@ static int c_show(struct seq_file *m, void *p)
alg->cra_type->show(m, alg);
goto out;
}
-
+
switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_CIPHER:
seq_printf(m, "type : cipher\n");
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index ddd3d10ffc15..8ac3e73e8ea6 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>
+#include <linux/scatterlist.h>
/*
* Hash algorithm OIDs plus ASN.1 DER wrappings [RFC4880 sec 5.2.2].
diff --git a/crypto/sm2.c b/crypto/sm2.c
new file mode 100644
index 000000000000..767e160333f6
--- /dev/null
+++ b/crypto/sm2.c
@@ -0,0 +1,481 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * SM2 asymmetric public-key algorithm
+ * as specified by OSCCA GM/T 0003.1-2012 -- 0003.5-2012 SM2 and
+ * described at https://tools.ietf.org/html/draft-shen-sm2-ecdsa-02
+ *
+ * Copyright (c) 2020, Alibaba Group.
+ * Authors: Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+ */
+
+#include <linux/module.h>
+#include <linux/mpi.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/akcipher.h>
+#include <crypto/hash.h>
+#include <crypto/sm3_base.h>
+#include <crypto/rng.h>
+#include <crypto/sm2.h>
+#include "sm2signature.asn1.h"
+
+#define MPI_NBYTES(m) ((mpi_get_nbits(m) + 7) / 8)
+
+struct ecc_domain_parms {
+ const char *desc; /* Description of the curve. */
+ unsigned int nbits; /* Number of bits. */
+ unsigned int fips:1; /* True if this is a FIPS140-2 approved curve */
+
+ /* The model describing this curve. This is mainly used to select
+ * the group equation.
+ */
+ enum gcry_mpi_ec_models model;
+
+ /* The actual ECC dialect used. This is used for curve specific
+ * optimizations and to select encodings etc.
+ */
+ enum ecc_dialects dialect;
+
+ const char *p; /* The prime defining the field. */
+ const char *a, *b; /* The coefficients. For Twisted Edwards
+ * Curves b is used for d. For Montgomery
+ * Curves (a,b) has ((A-2)/4,B^-1).
+ */
+ const char *n; /* The order of the base point. */
+ const char *g_x, *g_y; /* Base point. */
+ unsigned int h; /* Cofactor. */
+};
+
+static const struct ecc_domain_parms sm2_ecp = {
+ .desc = "sm2p256v1",
+ .nbits = 256,
+ .fips = 0,
+ .model = MPI_EC_WEIERSTRASS,
+ .dialect = ECC_DIALECT_STANDARD,
+ .p = "0xfffffffeffffffffffffffffffffffffffffffff00000000ffffffffffffffff",
+ .a = "0xfffffffeffffffffffffffffffffffffffffffff00000000fffffffffffffffc",
+ .b = "0x28e9fa9e9d9f5e344d5a9e4bcf6509a7f39789f515ab8f92ddbcbd414d940e93",
+ .n = "0xfffffffeffffffffffffffffffffffff7203df6b21c6052b53bbf40939d54123",
+ .g_x = "0x32c4ae2c1f1981195f9904466a39c9948fe30bbff2660be1715a4589334c74c7",
+ .g_y = "0xbc3736a2f4f6779c59bdcee36b692153d0a9877cc62a474002df32e52139f0a0",
+ .h = 1
+};
+
+static int sm2_ec_ctx_init(struct mpi_ec_ctx *ec)
+{
+ const struct ecc_domain_parms *ecp = &sm2_ecp;
+ MPI p, a, b;
+ MPI x, y;
+ int rc = -EINVAL;
+
+ p = mpi_scanval(ecp->p);
+ a = mpi_scanval(ecp->a);
+ b = mpi_scanval(ecp->b);
+ if (!p || !a || !b)
+ goto free_p;
+
+ x = mpi_scanval(ecp->g_x);
+ y = mpi_scanval(ecp->g_y);
+ if (!x || !y)
+ goto free;
+
+ rc = -ENOMEM;
+ /* mpi_ec_setup_elliptic_curve */
+ ec->G = mpi_point_new(0);
+ if (!ec->G)
+ goto free;
+
+ mpi_set(ec->G->x, x);
+ mpi_set(ec->G->y, y);
+ mpi_set_ui(ec->G->z, 1);
+
+ rc = -EINVAL;
+ ec->n = mpi_scanval(ecp->n);
+ if (!ec->n) {
+ mpi_point_release(ec->G);
+ goto free;
+ }
+
+ ec->h = ecp->h;
+ ec->name = ecp->desc;
+ mpi_ec_init(ec, ecp->model, ecp->dialect, 0, p, a, b);
+
+ rc = 0;
+
+free:
+ mpi_free(x);
+ mpi_free(y);
+free_p:
+ mpi_free(p);
+ mpi_free(a);
+ mpi_free(b);
+
+ return rc;
+}
+
+static void sm2_ec_ctx_deinit(struct mpi_ec_ctx *ec)
+{
+ mpi_ec_deinit(ec);
+
+ memset(ec, 0, sizeof(*ec));
+}
+
+static int sm2_ec_ctx_reset(struct mpi_ec_ctx *ec)
+{
+ sm2_ec_ctx_deinit(ec);
+ return sm2_ec_ctx_init(ec);
+}
+
+/* RESULT must have been initialized and is set on success to the
+ * point given by VALUE.
+ */
+static int sm2_ecc_os2ec(MPI_POINT result, MPI value)
+{
+ int rc;
+ size_t n;
+ const unsigned char *buf;
+ unsigned char *buf_memory;
+ MPI x, y;
+
+ n = (mpi_get_nbits(value)+7)/8;
+ buf_memory = kmalloc(n, GFP_KERNEL);
+ rc = mpi_print(GCRYMPI_FMT_USG, buf_memory, n, &n, value);
+ if (rc) {
+ kfree(buf_memory);
+ return rc;
+ }
+ buf = buf_memory;
+
+ if (n < 1) {
+ kfree(buf_memory);
+ return -EINVAL;
+ }
+ if (*buf != 4) {
+ kfree(buf_memory);
+ return -EINVAL; /* No support for point compression. */
+ }
+ if (((n-1)%2)) {
+ kfree(buf_memory);
+ return -EINVAL;
+ }
+ n = (n-1)/2;
+ x = mpi_read_raw_data(buf + 1, n);
+ if (!x) {
+ kfree(buf_memory);
+ return -ENOMEM;
+ }
+ y = mpi_read_raw_data(buf + 1 + n, n);
+ kfree(buf_memory);
+ if (!y) {
+ mpi_free(x);
+ return -ENOMEM;
+ }
+
+ mpi_normalize(x);
+ mpi_normalize(y);
+
+ mpi_set(result->x, x);
+ mpi_set(result->y, y);
+ mpi_set_ui(result->z, 1);
+
+ mpi_free(x);
+ mpi_free(y);
+
+ return 0;
+}
+
+struct sm2_signature_ctx {
+ MPI sig_r;
+ MPI sig_s;
+};
+
+int sm2_get_signature_r(void *context, size_t hdrlen, unsigned char tag,
+ const void *value, size_t vlen)
+{
+ struct sm2_signature_ctx *sig = context;
+
+ if (!value || !vlen)
+ return -EINVAL;
+
+ sig->sig_r = mpi_read_raw_data(value, vlen);
+ if (!sig->sig_r)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int sm2_get_signature_s(void *context, size_t hdrlen, unsigned char tag,
+ const void *value, size_t vlen)
+{
+ struct sm2_signature_ctx *sig = context;
+
+ if (!value || !vlen)
+ return -EINVAL;
+
+ sig->sig_s = mpi_read_raw_data(value, vlen);
+ if (!sig->sig_s)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int sm2_z_digest_update(struct shash_desc *desc,
+ MPI m, unsigned int pbytes)
+{
+ static const unsigned char zero[32];
+ unsigned char *in;
+ unsigned int inlen;
+
+ in = mpi_get_buffer(m, &inlen, NULL);
+ if (!in)
+ return -EINVAL;
+
+ if (inlen < pbytes) {
+ /* padding with zero */
+ crypto_sm3_update(desc, zero, pbytes - inlen);
+ crypto_sm3_update(desc, in, inlen);
+ } else if (inlen > pbytes) {
+ /* skip the starting zero */
+ crypto_sm3_update(desc, in + inlen - pbytes, pbytes);
+ } else {
+ crypto_sm3_update(desc, in, inlen);
+ }
+
+ kfree(in);
+ return 0;
+}
+
+static int sm2_z_digest_update_point(struct shash_desc *desc,
+ MPI_POINT point, struct mpi_ec_ctx *ec, unsigned int pbytes)
+{
+ MPI x, y;
+ int ret = -EINVAL;
+
+ x = mpi_new(0);
+ y = mpi_new(0);
+
+ if (!mpi_ec_get_affine(x, y, point, ec) &&
+ !sm2_z_digest_update(desc, x, pbytes) &&
+ !sm2_z_digest_update(desc, y, pbytes))
+ ret = 0;
+
+ mpi_free(x);
+ mpi_free(y);
+ return ret;
+}
+
+int sm2_compute_z_digest(struct crypto_akcipher *tfm,
+ const unsigned char *id, size_t id_len,
+ unsigned char dgst[SM3_DIGEST_SIZE])
+{
+ struct mpi_ec_ctx *ec = akcipher_tfm_ctx(tfm);
+ uint16_t bits_len;
+ unsigned char entl[2];
+ SHASH_DESC_ON_STACK(desc, NULL);
+ unsigned int pbytes;
+
+ if (id_len > (USHRT_MAX / 8) || !ec->Q)
+ return -EINVAL;
+
+ bits_len = (uint16_t)(id_len * 8);
+ entl[0] = bits_len >> 8;
+ entl[1] = bits_len & 0xff;
+
+ pbytes = MPI_NBYTES(ec->p);
+
+ /* ZA = H256(ENTLA | IDA | a | b | xG | yG | xA | yA) */
+ sm3_base_init(desc);
+ crypto_sm3_update(desc, entl, 2);
+ crypto_sm3_update(desc, id, id_len);
+
+ if (sm2_z_digest_update(desc, ec->a, pbytes) ||
+ sm2_z_digest_update(desc, ec->b, pbytes) ||
+ sm2_z_digest_update_point(desc, ec->G, ec, pbytes) ||
+ sm2_z_digest_update_point(desc, ec->Q, ec, pbytes))
+ return -EINVAL;
+
+ crypto_sm3_final(desc, dgst);
+ return 0;
+}
+EXPORT_SYMBOL(sm2_compute_z_digest);
+
+static int _sm2_verify(struct mpi_ec_ctx *ec, MPI hash, MPI sig_r, MPI sig_s)
+{
+ int rc = -EINVAL;
+ struct gcry_mpi_point sG, tP;
+ MPI t = NULL;
+ MPI x1 = NULL, y1 = NULL;
+
+ mpi_point_init(&sG);
+ mpi_point_init(&tP);
+ x1 = mpi_new(0);
+ y1 = mpi_new(0);
+ t = mpi_new(0);
+
+ /* r, s in [1, n-1] */
+ if (mpi_cmp_ui(sig_r, 1) < 0 || mpi_cmp(sig_r, ec->n) > 0 ||
+ mpi_cmp_ui(sig_s, 1) < 0 || mpi_cmp(sig_s, ec->n) > 0) {
+ goto leave;
+ }
+
+ /* t = (r + s) % n, t == 0 */
+ mpi_addm(t, sig_r, sig_s, ec->n);
+ if (mpi_cmp_ui(t, 0) == 0)
+ goto leave;
+
+ /* sG + tP = (x1, y1) */
+ rc = -EBADMSG;
+ mpi_ec_mul_point(&sG, sig_s, ec->G, ec);
+ mpi_ec_mul_point(&tP, t, ec->Q, ec);
+ mpi_ec_add_points(&sG, &sG, &tP, ec);
+ if (mpi_ec_get_affine(x1, y1, &sG, ec))
+ goto leave;
+
+ /* R = (e + x1) % n */
+ mpi_addm(t, hash, x1, ec->n);
+
+ /* check R == r */
+ rc = -EKEYREJECTED;
+ if (mpi_cmp(t, sig_r))
+ goto leave;
+
+ rc = 0;
+
+leave:
+ mpi_point_free_parts(&sG);
+ mpi_point_free_parts(&tP);
+ mpi_free(x1);
+ mpi_free(y1);
+ mpi_free(t);
+
+ return rc;
+}
+
+static int sm2_verify(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct mpi_ec_ctx *ec = akcipher_tfm_ctx(tfm);
+ unsigned char *buffer;
+ struct sm2_signature_ctx sig;
+ MPI hash;
+ int ret;
+
+ if (unlikely(!ec->Q))
+ return -EINVAL;
+
+ buffer = kmalloc(req->src_len + req->dst_len, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ sg_pcopy_to_buffer(req->src,
+ sg_nents_for_len(req->src, req->src_len + req->dst_len),
+ buffer, req->src_len + req->dst_len, 0);
+
+ sig.sig_r = NULL;
+ sig.sig_s = NULL;
+ ret = asn1_ber_decoder(&sm2signature_decoder, &sig,
+ buffer, req->src_len);
+ if (ret)
+ goto error;
+
+ ret = -ENOMEM;
+ hash = mpi_read_raw_data(buffer + req->src_len, req->dst_len);
+ if (!hash)
+ goto error;
+
+ ret = _sm2_verify(ec, hash, sig.sig_r, sig.sig_s);
+
+ mpi_free(hash);
+error:
+ mpi_free(sig.sig_r);
+ mpi_free(sig.sig_s);
+ kfree(buffer);
+ return ret;
+}
+
+static int sm2_set_pub_key(struct crypto_akcipher *tfm,
+ const void *key, unsigned int keylen)
+{
+ struct mpi_ec_ctx *ec = akcipher_tfm_ctx(tfm);
+ MPI a;
+ int rc;
+
+ rc = sm2_ec_ctx_reset(ec);
+ if (rc)
+ return rc;
+
+ ec->Q = mpi_point_new(0);
+ if (!ec->Q)
+ return -ENOMEM;
+
+ /* include the uncompressed flag '0x04' */
+ rc = -ENOMEM;
+ a = mpi_read_raw_data(key, keylen);
+ if (!a)
+ goto error;
+
+ mpi_normalize(a);
+ rc = sm2_ecc_os2ec(ec->Q, a);
+ mpi_free(a);
+ if (rc)
+ goto error;
+
+ return 0;
+
+error:
+ mpi_point_release(ec->Q);
+ ec->Q = NULL;
+ return rc;
+}
+
+static unsigned int sm2_max_size(struct crypto_akcipher *tfm)
+{
+ /* Unlimited max size */
+ return PAGE_SIZE;
+}
+
+static int sm2_init_tfm(struct crypto_akcipher *tfm)
+{
+ struct mpi_ec_ctx *ec = akcipher_tfm_ctx(tfm);
+
+ return sm2_ec_ctx_init(ec);
+}
+
+static void sm2_exit_tfm(struct crypto_akcipher *tfm)
+{
+ struct mpi_ec_ctx *ec = akcipher_tfm_ctx(tfm);
+
+ sm2_ec_ctx_deinit(ec);
+}
+
+static struct akcipher_alg sm2 = {
+ .verify = sm2_verify,
+ .set_pub_key = sm2_set_pub_key,
+ .max_size = sm2_max_size,
+ .init = sm2_init_tfm,
+ .exit = sm2_exit_tfm,
+ .base = {
+ .cra_name = "sm2",
+ .cra_driver_name = "sm2-generic",
+ .cra_priority = 100,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct mpi_ec_ctx),
+ },
+};
+
+static int sm2_init(void)
+{
+ return crypto_register_akcipher(&sm2);
+}
+
+static void sm2_exit(void)
+{
+ crypto_unregister_akcipher(&sm2);
+}
+
+subsys_initcall(sm2_init);
+module_exit(sm2_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tianjia Zhang <tianjia.zhang@linux.alibaba.com>");
+MODULE_DESCRIPTION("SM2 generic algorithm");
+MODULE_ALIAS_CRYPTO("sm2-generic");
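_sm2_verify() above is the GM/T 0003.2 verification equation expressed with the lib/mpi point routines. Restating its inline comments, with public key P (ec->Q), base point G, group order n, message digest e and signature (r, s):

	r, s \in [1, n-1], \qquad t = (r + s) \bmod n \neq 0
	(x_1, y_1) = [s]G + [t]P
	R = (e + x_1) \bmod n, \qquad \text{accept iff } R = r

A failed range or t check returns -EINVAL, a point at infinity from the scalar sum returns -EBADMSG, and a mismatch of R against r returns -EKEYREJECTED, matching the error codes set in the function.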
diff --git a/crypto/sm2signature.asn1 b/crypto/sm2signature.asn1
new file mode 100644
index 000000000000..ab8c0b754d21
--- /dev/null
+++ b/crypto/sm2signature.asn1
@@ -0,0 +1,4 @@
+Sm2Signature ::= SEQUENCE {
+ sig_r INTEGER ({ sm2_get_signature_r }),
+ sig_s INTEGER ({ sm2_get_signature_s })
+}
diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c
index 3468975215ca..193c4584bd00 100644
--- a/crypto/sm3_generic.c
+++ b/crypto/sm3_generic.c
@@ -149,17 +149,18 @@ int crypto_sm3_update(struct shash_desc *desc, const u8 *data,
}
EXPORT_SYMBOL(crypto_sm3_update);
-static int sm3_final(struct shash_desc *desc, u8 *out)
+int crypto_sm3_final(struct shash_desc *desc, u8 *out)
{
sm3_base_do_finalize(desc, sm3_generic_block_fn);
return sm3_base_finish(desc, out);
}
+EXPORT_SYMBOL(crypto_sm3_final);
int crypto_sm3_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *hash)
{
sm3_base_do_update(desc, data, len, sm3_generic_block_fn);
- return sm3_final(desc, hash);
+ return crypto_sm3_final(desc, hash);
}
EXPORT_SYMBOL(crypto_sm3_finup);
@@ -167,7 +168,7 @@ static struct shash_alg sm3_alg = {
.digestsize = SM3_DIGEST_SIZE,
.init = sm3_base_init,
.update = crypto_sm3_update,
- .final = sm3_final,
+ .final = crypto_sm3_final,
.finup = crypto_sm3_finup,
.descsize = sizeof(struct sm3_state),
.base = {
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 12e82a61b896..eea0f453cfb6 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -63,6 +63,7 @@ static u32 type;
static u32 mask;
static int mode;
static u32 num_mb = 8;
+static unsigned int klen;
static char *tvmem[TVMEMSIZE];
static const char *check[] = {
@@ -398,7 +399,7 @@ static void test_mb_aead_speed(const char *algo, int enc, int secs,
ret = do_one_aead_op(cur->req, ret);
if (ret) {
- pr_err("calculating auth failed failed (%d)\n",
+ pr_err("calculating auth failed (%d)\n",
ret);
break;
}
@@ -648,7 +649,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
crypto_aead_encrypt(req));
if (ret) {
- pr_err("calculating auth failed failed (%d)\n",
+ pr_err("calculating auth failed (%d)\n",
ret);
break;
}
@@ -864,8 +865,8 @@ static void test_mb_ahash_speed(const char *algo, unsigned int secs,
goto out;
}
- if (speed[i].klen)
- crypto_ahash_setkey(tfm, tvmem[0], speed[i].klen);
+ if (klen)
+ crypto_ahash_setkey(tfm, tvmem[0], klen);
for (k = 0; k < num_mb; k++)
ahash_request_set_crypt(data[k].req, data[k].sg,
@@ -1099,8 +1100,8 @@ static void test_ahash_speed_common(const char *algo, unsigned int secs,
break;
}
- if (speed[i].klen)
- crypto_ahash_setkey(tfm, tvmem[0], speed[i].klen);
+ if (klen)
+ crypto_ahash_setkey(tfm, tvmem[0], klen);
pr_info("test%3u "
"(%5u byte blocks,%5u bytes per update,%4u updates): ",
@@ -2418,7 +2419,8 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
if (mode > 300 && mode < 400) break;
fallthrough;
case 318:
- test_hash_speed("ghash-generic", sec, hash_speed_template_16);
+ klen = 16;
+ test_hash_speed("ghash", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
case 319:
@@ -3076,6 +3078,8 @@ MODULE_PARM_DESC(sec, "Length in seconds of speed tests "
"(defaults to zero which uses CPU cycles instead)");
module_param(num_mb, uint, 0000);
MODULE_PARM_DESC(num_mb, "Number of concurrent requests to be used in mb speed tests (defaults to 8)");
+module_param(klen, uint, 0);
+MODULE_PARM_DESC(klen, "Key length (defaults to 0)");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Quick & dirty crypto testing module");
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
index 7e5fea811670..9f654677172a 100644
--- a/crypto/tcrypt.h
+++ b/crypto/tcrypt.h
@@ -25,7 +25,6 @@ struct aead_speed_template {
struct hash_speed {
unsigned int blen; /* buffer length */
unsigned int plen; /* per-update length */
- unsigned int klen; /* key length */
};
/*
@@ -97,34 +96,6 @@ static struct hash_speed generic_hash_speed_template[] = {
{ .blen = 0, .plen = 0, }
};
-static struct hash_speed hash_speed_template_16[] = {
- { .blen = 16, .plen = 16, .klen = 16, },
- { .blen = 64, .plen = 16, .klen = 16, },
- { .blen = 64, .plen = 64, .klen = 16, },
- { .blen = 256, .plen = 16, .klen = 16, },
- { .blen = 256, .plen = 64, .klen = 16, },
- { .blen = 256, .plen = 256, .klen = 16, },
- { .blen = 1024, .plen = 16, .klen = 16, },
- { .blen = 1024, .plen = 256, .klen = 16, },
- { .blen = 1024, .plen = 1024, .klen = 16, },
- { .blen = 2048, .plen = 16, .klen = 16, },
- { .blen = 2048, .plen = 256, .klen = 16, },
- { .blen = 2048, .plen = 1024, .klen = 16, },
- { .blen = 2048, .plen = 2048, .klen = 16, },
- { .blen = 4096, .plen = 16, .klen = 16, },
- { .blen = 4096, .plen = 256, .klen = 16, },
- { .blen = 4096, .plen = 1024, .klen = 16, },
- { .blen = 4096, .plen = 4096, .klen = 16, },
- { .blen = 8192, .plen = 16, .klen = 16, },
- { .blen = 8192, .plen = 256, .klen = 16, },
- { .blen = 8192, .plen = 1024, .klen = 16, },
- { .blen = 8192, .plen = 4096, .klen = 16, },
- { .blen = 8192, .plen = 8192, .klen = 16, },
-
- /* End marker */
- { .blen = 0, .plen = 0, .klen = 0, }
-};
-
static struct hash_speed poly1305_speed_template[] = {
{ .blen = 96, .plen = 16, },
{ .blen = 96, .plen = 32, },
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 23c27fc96394..a64a639eddfa 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -27,6 +27,7 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/uio.h>
#include <crypto/rng.h>
#include <crypto/drbg.h>
#include <crypto/akcipher.h>
@@ -3954,7 +3955,7 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
key = kmalloc(vecs->key_len + sizeof(u32) * 2 + vecs->param_len,
GFP_KERNEL);
if (!key)
- goto free_xbuf;
+ goto free_req;
memcpy(key, vecs->key, vecs->key_len);
ptr = key + vecs->key_len;
ptr = test_pack_u32(ptr, vecs->algo);
@@ -3966,7 +3967,7 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
else
err = crypto_akcipher_set_priv_key(tfm, key, vecs->key_len);
if (err)
- goto free_req;
+ goto free_key;
/*
* First run test which do not require a private key, such as
@@ -3976,7 +3977,7 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
out_len_max = crypto_akcipher_maxsize(tfm);
outbuf_enc = kzalloc(out_len_max, GFP_KERNEL);
if (!outbuf_enc)
- goto free_req;
+ goto free_key;
if (!vecs->siggen_sigver_test) {
m = vecs->m;
@@ -3995,6 +3996,7 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
op = "verify";
}
+ err = -E2BIG;
if (WARN_ON(m_size > PAGE_SIZE))
goto free_all;
memcpy(xbuf[0], m, m_size);
@@ -4025,7 +4027,7 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
pr_err("alg: akcipher: %s test failed. err %d\n", op, err);
goto free_all;
}
- if (!vecs->siggen_sigver_test) {
+ if (!vecs->siggen_sigver_test && c) {
if (req->dst_len != c_size) {
pr_err("alg: akcipher: %s test failed. Invalid output len\n",
op);
@@ -4056,6 +4058,12 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
goto free_all;
}
+ if (!vecs->siggen_sigver_test && !c) {
+ c = outbuf_enc;
+ c_size = req->dst_len;
+ }
+
+ err = -E2BIG;
op = vecs->siggen_sigver_test ? "sign" : "decrypt";
if (WARN_ON(c_size > PAGE_SIZE))
goto free_all;
@@ -4092,9 +4100,10 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
free_all:
kfree(outbuf_dec);
kfree(outbuf_enc);
+free_key:
+ kfree(key);
free_req:
akcipher_request_free(req);
- kfree(key);
free_xbuf:
testmgr_free_buf(xbuf);
return err;
@@ -5377,6 +5386,12 @@ static const struct alg_test_desc alg_test_descs[] = {
.hash = __VECS(sha512_tv_template)
}
}, {
+ .alg = "sm2",
+ .test = alg_test_akcipher,
+ .suite = {
+ .akcipher = __VECS(sm2_tv_template)
+ }
+ }, {
.alg = "sm3",
.test = alg_test_hash,
.suite = {
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index b9a2d73d9f8d..8c83811c0e35 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -3792,6 +3792,65 @@ static const struct hash_testvec hmac_streebog512_tv_template[] = {
},
};
+/*
+ * SM2 test vectors.
+ */
+static const struct akcipher_testvec sm2_tv_template[] = {
+ { /* Generated from openssl */
+ .key =
+ "\x04"
+ "\x8e\xa0\x33\x69\x91\x7e\x3d\xec\xad\x8e\xf0\x45\x5e\x13\x3e\x68"
+ "\x5b\x8c\xab\x5c\xc6\xc8\x50\xdf\x91\x00\xe0\x24\x73\x4d\x31\xf2"
+ "\x2e\xc0\xd5\x6b\xee\xda\x98\x93\xec\xd8\x36\xaa\xb9\xcf\x63\x82"
+ "\xef\xa7\x1a\x03\xed\x16\xba\x74\xb8\x8b\xf9\xe5\x70\x39\xa4\x70",
+ .key_len = 65,
+ .param_len = 0,
+ .c =
+ "\x30\x45"
+ "\x02\x20"
+ "\x70\xab\xb6\x7d\xd6\x54\x80\x64\x42\x7e\x2d\x05\x08\x36\xc9\x96"
+ "\x25\xc2\xbb\xff\x08\xe5\x43\x15\x5e\xf3\x06\xd9\x2b\x2f\x0a\x9f"
+ "\x02\x21"
+ "\x00"
+ "\xbf\x21\x5f\x7e\x5d\x3f\x1a\x4d\x8f\x84\xc2\xe9\xa6\x4c\xa4\x18"
+ "\xb2\xb8\x46\xf4\x32\x96\xfa\x57\xc6\x29\xd4\x89\xae\xcc\xda\xdb",
+ .c_size = 71,
+ .algo = OID_SM2_with_SM3,
+ .m =
+ "\x47\xa7\xbf\xd3\xda\xc4\x79\xee\xda\x8b\x4f\xe8\x40\x94\xd4\x32"
+ "\x8f\xf1\xcd\x68\x4d\xbd\x9b\x1d\xe0\xd8\x9a\x5d\xad\x85\x47\x5c",
+ .m_size = 32,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ },
+ { /* From libgcrypt */
+ .key =
+ "\x04"
+ "\x87\x59\x38\x9a\x34\xaa\xad\x07\xec\xf4\xe0\xc8\xc2\x65\x0a\x44"
+ "\x59\xc8\xd9\x26\xee\x23\x78\x32\x4e\x02\x61\xc5\x25\x38\xcb\x47"
+ "\x75\x28\x10\x6b\x1e\x0b\x7c\x8d\xd5\xff\x29\xa9\xc8\x6a\x89\x06"
+ "\x56\x56\xeb\x33\x15\x4b\xc0\x55\x60\x91\xef\x8a\xc9\xd1\x7d\x78",
+ .key_len = 65,
+ .param_len = 0,
+ .c =
+ "\x30\x44"
+ "\x02\x20"
+ "\xd9\xec\xef\xe8\x5f\xee\x3c\x59\x57\x8e\x5b\xab\xb3\x02\xe1\x42"
+ "\x4b\x67\x2c\x0b\x26\xb6\x51\x2c\x3e\xfc\xc6\x49\xec\xfe\x89\xe5"
+ "\x02\x20"
+ "\x43\x45\xd0\xa5\xff\xe5\x13\x27\x26\xd0\xec\x37\xad\x24\x1e\x9a"
+ "\x71\x9a\xa4\x89\xb0\x7e\x0f\xc4\xbb\x2d\x50\xd0\xe5\x7f\x7a\x68",
+ .c_size = 70,
+ .algo = OID_SM2_with_SM3,
+ .m =
+ "\x11\x22\x33\x44\x55\x66\x77\x88\x99\xaa\xbb\xcc\xdd\xee\xff\x00"
+ "\x12\x34\x56\x78\x9a\xbc\xde\xf0\x12\x34\x56\x78\x9a\xbc\xde\xf0",
+ .m_size = 32,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ },
+};
+
/* Example vectors below taken from
* http://www.oscca.gov.cn/UpFile/20101222141857786.pdf
*
diff --git a/crypto/xor.c b/crypto/xor.c
index ea7349e6ed23..eacbf4f93990 100644
--- a/crypto/xor.c
+++ b/crypto/xor.c
@@ -54,49 +54,63 @@ EXPORT_SYMBOL(xor_blocks);
/* Set of all registered templates. */
static struct xor_block_template *__initdata template_list;
-#define BENCH_SIZE (PAGE_SIZE)
+#ifndef MODULE
+static void __init do_xor_register(struct xor_block_template *tmpl)
+{
+ tmpl->next = template_list;
+ template_list = tmpl;
+}
+
+static int __init register_xor_blocks(void)
+{
+ active_template = XOR_SELECT_TEMPLATE(NULL);
+
+ if (!active_template) {
+#define xor_speed do_xor_register
+ // register all the templates and pick the first as the default
+ XOR_TRY_TEMPLATES;
+#undef xor_speed
+ active_template = template_list;
+ }
+ return 0;
+}
+#endif
+
+#define BENCH_SIZE 4096
+#define REPS 800U
static void __init
do_xor_speed(struct xor_block_template *tmpl, void *b1, void *b2)
{
int speed;
- unsigned long now, j;
- int i, count, max;
+ int i, j;
+ ktime_t min, start, diff;
tmpl->next = template_list;
template_list = tmpl;
preempt_disable();
- /*
- * Count the number of XORs done during a whole jiffy, and use
- * this to calculate the speed of checksumming. We use a 2-page
- * allocation to have guaranteed color L1-cache layout.
- */
- max = 0;
- for (i = 0; i < 5; i++) {
- j = jiffies;
- count = 0;
- while ((now = jiffies) == j)
- cpu_relax();
- while (time_before(jiffies, now + 1)) {
+ min = (ktime_t)S64_MAX;
+ for (i = 0; i < 3; i++) {
+ start = ktime_get();
+ for (j = 0; j < REPS; j++) {
mb(); /* prevent loop optimzation */
tmpl->do_2(BENCH_SIZE, b1, b2);
mb();
- count++;
- mb();
}
- if (count > max)
- max = count;
+ diff = ktime_sub(ktime_get(), start);
+ if (diff < min)
+ min = diff;
}
preempt_enable();
- speed = max * (HZ * BENCH_SIZE / 1024);
+ // bytes/ns == GB/s, multiply by 1000 to get MB/s [not MiB/s]
+ speed = (1000 * REPS * BENCH_SIZE) / (unsigned int)ktime_to_ns(min);
tmpl->speed = speed;
- printk(KERN_INFO " %-10s: %5d.%03d MB/sec\n", tmpl->name,
- speed / 1000, speed % 1000);
+ pr_info(" %-16s: %5d MB/sec\n", tmpl->name, speed);
}
static int __init
@@ -129,14 +143,15 @@ calibrate_xor_blocks(void)
#define xor_speed(templ) do_xor_speed((templ), b1, b2)
printk(KERN_INFO "xor: measuring software checksum speed\n");
+ template_list = NULL;
XOR_TRY_TEMPLATES;
fastest = template_list;
for (f = fastest; f; f = f->next)
if (f->speed > fastest->speed)
fastest = f;
- printk(KERN_INFO "xor: using function: %s (%d.%03d MB/sec)\n",
- fastest->name, fastest->speed / 1000, fastest->speed % 1000);
+ pr_info("xor: using function: %s (%d MB/sec)\n",
+ fastest->name, fastest->speed);
#undef xor_speed
@@ -150,6 +165,10 @@ static __exit void xor_exit(void) { }
MODULE_LICENSE("GPL");
+#ifndef MODULE
/* when built-in xor.o must initialize before drivers/md/md.o */
-core_initcall(calibrate_xor_blocks);
+core_initcall(register_xor_blocks);
+#endif
+
+module_init(calibrate_xor_blocks);
module_exit(xor_exit);
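With the ktime-based benchmark above, each template is timed over three batches of REPS = 800 calls on BENCH_SIZE = 4096 bytes and the fastest batch is kept. A worked example of the reported figure (the 200,000 ns timing is made up purely for illustration):

	\mathrm{speed} = \frac{1000 \cdot \mathrm{REPS} \cdot \mathrm{BENCH\_SIZE}}{\min_i \Delta t_i}
	               = \frac{1000 \cdot 800 \cdot 4096}{200000\ \mathrm{ns}} = 16384\ \mathrm{MB/s}

That is bytes per nanosecond scaled by 1000, so the result is in decimal MB/s (not MiB/s), as the new comment in do_xor_speed() points out.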