Diffstat (limited to 'crypto')
-rw-r--r--  crypto/Kconfig            18
-rw-r--r--  crypto/Makefile            1
-rw-r--r--  crypto/ablkcipher.c        7
-rw-r--r--  crypto/aead.c              3
-rw-r--r--  crypto/af_alg.c           51
-rw-r--r--  crypto/ahash.c             3
-rw-r--r--  crypto/algapi.c            1
-rw-r--r--  crypto/algif_hash.c       45
-rw-r--r--  crypto/algif_rng.c       192
-rw-r--r--  crypto/algif_skcipher.c   81
-rw-r--r--  crypto/cts.c               5
-rw-r--r--  crypto/drbg.c             34
-rw-r--r--  crypto/scatterwalk.c       6
-rw-r--r--  crypto/seqiv.c            12
-rw-r--r--  crypto/tcrypt.c           37
-rw-r--r--  crypto/testmgr.c          58
16 files changed, 376 insertions, 178 deletions
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 87bbc9c1e681..50f4da44a304 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -427,6 +427,15 @@ config CRYPTO_MD5
help
MD5 message digest algorithm (RFC1321).
+config CRYPTO_MD5_OCTEON
+ tristate "MD5 digest algorithm (OCTEON)"
+ depends on CPU_CAVIUM_OCTEON
+ select CRYPTO_MD5
+ select CRYPTO_HASH
+ help
+ MD5 message digest algorithm (RFC1321) implemented
+ using OCTEON crypto instructions, when available.
+
config CRYPTO_MD5_SPARC64
tristate "MD5 digest algorithm (SPARC64)"
depends on SPARC64
@@ -1505,6 +1514,15 @@ config CRYPTO_USER_API_SKCIPHER
This option enables the user-spaces interface for symmetric
key cipher algorithms.
+config CRYPTO_USER_API_RNG
+ tristate "User-space interface for random number generator algorithms"
+ depends on NET
+ select CRYPTO_RNG
+ select CRYPTO_USER_API
+ help
+ This option enables the user-spaces interface for random
+ number generator algorithms.
+
config CRYPTO_HASH_INFO
bool
diff --git a/crypto/Makefile b/crypto/Makefile
index 1445b9100c05..ba19465f9ad3 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -99,6 +99,7 @@ obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o
obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o
obj-$(CONFIG_CRYPTO_USER_API_HASH) += algif_hash.o
obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o
+obj-$(CONFIG_CRYPTO_USER_API_RNG) += algif_rng.o
#
# generic algorithms and the async_tx api
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index 40886c489903..db201bca1581 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -69,6 +69,7 @@ static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
{
u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
+
return max(start, end_page);
}
@@ -86,7 +87,7 @@ static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
if (n == len_this_page)
break;
n -= len_this_page;
- scatterwalk_start(&walk->out, scatterwalk_sg_next(walk->out.sg));
+ scatterwalk_start(&walk->out, sg_next(walk->out.sg));
}
return bsize;
@@ -284,6 +285,7 @@ static int ablkcipher_walk_first(struct ablkcipher_request *req,
walk->iv = req->info;
if (unlikely(((unsigned long)walk->iv & alignmask))) {
int err = ablkcipher_copy_iv(walk, tfm, alignmask);
+
if (err)
return err;
}
@@ -589,7 +591,8 @@ static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
if (IS_ERR(inst))
goto put_tmpl;
- if ((err = crypto_register_instance(tmpl, inst))) {
+ err = crypto_register_instance(tmpl, inst);
+ if (err) {
tmpl->free(inst);
goto put_tmpl;
}
diff --git a/crypto/aead.c b/crypto/aead.c
index 547491e35c63..222271070b49 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -448,7 +448,8 @@ static int crypto_nivaead_default(struct crypto_alg *alg, u32 type, u32 mask)
if (IS_ERR(inst))
goto put_tmpl;
- if ((err = crypto_register_instance(tmpl, inst))) {
+ err = crypto_register_instance(tmpl, inst);
+ if (err) {
tmpl->free(inst);
goto put_tmpl;
}
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 4665b79c729a..7f8b7edcadca 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -188,7 +188,7 @@ static int alg_setkey(struct sock *sk, char __user *ukey,
err = type->setkey(ask->private, key, keylen);
out:
- sock_kfree_s(sk, key, keylen);
+ sock_kzfree_s(sk, key, keylen);
return err;
}
@@ -215,6 +215,13 @@ static int alg_setsockopt(struct socket *sock, int level, int optname,
goto unlock;
err = alg_setkey(sk, optval, optlen);
+ break;
+ case ALG_SET_AEAD_AUTHSIZE:
+ if (sock->state == SS_CONNECTED)
+ goto unlock;
+ if (!type->setauthsize)
+ goto unlock;
+ err = type->setauthsize(ask->private, optlen);
}
unlock:
@@ -338,49 +345,31 @@ static const struct net_proto_family alg_family = {
.owner = THIS_MODULE,
};
-int af_alg_make_sg(struct af_alg_sgl *sgl, void __user *addr, int len,
- int write)
+int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len)
{
- unsigned long from = (unsigned long)addr;
- unsigned long npages;
- unsigned off;
- int err;
- int i;
+ size_t off;
+ ssize_t n;
+ int npages, i;
- err = -EFAULT;
- if (!access_ok(write ? VERIFY_READ : VERIFY_WRITE, addr, len))
- goto out;
+ n = iov_iter_get_pages(iter, sgl->pages, len, ALG_MAX_PAGES, &off);
+ if (n < 0)
+ return n;
- off = from & ~PAGE_MASK;
- npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
- if (npages > ALG_MAX_PAGES)
- npages = ALG_MAX_PAGES;
-
- err = get_user_pages_fast(from, npages, write, sgl->pages);
- if (err < 0)
- goto out;
-
- npages = err;
- err = -EINVAL;
+ npages = (off + n + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (WARN_ON(npages == 0))
- goto out;
-
- err = 0;
+ return -EINVAL;
sg_init_table(sgl->sg, npages);
- for (i = 0; i < npages; i++) {
+ for (i = 0, len = n; i < npages; i++) {
int plen = min_t(int, len, PAGE_SIZE - off);
sg_set_page(sgl->sg + i, sgl->pages[i], plen, off);
off = 0;
len -= plen;
- err += plen;
}
-
-out:
- return err;
+ return n;
}
EXPORT_SYMBOL_GPL(af_alg_make_sg);
@@ -405,7 +394,7 @@ int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con)
if (cmsg->cmsg_level != SOL_ALG)
continue;
- switch(cmsg->cmsg_type) {
+ switch (cmsg->cmsg_type) {
case ALG_SET_IV:
if (cmsg->cmsg_len < CMSG_LEN(sizeof(*con->iv)))
return -EINVAL;
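The new ALG_SET_AEAD_AUTHSIZE case added above reuses the setsockopt() length argument as the requested tag size and ignores the value pointer; it only takes effect once an af_alg type provides a .setauthsize hook. A hypothetical user-space sketch of how a caller would request a 12-byte authentication tag on an already bound AF_ALG tfm socket (the helper name and the fallback defines are illustrative, not part of this patch):

#include <stdio.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279			/* value from include/linux/socket.h */
#endif
#ifndef ALG_SET_AEAD_AUTHSIZE
#define ALG_SET_AEAD_AUTHSIZE 5		/* value from include/uapi/linux/if_alg.h */
#endif

/* Request a taglen-byte authentication tag on a bound AF_ALG tfm socket;
 * optval is unused, optlen carries the size (see alg_setsockopt() above). */
static int set_taglen(int tfmfd, unsigned int taglen)
{
	if (setsockopt(tfmfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, taglen) < 0) {
		perror("ALG_SET_AEAD_AUTHSIZE");
		return -1;
	}
	return 0;
}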
diff --git a/crypto/ahash.c b/crypto/ahash.c
index f6a36a52d738..8acb886032ae 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -55,6 +55,7 @@ static int hash_walk_next(struct crypto_hash_walk *walk)
if (offset & alignmask) {
unsigned int unaligned = alignmask + 1 - (offset & alignmask);
+
if (nbytes > unaligned)
nbytes = unaligned;
}
@@ -120,7 +121,7 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
if (!walk->total)
return 0;
- walk->sg = scatterwalk_sg_next(walk->sg);
+ walk->sg = sg_next(walk->sg);
return hash_walk_new_entry(walk);
}
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 71a8143e23b1..83b04e0884b1 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -473,6 +473,7 @@ void crypto_unregister_template(struct crypto_template *tmpl)
list = &tmpl->instances;
hlist_for_each_entry(inst, list, list) {
int err = crypto_remove_alg(&inst->alg, &users);
+
BUG_ON(err);
}
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 01f56eb7816e..01da360bdb55 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -41,8 +41,6 @@ static int hash_sendmsg(struct kiocb *unused, struct socket *sock,
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct hash_ctx *ctx = ask->private;
- unsigned long iovlen;
- const struct iovec *iov;
long copied = 0;
int err;
@@ -58,37 +56,28 @@ static int hash_sendmsg(struct kiocb *unused, struct socket *sock,
ctx->more = 0;
- for (iov = msg->msg_iter.iov, iovlen = msg->msg_iter.nr_segs; iovlen > 0;
- iovlen--, iov++) {
- unsigned long seglen = iov->iov_len;
- char __user *from = iov->iov_base;
+ while (iov_iter_count(&msg->msg_iter)) {
+ int len = iov_iter_count(&msg->msg_iter);
- while (seglen) {
- int len = min_t(unsigned long, seglen, limit);
- int newlen;
+ if (len > limit)
+ len = limit;
- newlen = af_alg_make_sg(&ctx->sgl, from, len, 0);
- if (newlen < 0) {
- err = copied ? 0 : newlen;
- goto unlock;
- }
-
- ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL,
- newlen);
-
- err = af_alg_wait_for_completion(
- crypto_ahash_update(&ctx->req),
- &ctx->completion);
+ len = af_alg_make_sg(&ctx->sgl, &msg->msg_iter, len);
+ if (len < 0) {
+ err = copied ? 0 : len;
+ goto unlock;
+ }
- af_alg_free_sg(&ctx->sgl);
+ ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL, len);
- if (err)
- goto unlock;
+ err = af_alg_wait_for_completion(crypto_ahash_update(&ctx->req),
+ &ctx->completion);
+ af_alg_free_sg(&ctx->sgl);
+ if (err)
+ goto unlock;
- seglen -= newlen;
- from += newlen;
- copied += newlen;
- }
+ copied += len;
+ iov_iter_advance(&msg->msg_iter, len);
}
err = 0;
diff --git a/crypto/algif_rng.c b/crypto/algif_rng.c
new file mode 100644
index 000000000000..67f612cfed97
--- /dev/null
+++ b/crypto/algif_rng.c
@@ -0,0 +1,192 @@
+/*
+ * algif_rng: User-space interface for random number generators
+ *
+ * This file provides the user-space API for random number generators.
+ *
+ * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, and the entire permission notice in its entirety,
+ * including the disclaimer of warranties.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * ALTERNATIVELY, this product may be distributed under the terms of
+ * the GNU General Public License, in which case the provisions of the GPL2
+ * are required INSTEAD OF the above restrictions. (This clause is
+ * necessary due to a potential bad interaction between the GPL and
+ * the restrictions contained in a BSD-style copyright.)
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
+ * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <crypto/rng.h>
+#include <linux/random.h>
+#include <crypto/if_alg.h>
+#include <linux/net.h>
+#include <net/sock.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
+MODULE_DESCRIPTION("User-space interface for random number generators");
+
+struct rng_ctx {
+#define MAXSIZE 128
+ unsigned int len;
+ struct crypto_rng *drng;
+};
+
+static int rng_recvmsg(struct kiocb *unused, struct socket *sock,
+ struct msghdr *msg, size_t len, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+ struct rng_ctx *ctx = ask->private;
+ int err = -EFAULT;
+ int genlen = 0;
+ u8 result[MAXSIZE];
+
+ if (len == 0)
+ return 0;
+ if (len > MAXSIZE)
+ len = MAXSIZE;
+
+ /*
+ * although not strictly needed, this is a precaution against coding
+ * errors
+ */
+ memset(result, 0, len);
+
+ /*
+ * The enforcement of a proper seeding of an RNG is done within an
+ * RNG implementation. Some RNGs (DRBG, krng) do not need specific
+ * seeding as they automatically seed. The X9.31 DRNG will return
+ * an error if it was not seeded properly.
+ */
+ genlen = crypto_rng_get_bytes(ctx->drng, result, len);
+ if (genlen < 0)
+ return genlen;
+
+ err = memcpy_to_msg(msg, result, len);
+ memzero_explicit(result, genlen);
+
+ return err ? err : len;
+}
+
+static struct proto_ops algif_rng_ops = {
+ .family = PF_ALG,
+
+ .connect = sock_no_connect,
+ .socketpair = sock_no_socketpair,
+ .getname = sock_no_getname,
+ .ioctl = sock_no_ioctl,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .getsockopt = sock_no_getsockopt,
+ .mmap = sock_no_mmap,
+ .bind = sock_no_bind,
+ .accept = sock_no_accept,
+ .setsockopt = sock_no_setsockopt,
+ .poll = sock_no_poll,
+ .sendmsg = sock_no_sendmsg,
+ .sendpage = sock_no_sendpage,
+
+ .release = af_alg_release,
+ .recvmsg = rng_recvmsg,
+};
+
+static void *rng_bind(const char *name, u32 type, u32 mask)
+{
+ return crypto_alloc_rng(name, type, mask);
+}
+
+static void rng_release(void *private)
+{
+ crypto_free_rng(private);
+}
+
+static void rng_sock_destruct(struct sock *sk)
+{
+ struct alg_sock *ask = alg_sk(sk);
+ struct rng_ctx *ctx = ask->private;
+
+ sock_kfree_s(sk, ctx, ctx->len);
+ af_alg_release_parent(sk);
+}
+
+static int rng_accept_parent(void *private, struct sock *sk)
+{
+ struct rng_ctx *ctx;
+ struct alg_sock *ask = alg_sk(sk);
+ unsigned int len = sizeof(*ctx);
+
+ ctx = sock_kmalloc(sk, len, GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->len = len;
+
+ /*
+ * No seeding done at that point -- if multiple accepts are
+ * done on one RNG instance, each resulting FD points to the same
+ * state of the RNG.
+ */
+
+ ctx->drng = private;
+ ask->private = ctx;
+ sk->sk_destruct = rng_sock_destruct;
+
+ return 0;
+}
+
+static int rng_setkey(void *private, const u8 *seed, unsigned int seedlen)
+{
+ /*
+ * Check whether seedlen is of sufficient size is done in RNG
+ * implementations.
+ */
+ return crypto_rng_reset(private, (u8 *)seed, seedlen);
+}
+
+static const struct af_alg_type algif_type_rng = {
+ .bind = rng_bind,
+ .release = rng_release,
+ .accept = rng_accept_parent,
+ .setkey = rng_setkey,
+ .ops = &algif_rng_ops,
+ .name = "rng",
+ .owner = THIS_MODULE
+};
+
+static int __init rng_init(void)
+{
+ return af_alg_register_type(&algif_type_rng);
+}
+
+static void __exit rng_exit(void)
+{
+ int err = af_alg_unregister_type(&algif_type_rng);
+ BUG_ON(err);
+}
+
+module_init(rng_init);
+module_exit(rng_exit);
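For context, the socket interface implemented above is driven from user space in the usual AF_ALG way: bind a tfm socket to the "rng" type, optionally seed it via ALG_SET_KEY (which lands in rng_setkey() and crypto_rng_reset()), then accept() an operation fd and read() at most MAXSIZE (128) bytes per call. A minimal sketch, assuming an RNG is registered under the generic name "stdrng" and with error handling trimmed:

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "rng",
		.salg_name   = "stdrng",	/* any registered RNG name works */
	};
	unsigned char seed[48] = { 0 };		/* illustrative seed material */
	unsigned char buf[64];
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));

	/* optional: ALG_SET_KEY ends up in rng_setkey() -> crypto_rng_reset() */
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, seed, sizeof(seed));

	opfd = accept(tfmfd, NULL, 0);

	/* each read() is served by rng_recvmsg(), capped at 128 bytes */
	read(opfd, buf, sizeof(buf));

	close(opfd);
	close(tfmfd);
	return 0;
}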
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index c12207c8dde9..0c8a1e5ccadf 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -330,6 +330,7 @@ static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
sg = sgl->sg;
+ sg_unmark_end(sg + sgl->cur);
do {
i = sgl->cur;
plen = min_t(int, len, PAGE_SIZE);
@@ -355,6 +356,9 @@ static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
sgl->cur++;
} while (len && sgl->cur < MAX_SGL_ENTS);
+ if (!size)
+ sg_mark_end(sg + sgl->cur - 1);
+
ctx->merge = plen & (PAGE_SIZE - 1);
}
@@ -401,6 +405,10 @@ static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
ctx->merge = 0;
sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
+ if (sgl->cur)
+ sg_unmark_end(sgl->sg + sgl->cur - 1);
+
+ sg_mark_end(sgl->sg + sgl->cur);
get_page(page);
sg_set_page(sgl->sg + sgl->cur, page, size, offset);
sgl->cur++;
@@ -426,67 +434,58 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
&ctx->req));
struct skcipher_sg_list *sgl;
struct scatterlist *sg;
- unsigned long iovlen;
- const struct iovec *iov;
int err = -EAGAIN;
int used;
long copied = 0;
lock_sock(sk);
- for (iov = msg->msg_iter.iov, iovlen = msg->msg_iter.nr_segs; iovlen > 0;
- iovlen--, iov++) {
- unsigned long seglen = iov->iov_len;
- char __user *from = iov->iov_base;
-
- while (seglen) {
- sgl = list_first_entry(&ctx->tsgl,
- struct skcipher_sg_list, list);
- sg = sgl->sg;
-
- while (!sg->length)
- sg++;
-
- if (!ctx->used) {
- err = skcipher_wait_for_data(sk, flags);
- if (err)
- goto unlock;
- }
+ while (iov_iter_count(&msg->msg_iter)) {
+ sgl = list_first_entry(&ctx->tsgl,
+ struct skcipher_sg_list, list);
+ sg = sgl->sg;
- used = min_t(unsigned long, ctx->used, seglen);
+ while (!sg->length)
+ sg++;
- used = af_alg_make_sg(&ctx->rsgl, from, used, 1);
- err = used;
- if (err < 0)
+ if (!ctx->used) {
+ err = skcipher_wait_for_data(sk, flags);
+ if (err)
goto unlock;
+ }
+
+ used = min_t(unsigned long, ctx->used, iov_iter_count(&msg->msg_iter));
- if (ctx->more || used < ctx->used)
- used -= used % bs;
+ used = af_alg_make_sg(&ctx->rsgl, &msg->msg_iter, used);
+ err = used;
+ if (err < 0)
+ goto unlock;
+
+ if (ctx->more || used < ctx->used)
+ used -= used % bs;
- err = -EINVAL;
- if (!used)
- goto free;
+ err = -EINVAL;
+ if (!used)
+ goto free;
- ablkcipher_request_set_crypt(&ctx->req, sg,
- ctx->rsgl.sg, used,
- ctx->iv);
+ ablkcipher_request_set_crypt(&ctx->req, sg,
+ ctx->rsgl.sg, used,
+ ctx->iv);
- err = af_alg_wait_for_completion(
+ err = af_alg_wait_for_completion(
ctx->enc ?
crypto_ablkcipher_encrypt(&ctx->req) :
crypto_ablkcipher_decrypt(&ctx->req),
&ctx->completion);
free:
- af_alg_free_sg(&ctx->rsgl);
+ af_alg_free_sg(&ctx->rsgl);
- if (err)
- goto unlock;
+ if (err)
+ goto unlock;
- copied += used;
- from += used;
- seglen -= used;
- skcipher_pull_sgl(sk, used);
- }
+ copied += used;
+ skcipher_pull_sgl(sk, used);
+ iov_iter_advance(&msg->msg_iter, used);
}
err = 0;
diff --git a/crypto/cts.c b/crypto/cts.c
index bd9405820e8a..e467ec0acf9f 100644
--- a/crypto/cts.c
+++ b/crypto/cts.c
@@ -290,6 +290,9 @@ static struct crypto_instance *crypto_cts_alloc(struct rtattr **tb)
if (!is_power_of_2(alg->cra_blocksize))
goto out_put_alg;
+ if (strncmp(alg->cra_name, "cbc(", 4))
+ goto out_put_alg;
+
inst = crypto_alloc_instance("cts", alg);
if (IS_ERR(inst))
goto out_put_alg;
@@ -307,8 +310,6 @@ static struct crypto_instance *crypto_cts_alloc(struct rtattr **tb)
inst->alg.cra_blkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
inst->alg.cra_blkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
- inst->alg.cra_blkcipher.geniv = "seqiv";
-
inst->alg.cra_ctxsize = sizeof(struct crypto_cts_ctx);
inst->alg.cra_init = crypto_cts_init_tfm;
diff --git a/crypto/drbg.c b/crypto/drbg.c
index d748a1d0ca24..d8ff16e5c322 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -98,7 +98,6 @@
*/
#include <crypto/drbg.h>
-#include <linux/string.h>
/***************************************************************
* Backend cipher definitions available to DRBG
@@ -223,15 +222,6 @@ static inline unsigned short drbg_sec_strength(drbg_flag_t flags)
* function. Thus, the function implicitly knows the size of the
* buffer.
*
- * The FIPS test can be called in an endless loop until it returns
- * true. Although the code looks like a potential for a deadlock, it
- * is not the case, because returning a false cannot mathematically
- * occur (except once when a reseed took place and the updated state
- * would is now set up such that the generation of new value returns
- * an identical one -- this is most unlikely and would happen only once).
- * Thus, if this function repeatedly returns false and thus would cause
- * a deadlock, the integrity of the entire kernel is lost.
- *
* @drbg DRBG handle
* @buf output buffer of random data to be checked
*
@@ -258,6 +248,8 @@ static bool drbg_fips_continuous_test(struct drbg_state *drbg,
return false;
}
ret = memcmp(drbg->prev, buf, drbg_blocklen(drbg));
+ if (!ret)
+ panic("DRBG continuous self test failed\n");
memcpy(drbg->prev, buf, drbg_blocklen(drbg));
/* the test shall pass when the two compared values are not equal */
return ret != 0;
@@ -498,9 +490,9 @@ static int drbg_ctr_df(struct drbg_state *drbg,
ret = 0;
out:
- memzero_explicit(iv, drbg_blocklen(drbg));
- memzero_explicit(temp, drbg_statelen(drbg));
- memzero_explicit(pad, drbg_blocklen(drbg));
+ memset(iv, 0, drbg_blocklen(drbg));
+ memset(temp, 0, drbg_statelen(drbg));
+ memset(pad, 0, drbg_blocklen(drbg));
return ret;
}
@@ -574,9 +566,9 @@ static int drbg_ctr_update(struct drbg_state *drbg, struct list_head *seed,
ret = 0;
out:
- memzero_explicit(temp, drbg_statelen(drbg) + drbg_blocklen(drbg));
+ memset(temp, 0, drbg_statelen(drbg) + drbg_blocklen(drbg));
if (2 != reseed)
- memzero_explicit(df_data, drbg_statelen(drbg));
+ memset(df_data, 0, drbg_statelen(drbg));
return ret;
}
@@ -634,7 +626,7 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
len = ret;
out:
- memzero_explicit(drbg->scratchpad, drbg_blocklen(drbg));
+ memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
return len;
}
@@ -872,7 +864,7 @@ static int drbg_hash_df(struct drbg_state *drbg,
}
out:
- memzero_explicit(tmp, drbg_blocklen(drbg));
+ memset(tmp, 0, drbg_blocklen(drbg));
return ret;
}
@@ -916,7 +908,7 @@ static int drbg_hash_update(struct drbg_state *drbg, struct list_head *seed,
ret = drbg_hash_df(drbg, drbg->C, drbg_statelen(drbg), &datalist2);
out:
- memzero_explicit(drbg->scratchpad, drbg_statelen(drbg));
+ memset(drbg->scratchpad, 0, drbg_statelen(drbg));
return ret;
}
@@ -951,7 +943,7 @@ static int drbg_hash_process_addtl(struct drbg_state *drbg,
drbg->scratchpad, drbg_blocklen(drbg));
out:
- memzero_explicit(drbg->scratchpad, drbg_blocklen(drbg));
+ memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
return ret;
}
@@ -998,7 +990,7 @@ static int drbg_hash_hashgen(struct drbg_state *drbg,
}
out:
- memzero_explicit(drbg->scratchpad,
+ memset(drbg->scratchpad, 0,
(drbg_statelen(drbg) + drbg_blocklen(drbg)));
return len;
}
@@ -1047,7 +1039,7 @@ static int drbg_hash_generate(struct drbg_state *drbg,
drbg_add_buf(drbg->V, drbg_statelen(drbg), u.req, 8);
out:
- memzero_explicit(drbg->scratchpad, drbg_blocklen(drbg));
+ memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
return len;
}
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index 79ca2278c2a3..3bd749c7bb70 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -62,7 +62,7 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
walk->offset += PAGE_SIZE - 1;
walk->offset &= PAGE_MASK;
if (walk->offset >= walk->sg->offset + walk->sg->length)
- scatterwalk_start(walk, scatterwalk_sg_next(walk->sg));
+ scatterwalk_start(walk, sg_next(walk->sg));
}
}
@@ -116,7 +116,7 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
break;
offset += sg->length;
- sg = scatterwalk_sg_next(sg);
+ sg = sg_next(sg);
}
scatterwalk_advance(&walk, start - offset);
@@ -136,7 +136,7 @@ int scatterwalk_bytes_sglen(struct scatterlist *sg, int num_bytes)
do {
offset += sg->length;
n++;
- sg = scatterwalk_sg_next(sg);
+ sg = sg_next(sg);
/* num_bytes is too large */
if (unlikely(!sg && (num_bytes < offset)))
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index 9daa854cc485..b7bb9a2f4a31 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -267,6 +267,12 @@ static struct crypto_instance *seqiv_ablkcipher_alloc(struct rtattr **tb)
if (IS_ERR(inst))
goto out;
+ if (inst->alg.cra_ablkcipher.ivsize < sizeof(u64)) {
+ skcipher_geniv_free(inst);
+ inst = ERR_PTR(-EINVAL);
+ goto out;
+ }
+
inst->alg.cra_ablkcipher.givencrypt = seqiv_givencrypt_first;
inst->alg.cra_init = seqiv_init;
@@ -287,6 +293,12 @@ static struct crypto_instance *seqiv_aead_alloc(struct rtattr **tb)
if (IS_ERR(inst))
goto out;
+ if (inst->alg.cra_aead.ivsize < sizeof(u64)) {
+ aead_geniv_free(inst);
+ inst = ERR_PTR(-EINVAL);
+ goto out;
+ }
+
inst->alg.cra_aead.givencrypt = seqiv_aead_givencrypt_first;
inst->alg.cra_init = seqiv_aead_init;
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 1d864e988ea9..4b9e23fa4204 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -250,19 +250,19 @@ static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE],
int np = (buflen + PAGE_SIZE - 1)/PAGE_SIZE;
int k, rem;
- np = (np > XBUFSIZE) ? XBUFSIZE : np;
- rem = buflen % PAGE_SIZE;
if (np > XBUFSIZE) {
rem = PAGE_SIZE;
np = XBUFSIZE;
+ } else {
+ rem = buflen % PAGE_SIZE;
}
+
sg_init_table(sg, np);
- for (k = 0; k < np; ++k) {
- if (k == (np-1))
- sg_set_buf(&sg[k], xbuf[k], rem);
- else
- sg_set_buf(&sg[k], xbuf[k], PAGE_SIZE);
- }
+ np--;
+ for (k = 0; k < np; k++)
+ sg_set_buf(&sg[k], xbuf[k], PAGE_SIZE);
+
+ sg_set_buf(&sg[k], xbuf[k], rem);
}
static void test_aead_speed(const char *algo, int enc, unsigned int secs,
@@ -280,16 +280,20 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
struct scatterlist *sgout;
const char *e;
void *assoc;
- char iv[MAX_IVLEN];
+ char *iv;
char *xbuf[XBUFSIZE];
char *xoutbuf[XBUFSIZE];
char *axbuf[XBUFSIZE];
unsigned int *b_size;
unsigned int iv_len;
+ iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
+ if (!iv)
+ return;
+
if (aad_size >= PAGE_SIZE) {
pr_err("associate data length (%u) too big\n", aad_size);
- return;
+ goto out_noxbuf;
}
if (enc == ENCRYPT)
@@ -355,7 +359,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
iv_len = crypto_aead_ivsize(tfm);
if (iv_len)
- memset(&iv, 0xff, iv_len);
+ memset(iv, 0xff, iv_len);
crypto_aead_clear_flags(tfm, ~0);
printk(KERN_INFO "test %u (%d bit key, %d byte blocks): ",
@@ -408,6 +412,7 @@ out_nooutbuf:
out_noaxbuf:
testmgr_free_buf(xbuf);
out_noxbuf:
+ kfree(iv);
return;
}
@@ -764,10 +769,9 @@ static inline int do_one_ahash_op(struct ahash_request *req, int ret)
if (ret == -EINPROGRESS || ret == -EBUSY) {
struct tcrypt_result *tr = req->base.data;
- ret = wait_for_completion_interruptible(&tr->completion);
- if (!ret)
- ret = tr->err;
+ wait_for_completion(&tr->completion);
reinit_completion(&tr->completion);
+ ret = tr->err;
}
return ret;
}
@@ -993,10 +997,9 @@ static inline int do_one_acipher_op(struct ablkcipher_request *req, int ret)
if (ret == -EINPROGRESS || ret == -EBUSY) {
struct tcrypt_result *tr = req->base.data;
- ret = wait_for_completion_interruptible(&tr->completion);
- if (!ret)
- ret = tr->err;
+ wait_for_completion(&tr->completion);
reinit_completion(&tr->completion);
+ ret = tr->err;
}
return ret;
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 037368d34586..f4ed6d4205e7 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -181,10 +181,9 @@ static void testmgr_free_buf(char *buf[XBUFSIZE])
static int wait_async_op(struct tcrypt_result *tr, int ret)
{
if (ret == -EINPROGRESS || ret == -EBUSY) {
- ret = wait_for_completion_interruptible(&tr->completion);
- if (!ret)
- ret = tr->err;
+ wait_for_completion(&tr->completion);
reinit_completion(&tr->completion);
+ ret = tr->err;
}
return ret;
}
@@ -353,12 +352,11 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
break;
case -EINPROGRESS:
case -EBUSY:
- ret = wait_for_completion_interruptible(
- &tresult.completion);
- if (!ret && !(ret = tresult.err)) {
- reinit_completion(&tresult.completion);
+ wait_for_completion(&tresult.completion);
+ reinit_completion(&tresult.completion);
+ ret = tresult.err;
+ if (!ret)
break;
- }
/* fall through */
default:
printk(KERN_ERR "alg: hash: digest failed "
@@ -431,7 +429,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
struct scatterlist *sgout;
const char *e, *d;
struct tcrypt_result result;
- unsigned int authsize;
+ unsigned int authsize, iv_len;
void *input;
void *output;
void *assoc;
@@ -502,10 +500,11 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
memcpy(input, template[i].input, template[i].ilen);
memcpy(assoc, template[i].assoc, template[i].alen);
+ iv_len = crypto_aead_ivsize(tfm);
if (template[i].iv)
- memcpy(iv, template[i].iv, MAX_IVLEN);
+ memcpy(iv, template[i].iv, iv_len);
else
- memset(iv, 0, MAX_IVLEN);
+ memset(iv, 0, iv_len);
crypto_aead_clear_flags(tfm, ~0);
if (template[i].wk)
@@ -569,12 +568,11 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
break;
case -EINPROGRESS:
case -EBUSY:
- ret = wait_for_completion_interruptible(
- &result.completion);
- if (!ret && !(ret = result.err)) {
- reinit_completion(&result.completion);
+ wait_for_completion(&result.completion);
+ reinit_completion(&result.completion);
+ ret = result.err;
+ if (!ret)
break;
- }
case -EBADMSG:
if (template[i].novrfy)
/* verification failure was expected */
@@ -720,12 +718,11 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
break;
case -EINPROGRESS:
case -EBUSY:
- ret = wait_for_completion_interruptible(
- &result.completion);
- if (!ret && !(ret = result.err)) {
- reinit_completion(&result.completion);
+ wait_for_completion(&result.completion);
+ reinit_completion(&result.completion);
+ ret = result.err;
+ if (!ret)
break;
- }
case -EBADMSG:
if (template[i].novrfy)
/* verification failure was expected */
@@ -1002,12 +999,11 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
break;
case -EINPROGRESS:
case -EBUSY:
- ret = wait_for_completion_interruptible(
- &result.completion);
- if (!ret && !((ret = result.err))) {
- reinit_completion(&result.completion);
+ wait_for_completion(&result.completion);
+ reinit_completion(&result.completion);
+ ret = result.err;
+ if (!ret)
break;
- }
/* fall through */
default:
pr_err("alg: skcipher%s: %s failed on test %d for %s: ret=%d\n",
@@ -1097,12 +1093,11 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
break;
case -EINPROGRESS:
case -EBUSY:
- ret = wait_for_completion_interruptible(
- &result.completion);
- if (!ret && !((ret = result.err))) {
- reinit_completion(&result.completion);
+ wait_for_completion(&result.completion);
+ reinit_completion(&result.completion);
+ ret = result.err;
+ if (!ret)
break;
- }
/* fall through */
default:
pr_err("alg: skcipher%s: %s failed on chunk test %d for %s: ret=%d\n",
@@ -3299,6 +3294,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
.alg = "rfc4106(gcm(aes))",
.test = alg_test_aead,
+ .fips_allowed = 1,
.suite = {
.aead = {
.enc = {