author    Linus Torvalds  2012-05-23 09:02:42 -0700
committer Linus Torvalds  2012-05-23 09:02:42 -0700
commit    6f73b3629f774c6cba589b15fd095112b25ca923 (patch)
tree      50a60feae71cb5f40078f552b9b08468bc7b29c9 /drivers/crypto
parent    3a8580f82024e30b31c662aa49346adf7a3bcdb5 (diff)
parent    2074b1d9d53ae696dd3f49482bad43254f40f01d (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull powerpc updates from Benjamin Herrenschmidt:
"Here are the powerpc goodies for 3.5. Main highlights are:
- Support for the NX crypto engine in Power7+
- A bunch of Anton goodness, including some micro-optimization of our
syscall entry on Power7
- I converted a pile of our thermal control drivers to the new i2c
APIs (essentially turning the old therm_pm72 into a proper set of
windfarm drivers). That's one more step toward removing the
deprecated i2c APIs; there are still a few drivers to fix, but we are
getting close
- kexec/kdump support for 47x embedded cores
The big thing missing here is updates from Freescale. Not sure
what's up here, but with Kumar not working for them anymore, things
are a bit in a state of flux in that area."
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (71 commits)
powerpc: Fix irq distribution
Revert "powerpc/hw-breakpoint: Use generic hw-breakpoint interfaces for new PPC ptrace flags"
powerpc: Fixing a cputhread code documentation
powerpc/crypto: Enable the PFO-based encryption device
powerpc/crypto: Build files for the nx device driver
powerpc/crypto: debugfs routines and docs for the nx device driver
powerpc/crypto: SHA512 hash routines for nx encryption
powerpc/crypto: SHA256 hash routines for nx encryption
powerpc/crypto: AES-XCBC mode routines for nx encryption
powerpc/crypto: AES-GCM mode routines for nx encryption
powerpc/crypto: AES-ECB mode routines for nx encryption
powerpc/crypto: AES-CTR mode routines for nx encryption
powerpc/crypto: AES-CCM mode routines for nx encryption
powerpc/crypto: AES-CBC mode routines for nx encryption
powerpc/crypto: nx driver code supporting nx encryption
powerpc/pseries: Enable the PFO-based RNG accelerator
powerpc/pseries/hwrng: PFO-based hwrng driver
powerpc/pseries: Add PFO support to the VIO bus
powerpc/pseries: Add pseries update notifier for OFDT prop changes
powerpc/pseries: Add new hvcall constants to support PFO
...
Diffstat (limited to 'drivers/crypto')
-rw-r--r-- | drivers/crypto/Kconfig          |  17
-rw-r--r-- | drivers/crypto/nx/Makefile      |  11
-rw-r--r-- | drivers/crypto/nx/nx-aes-cbc.c  | 141
-rw-r--r-- | drivers/crypto/nx/nx-aes-ccm.c  | 468
-rw-r--r-- | drivers/crypto/nx/nx-aes-ctr.c  | 178
-rw-r--r-- | drivers/crypto/nx/nx-aes-ecb.c  | 139
-rw-r--r-- | drivers/crypto/nx/nx-aes-gcm.c  | 353
-rw-r--r-- | drivers/crypto/nx/nx-aes-xcbc.c | 236
-rw-r--r-- | drivers/crypto/nx/nx-sha256.c   | 246
-rw-r--r-- | drivers/crypto/nx/nx-sha512.c   | 265
-rw-r--r-- | drivers/crypto/nx/nx.c          | 716
-rw-r--r-- | drivers/crypto/nx/nx.h          | 193
-rw-r--r-- | drivers/crypto/nx/nx_csbcpb.h   | 205
-rw-r--r-- | drivers/crypto/nx/nx_debugfs.c  | 103
14 files changed, 3271 insertions, 0 deletions
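For context on how the algorithms added in this diff get used: below is a minimal, hypothetical sketch (not part of this patch set) of a kernel caller requesting "cbc(aes)" through the 3.x-era blkcipher API. With CRYPTO_DEV_NX enabled and the Power7+ Nest accelerator present, the "cbc-aes-nx" implementation registered below at priority 300 would normally win the algorithm lookup. The function name nx_cbc_demo and the all-zero key/IV are illustrative only.

/* Minimal, hypothetical sketch: one-block AES-CBC encryption via the
 * kernel crypto API as it existed around 3.5.  "cbc(aes)" resolves to the
 * highest-priority registered implementation, e.g. "cbc-aes-nx". */
#include <crypto/aes.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/types.h>
#include <linux/err.h>

static int nx_cbc_demo(void)
{
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct scatterlist sg;
	u8 key[AES_KEYSIZE_128] = { 0 };	/* illustrative all-zero key */
	u8 iv[AES_BLOCK_SIZE] = { 0 };		/* illustrative all-zero IV  */
	u8 buf[AES_BLOCK_SIZE] = { 0 };		/* one block of plaintext    */
	int rc;

	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = crypto_blkcipher_setkey(tfm, key, sizeof(key));
	if (rc)
		goto out;

	crypto_blkcipher_set_iv(tfm, iv, sizeof(iv));

	desc.tfm = tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;	/* lets the NX driver sleep/retry in nx_hcall_sync() */

	sg_init_one(&sg, buf, sizeof(buf));
	rc = crypto_blkcipher_encrypt(&desc, &sg, &sg, sizeof(buf));	/* encrypt in place */
out:
	crypto_free_blkcipher(tfm);
	return rc;
}

On hardware without the NX unit, the same call is served by a software cbc(aes) implementation, so callers need no NX-specific handling.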
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 371f13cc38eb..6373fa0ddb65 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -297,4 +297,21 @@ config CRYPTO_DEV_TEGRA_AES To compile this driver as a module, choose M here: the module will be called tegra-aes. +config CRYPTO_DEV_NX + tristate "Support for Power7+ in-Nest cryptographic accleration" + depends on PPC64 && IBMVIO + select CRYPTO_AES + select CRYPTO_CBC + select CRYPTO_ECB + select CRYPTO_CCM + select CRYPTO_GCM + select CRYPTO_AUTHENC + select CRYPTO_XCBC + select CRYPTO_SHA256 + select CRYPTO_SHA512 + help + Support for Power7+ in-Nest cryptographic acceleration. This + module supports acceleration for AES and SHA2 algorithms. If you + choose 'M' here, this module will be called nx_crypto. + endif # CRYPTO_HW diff --git a/drivers/crypto/nx/Makefile b/drivers/crypto/nx/Makefile new file mode 100644 index 000000000000..411ce59c80d1 --- /dev/null +++ b/drivers/crypto/nx/Makefile @@ -0,0 +1,11 @@ +obj-$(CONFIG_CRYPTO_DEV_NX) += nx-crypto.o +nx-crypto-objs := nx.o \ + nx_debugfs.o \ + nx-aes-cbc.o \ + nx-aes-ecb.o \ + nx-aes-gcm.o \ + nx-aes-ccm.o \ + nx-aes-ctr.o \ + nx-aes-xcbc.o \ + nx-sha256.o \ + nx-sha512.o diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c new file mode 100644 index 000000000000..69ed796ee327 --- /dev/null +++ b/drivers/crypto/nx/nx-aes-cbc.c @@ -0,0 +1,141 @@ +/** + * AES CBC routines supporting the Power 7+ Nest Accelerators driver + * + * Copyright (C) 2011-2012 International Business Machines Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 only. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + * Author: Kent Yoder <yoder1@us.ibm.com> + */ + +#include <crypto/aes.h> +#include <crypto/algapi.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/crypto.h> +#include <asm/vio.h> + +#include "nx_csbcpb.h" +#include "nx.h" + + +static int cbc_aes_nx_set_key(struct crypto_tfm *tfm, + const u8 *in_key, + unsigned int key_len) +{ + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm); + struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; + + nx_ctx_init(nx_ctx, HCOP_FC_AES); + + switch (key_len) { + case AES_KEYSIZE_128: + NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128); + nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128]; + break; + case AES_KEYSIZE_192: + NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192); + nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192]; + break; + case AES_KEYSIZE_256: + NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256); + nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256]; + break; + default: + return -EINVAL; + } + + csbcpb->cpb.hdr.mode = NX_MODE_AES_CBC; + memcpy(csbcpb->cpb.aes_cbc.key, in_key, key_len); + + return 0; +} + +static int cbc_aes_nx_crypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes, + int enc) +{ + struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm); + struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; + int rc; + + if (nbytes > nx_ctx->ap->databytelen) + return -EINVAL; + + if (enc) + NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; + else + NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; + + rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes, + csbcpb->cpb.aes_cbc.iv); + if (rc) + goto out; + + if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { + rc = -EINVAL; + goto out; + } + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; + + atomic_inc(&(nx_ctx->stats->aes_ops)); + atomic64_add(csbcpb->csb.processed_byte_count, + &(nx_ctx->stats->aes_bytes)); +out: + return rc; +} + +static int cbc_aes_nx_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) +{ + return cbc_aes_nx_crypt(desc, dst, src, nbytes, 1); +} + +static int cbc_aes_nx_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) +{ + return cbc_aes_nx_crypt(desc, dst, src, nbytes, 0); +} + +struct crypto_alg nx_cbc_aes_alg = { + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc-aes-nx", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct nx_crypto_ctx), + .cra_type = &crypto_blkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(nx_cbc_aes_alg.cra_list), + .cra_init = nx_crypto_ctx_aes_cbc_init, + .cra_exit = nx_crypto_ctx_exit, + .cra_blkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = cbc_aes_nx_set_key, + .encrypt = cbc_aes_nx_encrypt, + .decrypt = cbc_aes_nx_decrypt, + } +}; diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c new file mode 100644 index 000000000000..7aeac678b9c0 --- /dev/null +++ b/drivers/crypto/nx/nx-aes-ccm.c @@ -0,0 +1,468 @@ +/** + * AES CCM routines supporting the Power 7+ Nest Accelerators driver + * + * Copyright (C) 2012 International Business Machines Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 only. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Author: Kent Yoder <yoder1@us.ibm.com> + */ + +#include <crypto/internal/aead.h> +#include <crypto/aes.h> +#include <crypto/algapi.h> +#include <crypto/scatterwalk.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/crypto.h> +#include <asm/vio.h> + +#include "nx_csbcpb.h" +#include "nx.h" + + +static int ccm_aes_nx_set_key(struct crypto_aead *tfm, + const u8 *in_key, + unsigned int key_len) +{ + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base); + struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; + struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead; + + nx_ctx_init(nx_ctx, HCOP_FC_AES); + + switch (key_len) { + case AES_KEYSIZE_128: + NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128); + NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128); + nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128]; + break; + default: + return -EINVAL; + } + + csbcpb->cpb.hdr.mode = NX_MODE_AES_CCM; + memcpy(csbcpb->cpb.aes_ccm.key, in_key, key_len); + + csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_CCA; + memcpy(csbcpb_aead->cpb.aes_cca.key, in_key, key_len); + + return 0; + +} + +static int ccm4309_aes_nx_set_key(struct crypto_aead *tfm, + const u8 *in_key, + unsigned int key_len) +{ + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base); + + if (key_len < 3) + return -EINVAL; + + key_len -= 3; + + memcpy(nx_ctx->priv.ccm.nonce, in_key + key_len, 3); + + return ccm_aes_nx_set_key(tfm, in_key, key_len); +} + +static int ccm_aes_nx_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + switch (authsize) { + case 4: + case 6: + case 8: + case 10: + case 12: + case 14: + case 16: + break; + default: + return -EINVAL; + } + + crypto_aead_crt(tfm)->authsize = authsize; + + return 0; +} + +static int ccm4309_aes_nx_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + switch (authsize) { + case 8: + case 12: + case 16: + break; + default: + return -EINVAL; + } + + crypto_aead_crt(tfm)->authsize = authsize; + + return 0; +} + +/* taken from crypto/ccm.c */ +static int set_msg_len(u8 *block, unsigned int msglen, int csize) +{ + __be32 data; + + memset(block, 0, csize); + block += csize; + + if (csize >= 4) + csize = 4; + else if (msglen > (unsigned int)(1 << (8 * csize))) + return -EOVERFLOW; + + data = cpu_to_be32(msglen); + memcpy(block - csize, (u8 *)&data + 4 - csize, csize); + + return 0; +} + +/* taken from crypto/ccm.c */ +static inline int crypto_ccm_check_iv(const u8 *iv) +{ + /* 2 <= L <= 8, so 1 <= L' <= 7. 
*/ + if (1 > iv[0] || iv[0] > 7) + return -EINVAL; + + return 0; +} + +/* based on code from crypto/ccm.c */ +static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize, + unsigned int cryptlen, u8 *b0) +{ + unsigned int l, lp, m = authsize; + int rc; + + memcpy(b0, iv, 16); + + lp = b0[0]; + l = lp + 1; + + /* set m, bits 3-5 */ + *b0 |= (8 * ((m - 2) / 2)); + + /* set adata, bit 6, if associated data is used */ + if (assoclen) + *b0 |= 64; + + rc = set_msg_len(b0 + 16 - l, cryptlen, l); + + return rc; +} + +static int generate_pat(u8 *iv, + struct aead_request *req, + struct nx_crypto_ctx *nx_ctx, + unsigned int authsize, + unsigned int nbytes, + u8 *out) +{ + struct nx_sg *nx_insg = nx_ctx->in_sg; + struct nx_sg *nx_outsg = nx_ctx->out_sg; + unsigned int iauth_len = 0; + struct vio_pfo_op *op = NULL; + u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL; + int rc; + + /* zero the ctr value */ + memset(iv + 15 - iv[0], 0, iv[0] + 1); + + if (!req->assoclen) { + b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0; + } else if (req->assoclen <= 14) { + /* if associated data is 14 bytes or less, we do 1 GCM + * operation on 2 AES blocks, B0 (stored in the csbcpb) and B1, + * which is fed in through the source buffers here */ + b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0; + b1 = nx_ctx->priv.ccm.iauth_tag; + iauth_len = req->assoclen; + + nx_insg = nx_build_sg_list(nx_insg, b1, 16, nx_ctx->ap->sglen); + nx_outsg = nx_build_sg_list(nx_outsg, tmp, 16, + nx_ctx->ap->sglen); + + /* inlen should be negative, indicating to phyp that its a + * pointer to an sg list */ + nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) * + sizeof(struct nx_sg); + nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) * + sizeof(struct nx_sg); + + NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_ENDE_ENCRYPT; + NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_INTERMEDIATE; + + op = &nx_ctx->op; + result = nx_ctx->csbcpb->cpb.aes_ccm.out_pat_or_mac; + } else if (req->assoclen <= 65280) { + /* if associated data is less than (2^16 - 2^8), we construct + * B1 differently and feed in the associated data to a CCA + * operation */ + b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0; + b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1; + iauth_len = 14; + + /* remaining assoc data must have scatterlist built for it */ + nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen, + req->assoc, iauth_len, + req->assoclen - iauth_len); + nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) * + sizeof(struct nx_sg); + + op = &nx_ctx->op_aead; + result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0; + } else { + /* if associated data is less than (2^32), we construct B1 + * differently yet again and feed in the associated data to a + * CCA operation */ + pr_err("associated data len is %u bytes (returning -EINVAL)\n", + req->assoclen); + rc = -EINVAL; + } + + rc = generate_b0(iv, req->assoclen, authsize, nbytes, b0); + if (rc) + goto done; + + if (b1) { + memset(b1, 0, 16); + *(u16 *)b1 = (u16)req->assoclen; + + scatterwalk_map_and_copy(b1 + 2, req->assoc, 0, + iauth_len, SCATTERWALK_FROM_SG); + + rc = nx_hcall_sync(nx_ctx, op, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto done; + + atomic_inc(&(nx_ctx->stats->aes_ops)); + atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes)); + + memcpy(out, result, AES_BLOCK_SIZE); + } +done: + return rc; +} + +static int ccm_nx_decrypt(struct aead_request *req, + struct blkcipher_desc *desc) +{ + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); + struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; + unsigned int 
nbytes = req->cryptlen; + unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req)); + struct nx_ccm_priv *priv = &nx_ctx->priv.ccm; + int rc = -1; + + if (nbytes > nx_ctx->ap->databytelen) + return -EINVAL; + + nbytes -= authsize; + + /* copy out the auth tag to compare with later */ + scatterwalk_map_and_copy(priv->oauth_tag, + req->src, nbytes, authsize, + SCATTERWALK_FROM_SG); + + rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, + csbcpb->cpb.aes_ccm.in_pat_or_b0); + if (rc) + goto out; + + rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, nbytes, + csbcpb->cpb.aes_ccm.iv_or_ctr); + if (rc) + goto out; + + NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; + NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_INTERMEDIATE; + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; + + atomic_inc(&(nx_ctx->stats->aes_ops)); + atomic64_add(csbcpb->csb.processed_byte_count, + &(nx_ctx->stats->aes_bytes)); + + rc = memcmp(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag, + authsize) ? -EBADMSG : 0; +out: + return rc; +} + +static int ccm_nx_encrypt(struct aead_request *req, + struct blkcipher_desc *desc) +{ + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); + struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; + unsigned int nbytes = req->cryptlen; + unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req)); + int rc = -1; + + if (nbytes > nx_ctx->ap->databytelen) + return -EINVAL; + + rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, + csbcpb->cpb.aes_ccm.in_pat_or_b0); + if (rc) + goto out; + + rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, nbytes, + csbcpb->cpb.aes_ccm.iv_or_ctr); + if (rc) + goto out; + + NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; + NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; + + atomic_inc(&(nx_ctx->stats->aes_ops)); + atomic64_add(csbcpb->csb.processed_byte_count, + &(nx_ctx->stats->aes_bytes)); + + /* copy out the auth tag */ + scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac, + req->dst, nbytes, authsize, + SCATTERWALK_TO_SG); +out: + return rc; +} + +static int ccm4309_aes_nx_encrypt(struct aead_request *req) +{ + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); + struct blkcipher_desc desc; + u8 *iv = nx_ctx->priv.ccm.iv; + + iv[0] = 3; + memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3); + memcpy(iv + 4, req->iv, 8); + + desc.info = iv; + desc.tfm = (struct crypto_blkcipher *)req->base.tfm; + + return ccm_nx_encrypt(req, &desc); +} + +static int ccm_aes_nx_encrypt(struct aead_request *req) +{ + struct blkcipher_desc desc; + int rc; + + desc.info = req->iv; + desc.tfm = (struct crypto_blkcipher *)req->base.tfm; + + rc = crypto_ccm_check_iv(desc.info); + if (rc) + return rc; + + return ccm_nx_encrypt(req, &desc); +} + +static int ccm4309_aes_nx_decrypt(struct aead_request *req) +{ + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); + struct blkcipher_desc desc; + u8 *iv = nx_ctx->priv.ccm.iv; + + iv[0] = 3; + memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3); + memcpy(iv + 4, req->iv, 8); + + desc.info = iv; + desc.tfm = (struct crypto_blkcipher *)req->base.tfm; + + return ccm_nx_decrypt(req, &desc); +} + +static int ccm_aes_nx_decrypt(struct aead_request *req) +{ + struct blkcipher_desc desc; + int rc; + + desc.info = req->iv; + desc.tfm = (struct crypto_blkcipher *)req->base.tfm; + + rc = crypto_ccm_check_iv(desc.info); + 
if (rc) + return rc; + + return ccm_nx_decrypt(req, &desc); +} + +/* tell the block cipher walk routines that this is a stream cipher by + * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block + * during encrypt/decrypt doesn't solve this problem, because it calls + * blkcipher_walk_done under the covers, which doesn't use walk->blocksize, + * but instead uses this tfm->blocksize. */ +struct crypto_alg nx_ccm_aes_alg = { + .cra_name = "ccm(aes)", + .cra_driver_name = "ccm-aes-nx", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct nx_crypto_ctx), + .cra_type = &crypto_aead_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(nx_ccm_aes_alg.cra_list), + .cra_init = nx_crypto_ctx_aes_ccm_init, + .cra_exit = nx_crypto_ctx_exit, + .cra_aead = { + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = AES_BLOCK_SIZE, + .setkey = ccm_aes_nx_set_key, + .setauthsize = ccm_aes_nx_setauthsize, + .encrypt = ccm_aes_nx_encrypt, + .decrypt = ccm_aes_nx_decrypt, + } +}; + +struct crypto_alg nx_ccm4309_aes_alg = { + .cra_name = "rfc4309(ccm(aes))", + .cra_driver_name = "rfc4309-ccm-aes-nx", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct nx_crypto_ctx), + .cra_type = &crypto_nivaead_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(nx_ccm4309_aes_alg.cra_list), + .cra_init = nx_crypto_ctx_aes_ccm_init, + .cra_exit = nx_crypto_ctx_exit, + .cra_aead = { + .ivsize = 8, + .maxauthsize = AES_BLOCK_SIZE, + .setkey = ccm4309_aes_nx_set_key, + .setauthsize = ccm4309_aes_nx_setauthsize, + .encrypt = ccm4309_aes_nx_encrypt, + .decrypt = ccm4309_aes_nx_decrypt, + .geniv = "seqiv", + } +}; diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c new file mode 100644 index 000000000000..52d4eb05e8f7 --- /dev/null +++ b/drivers/crypto/nx/nx-aes-ctr.c @@ -0,0 +1,178 @@ +/** + * AES CTR routines supporting the Power 7+ Nest Accelerators driver + * + * Copyright (C) 2011-2012 International Business Machines Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 only. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + * Author: Kent Yoder <yoder1@us.ibm.com> + */ + +#include <crypto/aes.h> +#include <crypto/ctr.h> +#include <crypto/algapi.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/crypto.h> +#include <asm/vio.h> + +#include "nx_csbcpb.h" +#include "nx.h" + + +static int ctr_aes_nx_set_key(struct crypto_tfm *tfm, + const u8 *in_key, + unsigned int key_len) +{ + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm); + struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; + + nx_ctx_init(nx_ctx, HCOP_FC_AES); + + switch (key_len) { + case AES_KEYSIZE_128: + NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128); + nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128]; + break; + case AES_KEYSIZE_192: + NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192); + nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192]; + break; + case AES_KEYSIZE_256: + NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256); + nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256]; + break; + default: + return -EINVAL; + } + + csbcpb->cpb.hdr.mode = NX_MODE_AES_CTR; + memcpy(csbcpb->cpb.aes_ctr.key, in_key, key_len); + + return 0; +} + +static int ctr3686_aes_nx_set_key(struct crypto_tfm *tfm, + const u8 *in_key, + unsigned int key_len) +{ + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm); + + if (key_len < CTR_RFC3686_NONCE_SIZE) + return -EINVAL; + + memcpy(nx_ctx->priv.ctr.iv, + in_key + key_len - CTR_RFC3686_NONCE_SIZE, + CTR_RFC3686_NONCE_SIZE); + + key_len -= CTR_RFC3686_NONCE_SIZE; + + return ctr_aes_nx_set_key(tfm, in_key, key_len); +} + +static int ctr_aes_nx_crypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) +{ + struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm); + struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; + int rc; + + if (nbytes > nx_ctx->ap->databytelen) + return -EINVAL; + + rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes, + csbcpb->cpb.aes_ctr.iv); + if (rc) + goto out; + + if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { + rc = -EINVAL; + goto out; + } + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; + + atomic_inc(&(nx_ctx->stats->aes_ops)); + atomic64_add(csbcpb->csb.processed_byte_count, + &(nx_ctx->stats->aes_bytes)); +out: + return rc; +} + +static int ctr3686_aes_nx_crypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) +{ + struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm); + u8 *iv = nx_ctx->priv.ctr.iv; + + memcpy(iv + CTR_RFC3686_NONCE_SIZE, + desc->info, CTR_RFC3686_IV_SIZE); + iv[15] = 1; + + desc->info = nx_ctx->priv.ctr.iv; + + return ctr_aes_nx_crypt(desc, dst, src, nbytes); +} + +struct crypto_alg nx_ctr_aes_alg = { + .cra_name = "ctr(aes)", + .cra_driver_name = "ctr-aes-nx", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct nx_crypto_ctx), + .cra_type = &crypto_blkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(nx_ctr_aes_alg.cra_list), + .cra_init = nx_crypto_ctx_aes_ctr_init, + .cra_exit = nx_crypto_ctx_exit, + .cra_blkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = ctr_aes_nx_set_key, + .encrypt = ctr_aes_nx_crypt, + .decrypt = ctr_aes_nx_crypt, + } +}; + +struct crypto_alg nx_ctr3686_aes_alg = { + .cra_name = "rfc3686(ctr(aes))", + .cra_driver_name = "rfc3686-ctr-aes-nx", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_blocksize = 1, + 
.cra_ctxsize = sizeof(struct nx_crypto_ctx), + .cra_type = &crypto_blkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(nx_ctr3686_aes_alg.cra_list), + .cra_init = nx_crypto_ctx_aes_ctr_init, + .cra_exit = nx_crypto_ctx_exit, + .cra_blkcipher = { + .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, + .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, + .ivsize = CTR_RFC3686_IV_SIZE, + .geniv = "seqiv", + .setkey = ctr3686_aes_nx_set_key, + .encrypt = ctr3686_aes_nx_crypt, + .decrypt = ctr3686_aes_nx_crypt, + } +}; diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c new file mode 100644 index 000000000000..7b77bc2d1df4 --- /dev/null +++ b/drivers/crypto/nx/nx-aes-ecb.c @@ -0,0 +1,139 @@ +/** + * AES ECB routines supporting the Power 7+ Nest Accelerators driver + * + * Copyright (C) 2011-2012 International Business Machines Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 only. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Author: Kent Yoder <yoder1@us.ibm.com> + */ + +#include <crypto/aes.h> +#include <crypto/algapi.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/crypto.h> +#include <asm/vio.h> + +#include "nx_csbcpb.h" +#include "nx.h" + + +static int ecb_aes_nx_set_key(struct crypto_tfm *tfm, + const u8 *in_key, + unsigned int key_len) +{ + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm); + struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; + + nx_ctx_init(nx_ctx, HCOP_FC_AES); + + switch (key_len) { + case AES_KEYSIZE_128: + NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128); + nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128]; + break; + case AES_KEYSIZE_192: + NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192); + nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192]; + break; + case AES_KEYSIZE_256: + NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256); + nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256]; + break; + default: + return -EINVAL; + } + + csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB; + memcpy(csbcpb->cpb.aes_ecb.key, in_key, key_len); + + return 0; +} + +static int ecb_aes_nx_crypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes, + int enc) +{ + struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm); + struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; + int rc; + + if (nbytes > nx_ctx->ap->databytelen) + return -EINVAL; + + if (enc) + NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; + else + NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; + + rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes, NULL); + if (rc) + goto out; + + if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { + rc = -EINVAL; + goto out; + } + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; + + atomic_inc(&(nx_ctx->stats->aes_ops)); + atomic64_add(csbcpb->csb.processed_byte_count, + &(nx_ctx->stats->aes_bytes)); +out: + return rc; +} + +static int ecb_aes_nx_encrypt(struct blkcipher_desc *desc, + 
struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) +{ + return ecb_aes_nx_crypt(desc, dst, src, nbytes, 1); +} + +static int ecb_aes_nx_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) +{ + return ecb_aes_nx_crypt(desc, dst, src, nbytes, 0); +} + +struct crypto_alg nx_ecb_aes_alg = { + .cra_name = "ecb(aes)", + .cra_driver_name = "ecb-aes-nx", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct nx_crypto_ctx), + .cra_type = &crypto_blkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(nx_ecb_aes_alg.cra_list), + .cra_init = nx_crypto_ctx_aes_ecb_init, + .cra_exit = nx_crypto_ctx_exit, + .cra_blkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = ecb_aes_nx_set_key, + .encrypt = ecb_aes_nx_encrypt, + .decrypt = ecb_aes_nx_decrypt, + } +}; diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c new file mode 100644 index 000000000000..9ab1c7341dac --- /dev/null +++ b/drivers/crypto/nx/nx-aes-gcm.c @@ -0,0 +1,353 @@ +/** + * AES GCM routines supporting the Power 7+ Nest Accelerators driver + * + * Copyright (C) 2012 International Business Machines Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 only. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + * Author: Kent Yoder <yoder1@us.ibm.com> + */ + +#include <crypto/internal/aead.h> +#include <crypto/aes.h> +#include <crypto/algapi.h> +#include <crypto/scatterwalk.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/crypto.h> +#include <asm/vio.h> + +#include "nx_csbcpb.h" +#include "nx.h" + + +static int gcm_aes_nx_set_key(struct crypto_aead *tfm, + const u8 *in_key, + unsigned int key_len) +{ + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base); + struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; + struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead; + + nx_ctx_init(nx_ctx, HCOP_FC_AES); + + switch (key_len) { + case AES_KEYSIZE_128: + NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128); + NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128); + nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128]; + break; + case AES_KEYSIZE_192: + NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192); + NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_192); + nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192]; + break; + case AES_KEYSIZE_256: + NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256); + NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_256); + nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256]; + break; + default: + return -EINVAL; + } + + csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM; + memcpy(csbcpb->cpb.aes_gcm.key, in_key, key_len); + + csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_GCA; + memcpy(csbcpb_aead->cpb.aes_gca.key, in_key, key_len); + + return 0; +} + +static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm, + const u8 *in_key, + unsigned int key_len) +{ + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base); + char *nonce = nx_ctx->priv.gcm.nonce; + int rc; + + if (key_len < 4) + return -EINVAL; + + key_len -= 4; + + rc = gcm_aes_nx_set_key(tfm, in_key, key_len); + if (rc) + goto out; + + memcpy(nonce, in_key + key_len, 4); +out: + return rc; +} + +static int gcm_aes_nx_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + if (authsize > crypto_aead_alg(tfm)->maxauthsize) + return -EINVAL; + + crypto_aead_crt(tfm)->authsize = authsize; + + return 0; +} + +static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + switch (authsize) { + case 8: + case 12: + case 16: + break; + default: + return -EINVAL; + } + + crypto_aead_crt(tfm)->authsize = authsize; + + return 0; +} + +static int nx_gca(struct nx_crypto_ctx *nx_ctx, + struct aead_request *req, + u8 *out) +{ + struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead; + int rc = -EINVAL; + struct scatter_walk walk; + struct nx_sg *nx_sg = nx_ctx->in_sg; + + if (req->assoclen > nx_ctx->ap->databytelen) + goto out; + + if (req->assoclen <= AES_BLOCK_SIZE) { + scatterwalk_start(&walk, req->assoc); + scatterwalk_copychunks(out, &walk, req->assoclen, + SCATTERWALK_FROM_SG); + scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0); + + rc = 0; + goto out; + } + + nx_sg = nx_walk_and_build(nx_sg, nx_ctx->ap->sglen, req->assoc, 0, + req->assoclen); + nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg) * sizeof(struct nx_sg); + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; + + atomic_inc(&(nx_ctx->stats->aes_ops)); + atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes)); + + memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE); +out: + return rc; +} + +static int gcm_aes_nx_crypt(struct aead_request *req, int enc) +{ + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); + struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; + struct blkcipher_desc desc; + unsigned 
int nbytes = req->cryptlen; + int rc = -EINVAL; + + if (nbytes > nx_ctx->ap->databytelen) + goto out; + + desc.info = nx_ctx->priv.gcm.iv; + /* initialize the counter */ + *(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1; + + /* For scenarios where the input message is zero length, AES CTR mode + * may be used. Set the source data to be a single block (16B) of all + * zeros, and set the input IV value to be the same as the GMAC IV + * value. - nx_wb 4.8.1.3 */ + if (nbytes == 0) { + char src[AES_BLOCK_SIZE] = {}; + struct scatterlist sg; + + desc.tfm = crypto_alloc_blkcipher("ctr(aes)", 0, 0); + if (IS_ERR(desc.tfm)) { + rc = -ENOMEM; + goto out; + } + + crypto_blkcipher_setkey(desc.tfm, csbcpb->cpb.aes_gcm.key, + NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_128 ? 16 : + NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_192 ? 24 : 32); + + sg_init_one(&sg, src, AES_BLOCK_SIZE); + if (enc) + crypto_blkcipher_encrypt_iv(&desc, req->dst, &sg, + AES_BLOCK_SIZE); + else + crypto_blkcipher_decrypt_iv(&desc, req->dst, &sg, + AES_BLOCK_SIZE); + crypto_free_blkcipher(desc.tfm); + + rc = 0; + goto out; + } + + desc.tfm = (struct crypto_blkcipher *)req->base.tfm; + + csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8; + + if (req->assoclen) { + rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad); + if (rc) + goto out; + } + + if (enc) + NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; + else + nbytes -= AES_BLOCK_SIZE; + + csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8; + + rc = nx_build_sg_lists(nx_ctx, &desc, req->dst, req->src, nbytes, + csbcpb->cpb.aes_gcm.iv_or_cnt); + if (rc) + goto out; + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; + + atomic_inc(&(nx_ctx->stats->aes_ops)); + atomic64_add(csbcpb->csb.processed_byte_count, + &(nx_ctx->stats->aes_bytes)); + + if (enc) { + /* copy out the auth tag */ + scatterwalk_map_and_copy(csbcpb->cpb.aes_gcm.out_pat_or_mac, + req->dst, nbytes, + crypto_aead_authsize(crypto_aead_reqtfm(req)), + SCATTERWALK_TO_SG); + } else if (req->assoclen) { + u8 *itag = nx_ctx->priv.gcm.iauth_tag; + u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac; + + scatterwalk_map_and_copy(itag, req->dst, nbytes, + crypto_aead_authsize(crypto_aead_reqtfm(req)), + SCATTERWALK_FROM_SG); + rc = memcmp(itag, otag, + crypto_aead_authsize(crypto_aead_reqtfm(req))) ? 
+ -EBADMSG : 0; + } +out: + return rc; +} + +static int gcm_aes_nx_encrypt(struct aead_request *req) +{ + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); + char *iv = nx_ctx->priv.gcm.iv; + + memcpy(iv, req->iv, 12); + + return gcm_aes_nx_crypt(req, 1); +} + +static int gcm_aes_nx_decrypt(struct aead_request *req) +{ + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); + char *iv = nx_ctx->priv.gcm.iv; + + memcpy(iv, req->iv, 12); + + return gcm_aes_nx_crypt(req, 0); +} + +static int gcm4106_aes_nx_encrypt(struct aead_request *req) +{ + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); + char *iv = nx_ctx->priv.gcm.iv; + char *nonce = nx_ctx->priv.gcm.nonce; + + memcpy(iv, nonce, NX_GCM4106_NONCE_LEN); + memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8); + + return gcm_aes_nx_crypt(req, 1); +} + +static int gcm4106_aes_nx_decrypt(struct aead_request *req) +{ + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); + char *iv = nx_ctx->priv.gcm.iv; + char *nonce = nx_ctx->priv.gcm.nonce; + + memcpy(iv, nonce, NX_GCM4106_NONCE_LEN); + memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8); + + return gcm_aes_nx_crypt(req, 0); +} + +/* tell the block cipher walk routines that this is a stream cipher by + * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block + * during encrypt/decrypt doesn't solve this problem, because it calls + * blkcipher_walk_done under the covers, which doesn't use walk->blocksize, + * but instead uses this tfm->blocksize. */ +struct crypto_alg nx_gcm_aes_alg = { + .cra_name = "gcm(aes)", + .cra_driver_name = "gcm-aes-nx", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AEAD, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct nx_crypto_ctx), + .cra_type = &crypto_aead_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(nx_gcm_aes_alg.cra_list), + .cra_init = nx_crypto_ctx_aes_gcm_init, + .cra_exit = nx_crypto_ctx_exit, + .cra_aead = { + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = AES_BLOCK_SIZE, + .setkey = gcm_aes_nx_set_key, + .setauthsize = gcm_aes_nx_setauthsize, + .encrypt = gcm_aes_nx_encrypt, + .decrypt = gcm_aes_nx_decrypt, + } +}; + +struct crypto_alg nx_gcm4106_aes_alg = { + .cra_name = "rfc4106(gcm(aes))", + .cra_driver_name = "rfc4106-gcm-aes-nx", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AEAD, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct nx_crypto_ctx), + .cra_type = &crypto_nivaead_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(nx_gcm4106_aes_alg.cra_list), + .cra_init = nx_crypto_ctx_aes_gcm_init, + .cra_exit = nx_crypto_ctx_exit, + .cra_aead = { + .ivsize = 8, + .maxauthsize = AES_BLOCK_SIZE, + .geniv = "seqiv", + .setkey = gcm4106_aes_nx_set_key, + .setauthsize = gcm4106_aes_nx_setauthsize, + .encrypt = gcm4106_aes_nx_encrypt, + .decrypt = gcm4106_aes_nx_decrypt, + } +}; diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c new file mode 100644 index 000000000000..93923e4628c0 --- /dev/null +++ b/drivers/crypto/nx/nx-aes-xcbc.c @@ -0,0 +1,236 @@ +/** + * AES XCBC routines supporting the Power 7+ Nest Accelerators driver + * + * Copyright (C) 2011-2012 International Business Machines Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 only. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Author: Kent Yoder <yoder1@us.ibm.com> + */ + +#include <crypto/internal/hash.h> +#include <crypto/aes.h> +#include <crypto/algapi.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/crypto.h> +#include <asm/vio.h> + +#include "nx_csbcpb.h" +#include "nx.h" + + +struct xcbc_state { + u8 state[AES_BLOCK_SIZE]; + unsigned int count; + u8 buffer[AES_BLOCK_SIZE]; +}; + +static int nx_xcbc_set_key(struct crypto_shash *desc, + const u8 *in_key, + unsigned int key_len) +{ + struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc); + + switch (key_len) { + case AES_KEYSIZE_128: + nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128]; + break; + default: + return -EINVAL; + } + + memcpy(nx_ctx->priv.xcbc.key, in_key, key_len); + + return 0; +} + +static int nx_xcbc_init(struct shash_desc *desc) +{ + struct xcbc_state *sctx = shash_desc_ctx(desc); + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); + struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; + struct nx_sg *out_sg; + + nx_ctx_init(nx_ctx, HCOP_FC_AES); + + memset(sctx, 0, sizeof *sctx); + + NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128); + csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC; + + memcpy(csbcpb->cpb.aes_xcbc.key, nx_ctx->priv.xcbc.key, AES_BLOCK_SIZE); + memset(nx_ctx->priv.xcbc.key, 0, sizeof *nx_ctx->priv.xcbc.key); + + out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, + AES_BLOCK_SIZE, nx_ctx->ap->sglen); + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); + + return 0; +} + +static int nx_xcbc_update(struct shash_desc *desc, + const u8 *data, + unsigned int len) +{ + struct xcbc_state *sctx = shash_desc_ctx(desc); + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); + struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; + struct nx_sg *in_sg; + u32 to_process, leftover; + int rc = 0; + + if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { + /* we've hit the nx chip previously and we're updating again, + * so copy over the partial digest */ + memcpy(csbcpb->cpb.aes_xcbc.cv, + csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE); + } + + /* 2 cases for total data len: + * 1: <= AES_BLOCK_SIZE: copy into state, return 0 + * 2: > AES_BLOCK_SIZE: process X blocks, copy in leftover + */ + if (len + sctx->count <= AES_BLOCK_SIZE) { + memcpy(sctx->buffer + sctx->count, data, len); + sctx->count += len; + goto out; + } + + /* to_process: the AES_BLOCK_SIZE data chunk to process in this + * update */ + to_process = (sctx->count + len) & ~(AES_BLOCK_SIZE - 1); + leftover = (sctx->count + len) & (AES_BLOCK_SIZE - 1); + + /* the hardware will not accept a 0 byte operation for this algorithm + * and the operation MUST be finalized to be correct. So if we happen + * to get an update that falls on a block sized boundary, we must + * save off the last block to finalize with later. 
*/ + if (!leftover) { + to_process -= AES_BLOCK_SIZE; + leftover = AES_BLOCK_SIZE; + } + + if (sctx->count) { + in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buffer, + sctx->count, nx_ctx->ap->sglen); + in_sg = nx_build_sg_list(in_sg, (u8 *)data, + to_process - sctx->count, + nx_ctx->ap->sglen); + nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * + sizeof(struct nx_sg); + } else { + in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)data, to_process, + nx_ctx->ap->sglen); + nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * + sizeof(struct nx_sg); + } + + NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; + + if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { + rc = -EINVAL; + goto out; + } + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; + + atomic_inc(&(nx_ctx->stats->aes_ops)); + + /* copy the leftover back into the state struct */ + memcpy(sctx->buffer, data + len - leftover, leftover); + sctx->count = leftover; + + /* everything after the first update is continuation */ + NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; +out: + return rc; +} + +static int nx_xcbc_final(struct shash_desc *desc, u8 *out) +{ + struct xcbc_state *sctx = shash_desc_ctx(desc); + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); + struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; + struct nx_sg *in_sg, *out_sg; + int rc = 0; + + if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { + /* we've hit the nx chip previously, now we're finalizing, + * so copy over the partial digest */ + memcpy(csbcpb->cpb.aes_xcbc.cv, + csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE); + } else if (sctx->count == 0) { + /* we've never seen an update, so this is a 0 byte op. The + * hardware cannot handle a 0 byte op, so just copy out the + * known 0 byte result. 
This is cheaper than allocating a + * software context to do a 0 byte op */ + u8 data[] = { 0x75, 0xf0, 0x25, 0x1d, 0x52, 0x8a, 0xc0, 0x1c, + 0x45, 0x73, 0xdf, 0xd5, 0x84, 0xd7, 0x9f, 0x29 }; + memcpy(out, data, sizeof(data)); + goto out; + } + + /* final is represented by continuing the operation and indicating that + * this is not an intermediate operation */ + NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; + + in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer, + sctx->count, nx_ctx->ap->sglen); + out_sg = nx_build_sg_list(nx_ctx->out_sg, out, AES_BLOCK_SIZE, + nx_ctx->ap->sglen); + + nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); + + if (!nx_ctx->op.outlen) { + rc = -EINVAL; + goto out; + } + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; + + atomic_inc(&(nx_ctx->stats->aes_ops)); + + memcpy(out, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE); +out: + return rc; +} + +struct shash_alg nx_shash_aes_xcbc_alg = { + .digestsize = AES_BLOCK_SIZE, + .init = nx_xcbc_init, + .update = nx_xcbc_update, + .final = nx_xcbc_final, + .setkey = nx_xcbc_set_key, + .descsize = sizeof(struct xcbc_state), + .statesize = sizeof(struct xcbc_state), + .base = { + .cra_name = "xcbc(aes)", + .cra_driver_name = "xcbc-aes-nx", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_module = THIS_MODULE, + .cra_ctxsize = sizeof(struct nx_crypto_ctx), + .cra_init = nx_crypto_ctx_aes_xcbc_init, + .cra_exit = nx_crypto_ctx_exit, + } +}; diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c new file mode 100644 index 000000000000..9767315f8c0b --- /dev/null +++ b/drivers/crypto/nx/nx-sha256.c @@ -0,0 +1,246 @@ +/** + * SHA-256 routines supporting the Power 7+ Nest Accelerators driver + * + * Copyright (C) 2011-2012 International Business Machines Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 only. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + * Author: Kent Yoder <yoder1@us.ibm.com> + */ + +#include <crypto/internal/hash.h> +#include <crypto/sha.h> +#include <linux/module.h> +#include <asm/vio.h> + +#include "nx_csbcpb.h" +#include "nx.h" + + +static int nx_sha256_init(struct shash_desc *desc) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); + struct nx_sg *out_sg; + + nx_ctx_init(nx_ctx, HCOP_FC_SHA); + + memset(sctx, 0, sizeof *sctx); + + nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256]; + + NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256); + out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, + SHA256_DIGEST_SIZE, nx_ctx->ap->sglen); + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); + + return 0; +} + +static int nx_sha256_update(struct shash_desc *desc, const u8 *data, + unsigned int len) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); + struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; + struct nx_sg *in_sg; + u64 to_process, leftover; + int rc = 0; + + if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { + /* we've hit the nx chip previously and we're updating again, + * so copy over the partial digest */ + memcpy(csbcpb->cpb.sha256.input_partial_digest, + csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE); + } + + /* 2 cases for total data len: + * 1: <= SHA256_BLOCK_SIZE: copy into state, return 0 + * 2: > SHA256_BLOCK_SIZE: process X blocks, copy in leftover + */ + if (len + sctx->count <= SHA256_BLOCK_SIZE) { + memcpy(sctx->buf + sctx->count, data, len); + sctx->count += len; + goto out; + } + + /* to_process: the SHA256_BLOCK_SIZE data chunk to process in this + * update */ + to_process = (sctx->count + len) & ~(SHA256_BLOCK_SIZE - 1); + leftover = (sctx->count + len) & (SHA256_BLOCK_SIZE - 1); + + if (sctx->count) { + in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf, + sctx->count, nx_ctx->ap->sglen); + in_sg = nx_build_sg_list(in_sg, (u8 *)data, + to_process - sctx->count, + nx_ctx->ap->sglen); + nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * + sizeof(struct nx_sg); + } else { + in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)data, + to_process, nx_ctx->ap->sglen); + nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * + sizeof(struct nx_sg); + } + + NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; + + if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { + rc = -EINVAL; + goto out; + } + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; + + atomic_inc(&(nx_ctx->stats->sha256_ops)); + + /* copy the leftover back into the state struct */ + memcpy(sctx->buf, data + len - leftover, leftover); + sctx->count = leftover; + + csbcpb->cpb.sha256.message_bit_length += (u64) + (csbcpb->cpb.sha256.spbc * 8); + + /* everything after the first update is continuation */ + NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; +out: + return rc; +} + +static int nx_sha256_final(struct shash_desc *desc, u8 *out) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); + struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; + struct nx_sg *in_sg, *out_sg; + int rc; + + if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { + /* we've hit the nx chip previously, now we're finalizing, + * so copy over the partial digest */ + memcpy(csbcpb->cpb.sha256.input_partial_digest, + csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE); + } + + /* final is 
represented by continuing the operation and indicating that + * this is not an intermediate operation */ + NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; + + csbcpb->cpb.sha256.message_bit_length += (u64)(sctx->count * 8); + + in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf, + sctx->count, nx_ctx->ap->sglen); + out_sg = nx_build_sg_list(nx_ctx->out_sg, out, SHA256_DIGEST_SIZE, + nx_ctx->ap->sglen); + nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); + + if (!nx_ctx->op.outlen) { + rc = -EINVAL; + goto out; + } + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; + + atomic_inc(&(nx_ctx->stats->sha256_ops)); + + atomic64_add(csbcpb->cpb.sha256.message_bit_length, + &(nx_ctx->stats->sha256_bytes)); + memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE); +out: + return rc; +} + +static int nx_sha256_export(struct shash_desc *desc, void *out) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); + struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; + struct sha256_state *octx = out; + + octx->count = sctx->count + + (csbcpb->cpb.sha256.message_bit_length / 8); + memcpy(octx->buf, sctx->buf, sizeof(octx->buf)); + + /* if no data has been processed yet, we need to export SHA256's + * initial data, in case this context gets imported into a software + * context */ + if (csbcpb->cpb.sha256.message_bit_length) + memcpy(octx->state, csbcpb->cpb.sha256.message_digest, + SHA256_DIGEST_SIZE); + else { + octx->state[0] = SHA256_H0; + octx->state[1] = SHA256_H1; + octx->state[2] = SHA256_H2; + octx->state[3] = SHA256_H3; + octx->state[4] = SHA256_H4; + octx->state[5] = SHA256_H5; + octx->state[6] = SHA256_H6; + octx->state[7] = SHA256_H7; + } + + return 0; +} + +static int nx_sha256_import(struct shash_desc *desc, const void *in) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); + struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; + const struct sha256_state *ictx = in; + + memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); + + sctx->count = ictx->count & 0x3f; + csbcpb->cpb.sha256.message_bit_length = (ictx->count & ~0x3f) * 8; + + if (csbcpb->cpb.sha256.message_bit_length) { + memcpy(csbcpb->cpb.sha256.message_digest, ictx->state, + SHA256_DIGEST_SIZE); + + NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; + NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; + } + + return 0; +} + +struct shash_alg nx_shash_sha256_alg = { + .digestsize = SHA256_DIGEST_SIZE, + .init = nx_sha256_init, + .update = nx_sha256_update, + .final = nx_sha256_final, + .export = nx_sha256_export, + .import = nx_sha256_import, + .descsize = sizeof(struct sha256_state), + .statesize = sizeof(struct sha256_state), + .base = { + .cra_name = "sha256", + .cra_driver_name = "sha256-nx", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_module = THIS_MODULE, + .cra_ctxsize = sizeof(struct nx_crypto_ctx), + .cra_init = nx_crypto_ctx_sha_init, + .cra_exit = nx_crypto_ctx_exit, + } +}; diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c new file mode 100644 index 000000000000..3177b8c3d5f1 --- /dev/null +++ b/drivers/crypto/nx/nx-sha512.c @@ -0,0 +1,265 @@ +/** + * SHA-512 routines supporting the Power 7+ Nest Accelerators driver + * + * Copyright (C) 
2011-2012 International Business Machines Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 only. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Author: Kent Yoder <yoder1@us.ibm.com> + */ + +#include <crypto/internal/hash.h> +#include <crypto/sha.h> +#include <linux/module.h> +#include <asm/vio.h> + +#include "nx_csbcpb.h" +#include "nx.h" + + +static int nx_sha512_init(struct shash_desc *desc) +{ + struct sha512_state *sctx = shash_desc_ctx(desc); + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); + struct nx_sg *out_sg; + + nx_ctx_init(nx_ctx, HCOP_FC_SHA); + + memset(sctx, 0, sizeof *sctx); + + nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512]; + + NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512); + out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, + SHA512_DIGEST_SIZE, nx_ctx->ap->sglen); + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); + + return 0; +} + +static int nx_sha512_update(struct shash_desc *desc, const u8 *data, + unsigned int len) +{ + struct sha512_state *sctx = shash_desc_ctx(desc); + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); + struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; + struct nx_sg *in_sg; + u64 to_process, leftover, spbc_bits; + int rc = 0; + + if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { + /* we've hit the nx chip previously and we're updating again, + * so copy over the partial digest */ + memcpy(csbcpb->cpb.sha512.input_partial_digest, + csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE); + } + + /* 2 cases for total data len: + * 1: <= SHA512_BLOCK_SIZE: copy into state, return 0 + * 2: > SHA512_BLOCK_SIZE: process X blocks, copy in leftover + */ + if ((u64)len + sctx->count[0] <= SHA512_BLOCK_SIZE) { + memcpy(sctx->buf + sctx->count[0], data, len); + sctx->count[0] += len; + goto out; + } + + /* to_process: the SHA512_BLOCK_SIZE data chunk to process in this + * update */ + to_process = (sctx->count[0] + len) & ~(SHA512_BLOCK_SIZE - 1); + leftover = (sctx->count[0] + len) & (SHA512_BLOCK_SIZE - 1); + + if (sctx->count[0]) { + in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf, + sctx->count[0], nx_ctx->ap->sglen); + in_sg = nx_build_sg_list(in_sg, (u8 *)data, + to_process - sctx->count[0], + nx_ctx->ap->sglen); + nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * + sizeof(struct nx_sg); + } else { + in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)data, + to_process, nx_ctx->ap->sglen); + nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * + sizeof(struct nx_sg); + } + + NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; + + if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { + rc = -EINVAL; + goto out; + } + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; + + atomic_inc(&(nx_ctx->stats->sha512_ops)); + + /* copy the leftover back into the state struct */ + memcpy(sctx->buf, data + len - leftover, leftover); + sctx->count[0] = leftover; + + spbc_bits = csbcpb->cpb.sha512.spbc * 
8; + csbcpb->cpb.sha512.message_bit_length_lo += spbc_bits; + if (csbcpb->cpb.sha512.message_bit_length_lo < spbc_bits) + csbcpb->cpb.sha512.message_bit_length_hi++; + + /* everything after the first update is continuation */ + NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; +out: + return rc; +} + +static int nx_sha512_final(struct shash_desc *desc, u8 *out) +{ + struct sha512_state *sctx = shash_desc_ctx(desc); + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); + struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; + struct nx_sg *in_sg, *out_sg; + u64 count0; + int rc; + + if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { + /* we've hit the nx chip previously, now we're finalizing, + * so copy over the partial digest */ + memcpy(csbcpb->cpb.sha512.input_partial_digest, + csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE); + } + + /* final is represented by continuing the operation and indicating that + * this is not an intermediate operation */ + NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; + + count0 = sctx->count[0] * 8; + + csbcpb->cpb.sha512.message_bit_length_lo += count0; + if (csbcpb->cpb.sha512.message_bit_length_lo < count0) + csbcpb->cpb.sha512.message_bit_length_hi++; + + in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, sctx->count[0], + nx_ctx->ap->sglen); + out_sg = nx_build_sg_list(nx_ctx->out_sg, out, SHA512_DIGEST_SIZE, + nx_ctx->ap->sglen); + nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); + + if (!nx_ctx->op.outlen) { + rc = -EINVAL; + goto out; + } + + rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, + desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); + if (rc) + goto out; + + atomic_inc(&(nx_ctx->stats->sha512_ops)); + atomic64_add(csbcpb->cpb.sha512.message_bit_length_lo, + &(nx_ctx->stats->sha512_bytes)); + + memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE); +out: + return rc; +} + +static int nx_sha512_export(struct shash_desc *desc, void *out) +{ + struct sha512_state *sctx = shash_desc_ctx(desc); + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); + struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; + struct sha512_state *octx = out; + + /* move message_bit_length (128 bits) into count and convert its value + * to bytes */ + octx->count[0] = csbcpb->cpb.sha512.message_bit_length_lo >> 3 | + ((csbcpb->cpb.sha512.message_bit_length_hi & 7) << 61); + octx->count[1] = csbcpb->cpb.sha512.message_bit_length_hi >> 3; + + octx->count[0] += sctx->count[0]; + if (octx->count[0] < sctx->count[0]) + octx->count[1]++; + + memcpy(octx->buf, sctx->buf, sizeof(octx->buf)); + + /* if no data has been processed yet, we need to export SHA512's + * initial data, in case this context gets imported into a software + * context */ + if (csbcpb->cpb.sha512.message_bit_length_hi || + csbcpb->cpb.sha512.message_bit_length_lo) + memcpy(octx->state, csbcpb->cpb.sha512.message_digest, + SHA512_DIGEST_SIZE); + else { + octx->state[0] = SHA512_H0; + octx->state[1] = SHA512_H1; + octx->state[2] = SHA512_H2; + octx->state[3] = SHA512_H3; + octx->state[4] = SHA512_H4; + octx->state[5] = SHA512_H5; + octx->state[6] = SHA512_H6; + octx->state[7] = SHA512_H7; + } + + return 0; +} + +static int nx_sha512_import(struct shash_desc *desc, const void *in) +{ + struct sha512_state *sctx = shash_desc_ctx(desc); + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); + struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; + const struct 
sha512_state *ictx = in; + + memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); + sctx->count[0] = ictx->count[0] & 0x3f; + csbcpb->cpb.sha512.message_bit_length_lo = (ictx->count[0] & ~0x3f) + << 3; + csbcpb->cpb.sha512.message_bit_length_hi = ictx->count[1] << 3 | + ictx->count[0] >> 61; + + if (csbcpb->cpb.sha512.message_bit_length_hi || + csbcpb->cpb.sha512.message_bit_length_lo) { + memcpy(csbcpb->cpb.sha512.message_digest, ictx->state, + SHA512_DIGEST_SIZE); + + NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; + NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; + } + + return 0; +} + +struct shash_alg nx_shash_sha512_alg = { + .digestsize = SHA512_DIGEST_SIZE, + .init = nx_sha512_init, + .update = nx_sha512_update, + .final = nx_sha512_final, + .export = nx_sha512_export, + .import = nx_sha512_import, + .descsize = sizeof(struct sha512_state), + .statesize = sizeof(struct sha512_state), + .base = { + .cra_name = "sha512", + .cra_driver_name = "sha512-nx", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = SHA512_BLOCK_SIZE, + .cra_module = THIS_MODULE, + .cra_ctxsize = sizeof(struct nx_crypto_ctx), + .cra_init = nx_crypto_ctx_sha_init, + .cra_exit = nx_crypto_ctx_exit, + } +}; diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c new file mode 100644 index 000000000000..d7f179cc2e98 --- /dev/null +++ b/drivers/crypto/nx/nx.c @@ -0,0 +1,716 @@ +/** + * Routines supporting the Power 7+ Nest Accelerators driver + * + * Copyright (C) 2011-2012 International Business Machines Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 only. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Author: Kent Yoder <yoder1@us.ibm.com> + */ + +#include <crypto/internal/hash.h> +#include <crypto/hash.h> +#include <crypto/aes.h> +#include <crypto/sha.h> +#include <crypto/algapi.h> +#include <crypto/scatterwalk.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/types.h> +#include <linux/mm.h> +#include <linux/crypto.h> +#include <linux/scatterlist.h> +#include <linux/device.h> +#include <linux/of.h> +#include <asm/pSeries_reconfig.h> +#include <asm/abs_addr.h> +#include <asm/hvcall.h> +#include <asm/vio.h> + +#include "nx_csbcpb.h" +#include "nx.h" + + +/** + * nx_hcall_sync - make an H_COP_OP hcall for the passed in op structure + * + * @nx_ctx: the crypto context handle + * @op: PFO operation struct to pass in + * @may_sleep: flag indicating the request can sleep + * + * Make the hcall, retrying while the hardware is busy. If we cannot yield + * the thread, limit the number of retries to 10 here. 
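+ *
+ * Example (this is how the cipher and hash routines in this driver call it):
+ *
+ *	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+ *			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ *
+ * The CRYPTO_TFM_REQ_MAY_SLEEP request flag therefore decides whether a
+ * busy hcall is retried with rescheduling or with the bounded retry count.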
+ */ +int nx_hcall_sync(struct nx_crypto_ctx *nx_ctx, + struct vio_pfo_op *op, + u32 may_sleep) +{ + int rc, retries = 10; + struct vio_dev *viodev = nx_driver.viodev; + + atomic_inc(&(nx_ctx->stats->sync_ops)); + + do { + rc = vio_h_cop_sync(viodev, op); + } while ((rc == -EBUSY && !may_sleep && retries--) || + (rc == -EBUSY && may_sleep && cond_resched())); + + if (rc) { + dev_dbg(&viodev->dev, "vio_h_cop_sync failed: rc: %d " + "hcall rc: %ld\n", rc, op->hcall_err); + atomic_inc(&(nx_ctx->stats->errors)); + atomic_set(&(nx_ctx->stats->last_error), op->hcall_err); + atomic_set(&(nx_ctx->stats->last_error_pid), current->pid); + } + + return rc; +} + +/** + * nx_build_sg_list - build an NX scatter list describing a single buffer + * + * @sg_head: pointer to the first scatter list element to build + * @start_addr: pointer to the linear buffer + * @len: length of the data at @start_addr + * @sgmax: the largest number of scatter list elements we're allowed to create + * + * This function will start writing nx_sg elements at @sg_head and keep + * writing them until all of the data from @start_addr is described or + * until sgmax elements have been written. Scatter list elements will be + * created such that none of the elements describes a buffer that crosses a 4K + * boundary. + */ +struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head, + u8 *start_addr, + unsigned int len, + u32 sgmax) +{ + unsigned int sg_len = 0; + struct nx_sg *sg; + u64 sg_addr = (u64)start_addr; + u64 end_addr; + + /* determine the start and end for this address range - slightly + * different if this is in VMALLOC_REGION */ + if (is_vmalloc_addr(start_addr)) + sg_addr = phys_to_abs(page_to_phys(vmalloc_to_page(start_addr))) + + offset_in_page(sg_addr); + else + sg_addr = virt_to_abs(sg_addr); + + end_addr = sg_addr + len; + + /* each iteration will write one struct nx_sg element and add the + * length of data described by that element to sg_len. Once @len bytes + * have been described (or @sgmax elements have been written), the + * loop ends. 
min_t is used to ensure @end_addr falls on the same page + * as sg_addr, if not, we need to create another nx_sg element for the + * data on the next page */ + for (sg = sg_head; sg_len < len; sg++) { + sg->addr = sg_addr; + sg_addr = min_t(u64, NX_PAGE_NUM(sg_addr + NX_PAGE_SIZE), end_addr); + sg->len = sg_addr - sg->addr; + sg_len += sg->len; + + if ((sg - sg_head) == sgmax) { + pr_err("nx: scatter/gather list overflow, pid: %d\n", + current->pid); + return NULL; + } + } + + /* return the moved sg_head pointer */ + return sg; +} + +/** + * nx_walk_and_build - walk a linux scatterlist and build an nx scatterlist + * + * @nx_dst: pointer to the first nx_sg element to write + * @sglen: max number of nx_sg entries we're allowed to write + * @sg_src: pointer to the source linux scatterlist to walk + * @start: number of bytes to fast-forward past at the beginning of @sg_src + * @src_len: number of bytes to walk in @sg_src + */ +struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst, + unsigned int sglen, + struct scatterlist *sg_src, + unsigned int start, + unsigned int src_len) +{ + struct scatter_walk walk; + struct nx_sg *nx_sg = nx_dst; + unsigned int n, offset = 0, len = src_len; + char *dst; + + /* we need to fast forward through @start bytes first */ + for (;;) { + scatterwalk_start(&walk, sg_src); + + if (start < offset + sg_src->length) + break; + + offset += sg_src->length; + sg_src = scatterwalk_sg_next(sg_src); + } + + /* start - offset is the number of bytes to advance in the scatterlist + * element we're currently looking at */ + scatterwalk_advance(&walk, start - offset); + + while (len && nx_sg) { + n = scatterwalk_clamp(&walk, len); + if (!n) { + scatterwalk_start(&walk, sg_next(walk.sg)); + n = scatterwalk_clamp(&walk, len); + } + dst = scatterwalk_map(&walk); + + nx_sg = nx_build_sg_list(nx_sg, dst, n, sglen); + len -= n; + + scatterwalk_unmap(dst); + scatterwalk_advance(&walk, n); + scatterwalk_done(&walk, SCATTERWALK_FROM_SG, len); + } + + /* return the moved destination pointer */ + return nx_sg; +} + +/** + * nx_build_sg_lists - walk the input scatterlists and build arrays of NX + * scatterlists based on them. + * + * @nx_ctx: NX crypto context for the lists we're building + * @desc: the block cipher descriptor for the operation + * @dst: destination scatterlist + * @src: source scatterlist + * @nbytes: length of data described in the scatterlists + * @iv: destination for the iv data, if the algorithm requires it + * + * This is common code shared by all the AES algorithms. 
It uses the block + * cipher walk routines to traverse input and output scatterlists, building + * corresponding NX scatterlists + */ +int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx, + struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes, + u8 *iv) +{ + struct nx_sg *nx_insg = nx_ctx->in_sg; + struct nx_sg *nx_outsg = nx_ctx->out_sg; + struct blkcipher_walk walk; + int rc; + + blkcipher_walk_init(&walk, dst, src, nbytes); + rc = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); + if (rc) + goto out; + + if (iv) + memcpy(iv, walk.iv, AES_BLOCK_SIZE); + + while (walk.nbytes) { + nx_insg = nx_build_sg_list(nx_insg, walk.src.virt.addr, + walk.nbytes, nx_ctx->ap->sglen); + nx_outsg = nx_build_sg_list(nx_outsg, walk.dst.virt.addr, + walk.nbytes, nx_ctx->ap->sglen); + + rc = blkcipher_walk_done(desc, &walk, 0); + if (rc) + break; + } + + if (walk.nbytes) { + nx_insg = nx_build_sg_list(nx_insg, walk.src.virt.addr, + walk.nbytes, nx_ctx->ap->sglen); + nx_outsg = nx_build_sg_list(nx_outsg, walk.dst.virt.addr, + walk.nbytes, nx_ctx->ap->sglen); + + rc = 0; + } + + /* these lengths should be negative, which will indicate to phyp that + * the input and output parameters are scatterlists, not linear + * buffers */ + nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) * sizeof(struct nx_sg); + nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) * sizeof(struct nx_sg); +out: + return rc; +} + +/** + * nx_ctx_init - initialize an nx_ctx's vio_pfo_op struct + * + * @nx_ctx: the nx context to initialize + * @function: the function code for the op + */ +void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function) +{ + memset(nx_ctx->kmem, 0, nx_ctx->kmem_len); + nx_ctx->csbcpb->csb.valid |= NX_CSB_VALID_BIT; + + nx_ctx->op.flags = function; + nx_ctx->op.csbcpb = virt_to_abs(nx_ctx->csbcpb); + nx_ctx->op.in = virt_to_abs(nx_ctx->in_sg); + nx_ctx->op.out = virt_to_abs(nx_ctx->out_sg); + + if (nx_ctx->csbcpb_aead) { + nx_ctx->csbcpb_aead->csb.valid |= NX_CSB_VALID_BIT; + + nx_ctx->op_aead.flags = function; + nx_ctx->op_aead.csbcpb = virt_to_abs(nx_ctx->csbcpb_aead); + nx_ctx->op_aead.in = virt_to_abs(nx_ctx->in_sg); + nx_ctx->op_aead.out = virt_to_abs(nx_ctx->out_sg); + } +} + +static void nx_of_update_status(struct device *dev, + struct property *p, + struct nx_of *props) +{ + if (!strncmp(p->value, "okay", p->length)) { + props->status = NX_WAITING; + props->flags |= NX_OF_FLAG_STATUS_SET; + } else { + dev_info(dev, "%s: status '%s' is not 'okay'\n", __func__, + (char *)p->value); + } +} + +static void nx_of_update_sglen(struct device *dev, + struct property *p, + struct nx_of *props) +{ + if (p->length != sizeof(props->max_sg_len)) { + dev_err(dev, "%s: unexpected format for " + "ibm,max-sg-len property\n", __func__); + dev_dbg(dev, "%s: ibm,max-sg-len is %d bytes " + "long, expected %zd bytes\n", __func__, + p->length, sizeof(props->max_sg_len)); + return; + } + + props->max_sg_len = *(u32 *)p->value; + props->flags |= NX_OF_FLAG_MAXSGLEN_SET; +} + +static void nx_of_update_msc(struct device *dev, + struct property *p, + struct nx_of *props) +{ + struct msc_triplet *trip; + struct max_sync_cop *msc; + unsigned int bytes_so_far, i, lenp; + + msc = (struct max_sync_cop *)p->value; + lenp = p->length; + + /* You can't tell if the data read in for this property is sane by its + * size alone. This is because there are sizes embedded in the data + * structure. 
The best we can do is check lengths as we parse and bail + * as soon as a length error is detected. */ + bytes_so_far = 0; + + while ((bytes_so_far + sizeof(struct max_sync_cop)) <= lenp) { + bytes_so_far += sizeof(struct max_sync_cop); + + trip = msc->trip; + + for (i = 0; + ((bytes_so_far + sizeof(struct msc_triplet)) <= lenp) && + i < msc->triplets; + i++) { + if (msc->fc > NX_MAX_FC || msc->mode > NX_MAX_MODE) { + dev_err(dev, "unknown function code/mode " + "combo: %d/%d (ignored)\n", msc->fc, + msc->mode); + goto next_loop; + } + + switch (trip->keybitlen) { + case 128: + case 160: + props->ap[msc->fc][msc->mode][0].databytelen = + trip->databytelen; + props->ap[msc->fc][msc->mode][0].sglen = + trip->sglen; + break; + case 192: + props->ap[msc->fc][msc->mode][1].databytelen = + trip->databytelen; + props->ap[msc->fc][msc->mode][1].sglen = + trip->sglen; + break; + case 256: + if (msc->fc == NX_FC_AES) { + props->ap[msc->fc][msc->mode][2]. + databytelen = trip->databytelen; + props->ap[msc->fc][msc->mode][2].sglen = + trip->sglen; + } else if (msc->fc == NX_FC_AES_HMAC || + msc->fc == NX_FC_SHA) { + props->ap[msc->fc][msc->mode][1]. + databytelen = trip->databytelen; + props->ap[msc->fc][msc->mode][1].sglen = + trip->sglen; + } else { + dev_warn(dev, "unknown function " + "code/key bit len combo" + ": (%u/256)\n", msc->fc); + } + break; + case 512: + props->ap[msc->fc][msc->mode][2].databytelen = + trip->databytelen; + props->ap[msc->fc][msc->mode][2].sglen = + trip->sglen; + break; + default: + dev_warn(dev, "unknown function code/key bit " + "len combo: (%u/%u)\n", msc->fc, + trip->keybitlen); + break; + } +next_loop: + bytes_so_far += sizeof(struct msc_triplet); + trip++; + } + + msc = (struct max_sync_cop *)trip; + } + + props->flags |= NX_OF_FLAG_MAXSYNCCOP_SET; +} + +/** + * nx_of_init - read openFirmware values from the device tree + * + * @dev: device handle + * @props: pointer to struct to hold the properties values + * + * Called once at driver probe time, this function will read out the + * openFirmware properties we use at runtime. If all the OF properties are + * acceptable, when we exit this function props->flags will indicate that + * we're ready to register our crypto algorithms. + */ +static void nx_of_init(struct device *dev, struct nx_of *props) +{ + struct device_node *base_node = dev->of_node; + struct property *p; + + p = of_find_property(base_node, "status", NULL); + if (!p) + dev_info(dev, "%s: property 'status' not found\n", __func__); + else + nx_of_update_status(dev, p, props); + + p = of_find_property(base_node, "ibm,max-sg-len", NULL); + if (!p) + dev_info(dev, "%s: property 'ibm,max-sg-len' not found\n", + __func__); + else + nx_of_update_sglen(dev, p, props); + + p = of_find_property(base_node, "ibm,max-sync-cop", NULL); + if (!p) + dev_info(dev, "%s: property 'ibm,max-sync-cop' not found\n", + __func__); + else + nx_of_update_msc(dev, p, props); +} + +/** + * nx_register_algs - register algorithms with the crypto API + * + * Called from nx_probe() + * + * If all OF properties are in an acceptable state, the driver flags will + * indicate that we're ready and we'll create our debugfs files and register + * out crypto algorithms. 
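+ *
+ * If any single registration fails, everything registered so far is
+ * unregistered again (the out_unreg_* labels below unwind in reverse
+ * order), so a partial failure leaves no NX algorithms registered.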
+ */ +static int nx_register_algs(void) +{ + int rc = -1; + + if (nx_driver.of.flags != NX_OF_FLAG_MASK_READY) + goto out; + + memset(&nx_driver.stats, 0, sizeof(struct nx_stats)); + + rc = NX_DEBUGFS_INIT(&nx_driver); + if (rc) + goto out; + + rc = crypto_register_alg(&nx_ecb_aes_alg); + if (rc) + goto out; + + rc = crypto_register_alg(&nx_cbc_aes_alg); + if (rc) + goto out_unreg_ecb; + + rc = crypto_register_alg(&nx_ctr_aes_alg); + if (rc) + goto out_unreg_cbc; + + rc = crypto_register_alg(&nx_ctr3686_aes_alg); + if (rc) + goto out_unreg_ctr; + + rc = crypto_register_alg(&nx_gcm_aes_alg); + if (rc) + goto out_unreg_ctr3686; + + rc = crypto_register_alg(&nx_gcm4106_aes_alg); + if (rc) + goto out_unreg_gcm; + + rc = crypto_register_alg(&nx_ccm_aes_alg); + if (rc) + goto out_unreg_gcm4106; + + rc = crypto_register_alg(&nx_ccm4309_aes_alg); + if (rc) + goto out_unreg_ccm; + + rc = crypto_register_shash(&nx_shash_sha256_alg); + if (rc) + goto out_unreg_ccm4309; + + rc = crypto_register_shash(&nx_shash_sha512_alg); + if (rc) + goto out_unreg_s256; + + rc = crypto_register_shash(&nx_shash_aes_xcbc_alg); + if (rc) + goto out_unreg_s512; + + nx_driver.of.status = NX_OKAY; + + goto out; + +out_unreg_s512: + crypto_unregister_shash(&nx_shash_sha512_alg); +out_unreg_s256: + crypto_unregister_shash(&nx_shash_sha256_alg); +out_unreg_ccm4309: + crypto_unregister_alg(&nx_ccm4309_aes_alg); +out_unreg_ccm: + crypto_unregister_alg(&nx_ccm_aes_alg); +out_unreg_gcm4106: + crypto_unregister_alg(&nx_gcm4106_aes_alg); +out_unreg_gcm: + crypto_unregister_alg(&nx_gcm_aes_alg); +out_unreg_ctr3686: + crypto_unregister_alg(&nx_ctr3686_aes_alg); +out_unreg_ctr: + crypto_unregister_alg(&nx_ctr_aes_alg); +out_unreg_cbc: + crypto_unregister_alg(&nx_cbc_aes_alg); +out_unreg_ecb: + crypto_unregister_alg(&nx_ecb_aes_alg); +out: + return rc; +} + +/** + * nx_crypto_ctx_init - create and initialize a crypto api context + * + * @nx_ctx: the crypto api context + * @fc: function code for the context + * @mode: the function code specific mode for this context + */ +static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode) +{ + if (nx_driver.of.status != NX_OKAY) { + pr_err("Attempt to initialize NX crypto context while device " + "is not available!\n"); + return -ENODEV; + } + + /* we need an extra page for csbcpb_aead for these modes */ + if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM) + nx_ctx->kmem_len = (4 * NX_PAGE_SIZE) + + sizeof(struct nx_csbcpb); + else + nx_ctx->kmem_len = (3 * NX_PAGE_SIZE) + + sizeof(struct nx_csbcpb); + + nx_ctx->kmem = kmalloc(nx_ctx->kmem_len, GFP_KERNEL); + if (!nx_ctx->kmem) + return -ENOMEM; + + /* the csbcpb and scatterlists must be 4K aligned pages */ + nx_ctx->csbcpb = (struct nx_csbcpb *)(round_up((u64)nx_ctx->kmem, + (u64)NX_PAGE_SIZE)); + nx_ctx->in_sg = (struct nx_sg *)((u8 *)nx_ctx->csbcpb + NX_PAGE_SIZE); + nx_ctx->out_sg = (struct nx_sg *)((u8 *)nx_ctx->in_sg + NX_PAGE_SIZE); + + if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM) + nx_ctx->csbcpb_aead = + (struct nx_csbcpb *)((u8 *)nx_ctx->out_sg + + NX_PAGE_SIZE); + + /* give each context a pointer to global stats and their OF + * properties */ + nx_ctx->stats = &nx_driver.stats; + memcpy(nx_ctx->props, nx_driver.of.ap[fc][mode], + sizeof(struct alg_props) * 3); + + return 0; +} + +/* entry points from the crypto tfm initializers */ +int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm) +{ + return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES, + NX_MODE_AES_CCM); +} + +int 
nx_crypto_ctx_aes_gcm_init(struct crypto_tfm *tfm) +{ + return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES, + NX_MODE_AES_GCM); +} + +int nx_crypto_ctx_aes_ctr_init(struct crypto_tfm *tfm) +{ + return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES, + NX_MODE_AES_CTR); +} + +int nx_crypto_ctx_aes_cbc_init(struct crypto_tfm *tfm) +{ + return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES, + NX_MODE_AES_CBC); +} + +int nx_crypto_ctx_aes_ecb_init(struct crypto_tfm *tfm) +{ + return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES, + NX_MODE_AES_ECB); +} + +int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm) +{ + return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_SHA, NX_MODE_SHA); +} + +int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm) +{ + return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES, + NX_MODE_AES_XCBC_MAC); +} + +/** + * nx_crypto_ctx_exit - destroy a crypto api context + * + * @tfm: the crypto transform pointer for the context + * + * As crypto API contexts are destroyed, this exit hook is called to free the + * memory associated with it. + */ +void nx_crypto_ctx_exit(struct crypto_tfm *tfm) +{ + struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm); + + kzfree(nx_ctx->kmem); + nx_ctx->csbcpb = NULL; + nx_ctx->csbcpb_aead = NULL; + nx_ctx->in_sg = NULL; + nx_ctx->out_sg = NULL; +} + +static int __devinit nx_probe(struct vio_dev *viodev, + const struct vio_device_id *id) +{ + dev_dbg(&viodev->dev, "driver probed: %s resource id: 0x%x\n", + viodev->name, viodev->resource_id); + + if (nx_driver.viodev) { + dev_err(&viodev->dev, "%s: Attempt to register more than one " + "instance of the hardware\n", __func__); + return -EINVAL; + } + + nx_driver.viodev = viodev; + + nx_of_init(&viodev->dev, &nx_driver.of); + + return nx_register_algs(); +} + +static int __devexit nx_remove(struct vio_dev *viodev) +{ + dev_dbg(&viodev->dev, "entering nx_remove for UA 0x%x\n", + viodev->unit_address); + + if (nx_driver.of.status == NX_OKAY) { + NX_DEBUGFS_FINI(&nx_driver); + + crypto_unregister_alg(&nx_ccm_aes_alg); + crypto_unregister_alg(&nx_ccm4309_aes_alg); + crypto_unregister_alg(&nx_gcm_aes_alg); + crypto_unregister_alg(&nx_gcm4106_aes_alg); + crypto_unregister_alg(&nx_ctr_aes_alg); + crypto_unregister_alg(&nx_ctr3686_aes_alg); + crypto_unregister_alg(&nx_cbc_aes_alg); + crypto_unregister_alg(&nx_ecb_aes_alg); + crypto_unregister_shash(&nx_shash_sha256_alg); + crypto_unregister_shash(&nx_shash_sha512_alg); + crypto_unregister_shash(&nx_shash_aes_xcbc_alg); + } + + return 0; +} + + +/* module wide initialization/cleanup */ +static int __init nx_init(void) +{ + return vio_register_driver(&nx_driver.viodriver); +} + +static void __exit nx_fini(void) +{ + vio_unregister_driver(&nx_driver.viodriver); +} + +static struct vio_device_id nx_crypto_driver_ids[] __devinitdata = { + { "ibm,sym-encryption-v1", "ibm,sym-encryption" }, + { "", "" } +}; +MODULE_DEVICE_TABLE(vio, nx_crypto_driver_ids); + +/* driver state structure */ +struct nx_crypto_driver nx_driver = { + .viodriver = { + .id_table = nx_crypto_driver_ids, + .probe = nx_probe, + .remove = nx_remove, + .name = NX_NAME, + }, +}; + +module_init(nx_init); +module_exit(nx_fini); + +MODULE_AUTHOR("Kent Yoder <yoder1@us.ibm.com>"); +MODULE_DESCRIPTION(NX_STRING); +MODULE_LICENSE("GPL"); +MODULE_VERSION(NX_VERSION); diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h new file mode 100644 index 000000000000..3232b182dd28 --- /dev/null +++ b/drivers/crypto/nx/nx.h @@ -0,0 +1,193 @@ + +#ifndef __NX_H__ +#define __NX_H__ + 
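+/* Shared definitions for the NX driver: the scatter/gather element format
+ * handed to PHYP, openFirmware property parsing state, the per-transform
+ * context layout and the statistics exported through debugfs. */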
+#define NX_NAME "nx-crypto" +#define NX_STRING "IBM Power7+ Nest Accelerator Crypto Driver" +#define NX_VERSION "1.0" + +static const char nx_driver_string[] = NX_STRING; +static const char nx_driver_version[] = NX_VERSION; + +/* a scatterlist in the format PHYP is expecting */ +struct nx_sg { + u64 addr; + u32 rsvd; + u32 len; +} __attribute((packed)); + +#define NX_PAGE_SIZE (4096) +#define NX_MAX_SG_ENTRIES (NX_PAGE_SIZE/(sizeof(struct nx_sg))) + +enum nx_status { + NX_DISABLED, + NX_WAITING, + NX_OKAY +}; + +/* msc_triplet and max_sync_cop are used only to assist in parsing the + * openFirmware property */ +struct msc_triplet { + u32 keybitlen; + u32 databytelen; + u32 sglen; +} __packed; + +struct max_sync_cop { + u32 fc; + u32 mode; + u32 triplets; + struct msc_triplet trip[0]; +} __packed; + +struct alg_props { + u32 databytelen; + u32 sglen; +}; + +#define NX_OF_FLAG_MAXSGLEN_SET (1) +#define NX_OF_FLAG_STATUS_SET (2) +#define NX_OF_FLAG_MAXSYNCCOP_SET (4) +#define NX_OF_FLAG_MASK_READY (NX_OF_FLAG_MAXSGLEN_SET | \ + NX_OF_FLAG_STATUS_SET | \ + NX_OF_FLAG_MAXSYNCCOP_SET) +struct nx_of { + u32 flags; + u32 max_sg_len; + enum nx_status status; + struct alg_props ap[NX_MAX_FC][NX_MAX_MODE][3]; +}; + +struct nx_stats { + atomic_t aes_ops; + atomic64_t aes_bytes; + atomic_t sha256_ops; + atomic64_t sha256_bytes; + atomic_t sha512_ops; + atomic64_t sha512_bytes; + + atomic_t sync_ops; + + atomic_t errors; + atomic_t last_error; + atomic_t last_error_pid; +}; + +struct nx_debugfs { + struct dentry *dfs_root; + struct dentry *dfs_aes_ops, *dfs_aes_bytes; + struct dentry *dfs_sha256_ops, *dfs_sha256_bytes; + struct dentry *dfs_sha512_ops, *dfs_sha512_bytes; + struct dentry *dfs_errors, *dfs_last_error, *dfs_last_error_pid; +}; + +struct nx_crypto_driver { + struct nx_stats stats; + struct nx_of of; + struct vio_dev *viodev; + struct vio_driver viodriver; + struct nx_debugfs dfs; +}; + +#define NX_GCM4106_NONCE_LEN (4) +#define NX_GCM_CTR_OFFSET (12) +struct nx_gcm_priv { + u8 iv[16]; + u8 iauth_tag[16]; + u8 nonce[NX_GCM4106_NONCE_LEN]; +}; + +#define NX_CCM_AES_KEY_LEN (16) +#define NX_CCM4309_AES_KEY_LEN (19) +#define NX_CCM4309_NONCE_LEN (3) +struct nx_ccm_priv { + u8 iv[16]; + u8 b0[16]; + u8 iauth_tag[16]; + u8 oauth_tag[16]; + u8 nonce[NX_CCM4309_NONCE_LEN]; +}; + +struct nx_xcbc_priv { + u8 key[16]; +}; + +struct nx_ctr_priv { + u8 iv[16]; +}; + +struct nx_crypto_ctx { + void *kmem; /* unaligned, kmalloc'd buffer */ + size_t kmem_len; /* length of kmem */ + struct nx_csbcpb *csbcpb; /* aligned page given to phyp @ hcall time */ + struct vio_pfo_op op; /* operation struct with hcall parameters */ + struct nx_csbcpb *csbcpb_aead; /* secondary csbcpb used by AEAD algs */ + struct vio_pfo_op op_aead;/* operation struct for csbcpb_aead */ + + struct nx_sg *in_sg; /* aligned pointer into kmem to an sg list */ + struct nx_sg *out_sg; /* aligned pointer into kmem to an sg list */ + + struct alg_props *ap; /* pointer into props based on our key size */ + struct alg_props props[3];/* openFirmware properties for requests */ + struct nx_stats *stats; /* pointer into an nx_crypto_driver for stats + reporting */ + + union { + struct nx_gcm_priv gcm; + struct nx_ccm_priv ccm; + struct nx_xcbc_priv xcbc; + struct nx_ctr_priv ctr; + } priv; +}; + +/* prototypes */ +int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm); +int nx_crypto_ctx_aes_gcm_init(struct crypto_tfm *tfm); +int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm); +int nx_crypto_ctx_aes_ctr_init(struct crypto_tfm *tfm); +int 
nx_crypto_ctx_aes_cbc_init(struct crypto_tfm *tfm); +int nx_crypto_ctx_aes_ecb_init(struct crypto_tfm *tfm); +int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm); +void nx_crypto_ctx_exit(struct crypto_tfm *tfm); +void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function); +int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op, + u32 may_sleep); +struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int, u32); +int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *, + struct scatterlist *, struct scatterlist *, unsigned int, + u8 *); +struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int, + struct scatterlist *, unsigned int, + unsigned int); + +#ifdef CONFIG_DEBUG_FS +#define NX_DEBUGFS_INIT(drv) nx_debugfs_init(drv) +#define NX_DEBUGFS_FINI(drv) nx_debugfs_fini(drv) + +int nx_debugfs_init(struct nx_crypto_driver *); +void nx_debugfs_fini(struct nx_crypto_driver *); +#else +#define NX_DEBUGFS_INIT(drv) (0) +#define NX_DEBUGFS_FINI(drv) (0) +#endif + +#define NX_PAGE_NUM(x) ((u64)(x) & 0xfffffffffffff000ULL) + +extern struct crypto_alg nx_cbc_aes_alg; +extern struct crypto_alg nx_ecb_aes_alg; +extern struct crypto_alg nx_gcm_aes_alg; +extern struct crypto_alg nx_gcm4106_aes_alg; +extern struct crypto_alg nx_ctr_aes_alg; +extern struct crypto_alg nx_ctr3686_aes_alg; +extern struct crypto_alg nx_ccm_aes_alg; +extern struct crypto_alg nx_ccm4309_aes_alg; +extern struct shash_alg nx_shash_aes_xcbc_alg; +extern struct shash_alg nx_shash_sha512_alg; +extern struct shash_alg nx_shash_sha256_alg; + +extern struct nx_crypto_driver nx_driver; + +#define SCATTERWALK_TO_SG 1 +#define SCATTERWALK_FROM_SG 0 + +#endif diff --git a/drivers/crypto/nx/nx_csbcpb.h b/drivers/crypto/nx/nx_csbcpb.h new file mode 100644 index 000000000000..a304f956d6f8 --- /dev/null +++ b/drivers/crypto/nx/nx_csbcpb.h @@ -0,0 +1,205 @@ + +#ifndef __NX_CSBCPB_H__ +#define __NX_CSBCPB_H__ + +struct cop_symcpb_aes_ecb { + u8 key[32]; + u8 __rsvd[80]; +} __packed; + +struct cop_symcpb_aes_cbc { + u8 iv[16]; + u8 key[32]; + u8 cv[16]; + u32 spbc; + u8 __rsvd[44]; +} __packed; + +struct cop_symcpb_aes_gca { + u8 in_pat[16]; + u8 key[32]; + u8 out_pat[16]; + u32 spbc; + u8 __rsvd[44]; +} __packed; + +struct cop_symcpb_aes_gcm { + u8 in_pat_or_aad[16]; + u8 iv_or_cnt[16]; + u64 bit_length_aad; + u64 bit_length_data; + u8 in_s0[16]; + u8 key[32]; + u8 __rsvd1[16]; + u8 out_pat_or_mac[16]; + u8 out_s0[16]; + u8 out_cnt[16]; + u32 spbc; + u8 __rsvd2[12]; +} __packed; + +struct cop_symcpb_aes_ctr { + u8 iv[16]; + u8 key[32]; + u8 cv[16]; + u32 spbc; + u8 __rsvd2[44]; +} __packed; + +struct cop_symcpb_aes_cca { + u8 b0[16]; + u8 b1[16]; + u8 key[16]; + u8 out_pat_or_b0[16]; + u32 spbc; + u8 __rsvd[44]; +} __packed; + +struct cop_symcpb_aes_ccm { + u8 in_pat_or_b0[16]; + u8 iv_or_ctr[16]; + u8 in_s0[16]; + u8 key[16]; + u8 __rsvd1[48]; + u8 out_pat_or_mac[16]; + u8 out_s0[16]; + u8 out_ctr[16]; + u32 spbc; + u8 __rsvd2[12]; +} __packed; + +struct cop_symcpb_aes_xcbc { + u8 cv[16]; + u8 key[16]; + u8 __rsvd1[16]; + u8 out_cv_mac[16]; + u32 spbc; + u8 __rsvd2[44]; +} __packed; + +struct cop_symcpb_sha256 { + u64 message_bit_length; + u64 __rsvd1; + u8 input_partial_digest[32]; + u8 message_digest[32]; + u32 spbc; + u8 __rsvd2[44]; +} __packed; + +struct cop_symcpb_sha512 { + u64 message_bit_length_hi; + u64 message_bit_length_lo; + u8 input_partial_digest[64]; + u8 __rsvd1[32]; + u8 message_digest[64]; + u32 spbc; + u8 __rsvd2[76]; +} __packed; + +#define NX_FDM_INTERMEDIATE 0x01 +#define 
NX_FDM_CONTINUATION 0x02 +#define NX_FDM_ENDE_ENCRYPT 0x80 + +#define NX_CPB_FDM(c) ((c)->cpb.hdr.fdm) +#define NX_CPB_KS_DS(c) ((c)->cpb.hdr.ks_ds) + +#define NX_CPB_KEY_SIZE(c) (NX_CPB_KS_DS(c) >> 4) +#define NX_CPB_SET_KEY_SIZE(c, x) NX_CPB_KS_DS(c) |= ((x) << 4) +#define NX_CPB_SET_DIGEST_SIZE(c, x) NX_CPB_KS_DS(c) |= (x) + +struct cop_symcpb_header { + u8 mode; + u8 fdm; + u8 ks_ds; + u8 pad_byte; + u8 __rsvd[12]; +} __packed; + +struct cop_parameter_block { + struct cop_symcpb_header hdr; + union { + struct cop_symcpb_aes_ecb aes_ecb; + struct cop_symcpb_aes_cbc aes_cbc; + struct cop_symcpb_aes_gca aes_gca; + struct cop_symcpb_aes_gcm aes_gcm; + struct cop_symcpb_aes_cca aes_cca; + struct cop_symcpb_aes_ccm aes_ccm; + struct cop_symcpb_aes_ctr aes_ctr; + struct cop_symcpb_aes_xcbc aes_xcbc; + struct cop_symcpb_sha256 sha256; + struct cop_symcpb_sha512 sha512; + }; +} __packed; + +#define NX_CSB_VALID_BIT 0x80 + +/* co-processor status block */ +struct cop_status_block { + u8 valid; + u8 crb_seq_number; + u8 completion_code; + u8 completion_extension; + u32 processed_byte_count; + u64 address; +} __packed; + +/* Nest accelerator workbook section 4.4 */ +struct nx_csbcpb { + unsigned char __rsvd[112]; + struct cop_status_block csb; + struct cop_parameter_block cpb; +} __packed; + +/* nx_csbcpb related definitions */ +#define NX_MODE_AES_ECB 0 +#define NX_MODE_AES_CBC 1 +#define NX_MODE_AES_GMAC 2 +#define NX_MODE_AES_GCA 3 +#define NX_MODE_AES_GCM 4 +#define NX_MODE_AES_CCA 5 +#define NX_MODE_AES_CCM 6 +#define NX_MODE_AES_CTR 7 +#define NX_MODE_AES_XCBC_MAC 20 +#define NX_MODE_SHA 0 +#define NX_MODE_SHA_HMAC 1 +#define NX_MODE_AES_CBC_HMAC_ETA 8 +#define NX_MODE_AES_CBC_HMAC_ATE 9 +#define NX_MODE_AES_CBC_HMAC_EAA 10 +#define NX_MODE_AES_CTR_HMAC_ETA 12 +#define NX_MODE_AES_CTR_HMAC_ATE 13 +#define NX_MODE_AES_CTR_HMAC_EAA 14 + +#define NX_FDM_CI_FULL 0 +#define NX_FDM_CI_FIRST 1 +#define NX_FDM_CI_LAST 2 +#define NX_FDM_CI_MIDDLE 3 + +#define NX_FDM_PR_NONE 0 +#define NX_FDM_PR_PAD 1 + +#define NX_KS_AES_128 1 +#define NX_KS_AES_192 2 +#define NX_KS_AES_256 3 + +#define NX_DS_SHA256 2 +#define NX_DS_SHA512 3 + +#define NX_FC_AES 0 +#define NX_FC_SHA 2 +#define NX_FC_AES_HMAC 6 + +#define NX_MAX_FC (NX_FC_AES_HMAC + 1) +#define NX_MAX_MODE (NX_MODE_AES_XCBC_MAC + 1) + +#define HCOP_FC_AES NX_FC_AES +#define HCOP_FC_SHA NX_FC_SHA +#define HCOP_FC_AES_HMAC NX_FC_AES_HMAC + +/* indices into the array of algorithm properties */ +#define NX_PROPS_AES_128 0 +#define NX_PROPS_AES_192 1 +#define NX_PROPS_AES_256 2 +#define NX_PROPS_SHA256 1 +#define NX_PROPS_SHA512 2 + +#endif diff --git a/drivers/crypto/nx/nx_debugfs.c b/drivers/crypto/nx/nx_debugfs.c new file mode 100644 index 000000000000..7ab2e8dcd9b4 --- /dev/null +++ b/drivers/crypto/nx/nx_debugfs.c @@ -0,0 +1,103 @@ +/** + * debugfs routines supporting the Power 7+ Nest Accelerators driver + * + * Copyright (C) 2011-2012 International Business Machines Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 only. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Author: Kent Yoder <yoder1@us.ibm.com> + */ + +#include <linux/device.h> +#include <linux/kobject.h> +#include <linux/string.h> +#include <linux/debugfs.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/crypto.h> +#include <crypto/hash.h> +#include <asm/vio.h> + +#include "nx_csbcpb.h" +#include "nx.h" + +#ifdef CONFIG_DEBUG_FS + +/* + * debugfs + * + * For documentation on these attributes, please see: + * + * Documentation/ABI/testing/debugfs-pfo-nx-crypto + */ + +int nx_debugfs_init(struct nx_crypto_driver *drv) +{ + struct nx_debugfs *dfs = &drv->dfs; + + dfs->dfs_root = debugfs_create_dir(NX_NAME, NULL); + + dfs->dfs_aes_ops = + debugfs_create_u32("aes_ops", + S_IRUSR | S_IRGRP | S_IROTH, + dfs->dfs_root, (u32 *)&drv->stats.aes_ops); + dfs->dfs_sha256_ops = + debugfs_create_u32("sha256_ops", + S_IRUSR | S_IRGRP | S_IROTH, + dfs->dfs_root, + (u32 *)&drv->stats.sha256_ops); + dfs->dfs_sha512_ops = + debugfs_create_u32("sha512_ops", + S_IRUSR | S_IRGRP | S_IROTH, + dfs->dfs_root, + (u32 *)&drv->stats.sha512_ops); + dfs->dfs_aes_bytes = + debugfs_create_u64("aes_bytes", + S_IRUSR | S_IRGRP | S_IROTH, + dfs->dfs_root, + (u64 *)&drv->stats.aes_bytes); + dfs->dfs_sha256_bytes = + debugfs_create_u64("sha256_bytes", + S_IRUSR | S_IRGRP | S_IROTH, + dfs->dfs_root, + (u64 *)&drv->stats.sha256_bytes); + dfs->dfs_sha512_bytes = + debugfs_create_u64("sha512_bytes", + S_IRUSR | S_IRGRP | S_IROTH, + dfs->dfs_root, + (u64 *)&drv->stats.sha512_bytes); + dfs->dfs_errors = + debugfs_create_u32("errors", + S_IRUSR | S_IRGRP | S_IROTH, + dfs->dfs_root, (u32 *)&drv->stats.errors); + dfs->dfs_last_error = + debugfs_create_u32("last_error", + S_IRUSR | S_IRGRP | S_IROTH, + dfs->dfs_root, + (u32 *)&drv->stats.last_error); + dfs->dfs_last_error_pid = + debugfs_create_u32("last_error_pid", + S_IRUSR | S_IRGRP | S_IROTH, + dfs->dfs_root, + (u32 *)&drv->stats.last_error_pid); + return 0; +} + +void +nx_debugfs_fini(struct nx_crypto_driver *drv) +{ + debugfs_remove_recursive(drv->dfs.dfs_root); +} + +#endif |
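As a rough usage sketch rather than part of the patch: kernel code never calls the nx_* routines above directly; it asks the crypto API for a generic algorithm name such as "sha512", and because "sha512-nx" registers with cra_priority 300 it will normally be chosen over the generic software implementation once this driver is loaded. Assuming only the standard shash interface from <crypto/hash.h>, a caller could compute a digest roughly like this (the function name below is illustrative, not something the driver provides):

	#include <crypto/hash.h>
	#include <linux/err.h>
	#include <linux/slab.h>

	/* Illustrative only: hash @len bytes at @data into @digest, which must
	 * be at least SHA512_DIGEST_SIZE bytes. With this driver loaded, the
	 * crypto core will normally pick the higher-priority "sha512-nx"
	 * implementation registered above. */
	static int example_sha512_digest(const u8 *data, unsigned int len,
					 u8 *digest)
	{
		struct crypto_shash *tfm;
		struct shash_desc *desc;
		int rc;

		tfm = crypto_alloc_shash("sha512", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
			       GFP_KERNEL);
		if (!desc) {
			crypto_free_shash(tfm);
			return -ENOMEM;
		}

		desc->tfm = tfm;
		desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; /* lets nx_hcall_sync() resched */

		rc = crypto_shash_digest(desc, data, len, digest);

		kfree(desc);
		crypto_free_shash(tfm);
		return rc;
	}

The counters incremented throughout the driver (aes_ops, sha512_ops, sha512_bytes, errors, last_error and so on) are exported read-only through the "nx-crypto" debugfs directory created by nx_debugfs_init() above; the individual attributes are documented in Documentation/ABI/testing/debugfs-pfo-nx-crypto.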