| author | Linus Torvalds | 2020-10-16 12:36:38 -0700 |
|---|---|---|
| committer | Linus Torvalds | 2020-10-16 12:36:38 -0700 |
| commit | 847d4287a0c6709fd1ce24002b96d404a6da8b5b (patch) | |
| tree | d6725cb3381730e2ae2864bc5b1f417d5cb94096 /drivers/s390 | |
| parent | 96685f8666714233d34abb71b242448c80077536 (diff) | |
| parent | 10e5afb3d260f2d2521889d87ebdefb7fc3d4087 (diff) | |
Merge tag 's390-5.10-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Vasily Gorbik:
- Remove address space overrides using set_fs()
- Convert to generic vDSO
- Convert to generic page table dumper
- Add ARCH_HAS_DEBUG_WX support
- Add leap seconds handling support
- Add NVMe firmware-assisted kernel dump support
- Extend NVMe boot support with memory clearing control and addition of
kernel parameters
- AP bus and zcrypt api code rework. Add adapter configure/deconfigure
interface. Extend debug features. Add failure injection support
- Add ECC secure private keys support
- Add KASan support for running protected virtualization host with
4-level paging
- Utilize destroy page ultravisor call to speed up secure guests
shutdown
- Implement ioremap_wc() and ioremap_prot() with MIO in PCI code
- Various checksum improvements
- Various other small fixes and improvements all over the code
* tag 's390-5.10-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (85 commits)
s390/uaccess: fix indentation
s390/uaccess: add default cases for __put_user_fn()/__get_user_fn()
s390/zcrypt: fix wrong format specifications
s390/kprobes: move insn_page to text segment
s390/sie: fix typo in SIGP code description
s390/lib: fix kernel doc for memcmp()
s390/zcrypt: Introduce Failure Injection feature
s390/zcrypt: move ap_msg param one level up the call chain
s390/ap/zcrypt: revisit ap and zcrypt error handling
s390/ap: Support AP card SCLP config and deconfig operations
s390/sclp: Add support for SCLP AP adapter config/deconfig
s390/ap: add card/queue deconfig state
s390/ap: add error response code field for ap queue devices
s390/ap: split ap queue state machine state from device state
s390/zcrypt: New config switch CONFIG_ZCRYPT_DEBUG
s390/zcrypt: introduce msg tracking in zcrypt functions
s390/startup: correct early pgm check info formatting
s390: remove orphaned extern variables declarations
s390/kasan: make sure int handler always run with DAT on
s390/ipl: add support to control memory clearing for nvme re-IPL
...
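The AP bus rework in this series adds a read/write `config` sysfs attribute to each AP card (see `config_show()`/`config_store()` in `drivers/s390/crypto/ap_card.c` further down in the diff), which drives the new SCLP adapter configure/deconfigure calls. As a rough illustration only — the sysfs path is an assumption based on the `card%02x` device naming on the AP bus, and `set_ap_card_config()` is a made-up helper, not a real API — a userspace program could toggle an adapter like this:

```c
#include <stdio.h>

/* Example helper (hypothetical): flip the per-card "config" attribute. */
static int set_ap_card_config(unsigned int card, int on)
{
	char path[64];
	FILE *f;

	/* Path assumes the usual "card%02x" device naming on the AP bus. */
	snprintf(path, sizeof(path),
		 "/sys/bus/ap/devices/card%02x/config", card);
	f = fopen(path, "w");
	if (!f)
		return -1;
	/* config_store() in ap_card.c accepts "0" or "1" only. */
	if (fprintf(f, "%d\n", on ? 1 : 0) < 0) {
		fclose(f);
		return -1;
	}
	return fclose(f);
}

int main(void)
{
	/* Card number 0x02 is arbitrary for the example. */
	return set_ap_card_config(0x02, 1) ? 1 : 0;
}
```

Writing `1` requests configuration and `0` requests deconfiguration; anything else is rejected by `config_store()` with `-EINVAL`, and the SCLP return code is propagated back to the writer.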
Diffstat (limited to 'drivers/s390')
40 files changed, 2328 insertions, 955 deletions
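Right at the top of the diff, the new file `drivers/s390/char/sclp_ap.c` exports `sclp_ap_configure()` and `sclp_ap_deconfigure()`, which wrap the SCLP configure/deconfigure AP adapter commands and return `-EOPNOTSUPP` when the SCLP facility is not installed. Below is a minimal, hypothetical sketch of a kernel-side caller — the function name and log messages are invented for illustration, and the prototypes are assumed to be declared in `<asm/sclp.h>` as part of this series:

```c
/*
 * Hypothetical caller of the new SCLP AP reconfiguration helpers;
 * only sclp_ap_configure()/sclp_ap_deconfigure() come from the patch.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/sclp.h>

static int example_toggle_ap_adapter(u32 apid, bool configure)
{
	int rc;

	rc = configure ? sclp_ap_configure(apid) : sclp_ap_deconfigure(apid);
	if (rc == -EOPNOTSUPP)
		pr_info("SCLP AP reconfiguration facility not installed\n");
	else if (rc)
		pr_info("AP adapter %u %sconfigure failed, rc=%d\n",
			apid, configure ? "" : "de", rc);
	return rc;
}
```

This mirrors what `config_store()` in `ap_card.c` does when userspace flips the per-card `config` attribute shown earlier.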
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile index 845e12ac5954..c6fdb81a068a 100644 --- a/drivers/s390/char/Makefile +++ b/drivers/s390/char/Makefile @@ -34,6 +34,8 @@ obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o obj-$(CONFIG_PCI) += sclp_pci.o +obj-$(subst m,y,$(CONFIG_ZCRYPT)) += sclp_ap.o + obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o obj-$(CONFIG_VMCP) += vmcp.o diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c index 92757f9bd010..d8acabbb1ed3 100644 --- a/drivers/s390/char/con3215.c +++ b/drivers/s390/char/con3215.c @@ -978,7 +978,6 @@ static int tty3215_install(struct tty_driver *driver, struct tty_struct *tty) static int tty3215_open(struct tty_struct *tty, struct file * filp) { struct raw3215_info *raw = tty->driver_data; - int retval; tty_port_tty_set(&raw->port, tty); @@ -986,11 +985,7 @@ static int tty3215_open(struct tty_struct *tty, struct file * filp) /* * Start up 3215 device */ - retval = raw3215_startup(raw); - if (retval) - return retval; - - return 0; + return raw3215_startup(raw); } /* diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h index 08f36e973b43..8d979e0ee605 100644 --- a/drivers/s390/char/raw3270.h +++ b/drivers/s390/char/raw3270.h @@ -110,7 +110,6 @@ struct raw3270_request { }; struct raw3270_request *raw3270_request_alloc(size_t size); -struct raw3270_request *raw3270_request_alloc_bootmem(size_t size); void raw3270_request_free(struct raw3270_request *); void raw3270_request_reset(struct raw3270_request *); void raw3270_request_set_cmd(struct raw3270_request *, u8 cmd); diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h index 196333013e54..69d9cde9ff5a 100644 --- a/drivers/s390/char/sclp.h +++ b/drivers/s390/char/sclp.h @@ -229,7 +229,7 @@ static inline void sclp_fill_core_info(struct sclp_core_info *info, #define SCLP_HAS_CPU_INFO (sclp.facilities & 0x0800000000000000ULL) #define SCLP_HAS_CPU_RECONFIG (sclp.facilities & 0x0400000000000000ULL) #define SCLP_HAS_PCI_RECONFIG (sclp.facilities & 0x0000000040000000ULL) - +#define SCLP_HAS_AP_RECONFIG (sclp.facilities & 0x0000000100000000ULL) struct gds_subvector { u8 length; @@ -305,9 +305,7 @@ int sclp_deactivate(void); int sclp_reactivate(void); int sclp_sync_request(sclp_cmdw_t command, void *sccb); int sclp_sync_request_timeout(sclp_cmdw_t command, void *sccb, int timeout); - int sclp_sdias_init(void); -void sclp_sdias_exit(void); enum { sclp_init_state_uninitialized, diff --git a/drivers/s390/char/sclp_ap.c b/drivers/s390/char/sclp_ap.c new file mode 100644 index 000000000000..0dd1ca712795 --- /dev/null +++ b/drivers/s390/char/sclp_ap.c @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * s390 crypto adapter related sclp functions. + * + * Copyright IBM Corp. 
2020 + */ +#define KMSG_COMPONENT "sclp_cmd" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/export.h> +#include <linux/slab.h> +#include <asm/sclp.h> +#include "sclp.h" + +#define SCLP_CMDW_CONFIGURE_AP 0x001f0001 +#define SCLP_CMDW_DECONFIGURE_AP 0x001e0001 + +struct ap_cfg_sccb { + struct sccb_header header; +} __packed; + +static int do_ap_configure(sclp_cmdw_t cmd, u32 apid) +{ + struct ap_cfg_sccb *sccb; + int rc; + + if (!SCLP_HAS_AP_RECONFIG) + return -EOPNOTSUPP; + + sccb = (struct ap_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!sccb) + return -ENOMEM; + + sccb->header.length = PAGE_SIZE; + cmd |= (apid & 0xFF) << 8; + rc = sclp_sync_request(cmd, sccb); + if (rc) + goto out; + switch (sccb->header.response_code) { + case 0x0020: case 0x0120: case 0x0440: case 0x0450: + break; + default: + pr_warn("configure AP adapter %u failed: cmd=0x%08x response=0x%04x\n", + apid, cmd, sccb->header.response_code); + rc = -EIO; + break; + } +out: + free_page((unsigned long) sccb); + return rc; +} + +int sclp_ap_configure(u32 apid) +{ + return do_ap_configure(SCLP_CMDW_CONFIGURE_AP, apid); +} +EXPORT_SYMBOL(sclp_ap_configure); + +int sclp_ap_deconfigure(u32 apid) +{ + return do_ap_configure(SCLP_CMDW_DECONFIGURE_AP, apid); +} +EXPORT_SYMBOL(sclp_ap_deconfigure); diff --git a/drivers/s390/char/sclp_early_core.c b/drivers/s390/char/sclp_early_core.c index 7737470f8498..a960afa974bf 100644 --- a/drivers/s390/char/sclp_early_core.c +++ b/drivers/s390/char/sclp_early_core.c @@ -17,12 +17,12 @@ static struct read_info_sccb __bootdata(sclp_info_sccb); static int __bootdata(sclp_info_sccb_valid); char *sclp_early_sccb = (char *) EARLY_SCCB_OFFSET; -int sclp_init_state __section(.data) = sclp_init_state_uninitialized; +int sclp_init_state = sclp_init_state_uninitialized; /* * Used to keep track of the size of the event masks. Qemu until version 2.11 * only supports 4 and needs a workaround. */ -bool sclp_mask_compat_mode __section(.data); +bool sclp_mask_compat_mode; void sclp_early_wait_irq(void) { @@ -214,11 +214,11 @@ static int sclp_early_setup(int disable, int *have_linemode, int *have_vt220) * Output one or more lines of text on the SCLP console (VT220 and / * or line-mode). 
*/ -void __sclp_early_printk(const char *str, unsigned int len, unsigned int force) +void __sclp_early_printk(const char *str, unsigned int len) { int have_linemode, have_vt220; - if (!force && sclp_init_state != sclp_init_state_uninitialized) + if (sclp_init_state != sclp_init_state_uninitialized) return; if (sclp_early_setup(0, &have_linemode, &have_vt220) != 0) return; @@ -231,12 +231,7 @@ void __sclp_early_printk(const char *str, unsigned int len, unsigned int force) void sclp_early_printk(const char *str) { - __sclp_early_printk(str, strlen(str), 0); -} - -void sclp_early_printk_force(const char *str) -{ - __sclp_early_printk(str, strlen(str), 1); + __sclp_early_printk(str, strlen(str)); } int __init sclp_early_read_info(void) diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c index 44594a492553..d6c84e354df5 100644 --- a/drivers/s390/char/sclp_rw.c +++ b/drivers/s390/char/sclp_rw.c @@ -337,24 +337,6 @@ sclp_chars_in_buffer(struct sclp_buffer *buffer) } /* - * sets or provides some values that influence the drivers behaviour - */ -void -sclp_set_columns(struct sclp_buffer *buffer, unsigned short columns) -{ - buffer->columns = columns; - if (buffer->current_line != NULL && - buffer->current_length > buffer->columns) - sclp_finalize_mto(buffer); -} - -void -sclp_set_htab(struct sclp_buffer *buffer, unsigned short htab) -{ - buffer->htab = htab; -} - -/* * called by sclp_console_init and/or sclp_tty_init */ int diff --git a/drivers/s390/char/sclp_rw.h b/drivers/s390/char/sclp_rw.h index a2eb22f67393..93d706e4935c 100644 --- a/drivers/s390/char/sclp_rw.h +++ b/drivers/s390/char/sclp_rw.h @@ -86,8 +86,6 @@ void *sclp_unmake_buffer(struct sclp_buffer *); int sclp_buffer_space(struct sclp_buffer *); int sclp_write(struct sclp_buffer *buffer, const unsigned char *, int); int sclp_emit_buffer(struct sclp_buffer *,void (*)(struct sclp_buffer *,int)); -void sclp_set_columns(struct sclp_buffer *, unsigned short); -void sclp_set_htab(struct sclp_buffer *, unsigned short); int sclp_chars_in_buffer(struct sclp_buffer *); #ifdef CONFIG_SCLP_CONSOLE diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c index 644b61013679..215d4b4a5ff5 100644 --- a/drivers/s390/char/sclp_sdias.c +++ b/drivers/s390/char/sclp_sdias.c @@ -257,7 +257,7 @@ static int __init sclp_sdias_init_async(void) int __init sclp_sdias_init(void) { - if (ipl_info.type != IPL_TYPE_FCP_DUMP) + if (!is_ipl_type_dump()) return 0; sclp_sdias_sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA); BUG_ON(!sclp_sdias_sccb); @@ -275,9 +275,3 @@ out: TRACE("init done\n"); return 0; } - -void __exit sclp_sdias_exit(void) -{ - debug_unregister(sdias_dbf); - sclp_unregister(&sclp_sdias_register); -} diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h index 8bec5f9ea92c..e2c60475dfa8 100644 --- a/drivers/s390/char/tape.h +++ b/drivers/s390/char/tape.h @@ -238,7 +238,6 @@ extern int tape_do_io(struct tape_device *, struct tape_request *); extern int tape_do_io_async(struct tape_device *, struct tape_request *); extern int tape_do_io_interruptible(struct tape_device *, struct tape_request *); extern int tape_cancel_io(struct tape_device *, struct tape_request *); -void tape_hotplug_event(struct tape_device *, int major, int action); static inline int tape_do_io_free(struct tape_device *device, struct tape_request *request) @@ -258,8 +257,6 @@ tape_do_io_async_free(struct tape_device *device, struct tape_request *request) tape_do_io_async(device, request); } -extern int tape_oper_handler(int irq, 
int status); -extern void tape_noper_handler(int irq, int status); extern int tape_open(struct tape_device *); extern int tape_release(struct tape_device *); extern int tape_mtop(struct tape_device *, int, int); diff --git a/drivers/s390/char/tape_std.h b/drivers/s390/char/tape_std.h index 53ec8e2870d4..dcc63ff587f9 100644 --- a/drivers/s390/char/tape_std.h +++ b/drivers/s390/char/tape_std.h @@ -101,7 +101,6 @@ struct tape_request *tape_std_read_block(struct tape_device *, size_t); void tape_std_read_backward(struct tape_device *device, struct tape_request *request); struct tape_request *tape_std_write_block(struct tape_device *, size_t); -void tape_std_check_locate(struct tape_device *, struct tape_request *); /* Some non-mtop commands. */ int tape_std_assign(struct tape_device *); @@ -131,19 +130,8 @@ int tape_std_mtunload(struct tape_device *, int); int tape_std_mtweof(struct tape_device *, int); /* Event handlers */ -void tape_std_default_handler(struct tape_device *); -void tape_std_unexpect_uchk_handler(struct tape_device *); -void tape_std_irq(struct tape_device *); void tape_std_process_eov(struct tape_device *); -// the error recovery stuff: -void tape_std_error_recovery(struct tape_device *); -void tape_std_error_recovery_has_failed(struct tape_device *,int error_id); -void tape_std_error_recovery_succeded(struct tape_device *); -void tape_std_error_recovery_do_retry(struct tape_device *); -void tape_std_error_recovery_read_opposite(struct tape_device *); -void tape_std_error_recovery_HWBUG(struct tape_device *, int condno); - /* S390 tape types */ enum s390_tape_type { tape_3480, diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index d29f1b71618e..1515fdc3c1ab 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-1.0+ /* * zcore module to export memory content and register sets for creating system - * dumps on SCSI disks (zfcpdump). + * dumps on SCSI/NVMe disks (zfcp/nvme dump). * * For more information please refer to Documentation/s390/zfcpdump.rst * @@ -243,7 +243,7 @@ static int __init zcore_init(void) unsigned char arch; int rc; - if (ipl_info.type != IPL_TYPE_FCP_DUMP) + if (!is_ipl_type_dump()) return -ENODATA; if (OLDMEM_BASE) return -ENODATA; @@ -252,9 +252,16 @@ static int __init zcore_init(void) debug_register_view(zcore_dbf, &debug_sprintf_view); debug_set_level(zcore_dbf, 6); - TRACE("devno: %x\n", ipl_info.data.fcp.dev_id.devno); - TRACE("wwpn: %llx\n", (unsigned long long) ipl_info.data.fcp.wwpn); - TRACE("lun: %llx\n", (unsigned long long) ipl_info.data.fcp.lun); + if (ipl_info.type == IPL_TYPE_FCP_DUMP) { + TRACE("type: fcp\n"); + TRACE("devno: %x\n", ipl_info.data.fcp.dev_id.devno); + TRACE("wwpn: %llx\n", (unsigned long long) ipl_info.data.fcp.wwpn); + TRACE("lun: %llx\n", (unsigned long long) ipl_info.data.fcp.lun); + } else if (ipl_info.type == IPL_TYPE_NVME_DUMP) { + TRACE("type: nvme\n"); + TRACE("fid: %x\n", ipl_info.data.nvme.fid); + TRACE("nsid: %x\n", ipl_info.data.nvme.nsid); + } rc = sclp_sdias_init(); if (rc) diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 38017c4a31e9..fc06a4002168 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -1265,6 +1265,27 @@ int chsc_sstpi(void *page, void *result, size_t size) return (rr->response.code == 0x0001) ? 
0 : -EIO; } +int chsc_stzi(void *page, void *result, size_t size) +{ + struct { + struct chsc_header request; + unsigned int rsvd0[3]; + struct chsc_header response; + char data[]; + } *rr; + int rc; + + memset(page, 0, PAGE_SIZE); + rr = page; + rr->request.length = 0x0010; + rr->request.code = 0x003e; + rc = chsc(rr); + if (rc) + return -EIO; + memcpy(result, &rr->data, size); + return (rr->response.code == 0x0001) ? 0 : -EIO; +} + int chsc_siosl(struct subchannel_id schid) { struct { diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 1981eb62d329..cca1a7c4bb33 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -1355,20 +1355,6 @@ static int __init channel_subsystem_init_sync(void) } subsys_initcall_sync(channel_subsystem_init_sync); -void channel_subsystem_reinit(void) -{ - struct channel_path *chp; - struct chp_id chpid; - - chsc_enable_facility(CHSC_SDA_OC_MSS); - chp_id_for_each(&chpid) { - chp = chpid_to_chp(chpid); - if (chp) - chp_update_desc(chp); - } - cmf_reactivate(); -} - #ifdef CONFIG_PROC_FS static ssize_t cio_settle_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h index f5c427ec24b1..853b6a8ca095 100644 --- a/drivers/s390/cio/device.h +++ b/drivers/s390/cio/device.h @@ -96,7 +96,6 @@ int ccw_device_online(struct ccw_device *); int ccw_device_offline(struct ccw_device *); void ccw_device_update_sense_data(struct ccw_device *); int ccw_device_test_sense_data(struct ccw_device *); -void ccw_device_schedule_sch_unregister(struct ccw_device *); int ccw_purge_blacklisted(void); void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo); struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id); diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 4fab8bba2cdd..f9a31c7819ae 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -531,26 +531,6 @@ static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start) return 1; } -static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count) -{ - unsigned char state = 0; - int j, b = start; - - for (j = 0; j < count; ++j) { - get_buf_state(q, b, &state, 0); - if (state == SLSB_P_OUTPUT_PENDING) { - struct qaob *aob = q->u.out.aobs[b]; - if (aob == NULL) - continue; - - q->u.out.sbal_state[b].flags |= - QDIO_OUTBUF_STATE_FLAG_PENDING; - q->u.out.aobs[b] = NULL; - } - b = next_buf(b); - } -} - static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q, int bufnr) { @@ -640,6 +620,19 @@ void qdio_inbound_processing(unsigned long data) __qdio_inbound_processing(q); } +static void qdio_check_pending(struct qdio_q *q, unsigned int index) +{ + unsigned char state; + + if (get_buf_state(q, index, &state, 0) > 0 && + state == SLSB_P_OUTPUT_PENDING && + q->u.out.aobs[index]) { + q->u.out.sbal_state[index].flags |= + QDIO_OUTBUF_STATE_FLAG_PENDING; + q->u.out.aobs[index] = NULL; + } +} + static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start) { unsigned char state = 0; @@ -712,8 +705,13 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q, unsigned int start) if (count) { DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr); - if (q->u.out.use_cq) - qdio_handle_aobs(q, start, count); + + if (q->u.out.use_cq) { + unsigned int i; + + for (i = 0; i < count; i++) + qdio_check_pending(q, QDIO_BUFNR(start + i)); + } } return count; @@ -1221,7 +1219,6 @@ static void 
qdio_trace_init_data(struct qdio_irq *irq, struct qdio_initialize *data) { DBF_DEV_EVENT(DBF_ERR, irq, "qfmt:%1u", data->q_format); - DBF_DEV_HEX(irq, data->adapter_name, 8, DBF_ERR); DBF_DEV_EVENT(DBF_ERR, irq, "qpff%4x", data->qib_param_field_format); DBF_DEV_HEX(irq, &data->qib_param_field, sizeof(void *), DBF_ERR); DBF_DEV_HEX(irq, &data->input_slib_elements, sizeof(void *), DBF_ERR); diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index 2c5cc6ec668e..a5b2e16b7aa8 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c @@ -9,6 +9,8 @@ #include <linux/slab.h> #include <linux/export.h> #include <linux/io.h> + +#include <asm/ebcdic.h> #include <asm/qdio.h> #include "cio.h" @@ -403,28 +405,22 @@ void qdio_free_async_data(struct qdio_irq *irq_ptr) } } -static void __qdio_allocate_fill_qdr(struct qdio_irq *irq_ptr, - struct qdio_q **irq_ptr_qs, - int i, int nr) +static void qdio_fill_qdr_desc(struct qdesfmt0 *desc, struct qdio_q *queue) { - irq_ptr->qdr->qdf0[i + nr].sliba = - (unsigned long)irq_ptr_qs[i]->slib; - - irq_ptr->qdr->qdf0[i + nr].sla = - (unsigned long)irq_ptr_qs[i]->sl; - - irq_ptr->qdr->qdf0[i + nr].slsba = - (unsigned long)&irq_ptr_qs[i]->slsb.val[0]; - - irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY >> 4; - irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY >> 4; - irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY >> 4; - irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY >> 4; + desc->sliba = virt_to_phys(queue->slib); + desc->sla = virt_to_phys(queue->sl); + desc->slsba = virt_to_phys(&queue->slsb); + + desc->akey = PAGE_DEFAULT_KEY >> 4; + desc->bkey = PAGE_DEFAULT_KEY >> 4; + desc->ckey = PAGE_DEFAULT_KEY >> 4; + desc->dkey = PAGE_DEFAULT_KEY >> 4; } static void setup_qdr(struct qdio_irq *irq_ptr, struct qdio_initialize *qdio_init) { + struct qdesfmt0 *desc = &irq_ptr->qdr->qdf0[0]; int i; irq_ptr->qdr->qfmt = qdio_init->q_format; @@ -433,15 +429,14 @@ static void setup_qdr(struct qdio_irq *irq_ptr, irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs; irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */ irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4; - irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib; + irq_ptr->qdr->qiba = virt_to_phys(&irq_ptr->qib); irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY >> 4; for (i = 0; i < qdio_init->no_input_qs; i++) - __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0); + qdio_fill_qdr_desc(desc++, irq_ptr->input_qs[i]); for (i = 0; i < qdio_init->no_output_qs; i++) - __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->output_qs, i, - qdio_init->no_input_qs); + qdio_fill_qdr_desc(desc++, irq_ptr->output_qs[i]); } static void setup_qib(struct qdio_irq *irq_ptr, @@ -459,7 +454,8 @@ static void setup_qib(struct qdio_irq *irq_ptr, if (init_data->no_output_qs) irq_ptr->qib.osliba = (unsigned long)(irq_ptr->output_qs[0]->slib); - memcpy(irq_ptr->qib.ebcnam, init_data->adapter_name, 8); + memcpy(irq_ptr->qib.ebcnam, dev_name(&irq_ptr->cdev->dev), 8); + ASCEBC(irq_ptr->qib.ebcnam, 8); } int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data) diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 24a1940b829e..485cbfcbf06e 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -214,7 +214,7 @@ static inline int ap_fetch_qci_info(struct ap_config_info *info) static void __init ap_init_qci_info(void) { if (!ap_qci_available()) { - AP_DBF(DBF_INFO, "%s QCI not supported\n", __func__); + AP_DBF_INFO("%s QCI not 
supported\n", __func__); return; } @@ -226,18 +226,18 @@ static void __init ap_init_qci_info(void) ap_qci_info = NULL; return; } - AP_DBF(DBF_INFO, "%s successful fetched initial qci info\n", __func__); + AP_DBF_INFO("%s successful fetched initial qci info\n", __func__); if (ap_qci_info->apxa) { if (ap_qci_info->Na) { ap_max_adapter_id = ap_qci_info->Na; - AP_DBF(DBF_INFO, "%s new ap_max_adapter_id is %d\n", - __func__, ap_max_adapter_id); + AP_DBF_INFO("%s new ap_max_adapter_id is %d\n", + __func__, ap_max_adapter_id); } if (ap_qci_info->Nd) { ap_max_domain_id = ap_qci_info->Nd; - AP_DBF(DBF_INFO, "%s new ap_max_domain_id is %d\n", - __func__, ap_max_domain_id); + AP_DBF_INFO("%s new ap_max_domain_id is %d\n", + __func__, ap_max_domain_id); } } } @@ -307,7 +307,7 @@ EXPORT_SYMBOL(ap_test_config_ctrl_domain); * false otherwise. */ static bool ap_queue_info(ap_qid_t qid, int *q_type, - unsigned int *q_fac, int *q_depth) + unsigned int *q_fac, int *q_depth, bool *q_decfg) { struct ap_queue_status status; unsigned long info = 0; @@ -322,6 +322,9 @@ static bool ap_queue_info(ap_qid_t qid, int *q_type, switch (status.response_code) { case AP_RESPONSE_NORMAL: case AP_RESPONSE_RESET_IN_PROGRESS: + case AP_RESPONSE_DECONFIGURED: + case AP_RESPONSE_CHECKSTOPPED: + case AP_RESPONSE_BUSY: /* * According to the architecture in all these cases the * info should be filled. All bits 0 is not possible as @@ -332,6 +335,7 @@ static bool ap_queue_info(ap_qid_t qid, int *q_type, *q_type = (int)((info >> 24) & 0xff); *q_fac = (unsigned int)(info >> 32); *q_depth = (int)(info & 0xff); + *q_decfg = status.response_code == AP_RESPONSE_DECONFIGURED; switch (*q_type) { /* For CEX2 and CEX3 the available functions * are not reflected by the facilities bits. @@ -618,8 +622,8 @@ static int __ap_revise_reserved(struct device *dev, void *dummy) drvres = to_ap_drv(dev->driver)->flags & AP_DRIVER_FLAG_DEFAULT; if (!!devres != !!drvres) { - AP_DBF(DBF_DEBUG, "reprobing queue=%02x.%04x\n", - card, queue); + AP_DBF_DBG("reprobing queue=%02x.%04x\n", + card, queue); rc = device_reprobe(dev); } } @@ -796,7 +800,7 @@ EXPORT_SYMBOL(ap_bus_force_rescan); */ void ap_bus_cfg_chg(void) { - AP_DBF(DBF_INFO, "%s config change, forcing bus rescan\n", __func__); + AP_DBF_DBG("%s config change, forcing bus rescan\n", __func__); ap_bus_force_rescan(); } @@ -947,7 +951,7 @@ static ssize_t ap_domain_store(struct bus_type *bus, ap_domain_index = domain; spin_unlock_bh(&ap_domain_lock); - AP_DBF(DBF_INFO, "stored new default domain=%d\n", domain); + AP_DBF_INFO("stored new default domain=%d\n", domain); return count; } @@ -1208,8 +1212,8 @@ static void ap_select_domain(void) } if (dom <= ap_max_domain_id) { ap_domain_index = dom; - AP_DBF(DBF_DEBUG, "%s new default domain is %d\n", - __func__, ap_domain_index); + AP_DBF_INFO("%s new default domain is %d\n", + __func__, ap_domain_index); } out: spin_unlock_bh(&ap_domain_lock); @@ -1225,8 +1229,11 @@ static int ap_get_compatible_type(ap_qid_t qid, int rawtype, unsigned int func) int comp_type = 0; /* < CEX2A is not supported */ - if (rawtype < AP_DEVICE_TYPE_CEX2A) + if (rawtype < AP_DEVICE_TYPE_CEX2A) { + AP_DBF_WARN("get_comp_type queue=%02x.%04x unsupported type %d\n", + AP_QID_CARD(qid), AP_QID_QUEUE(qid), rawtype); return 0; + } /* up to CEX7 known and fully supported */ if (rawtype <= AP_DEVICE_TYPE_CEX7) return rawtype; @@ -1248,11 +1255,12 @@ static int ap_get_compatible_type(ap_qid_t qid, int rawtype, unsigned int func) comp_type = apinfo.cat; } if (!comp_type) - AP_DBF(DBF_WARN, 
"queue=%02x.%04x unable to map type %d\n", - AP_QID_CARD(qid), AP_QID_QUEUE(qid), rawtype); + AP_DBF_WARN("get_comp_type queue=%02x.%04x unable to map type %d\n", + AP_QID_CARD(qid), AP_QID_QUEUE(qid), rawtype); else if (comp_type != rawtype) - AP_DBF(DBF_INFO, "queue=%02x.%04x map type %d to %d\n", - AP_QID_CARD(qid), AP_QID_QUEUE(qid), rawtype, comp_type); + AP_DBF_INFO("get_comp_type queue=%02x.%04x map type %d to %d\n", + AP_QID_CARD(qid), AP_QID_QUEUE(qid), + rawtype, comp_type); return comp_type; } @@ -1286,155 +1294,278 @@ static int __match_queue_device_with_queue_id(struct device *dev, const void *da /* * Helper function for ap_scan_bus(). - * Does the scan bus job for the given adapter id. + * Remove card device and associated queue devices. */ -static void _ap_scan_bus_adapter(int id) +static inline void ap_scan_rm_card_dev_and_queue_devs(struct ap_card *ac) { - bool broken; + bus_for_each_dev(&ap_bus_type, NULL, + (void *)(long) ac->id, + __ap_queue_devices_with_id_unregister); + device_unregister(&ac->ap_dev.device); +} + +/* + * Helper function for ap_scan_bus(). + * Does the scan bus job for all the domains within + * a valid adapter given by an ap_card ptr. + */ +static inline void ap_scan_domains(struct ap_card *ac) +{ + bool decfg; ap_qid_t qid; unsigned int func; - struct ap_card *ac; struct device *dev; struct ap_queue *aq; + int rc, dom, depth, type; + + /* + * Go through the configuration for the domains and compare them + * to the existing queue devices. Also take care of the config + * and error state for the queue devices. + */ + + for (dom = 0; dom <= ap_max_domain_id; dom++) { + qid = AP_MKQID(ac->id, dom); + dev = bus_find_device(&ap_bus_type, NULL, + (void *)(long) qid, + __match_queue_device_with_qid); + aq = dev ? to_ap_queue(dev) : NULL; + if (!ap_test_config_usage_domain(dom)) { + if (dev) { + AP_DBF_INFO("%s(%d,%d) not in config any more, rm queue device\n", + __func__, ac->id, dom); + device_unregister(dev); + put_device(dev); + } + continue; + } + /* domain is valid, get info from this APQN */ + if (!ap_queue_info(qid, &type, &func, &depth, &decfg)) { + if (aq) { + AP_DBF_INFO( + "%s(%d,%d) ap_queue_info() not successful, rm queue device\n", + __func__, ac->id, dom); + device_unregister(dev); + put_device(dev); + } + continue; + } + /* if no queue device exists, create a new one */ + if (!aq) { + aq = ap_queue_create(qid, ac->ap_dev.device_type); + if (!aq) { + AP_DBF_WARN("%s(%d,%d) ap_queue_create() failed\n", + __func__, ac->id, dom); + continue; + } + aq->card = ac; + aq->config = !decfg; + dev = &aq->ap_dev.device; + dev->bus = &ap_bus_type; + dev->parent = &ac->ap_dev.device; + dev_set_name(dev, "%02x.%04x", ac->id, dom); + /* register queue device */ + rc = device_register(dev); + if (rc) { + AP_DBF_WARN("%s(%d,%d) device_register() failed\n", + __func__, ac->id, dom); + goto put_dev_and_continue; + } + if (decfg) + AP_DBF_INFO("%s(%d,%d) new (decfg) queue device created\n", + __func__, ac->id, dom); + else + AP_DBF_INFO("%s(%d,%d) new queue device created\n", + __func__, ac->id, dom); + goto put_dev_and_continue; + } + /* Check config state on the already existing queue device */ + spin_lock_bh(&aq->lock); + if (decfg && aq->config) { + /* config off this queue device */ + aq->config = false; + if (aq->dev_state > AP_DEV_STATE_UNINITIATED) { + aq->dev_state = AP_DEV_STATE_ERROR; + aq->last_err_rc = AP_RESPONSE_DECONFIGURED; + } + spin_unlock_bh(&aq->lock); + AP_DBF_INFO("%s(%d,%d) queue device config off\n", + __func__, ac->id, dom); + /* 
'receive' pending messages with -EAGAIN */ + ap_flush_queue(aq); + goto put_dev_and_continue; + } + if (!decfg && !aq->config) { + /* config on this queue device */ + aq->config = true; + if (aq->dev_state > AP_DEV_STATE_UNINITIATED) { + aq->dev_state = AP_DEV_STATE_OPERATING; + aq->sm_state = AP_SM_STATE_RESET_START; + } + spin_unlock_bh(&aq->lock); + AP_DBF_INFO("%s(%d,%d) queue device config on\n", + __func__, ac->id, dom); + goto put_dev_and_continue; + } + /* handle other error states */ + if (!decfg && aq->dev_state == AP_DEV_STATE_ERROR) { + spin_unlock_bh(&aq->lock); + /* 'receive' pending messages with -EAGAIN */ + ap_flush_queue(aq); + /* re-init (with reset) the queue device */ + ap_queue_init_state(aq); + AP_DBF_INFO("%s(%d,%d) queue device reinit enforced\n", + __func__, ac->id, dom); + goto put_dev_and_continue; + } + spin_unlock_bh(&aq->lock); +put_dev_and_continue: + put_device(dev); + } +} + +/* + * Helper function for ap_scan_bus(). + * Does the scan bus job for the given adapter id. + */ +static inline void ap_scan_adapter(int ap) +{ + bool decfg; + ap_qid_t qid; + unsigned int func; + struct device *dev; + struct ap_card *ac; int rc, dom, depth, type, comp_type; - /* check if there is a card device registered with this id */ + /* Is there currently a card device for this adapter ? */ dev = bus_find_device(&ap_bus_type, NULL, - (void *)(long) id, + (void *)(long) ap, __match_card_device_with_id); ac = dev ? to_ap_card(dev) : NULL; - if (!ap_test_config_card_id(id)) { - if (dev) { - /* Card device has been removed from configuration */ - bus_for_each_dev(&ap_bus_type, NULL, - (void *)(long) id, - __ap_queue_devices_with_id_unregister); - device_unregister(dev); + + /* Adapter not in configuration ? */ + if (!ap_test_config_card_id(ap)) { + if (ac) { + AP_DBF_INFO("%s(%d) ap not in config any more, rm card and queue devices\n", + __func__, ap); + ap_scan_rm_card_dev_and_queue_devs(ac); put_device(dev); } return; } /* - * This card id is enabled in the configuration. If we already have - * a card device with this id, check if type and functions are still - * the very same. Also verify that at least one queue is available. + * Adapter ap is valid in the current configuration. So do some checks: + * If no card device exists, build one. If a card device exists, check + * for type and functions changed. For all this we need to find a valid + * APQN first. 
*/ - if (ac) { - /* find the first valid queue */ - for (dom = 0; dom < AP_DOMAINS; dom++) { - qid = AP_MKQID(id, dom); - if (ap_queue_info(qid, &type, &func, &depth)) + + for (dom = 0; dom <= ap_max_domain_id; dom++) + if (ap_test_config_usage_domain(dom)) { + qid = AP_MKQID(ap, dom); + if (ap_queue_info(qid, &type, &func, &depth, &decfg)) break; } - broken = false; - if (dom >= AP_DOMAINS) { - /* no accessible queue on this card */ - broken = true; - } else if (ac->raw_hwtype != type) { - /* card type has changed */ - AP_DBF(DBF_INFO, "card=%02x type changed.\n", id); - broken = true; - } else if (ac->functions != func) { - /* card functions have changed */ - AP_DBF(DBF_INFO, "card=%02x functions changed.\n", id); - broken = true; + if (dom > ap_max_domain_id) { + /* Could not find a valid APQN for this adapter */ + if (ac) { + AP_DBF_INFO( + "%s(%d) no type info (no APQN found), rm card and queue devices\n", + __func__, ap); + ap_scan_rm_card_dev_and_queue_devs(ac); + put_device(dev); + } else { + AP_DBF_DBG("%s(%d) no type info (no APQN found), ignored\n", + __func__, ap); } - if (broken) { - /* unregister card device and associated queues */ - bus_for_each_dev(&ap_bus_type, NULL, - (void *)(long) id, - __ap_queue_devices_with_id_unregister); - device_unregister(dev); + return; + } + if (!type) { + /* No apdater type info available, an unusable adapter */ + if (ac) { + AP_DBF_INFO("%s(%d) no valid type (0) info, rm card and queue devices\n", + __func__, ap); + ap_scan_rm_card_dev_and_queue_devs(ac); put_device(dev); - /* go back if there is no valid queue on this card */ - if (dom >= AP_DOMAINS) - return; - ac = NULL; + } else { + AP_DBF_DBG("%s(%d) no valid type (0) info, ignored\n", + __func__, ap); } + return; } - /* - * Go through all possible queue ids. Check and maybe create or release - * queue devices for this card. If there exists no card device yet, - * create a card device also. - */ - for (dom = 0; dom < AP_DOMAINS; dom++) { - qid = AP_MKQID(id, dom); - dev = bus_find_device(&ap_bus_type, NULL, - (void *)(long) qid, - __match_queue_device_with_qid); - aq = dev ? to_ap_queue(dev) : NULL; - if (!ap_test_config_usage_domain(dom)) { - if (dev) { - /* Queue device exists but has been - * removed from configuration. 
- */ - device_unregister(dev); - put_device(dev); - } - continue; - } - /* try to fetch infos about this queue */ - broken = !ap_queue_info(qid, &type, &func, &depth); - if (dev) { - if (!broken) { - spin_lock_bh(&aq->lock); - broken = aq->sm_state == AP_SM_STATE_BORKED; - spin_unlock_bh(&aq->lock); + if (ac) { + /* Check APQN against existing card device for changes */ + if (ac->raw_hwtype != type) { + AP_DBF_INFO("%s(%d) hwtype %d changed, rm card and queue devices\n", + __func__, ap, type); + ap_scan_rm_card_dev_and_queue_devs(ac); + put_device(dev); + ac = NULL; + } else if (ac->functions != func) { + AP_DBF_INFO("%s(%d) functions 0x%08x changed, rm card and queue devices\n", + __func__, ap, type); + ap_scan_rm_card_dev_and_queue_devs(ac); + put_device(dev); + ac = NULL; + } else { + if (decfg && ac->config) { + ac->config = false; + AP_DBF_INFO("%s(%d) card device config off\n", + __func__, ap); + } - if (broken) { - /* Remove broken device */ - AP_DBF(DBF_DEBUG, - "removing broken queue=%02x.%04x\n", - id, dom); - device_unregister(dev); + if (!decfg && !ac->config) { + ac->config = true; + AP_DBF_INFO("%s(%d) card device config on\n", + __func__, ap); } - put_device(dev); - continue; } - if (broken) - continue; - /* a new queue device is needed, check out comp type */ + } + + if (!ac) { + /* Build a new card device */ comp_type = ap_get_compatible_type(qid, type, func); - if (!comp_type) - continue; - /* maybe a card device needs to be created first */ + if (!comp_type) { + AP_DBF_WARN("%s(%d) type %d, can't get compatibility type\n", + __func__, ap, type); + return; + } + ac = ap_card_create(ap, depth, type, comp_type, func); if (!ac) { - ac = ap_card_create(id, depth, type, comp_type, func); - if (!ac) - continue; - ac->ap_dev.device.bus = &ap_bus_type; - ac->ap_dev.device.parent = ap_root_device; - dev_set_name(&ac->ap_dev.device, "card%02x", id); - /* Register card device with AP bus */ - rc = device_register(&ac->ap_dev.device); - if (rc) { - put_device(&ac->ap_dev.device); - ac = NULL; - break; - } - /* get it and thus adjust reference counter */ - get_device(&ac->ap_dev.device); + AP_DBF_WARN("%s(%d) ap_card_create() failed\n", + __func__, ap); + return; } - /* now create the new queue device */ - aq = ap_queue_create(qid, comp_type); - if (!aq) - continue; - aq->card = ac; - aq->ap_dev.device.bus = &ap_bus_type; - aq->ap_dev.device.parent = &ac->ap_dev.device; - dev_set_name(&aq->ap_dev.device, "%02x.%04x", id, dom); - /* Register queue device */ - rc = device_register(&aq->ap_dev.device); + ac->config = !decfg; + dev = &ac->ap_dev.device; + dev->bus = &ap_bus_type; + dev->parent = ap_root_device; + dev_set_name(dev, "card%02x", ap); + /* Register the new card device with AP bus */ + rc = device_register(dev); if (rc) { - put_device(&aq->ap_dev.device); - continue; + AP_DBF_WARN("%s(%d) device_register() failed\n", + __func__, ap); + put_device(dev); + return; } - } /* end domain loop */ + /* get it and thus adjust reference counter */ + get_device(dev); + if (decfg) + AP_DBF_INFO("%s(%d) new (decfg) card device type=%d func=0x%08x created\n", + __func__, ap, type, func); + else + AP_DBF_INFO("%s(%d) new card device type=%d func=0x%08x created\n", + __func__, ap, type, func); + } + + /* Verify the domains and the queue devices for this card */ + ap_scan_domains(ac); - if (ac) - put_device(&ac->ap_dev.device); + /* release the card device */ + put_device(&ac->ap_dev.device); } /** @@ -1443,16 +1574,16 @@ static void _ap_scan_bus_adapter(int id) */ static void 
ap_scan_bus(struct work_struct *unused) { - int id; + int ap; ap_fetch_qci_info(ap_qci_info); ap_select_domain(); - AP_DBF(DBF_DEBUG, "%s running\n", __func__); + AP_DBF_DBG("%s running\n", __func__); /* loop over all possible adapters */ - for (id = 0; id < AP_DEVICES; id++) - _ap_scan_bus_adapter(id); + for (ap = 0; ap <= ap_max_adapter_id; ap++) + ap_scan_adapter(ap); /* check if there is at least one queue available with default domain */ if (ap_domain_index >= 0) { @@ -1463,9 +1594,8 @@ static void ap_scan_bus(struct work_struct *unused) if (dev) put_device(dev); else - AP_DBF(DBF_INFO, - "no queue device with default domain %d available\n", - ap_domain_index); + AP_DBF_INFO("no queue device with default domain %d available\n", + ap_domain_index); } mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ); @@ -1575,7 +1705,6 @@ static int __init ap_module_init(void) */ if (MACHINE_IS_VM) poll_timeout = 1500000; - spin_lock_init(&ap_poll_timer_lock); hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); ap_poll_timer.function = ap_poll_timeout; diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index 1ea046324e8f..5029b80132aa 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h @@ -50,6 +50,7 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr) #define AP_RESPONSE_NO_FIRST_PART 0x13 #define AP_RESPONSE_MESSAGE_TOO_BIG 0x15 #define AP_RESPONSE_REQ_FAC_NOT_INST 0x16 +#define AP_RESPONSE_INVALID_DOMAIN 0x42 /* * Known device types @@ -86,15 +87,12 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr) * AP queue state machine states */ enum ap_sm_state { - AP_SM_STATE_RESET_START, + AP_SM_STATE_RESET_START = 0, AP_SM_STATE_RESET_WAIT, AP_SM_STATE_SETIRQ_WAIT, AP_SM_STATE_IDLE, AP_SM_STATE_WORKING, AP_SM_STATE_QUEUE_FULL, - AP_SM_STATE_REMOVE, /* about to be removed from driver */ - AP_SM_STATE_UNBOUND, /* momentary not bound to a driver */ - AP_SM_STATE_BORKED, /* broken */ NR_AP_SM_STATES }; @@ -118,6 +116,17 @@ enum ap_sm_wait { NR_AP_SM_WAIT }; +/* + * AP queue device states + */ +enum ap_dev_state { + AP_DEV_STATE_UNINITIATED = 0, /* fresh and virgin, not touched */ + AP_DEV_STATE_OPERATING, /* queue dev is working normal */ + AP_DEV_STATE_SHUTDOWN, /* remove/unbind/shutdown in progress */ + AP_DEV_STATE_ERROR, /* device is in error state */ + NR_AP_DEV_STATES +}; + struct ap_device; struct ap_message; @@ -158,6 +167,7 @@ struct ap_card { unsigned int functions; /* AP device function bitfield. */ int queue_depth; /* AP queue depth.*/ int id; /* AP card number. */ + bool config; /* configured state */ atomic64_t total_request_count; /* # requests ever for this AP device.*/ }; @@ -169,10 +179,11 @@ struct ap_queue { struct ap_card *card; /* Ptr to assoc. AP card. */ spinlock_t lock; /* Per device lock. */ void *private; /* ap driver private pointer. */ + enum ap_dev_state dev_state; /* queue device state */ + bool config; /* configured state */ ap_qid_t qid; /* AP queue id. */ int interrupt; /* indicate if interrupts are enabled */ int queue_count; /* # messages currently on AP queue. */ - enum ap_sm_state sm_state; /* ap queue state machine state */ int pendingq_count; /* # requests on pendingq list. */ int requestq_count; /* # requests on requestq list. */ u64 total_request_count; /* # requests ever for this AP device.*/ @@ -181,18 +192,45 @@ struct ap_queue { struct list_head pendingq; /* List of message sent to AP queue. */ struct list_head requestq; /* List of message yet to be sent. 
*/ struct ap_message *reply; /* Per device reply message. */ + enum ap_sm_state sm_state; /* ap queue state machine state */ + int last_err_rc; /* last error state response code */ }; #define to_ap_queue(x) container_of((x), struct ap_queue, ap_dev.device) typedef enum ap_sm_wait (ap_func_t)(struct ap_queue *queue); +/* failure injection cmd struct */ +struct ap_fi { + union { + u16 cmd; /* fi flags + action */ + struct { + u8 flags; /* fi flags only */ + u8 action; /* fi action only */ + }; + }; +}; + +/* all currently known fi actions */ +enum ap_fi_actions { + AP_FI_ACTION_CCA_AGENT_FF = 0x01, + AP_FI_ACTION_CCA_DOM_INVAL = 0x02, + AP_FI_ACTION_NQAP_QID_INVAL = 0x03, +}; + +/* all currently known fi flags */ +enum ap_fi_flags { + AP_FI_FLAG_NO_RETRY = 0x01, + AP_FI_FLAG_TOGGLE_SPECIAL = 0x02, +}; + struct ap_message { struct list_head list; /* Request queueing. */ unsigned long long psmid; /* Message id. */ void *msg; /* Pointer to message buffer. */ unsigned int len; /* Message length. */ - u32 flags; /* Flags, see AP_MSG_FLAG_xxx */ + u16 flags; /* Flags, see AP_MSG_FLAG_xxx */ + struct ap_fi fi; /* Failure Injection cmd */ int rc; /* Return code for this message */ void *private; /* ap driver private pointer. */ /* receive is called from tasklet context */ @@ -200,7 +238,7 @@ struct ap_message { struct ap_message *); }; -#define AP_MSG_FLAG_SPECIAL (1 << 16) /* flag msg as 'special' with NQAP */ +#define AP_MSG_FLAG_SPECIAL 1 /* flag msg as 'special' with NQAP */ /** * ap_init_message() - Initialize ap_message. @@ -234,7 +272,7 @@ int ap_recv(ap_qid_t, unsigned long long *, void *, size_t); enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event); enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event); -void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg); +int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg); void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg); void ap_flush_queue(struct ap_queue *aq); diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c index 6588713319ba..d98bdd28d23e 100644 --- a/drivers/s390/crypto/ap_card.c +++ b/drivers/s390/crypto/ap_card.c @@ -12,6 +12,7 @@ #include <linux/init.h> #include <linux/slab.h> #include <asm/facility.h> +#include <asm/sclp.h> #include "ap_bus.h" @@ -139,6 +140,38 @@ static ssize_t modalias_show(struct device *dev, static DEVICE_ATTR_RO(modalias); +static ssize_t config_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ap_card *ac = to_ap_card(dev); + + return scnprintf(buf, PAGE_SIZE, "%d\n", ac->config ? 1 : 0); +} + +static ssize_t config_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int rc = 0, cfg; + struct ap_card *ac = to_ap_card(dev); + + if (sscanf(buf, "%d\n", &cfg) != 1 || cfg < 0 || cfg > 1) + return -EINVAL; + + if (cfg && !ac->config) + rc = sclp_ap_configure(ac->id); + else if (!cfg && ac->config) + rc = sclp_ap_deconfigure(ac->id); + if (rc) + return rc; + + ac->config = cfg ? 
true : false; + + return count; +} + +static DEVICE_ATTR_RW(config); + static struct attribute *ap_card_dev_attrs[] = { &dev_attr_hwtype.attr, &dev_attr_raw_hwtype.attr, @@ -148,6 +181,7 @@ static struct attribute *ap_card_dev_attrs[] = { &dev_attr_requestq_count.attr, &dev_attr_pendingq_count.attr, &dev_attr_modalias.attr, + &dev_attr_config.attr, NULL }; diff --git a/drivers/s390/crypto/ap_debug.h b/drivers/s390/crypto/ap_debug.h index dc675eb5aef6..34b0350d0b1a 100644 --- a/drivers/s390/crypto/ap_debug.h +++ b/drivers/s390/crypto/ap_debug.h @@ -20,6 +20,14 @@ #define AP_DBF(...) \ debug_sprintf_event(ap_dbf_info, ##__VA_ARGS__) +#define AP_DBF_ERR(...) \ + debug_sprintf_event(ap_dbf_info, DBF_ERR, ##__VA_ARGS__) +#define AP_DBF_WARN(...) \ + debug_sprintf_event(ap_dbf_info, DBF_WARN, ##__VA_ARGS__) +#define AP_DBF_INFO(...) \ + debug_sprintf_event(ap_dbf_info, DBF_INFO, ##__VA_ARGS__) +#define AP_DBF_DBG(...) \ + debug_sprintf_event(ap_dbf_info, DBF_DEBUG, ##__VA_ARGS__) extern debug_info_t *ap_dbf_info; diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c index 688ebebbf98c..ecefc25eff0c 100644 --- a/drivers/s390/crypto/ap_queue.c +++ b/drivers/s390/crypto/ap_queue.c @@ -195,7 +195,11 @@ static enum ap_sm_wait ap_sm_read(struct ap_queue *aq) aq->sm_state = AP_SM_STATE_IDLE; return AP_SM_WAIT_NONE; default: - aq->sm_state = AP_SM_STATE_BORKED; + aq->dev_state = AP_DEV_STATE_ERROR; + aq->last_err_rc = status.response_code; + AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n", + __func__, status.response_code, + AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); return AP_SM_WAIT_NONE; } } @@ -210,12 +214,20 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq) { struct ap_queue_status status; struct ap_message *ap_msg; + ap_qid_t qid = aq->qid; if (aq->requestq_count <= 0) return AP_SM_WAIT_NONE; /* Start the next request on the queue. 
*/ ap_msg = list_entry(aq->requestq.next, struct ap_message, list); - status = __ap_send(aq->qid, ap_msg->psmid, +#ifdef CONFIG_ZCRYPT_DEBUG + if (ap_msg->fi.action == AP_FI_ACTION_NQAP_QID_INVAL) { + AP_DBF_WARN("%s fi cmd 0x%04x: forcing invalid qid 0xFF00\n", + __func__, ap_msg->fi.cmd); + qid = 0xFF00; + } +#endif + status = __ap_send(qid, ap_msg->psmid, ap_msg->msg, ap_msg->len, ap_msg->flags & AP_MSG_FLAG_SPECIAL); switch (status.response_code) { @@ -237,6 +249,9 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq) case AP_RESPONSE_RESET_IN_PROGRESS: aq->sm_state = AP_SM_STATE_RESET_WAIT; return AP_SM_WAIT_TIMEOUT; + case AP_RESPONSE_INVALID_DOMAIN: + AP_DBF(DBF_WARN, "AP_RESPONSE_INVALID_DOMAIN on NQAP\n"); + fallthrough; case AP_RESPONSE_MESSAGE_TOO_BIG: case AP_RESPONSE_REQ_FAC_NOT_INST: list_del_init(&ap_msg->list); @@ -245,7 +260,11 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq) ap_msg->receive(aq, ap_msg, NULL); return AP_SM_WAIT_AGAIN; default: - aq->sm_state = AP_SM_STATE_BORKED; + aq->dev_state = AP_DEV_STATE_ERROR; + aq->last_err_rc = status.response_code; + AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n", + __func__, status.response_code, + AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); return AP_SM_WAIT_NONE; } } @@ -278,13 +297,12 @@ static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq) aq->sm_state = AP_SM_STATE_RESET_WAIT; aq->interrupt = AP_INTR_DISABLED; return AP_SM_WAIT_TIMEOUT; - case AP_RESPONSE_BUSY: - return AP_SM_WAIT_TIMEOUT; - case AP_RESPONSE_Q_NOT_AVAIL: - case AP_RESPONSE_DECONFIGURED: - case AP_RESPONSE_CHECKSTOPPED: default: - aq->sm_state = AP_SM_STATE_BORKED; + aq->dev_state = AP_DEV_STATE_ERROR; + aq->last_err_rc = status.response_code; + AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n", + __func__, status.response_code, + AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); return AP_SM_WAIT_NONE; } } @@ -323,7 +341,11 @@ static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq) case AP_RESPONSE_DECONFIGURED: case AP_RESPONSE_CHECKSTOPPED: default: - aq->sm_state = AP_SM_STATE_BORKED; + aq->dev_state = AP_DEV_STATE_ERROR; + aq->last_err_rc = status.response_code; + AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n", + __func__, status.response_code, + AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); return AP_SM_WAIT_NONE; } } @@ -360,7 +382,11 @@ static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq) case AP_RESPONSE_NO_PENDING_REPLY: return AP_SM_WAIT_TIMEOUT; default: - aq->sm_state = AP_SM_STATE_BORKED; + aq->dev_state = AP_DEV_STATE_ERROR; + aq->last_err_rc = status.response_code; + AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n", + __func__, status.response_code, + AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); return AP_SM_WAIT_NONE; } } @@ -393,23 +419,14 @@ static ap_func_t *ap_jumptable[NR_AP_SM_STATES][NR_AP_SM_EVENTS] = { [AP_SM_EVENT_POLL] = ap_sm_read, [AP_SM_EVENT_TIMEOUT] = ap_sm_reset, }, - [AP_SM_STATE_REMOVE] = { - [AP_SM_EVENT_POLL] = ap_sm_nop, - [AP_SM_EVENT_TIMEOUT] = ap_sm_nop, - }, - [AP_SM_STATE_UNBOUND] = { - [AP_SM_EVENT_POLL] = ap_sm_nop, - [AP_SM_EVENT_TIMEOUT] = ap_sm_nop, - }, - [AP_SM_STATE_BORKED] = { - [AP_SM_EVENT_POLL] = ap_sm_nop, - [AP_SM_EVENT_TIMEOUT] = ap_sm_nop, - }, }; enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event) { - return ap_jumptable[aq->sm_state][event](aq); + if (aq->dev_state > AP_DEV_STATE_UNINITIATED) + return ap_jumptable[aq->sm_state][event](aq); + else + return AP_SM_WAIT_NONE; } 
enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event) @@ -429,12 +446,20 @@ static ssize_t request_count_show(struct device *dev, char *buf) { struct ap_queue *aq = to_ap_queue(dev); + bool valid = false; u64 req_cnt; spin_lock_bh(&aq->lock); - req_cnt = aq->total_request_count; + if (aq->dev_state > AP_DEV_STATE_UNINITIATED) { + req_cnt = aq->total_request_count; + valid = true; + } spin_unlock_bh(&aq->lock); - return scnprintf(buf, PAGE_SIZE, "%llu\n", req_cnt); + + if (valid) + return scnprintf(buf, PAGE_SIZE, "%llu\n", req_cnt); + else + return scnprintf(buf, PAGE_SIZE, "-\n"); } static ssize_t request_count_store(struct device *dev, @@ -459,7 +484,8 @@ static ssize_t requestq_count_show(struct device *dev, unsigned int reqq_cnt = 0; spin_lock_bh(&aq->lock); - reqq_cnt = aq->requestq_count; + if (aq->dev_state > AP_DEV_STATE_UNINITIATED) + reqq_cnt = aq->requestq_count; spin_unlock_bh(&aq->lock); return scnprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt); } @@ -473,7 +499,8 @@ static ssize_t pendingq_count_show(struct device *dev, unsigned int penq_cnt = 0; spin_lock_bh(&aq->lock); - penq_cnt = aq->pendingq_count; + if (aq->dev_state > AP_DEV_STATE_UNINITIATED) + penq_cnt = aq->pendingq_count; spin_unlock_bh(&aq->lock); return scnprintf(buf, PAGE_SIZE, "%d\n", penq_cnt); } @@ -542,12 +569,138 @@ static ssize_t interrupt_show(struct device *dev, static DEVICE_ATTR_RO(interrupt); +static ssize_t config_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ap_queue *aq = to_ap_queue(dev); + int rc; + + spin_lock_bh(&aq->lock); + rc = scnprintf(buf, PAGE_SIZE, "%d\n", aq->config ? 1 : 0); + spin_unlock_bh(&aq->lock); + return rc; +} + +static DEVICE_ATTR_RO(config); + +#ifdef CONFIG_ZCRYPT_DEBUG +static ssize_t states_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ap_queue *aq = to_ap_queue(dev); + int rc = 0; + + spin_lock_bh(&aq->lock); + /* queue device state */ + switch (aq->dev_state) { + case AP_DEV_STATE_UNINITIATED: + rc = scnprintf(buf, PAGE_SIZE, "UNINITIATED\n"); + break; + case AP_DEV_STATE_OPERATING: + rc = scnprintf(buf, PAGE_SIZE, "OPERATING"); + break; + case AP_DEV_STATE_SHUTDOWN: + rc = scnprintf(buf, PAGE_SIZE, "SHUTDOWN"); + break; + case AP_DEV_STATE_ERROR: + rc = scnprintf(buf, PAGE_SIZE, "ERROR"); + break; + default: + rc = scnprintf(buf, PAGE_SIZE, "UNKNOWN"); + } + /* state machine state */ + if (aq->dev_state) { + switch (aq->sm_state) { + case AP_SM_STATE_RESET_START: + rc += scnprintf(buf + rc, PAGE_SIZE - rc, + " [RESET_START]\n"); + break; + case AP_SM_STATE_RESET_WAIT: + rc += scnprintf(buf + rc, PAGE_SIZE - rc, + " [RESET_WAIT]\n"); + break; + case AP_SM_STATE_SETIRQ_WAIT: + rc += scnprintf(buf + rc, PAGE_SIZE - rc, + " [SETIRQ_WAIT]\n"); + break; + case AP_SM_STATE_IDLE: + rc += scnprintf(buf + rc, PAGE_SIZE - rc, + " [IDLE]\n"); + break; + case AP_SM_STATE_WORKING: + rc += scnprintf(buf + rc, PAGE_SIZE - rc, + " [WORKING]\n"); + break; + case AP_SM_STATE_QUEUE_FULL: + rc += scnprintf(buf + rc, PAGE_SIZE - rc, + " [FULL]\n"); + break; + default: + rc += scnprintf(buf + rc, PAGE_SIZE - rc, + " [UNKNOWN]\n"); + } + } + spin_unlock_bh(&aq->lock); + + return rc; +} +static DEVICE_ATTR_RO(states); + +static ssize_t last_err_rc_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ap_queue *aq = to_ap_queue(dev); + int rc; + + spin_lock_bh(&aq->lock); + rc = aq->last_err_rc; + spin_unlock_bh(&aq->lock); + + switch (rc) { + case AP_RESPONSE_NORMAL: + return 
scnprintf(buf, PAGE_SIZE, "NORMAL\n"); + case AP_RESPONSE_Q_NOT_AVAIL: + return scnprintf(buf, PAGE_SIZE, "Q_NOT_AVAIL\n"); + case AP_RESPONSE_RESET_IN_PROGRESS: + return scnprintf(buf, PAGE_SIZE, "RESET_IN_PROGRESS\n"); + case AP_RESPONSE_DECONFIGURED: + return scnprintf(buf, PAGE_SIZE, "DECONFIGURED\n"); + case AP_RESPONSE_CHECKSTOPPED: + return scnprintf(buf, PAGE_SIZE, "CHECKSTOPPED\n"); + case AP_RESPONSE_BUSY: + return scnprintf(buf, PAGE_SIZE, "BUSY\n"); + case AP_RESPONSE_INVALID_ADDRESS: + return scnprintf(buf, PAGE_SIZE, "INVALID_ADDRESS\n"); + case AP_RESPONSE_OTHERWISE_CHANGED: + return scnprintf(buf, PAGE_SIZE, "OTHERWISE_CHANGED\n"); + case AP_RESPONSE_Q_FULL: + return scnprintf(buf, PAGE_SIZE, "Q_FULL/NO_PENDING_REPLY\n"); + case AP_RESPONSE_INDEX_TOO_BIG: + return scnprintf(buf, PAGE_SIZE, "INDEX_TOO_BIG\n"); + case AP_RESPONSE_NO_FIRST_PART: + return scnprintf(buf, PAGE_SIZE, "NO_FIRST_PART\n"); + case AP_RESPONSE_MESSAGE_TOO_BIG: + return scnprintf(buf, PAGE_SIZE, "MESSAGE_TOO_BIG\n"); + case AP_RESPONSE_REQ_FAC_NOT_INST: + return scnprintf(buf, PAGE_SIZE, "REQ_FAC_NOT_INST\n"); + default: + return scnprintf(buf, PAGE_SIZE, "response code %d\n", rc); + } +} +static DEVICE_ATTR_RO(last_err_rc); +#endif + static struct attribute *ap_queue_dev_attrs[] = { &dev_attr_request_count.attr, &dev_attr_requestq_count.attr, &dev_attr_pendingq_count.attr, &dev_attr_reset.attr, &dev_attr_interrupt.attr, + &dev_attr_config.attr, +#ifdef CONFIG_ZCRYPT_DEBUG + &dev_attr_states.attr, + &dev_attr_last_err_rc.attr, +#endif NULL }; @@ -587,7 +740,6 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type) aq->ap_dev.device.type = &ap_queue_type; aq->ap_dev.device_type = device_type; aq->qid = qid; - aq->sm_state = AP_SM_STATE_UNBOUND; aq->interrupt = AP_INTR_DISABLED; spin_lock_init(&aq->lock); INIT_LIST_HEAD(&aq->pendingq); @@ -612,22 +764,30 @@ EXPORT_SYMBOL(ap_queue_init_reply); * @aq: The AP device to queue the message to * @ap_msg: The message that is to be added */ -void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg) +int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg) { - /* For asynchronous message handling a valid receive-callback - * is required. - */ + int rc = 0; + + /* msg needs to have a valid receive-callback */ BUG_ON(!ap_msg->receive); spin_lock_bh(&aq->lock); - /* Queue the message. */ - list_add_tail(&ap_msg->list, &aq->requestq); - aq->requestq_count++; - aq->total_request_count++; - atomic64_inc(&aq->card->total_request_count); + + /* only allow to queue new messages if device state is ok */ + if (aq->dev_state == AP_DEV_STATE_OPERATING) { + list_add_tail(&ap_msg->list, &aq->requestq); + aq->requestq_count++; + aq->total_request_count++; + atomic64_inc(&aq->card->total_request_count); + } else + rc = -ENODEV; + /* Send/receive as many request from the queue as possible. 
*/ ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL)); + spin_unlock_bh(&aq->lock); + + return rc; } EXPORT_SYMBOL(ap_queue_message); @@ -698,8 +858,8 @@ void ap_queue_prepare_remove(struct ap_queue *aq) spin_lock_bh(&aq->lock); /* flush queue */ __ap_flush_queue(aq); - /* set REMOVE state to prevent new messages are queued in */ - aq->sm_state = AP_SM_STATE_REMOVE; + /* move queue device state to SHUTDOWN in progress */ + aq->dev_state = AP_DEV_STATE_SHUTDOWN; spin_unlock_bh(&aq->lock); del_timer_sync(&aq->timeout); } @@ -707,21 +867,21 @@ void ap_queue_prepare_remove(struct ap_queue *aq) void ap_queue_remove(struct ap_queue *aq) { /* - * all messages have been flushed and the state is - * AP_SM_STATE_REMOVE. Now reset with zero which also - * clears the irq registration and move the state - * to AP_SM_STATE_UNBOUND to signal that this queue - * is not used by any driver currently. + * all messages have been flushed and the device state + * is SHUTDOWN. Now reset with zero which also clears + * the irq registration and move the device state + * to the initial value AP_DEV_STATE_UNINITIATED. */ spin_lock_bh(&aq->lock); ap_zapq(aq->qid); - aq->sm_state = AP_SM_STATE_UNBOUND; + aq->dev_state = AP_DEV_STATE_UNINITIATED; spin_unlock_bh(&aq->lock); } void ap_queue_init_state(struct ap_queue *aq) { spin_lock_bh(&aq->lock); + aq->dev_state = AP_DEV_STATE_OPERATING; aq->sm_state = AP_SM_STATE_RESET_START; ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL)); spin_unlock_bh(&aq->lock); diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c index 5896e5282a4e..99cb60ea663d 100644 --- a/drivers/s390/crypto/pkey_api.c +++ b/drivers/s390/crypto/pkey_api.c @@ -31,8 +31,9 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("IBM Corporation"); MODULE_DESCRIPTION("s390 protected key interface"); -#define KEYBLOBBUFSIZE 8192 /* key buffer size used for internal processing */ -#define MAXAPQNSINLIST 64 /* max 64 apqns within a apqn list */ +#define KEYBLOBBUFSIZE 8192 /* key buffer size used for internal processing */ +#define PROTKEYBLOBBUFSIZE 256 /* protected key buffer size used internal */ +#define MAXAPQNSINLIST 64 /* max 64 apqns within a apqn list */ /* mask of available pckmo subfunctions, fetched once at module init */ static cpacf_mask_t pckmo_functions; @@ -237,8 +238,9 @@ static int pkey_ep11key2pkey(const u8 *key, struct pkey_protkey *pkey) for (rc = -ENODEV, i = 0; i < nr_apqns; i++) { card = apqns[i] >> 16; dom = apqns[i] & 0xFFFF; - rc = ep11_key2protkey(card, dom, key, kb->head.len, - pkey->protkey, &pkey->len, &pkey->type); + pkey->len = sizeof(pkey->protkey); + rc = ep11_kblob2protkey(card, dom, key, kb->head.len, + pkey->protkey, &pkey->len, &pkey->type); if (rc == 0) break; } @@ -449,15 +451,21 @@ static int pkey_nonccatok2pkey(const u8 *key, u32 keylen, break; } case TOKVER_EP11_AES: { - if (keylen < MINEP11AESKEYBLOBSIZE) - goto out; /* check ep11 key for exportable as protected key */ - rc = ep11_check_aeskeyblob(debug_info, 3, key, 0, 1); + rc = ep11_check_aes_key(debug_info, 3, key, keylen, 1); if (rc) goto out; rc = pkey_ep11key2pkey(key, protkey); break; } + case TOKVER_EP11_AES_WITH_HEADER: + /* check ep11 key with header for exportable as protected key */ + rc = ep11_check_aes_key_with_hdr(debug_info, 3, key, keylen, 1); + if (rc) + goto out; + rc = pkey_ep11key2pkey(key + sizeof(struct ep11kblob_header), + protkey); + break; default: DEBUG_ERR("%s unknown/unsupported non-CCA token version %d\n", __func__, hdr->version); @@ -661,13 +669,14 @@ static int pkey_verifykey2(const u8 
*key, size_t keylen, *ksize = (enum pkey_key_size) t->bitsize; rc = cca_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain, - ZCRYPT_CEX3C, t->mkvp, 0, 1); + ZCRYPT_CEX3C, AES_MK_SET, t->mkvp, 0, 1); if (rc == 0 && flags) *flags = PKEY_FLAGS_MATCH_CUR_MKVP; if (rc == -ENODEV) { rc = cca_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain, - ZCRYPT_CEX3C, 0, t->mkvp, 1); + ZCRYPT_CEX3C, AES_MK_SET, + 0, t->mkvp, 1); if (rc == 0 && flags) *flags = PKEY_FLAGS_MATCH_ALT_MKVP; } @@ -697,13 +706,14 @@ static int pkey_verifykey2(const u8 *key, size_t keylen, } rc = cca_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain, - ZCRYPT_CEX6, t->mkvp0, 0, 1); + ZCRYPT_CEX6, AES_MK_SET, t->mkvp0, 0, 1); if (rc == 0 && flags) *flags = PKEY_FLAGS_MATCH_CUR_MKVP; if (rc == -ENODEV) { rc = cca_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain, - ZCRYPT_CEX6, 0, t->mkvp0, 1); + ZCRYPT_CEX6, AES_MK_SET, + 0, t->mkvp0, 1); if (rc == 0 && flags) *flags = PKEY_FLAGS_MATCH_ALT_MKVP; } @@ -717,7 +727,7 @@ static int pkey_verifykey2(const u8 *key, size_t keylen, && hdr->version == TOKVER_EP11_AES) { struct ep11keyblob *kb = (struct ep11keyblob *)key; - rc = ep11_check_aeskeyblob(debug_info, 3, key, 0, 1); + rc = ep11_check_aes_key(debug_info, 3, key, keylen, 1); if (rc) goto out; if (ktype) @@ -778,7 +788,7 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns, if (hdr->version == TOKVER_EP11_AES) { if (keylen < sizeof(struct ep11keyblob)) return -EINVAL; - if (ep11_check_aeskeyblob(debug_info, 3, key, 0, 1)) + if (ep11_check_aes_key(debug_info, 3, key, keylen, 1)) return -EINVAL; } else { return pkey_nonccatok2pkey(key, keylen, pkey); @@ -804,9 +814,10 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns, else { /* EP11 AES secure key blob */ struct ep11keyblob *kb = (struct ep11keyblob *) key; - rc = ep11_key2protkey(card, dom, key, kb->head.len, - pkey->protkey, &pkey->len, - &pkey->type); + pkey->len = sizeof(pkey->protkey); + rc = ep11_kblob2protkey(card, dom, key, kb->head.len, + pkey->protkey, &pkey->len, + &pkey->type); } if (rc == 0) break; @@ -825,7 +836,27 @@ static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags, if (keylen < sizeof(struct keytoken_header) || flags == 0) return -EINVAL; - if (hdr->type == TOKTYPE_NON_CCA && hdr->version == TOKVER_EP11_AES) { + if (hdr->type == TOKTYPE_NON_CCA + && (hdr->version == TOKVER_EP11_AES_WITH_HEADER + || hdr->version == TOKVER_EP11_ECC_WITH_HEADER) + && is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { + int minhwtype = 0, api = 0; + struct ep11keyblob *kb = (struct ep11keyblob *) + (key + sizeof(struct ep11kblob_header)); + + if (flags != PKEY_FLAGS_MATCH_CUR_MKVP) + return -EINVAL; + if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) { + minhwtype = ZCRYPT_CEX7; + api = EP11_API_V; + } + rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + minhwtype, api, kb->wkvp); + if (rc) + goto out; + } else if (hdr->type == TOKTYPE_NON_CCA + && hdr->version == TOKVER_EP11_AES + && is_ep11_keyblob(key)) { int minhwtype = 0, api = 0; struct ep11keyblob *kb = (struct ep11keyblob *) key; @@ -863,7 +894,26 @@ static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags, return -EINVAL; } rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, - minhwtype, cur_mkvp, old_mkvp, 1); + minhwtype, AES_MK_SET, + cur_mkvp, old_mkvp, 1); + if (rc) + goto out; + } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) { + u64 cur_mkvp = 0, old_mkvp = 0; + struct eccprivkeytoken *t = (struct eccprivkeytoken *)key; + + if (t->secid 
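/*
 * Sketch for illustration: the ep11_check_aeskeyblob() calls above are
 * replaced by ep11_check_aes_key(), which additionally receives the
 * overall blob length.  The wrapper below is an assumed convenience
 * helper, not part of the patch; the trailing argument 1 requests the
 * "exportable as protected key" check, as in the hunks above.
 */
static bool ep11_aes_key_usable(debug_info_t *dbg,
				const u8 *key, size_t keylen)
{
	return ep11_check_aes_key(dbg, 3, key, keylen, 1) == 0;
}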
== 0x20) { + if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) + cur_mkvp = t->mkvp; + if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) + old_mkvp = t->mkvp; + } else { + /* unknown cca internal 2 token type */ + return -EINVAL; + } + rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + ZCRYPT_CEX7, APKA_MK_SET, + cur_mkvp, old_mkvp, 1); if (rc) goto out; } else @@ -900,10 +950,26 @@ static int pkey_apqns4keytype(enum pkey_key_type ktype, if (ktype == PKEY_TYPE_CCA_CIPHER) minhwtype = ZCRYPT_CEX6; rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, - minhwtype, cur_mkvp, old_mkvp, 1); + minhwtype, AES_MK_SET, + cur_mkvp, old_mkvp, 1); if (rc) goto out; - } else if (ktype == PKEY_TYPE_EP11) { + } else if (ktype == PKEY_TYPE_CCA_ECC) { + u64 cur_mkvp = 0, old_mkvp = 0; + + if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) + cur_mkvp = *((u64 *) cur_mkvp); + if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) + old_mkvp = *((u64 *) alt_mkvp); + rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + ZCRYPT_CEX7, APKA_MK_SET, + cur_mkvp, old_mkvp, 1); + if (rc) + goto out; + + } else if (ktype == PKEY_TYPE_EP11 || + ktype == PKEY_TYPE_EP11_AES || + ktype == PKEY_TYPE_EP11_ECC) { u8 *wkvp = NULL; if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) @@ -929,6 +995,111 @@ out: return rc; } +static int pkey_keyblob2pkey3(const struct pkey_apqn *apqns, size_t nr_apqns, + const u8 *key, size_t keylen, u32 *protkeytype, + u8 *protkey, u32 *protkeylen) +{ + int i, card, dom, rc; + struct keytoken_header *hdr = (struct keytoken_header *)key; + + /* check for at least one apqn given */ + if (!apqns || !nr_apqns) + return -EINVAL; + + if (keylen < sizeof(struct keytoken_header)) + return -EINVAL; + + if (hdr->type == TOKTYPE_NON_CCA + && hdr->version == TOKVER_EP11_AES_WITH_HEADER + && is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { + /* EP11 AES key blob with header */ + if (ep11_check_aes_key_with_hdr(debug_info, 3, key, keylen, 1)) + return -EINVAL; + } else if (hdr->type == TOKTYPE_NON_CCA + && hdr->version == TOKVER_EP11_ECC_WITH_HEADER + && is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { + /* EP11 ECC key blob with header */ + if (ep11_check_ecc_key_with_hdr(debug_info, 3, key, keylen, 1)) + return -EINVAL; + } else if (hdr->type == TOKTYPE_NON_CCA + && hdr->version == TOKVER_EP11_AES + && is_ep11_keyblob(key)) { + /* EP11 AES key blob with header in session field */ + if (ep11_check_aes_key(debug_info, 3, key, keylen, 1)) + return -EINVAL; + } else if (hdr->type == TOKTYPE_CCA_INTERNAL) { + if (hdr->version == TOKVER_CCA_AES) { + /* CCA AES data key */ + if (keylen != sizeof(struct secaeskeytoken)) + return -EINVAL; + if (cca_check_secaeskeytoken(debug_info, 3, key, 0)) + return -EINVAL; + } else if (hdr->version == TOKVER_CCA_VLSC) { + /* CCA AES cipher key */ + if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE) + return -EINVAL; + if (cca_check_secaescipherkey(debug_info, 3, key, 0, 1)) + return -EINVAL; + } else { + DEBUG_ERR("%s unknown CCA internal token version %d\n", + __func__, hdr->version); + return -EINVAL; + } + } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) { + /* CCA ECC (private) key */ + if (keylen < sizeof(struct eccprivkeytoken)) + return -EINVAL; + if (cca_check_sececckeytoken(debug_info, 3, key, keylen, 1)) + return -EINVAL; + } else if (hdr->type == TOKTYPE_NON_CCA) { + struct pkey_protkey pkey; + + rc = pkey_nonccatok2pkey(key, keylen, &pkey); + if (rc) + return rc; + memcpy(protkey, pkey.protkey, pkey.len); + *protkeylen = pkey.len; + *protkeytype = pkey.type; + return 0; + } else { + 
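/*
 * Minimal sketch (assumed helper, not from the patch): the new
 * TOKVER_EP11_AES_WITH_HEADER and TOKVER_EP11_ECC_WITH_HEADER tokens
 * prefix the EP11 blob with a struct ep11kblob_header, which is why the
 * code above consistently evaluates the blob at
 * key + sizeof(struct ep11kblob_header).
 */
static inline const struct ep11keyblob *ep11_hdr_payload(const u8 *key)
{
	/* key points to a ..._WITH_HEADER token; the blob follows the header */
	return (const struct ep11keyblob *)
		(key + sizeof(struct ep11kblob_header));
}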
DEBUG_ERR("%s unknown/unsupported blob type %d\n", + __func__, hdr->type); + return -EINVAL; + } + + /* simple try all apqns from the list */ + for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { + card = apqns[i].card; + dom = apqns[i].domain; + if (hdr->type == TOKTYPE_NON_CCA + && (hdr->version == TOKVER_EP11_AES_WITH_HEADER + || hdr->version == TOKVER_EP11_ECC_WITH_HEADER) + && is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) + rc = ep11_kblob2protkey(card, dom, key, hdr->len, + protkey, protkeylen, protkeytype); + else if (hdr->type == TOKTYPE_NON_CCA + && hdr->version == TOKVER_EP11_AES + && is_ep11_keyblob(key)) + rc = ep11_kblob2protkey(card, dom, key, hdr->len, + protkey, protkeylen, protkeytype); + else if (hdr->type == TOKTYPE_CCA_INTERNAL && + hdr->version == TOKVER_CCA_AES) + rc = cca_sec2protkey(card, dom, key, protkey, + protkeylen, protkeytype); + else if (hdr->type == TOKTYPE_CCA_INTERNAL && + hdr->version == TOKVER_CCA_VLSC) + rc = cca_cipher2protkey(card, dom, key, protkey, + protkeylen, protkeytype); + else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) + rc = cca_ecc2protkey(card, dom, key, protkey, + protkeylen, protkeytype); + else + return -EINVAL; + } + + return rc; +} + /* * File io functions */ @@ -1329,6 +1500,55 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, kfree(apqns); break; } + case PKEY_KBLOB2PROTK3: { + struct pkey_kblob2pkey3 __user *utp = (void __user *) arg; + struct pkey_kblob2pkey3 ktp; + struct pkey_apqn *apqns = NULL; + u32 protkeylen = PROTKEYBLOBBUFSIZE; + u8 *kkey, *protkey; + + if (copy_from_user(&ktp, utp, sizeof(ktp))) + return -EFAULT; + apqns = _copy_apqns_from_user(ktp.apqns, ktp.apqn_entries); + if (IS_ERR(apqns)) + return PTR_ERR(apqns); + kkey = _copy_key_from_user(ktp.key, ktp.keylen); + if (IS_ERR(kkey)) { + kfree(apqns); + return PTR_ERR(kkey); + } + protkey = kmalloc(protkeylen, GFP_KERNEL); + if (!protkey) { + kfree(apqns); + kfree(kkey); + return -ENOMEM; + } + rc = pkey_keyblob2pkey3(apqns, ktp.apqn_entries, kkey, + ktp.keylen, &ktp.pkeytype, + protkey, &protkeylen); + DEBUG_DBG("%s pkey_keyblob2pkey3()=%d\n", __func__, rc); + kfree(apqns); + kfree(kkey); + if (rc) { + kfree(protkey); + break; + } + if (ktp.pkey && ktp.pkeylen) { + if (protkeylen > ktp.pkeylen) { + kfree(protkey); + return -EINVAL; + } + if (copy_to_user(ktp.pkey, protkey, protkeylen)) { + kfree(protkey); + return -EFAULT; + } + } + kfree(protkey); + ktp.pkeylen = protkeylen; + if (copy_to_user(utp, &ktp, sizeof(ktp))) + return -EFAULT; + break; + } default: /* unknown/unsupported ioctl cmd */ return -ENOTTY; @@ -1589,7 +1809,7 @@ static ssize_t pkey_ccacipher_aes_attr_read(enum pkey_key_size keybits, /* build a list of apqns able to generate an cipher key */ rc = cca_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF, - ZCRYPT_CEX6, 0, 0, 0); + ZCRYPT_CEX6, 0, 0, 0, 0); if (rc) return rc; diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index f314936b5462..f60f9fb25214 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -25,6 +25,7 @@ #include <linux/debugfs.h> #include <linux/cdev.h> #include <linux/ctype.h> +#include <linux/capability.h> #include <asm/debug.h> #define CREATE_TRACE_POINTS @@ -602,13 +603,13 @@ static inline bool zcrypt_card_compare(struct zcrypt_card *zc, unsigned int pref_weight) { if (!pref_zc) - return false; + return true; weight += atomic_read(&zc->load); pref_weight += atomic_read(&pref_zc->load); if (weight == pref_weight) - return 
atomic64_read(&zc->card->total_request_count) > + return atomic64_read(&zc->card->total_request_count) < atomic64_read(&pref_zc->card->total_request_count); - return weight > pref_weight; + return weight < pref_weight; } static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq, @@ -617,30 +618,39 @@ static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq, unsigned int pref_weight) { if (!pref_zq) - return false; + return true; weight += atomic_read(&zq->load); pref_weight += atomic_read(&pref_zq->load); if (weight == pref_weight) - return zq->queue->total_request_count > + return zq->queue->total_request_count < pref_zq->queue->total_request_count; - return weight > pref_weight; + return weight < pref_weight; } /* * zcrypt ioctls. */ static long zcrypt_rsa_modexpo(struct ap_perms *perms, + struct zcrypt_track *tr, struct ica_rsa_modexpo *mex) { struct zcrypt_card *zc, *pref_zc; struct zcrypt_queue *zq, *pref_zq; - unsigned int weight = 0, pref_weight = 0; + struct ap_message ap_msg; + unsigned int wgt = 0, pref_wgt = 0; unsigned int func_code; - int qid = 0, rc = -ENODEV; + int cpen, qpen, qid = 0, rc = -ENODEV; struct module *mod; trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO); + ap_init_message(&ap_msg); + +#ifdef CONFIG_ZCRYPT_DEBUG + if (tr && tr->fi.cmd) + ap_msg.fi.cmd = tr->fi.cmd; +#endif + if (mex->outputdatalength < mex->inputdatalength) { func_code = 0; rc = -EINVAL; @@ -662,8 +672,9 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms, pref_zq = NULL; spin_lock(&zcrypt_list_lock); for_each_zcrypt_card(zc) { - /* Check for online accelarator and CCA cards */ - if (!zc->online || !(zc->card->functions & 0x18000000)) + /* Check for useable accelarator or CCA card */ + if (!zc->online || !zc->card->config || + !(zc->card->functions & 0x18000000)) continue; /* Check for size limits */ if (zc->min_mod_size > mex->inputdatalength || @@ -673,26 +684,35 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms, if (!zcrypt_check_card(perms, zc->card->id)) continue; /* get weight index of the card device */ - weight = zc->speed_rating[func_code]; - if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) + wgt = zc->speed_rating[func_code]; + /* penalty if this msg was previously sent via this card */ + cpen = (tr && tr->again_counter && tr->last_qid && + AP_QID_CARD(tr->last_qid) == zc->card->id) ? + TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0; + if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt)) continue; for_each_zcrypt_queue(zq, zc) { - /* check if device is online and eligible */ - if (!zq->online || !zq->ops->rsa_modexpo) + /* check if device is useable and eligible */ + if (!zq->online || !zq->ops->rsa_modexpo || + !zq->queue->config) continue; /* check if device node has admission for this queue */ if (!zcrypt_check_queue(perms, AP_QID_QUEUE(zq->queue->qid))) continue; - if (zcrypt_queue_compare(zq, pref_zq, - weight, pref_weight)) + /* penalty if the msg was previously sent at this qid */ + qpen = (tr && tr->again_counter && tr->last_qid && + tr->last_qid == zq->queue->qid) ? 
+ TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0; + if (!zcrypt_queue_compare(zq, pref_zq, + wgt + cpen + qpen, pref_wgt)) continue; pref_zc = zc; pref_zq = zq; - pref_weight = weight; + pref_wgt = wgt + cpen + qpen; } } - pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight); + pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt); spin_unlock(&zcrypt_list_lock); if (!pref_zq) { @@ -701,30 +721,44 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms, } qid = pref_zq->queue->qid; - rc = pref_zq->ops->rsa_modexpo(pref_zq, mex); + rc = pref_zq->ops->rsa_modexpo(pref_zq, mex, &ap_msg); spin_lock(&zcrypt_list_lock); - zcrypt_drop_queue(pref_zc, pref_zq, mod, weight); + zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt); spin_unlock(&zcrypt_list_lock); out: + ap_release_message(&ap_msg); + if (tr) { + tr->last_rc = rc; + tr->last_qid = qid; + } trace_s390_zcrypt_rep(mex, func_code, rc, AP_QID_CARD(qid), AP_QID_QUEUE(qid)); return rc; } static long zcrypt_rsa_crt(struct ap_perms *perms, + struct zcrypt_track *tr, struct ica_rsa_modexpo_crt *crt) { struct zcrypt_card *zc, *pref_zc; struct zcrypt_queue *zq, *pref_zq; - unsigned int weight = 0, pref_weight = 0; + struct ap_message ap_msg; + unsigned int wgt = 0, pref_wgt = 0; unsigned int func_code; - int qid = 0, rc = -ENODEV; + int cpen, qpen, qid = 0, rc = -ENODEV; struct module *mod; trace_s390_zcrypt_req(crt, TP_ICARSACRT); + ap_init_message(&ap_msg); + +#ifdef CONFIG_ZCRYPT_DEBUG + if (tr && tr->fi.cmd) + ap_msg.fi.cmd = tr->fi.cmd; +#endif + if (crt->outputdatalength < crt->inputdatalength) { func_code = 0; rc = -EINVAL; @@ -746,8 +780,9 @@ static long zcrypt_rsa_crt(struct ap_perms *perms, pref_zq = NULL; spin_lock(&zcrypt_list_lock); for_each_zcrypt_card(zc) { - /* Check for online accelarator and CCA cards */ - if (!zc->online || !(zc->card->functions & 0x18000000)) + /* Check for useable accelarator or CCA card */ + if (!zc->online || !zc->card->config || + !(zc->card->functions & 0x18000000)) continue; /* Check for size limits */ if (zc->min_mod_size > crt->inputdatalength || @@ -757,26 +792,35 @@ static long zcrypt_rsa_crt(struct ap_perms *perms, if (!zcrypt_check_card(perms, zc->card->id)) continue; /* get weight index of the card device */ - weight = zc->speed_rating[func_code]; - if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) + wgt = zc->speed_rating[func_code]; + /* penalty if this msg was previously sent via this card */ + cpen = (tr && tr->again_counter && tr->last_qid && + AP_QID_CARD(tr->last_qid) == zc->card->id) ? + TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0; + if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt)) continue; for_each_zcrypt_queue(zq, zc) { - /* check if device is online and eligible */ - if (!zq->online || !zq->ops->rsa_modexpo_crt) + /* check if device is useable and eligible */ + if (!zq->online || !zq->ops->rsa_modexpo_crt || + !zq->queue->config) continue; /* check if device node has admission for this queue */ if (!zcrypt_check_queue(perms, AP_QID_QUEUE(zq->queue->qid))) continue; - if (zcrypt_queue_compare(zq, pref_zq, - weight, pref_weight)) + /* penalty if the msg was previously sent at this qid */ + qpen = (tr && tr->again_counter && tr->last_qid && + tr->last_qid == zq->queue->qid) ? 
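/*
 * Sketch of the selection rule rewritten above, for illustration only:
 * zcrypt_card_compare()/zcrypt_queue_compare() now answer "is this
 * candidate preferable?", so a lower (penalized) load weight wins and the
 * first usable candidate is always taken.  On a retried request, the card
 * and queue that handled the failed attempt get TRACK_AGAIN_*_PENALTY
 * added, steering the retry elsewhere when possible.  prefer_candidate()
 * is an assumed name and omits the equal-weight tie break on request
 * counts used by the real code.
 */
static bool prefer_candidate(unsigned int cand_wgt, unsigned int penalty,
			     unsigned int best_wgt, bool have_best)
{
	if (!have_best)
		return true;			/* first usable device wins */
	return cand_wgt + penalty < best_wgt;	/* lower weight is better */
}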
+ TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0; + if (!zcrypt_queue_compare(zq, pref_zq, + wgt + cpen + qpen, pref_wgt)) continue; pref_zc = zc; pref_zq = zq; - pref_weight = weight; + pref_wgt = wgt + cpen + qpen; } } - pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight); + pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt); spin_unlock(&zcrypt_list_lock); if (!pref_zq) { @@ -785,35 +829,52 @@ static long zcrypt_rsa_crt(struct ap_perms *perms, } qid = pref_zq->queue->qid; - rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt); + rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt, &ap_msg); spin_lock(&zcrypt_list_lock); - zcrypt_drop_queue(pref_zc, pref_zq, mod, weight); + zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt); spin_unlock(&zcrypt_list_lock); out: + ap_release_message(&ap_msg); + if (tr) { + tr->last_rc = rc; + tr->last_qid = qid; + } trace_s390_zcrypt_rep(crt, func_code, rc, AP_QID_CARD(qid), AP_QID_QUEUE(qid)); return rc; } -static long _zcrypt_send_cprb(struct ap_perms *perms, +static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms, + struct zcrypt_track *tr, struct ica_xcRB *xcRB) { struct zcrypt_card *zc, *pref_zc; struct zcrypt_queue *zq, *pref_zq; struct ap_message ap_msg; - unsigned int weight = 0, pref_weight = 0; + unsigned int wgt = 0, pref_wgt = 0; unsigned int func_code; unsigned short *domain, tdom; - int qid = 0, rc = -ENODEV; + int cpen, qpen, qid = 0, rc = -ENODEV; struct module *mod; trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB); xcRB->status = 0; ap_init_message(&ap_msg); - rc = get_cprb_fc(xcRB, &ap_msg, &func_code, &domain); + +#ifdef CONFIG_ZCRYPT_DEBUG + if (tr && tr->fi.cmd) + ap_msg.fi.cmd = tr->fi.cmd; + if (tr && tr->fi.action == AP_FI_ACTION_CCA_AGENT_FF) { + ZCRYPT_DBF_WARN("%s fi cmd 0x%04x: forcing invalid agent_ID 'FF'\n", + __func__, tr->fi.cmd); + xcRB->agent_ID = 0x4646; + } +#endif + + rc = get_cprb_fc(userspace, xcRB, &ap_msg, &func_code, &domain); if (rc) goto out; @@ -832,8 +893,9 @@ static long _zcrypt_send_cprb(struct ap_perms *perms, pref_zq = NULL; spin_lock(&zcrypt_list_lock); for_each_zcrypt_card(zc) { - /* Check for online CCA cards */ - if (!zc->online || !(zc->card->functions & 0x10000000)) + /* Check for useable CCA card */ + if (!zc->online || !zc->card->config || + !(zc->card->functions & 0x10000000)) continue; /* Check for user selected CCA card */ if (xcRB->user_defined != AUTOSELECT && @@ -843,13 +905,18 @@ static long _zcrypt_send_cprb(struct ap_perms *perms, if (!zcrypt_check_card(perms, zc->card->id)) continue; /* get weight index of the card device */ - weight = speed_idx_cca(func_code) * zc->speed_rating[SECKEY]; - if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) + wgt = speed_idx_cca(func_code) * zc->speed_rating[SECKEY]; + /* penalty if this msg was previously sent via this card */ + cpen = (tr && tr->again_counter && tr->last_qid && + AP_QID_CARD(tr->last_qid) == zc->card->id) ? 
+ TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0; + if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt)) continue; for_each_zcrypt_queue(zq, zc) { - /* check if device is online and eligible */ + /* check for device useable and eligible */ if (!zq->online || !zq->ops->send_cprb || + !zq->queue->config || (tdom != AUTOSEL_DOM && tdom != AP_QID_QUEUE(zq->queue->qid))) continue; @@ -857,15 +924,19 @@ static long _zcrypt_send_cprb(struct ap_perms *perms, if (!zcrypt_check_queue(perms, AP_QID_QUEUE(zq->queue->qid))) continue; - if (zcrypt_queue_compare(zq, pref_zq, - weight, pref_weight)) + /* penalty if the msg was previously sent at this qid */ + qpen = (tr && tr->again_counter && tr->last_qid && + tr->last_qid == zq->queue->qid) ? + TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0; + if (!zcrypt_queue_compare(zq, pref_zq, + wgt + cpen + qpen, pref_wgt)) continue; pref_zc = zc; pref_zq = zq; - pref_weight = weight; + pref_wgt = wgt + cpen + qpen; } } - pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight); + pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt); spin_unlock(&zcrypt_list_lock); if (!pref_zq) { @@ -878,14 +949,26 @@ static long _zcrypt_send_cprb(struct ap_perms *perms, if (*domain == AUTOSEL_DOM) *domain = AP_QID_QUEUE(qid); - rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg); +#ifdef CONFIG_ZCRYPT_DEBUG + if (tr && tr->fi.action == AP_FI_ACTION_CCA_DOM_INVAL) { + ZCRYPT_DBF_WARN("%s fi cmd 0x%04x: forcing invalid domain\n", + __func__, tr->fi.cmd); + *domain = 99; + } +#endif + + rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcRB, &ap_msg); spin_lock(&zcrypt_list_lock); - zcrypt_drop_queue(pref_zc, pref_zq, mod, weight); + zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt); spin_unlock(&zcrypt_list_lock); out: ap_release_message(&ap_msg); + if (tr) { + tr->last_rc = rc; + tr->last_qid = qid; + } trace_s390_zcrypt_rep(xcRB, func_code, rc, AP_QID_CARD(qid), AP_QID_QUEUE(qid)); return rc; @@ -893,7 +976,7 @@ out: long zcrypt_send_cprb(struct ica_xcRB *xcRB) { - return _zcrypt_send_cprb(&ap_perms, xcRB); + return _zcrypt_send_cprb(false, &ap_perms, NULL, xcRB); } EXPORT_SYMBOL(zcrypt_send_cprb); @@ -924,23 +1007,29 @@ static bool is_desired_ep11_queue(unsigned int dev_qid, return false; } -static long _zcrypt_send_ep11_cprb(struct ap_perms *perms, +static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms, + struct zcrypt_track *tr, struct ep11_urb *xcrb) { struct zcrypt_card *zc, *pref_zc; struct zcrypt_queue *zq, *pref_zq; struct ep11_target_dev *targets; unsigned short target_num; - unsigned int weight = 0, pref_weight = 0; + unsigned int wgt = 0, pref_wgt = 0; unsigned int func_code; struct ap_message ap_msg; - int qid = 0, rc = -ENODEV; + int cpen, qpen, qid = 0, rc = -ENODEV; struct module *mod; trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB); ap_init_message(&ap_msg); +#ifdef CONFIG_ZCRYPT_DEBUG + if (tr && tr->fi.cmd) + ap_msg.fi.cmd = tr->fi.cmd; +#endif + target_num = (unsigned short) xcrb->targets_num; /* empty list indicates autoselect (all available targets) */ @@ -956,7 +1045,7 @@ static long _zcrypt_send_ep11_cprb(struct ap_perms *perms, } uptr = (struct ep11_target_dev __force __user *) xcrb->targets; - if (copy_from_user(targets, uptr, + if (z_copy_from_user(userspace, targets, uptr, target_num * sizeof(*targets))) { func_code = 0; rc = -EFAULT; @@ -964,7 +1053,7 @@ static long _zcrypt_send_ep11_cprb(struct ap_perms *perms, } } - rc = get_ep11cprb_fc(xcrb, &ap_msg, &func_code); + rc = get_ep11cprb_fc(userspace, xcrb, &ap_msg, &func_code); if (rc) goto 
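/*
 * Illustration (assumed helper, not from the patch): with the set_fs()
 * address space override gone, the new userspace flag threaded through
 * _zcrypt_send_cprb()/_zcrypt_send_ep11_cprb() decides per copy whether
 * the buffer is a real __user pointer or kernel memory, via the
 * z_copy_from_user()/z_copy_to_user() helpers added in zcrypt_api.h.
 */
static int fetch_targets(bool userspace, void *dst,
			 const void __user *src, unsigned long len)
{
	/* z_copy_from_user() falls back to memcpy() for kernel callers */
	if (z_copy_from_user(userspace, dst, src, len))
		return -EFAULT;
	return 0;
}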
out_free; @@ -972,8 +1061,9 @@ static long _zcrypt_send_ep11_cprb(struct ap_perms *perms, pref_zq = NULL; spin_lock(&zcrypt_list_lock); for_each_zcrypt_card(zc) { - /* Check for online EP11 cards */ - if (!zc->online || !(zc->card->functions & 0x04000000)) + /* Check for useable EP11 card */ + if (!zc->online || !zc->card->config || + !(zc->card->functions & 0x04000000)) continue; /* Check for user selected EP11 card */ if (targets && @@ -983,13 +1073,18 @@ static long _zcrypt_send_ep11_cprb(struct ap_perms *perms, if (!zcrypt_check_card(perms, zc->card->id)) continue; /* get weight index of the card device */ - weight = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY]; - if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) + wgt = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY]; + /* penalty if this msg was previously sent via this card */ + cpen = (tr && tr->again_counter && tr->last_qid && + AP_QID_CARD(tr->last_qid) == zc->card->id) ? + TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0; + if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt)) continue; for_each_zcrypt_queue(zq, zc) { - /* check if device is online and eligible */ + /* check if device is useable and eligible */ if (!zq->online || !zq->ops->send_ep11_cprb || + !zq->queue->config || (targets && !is_desired_ep11_queue(zq->queue->qid, target_num, targets))) @@ -998,15 +1093,19 @@ static long _zcrypt_send_ep11_cprb(struct ap_perms *perms, if (!zcrypt_check_queue(perms, AP_QID_QUEUE(zq->queue->qid))) continue; - if (zcrypt_queue_compare(zq, pref_zq, - weight, pref_weight)) + /* penalty if the msg was previously sent at this qid */ + qpen = (tr && tr->again_counter && tr->last_qid && + tr->last_qid == zq->queue->qid) ? + TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0; + if (!zcrypt_queue_compare(zq, pref_zq, + wgt + cpen + qpen, pref_wgt)) continue; pref_zc = zc; pref_zq = zq; - pref_weight = weight; + pref_wgt = wgt + cpen + qpen; } } - pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight); + pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt); spin_unlock(&zcrypt_list_lock); if (!pref_zq) { @@ -1015,16 +1114,20 @@ static long _zcrypt_send_ep11_cprb(struct ap_perms *perms, } qid = pref_zq->queue->qid; - rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg); + rc = pref_zq->ops->send_ep11_cprb(userspace, pref_zq, xcrb, &ap_msg); spin_lock(&zcrypt_list_lock); - zcrypt_drop_queue(pref_zc, pref_zq, mod, weight); + zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt); spin_unlock(&zcrypt_list_lock); out_free: kfree(targets); out: ap_release_message(&ap_msg); + if (tr) { + tr->last_rc = rc; + tr->last_qid = qid; + } trace_s390_zcrypt_rep(xcrb, func_code, rc, AP_QID_CARD(qid), AP_QID_QUEUE(qid)); return rc; @@ -1032,7 +1135,7 @@ out: long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb) { - return _zcrypt_send_ep11_cprb(&ap_perms, xcrb); + return _zcrypt_send_ep11_cprb(false, &ap_perms, NULL, xcrb); } EXPORT_SYMBOL(zcrypt_send_ep11_cprb); @@ -1040,7 +1143,7 @@ static long zcrypt_rng(char *buffer) { struct zcrypt_card *zc, *pref_zc; struct zcrypt_queue *zq, *pref_zq; - unsigned int weight = 0, pref_weight = 0; + unsigned int wgt = 0, pref_wgt = 0; unsigned int func_code; struct ap_message ap_msg; unsigned int domain; @@ -1058,26 +1161,27 @@ static long zcrypt_rng(char *buffer) pref_zq = NULL; spin_lock(&zcrypt_list_lock); for_each_zcrypt_card(zc) { - /* Check for online CCA cards */ - if (!zc->online || !(zc->card->functions & 0x10000000)) + /* Check for useable CCA card */ + if (!zc->online || !zc->card->config || + 
!(zc->card->functions & 0x10000000)) continue; /* get weight index of the card device */ - weight = zc->speed_rating[func_code]; - if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) + wgt = zc->speed_rating[func_code]; + if (!zcrypt_card_compare(zc, pref_zc, wgt, pref_wgt)) continue; for_each_zcrypt_queue(zq, zc) { - /* check if device is online and eligible */ - if (!zq->online || !zq->ops->rng) + /* check if device is useable and eligible */ + if (!zq->online || !zq->ops->rng || + !zq->queue->config) continue; - if (zcrypt_queue_compare(zq, pref_zq, - weight, pref_weight)) + if (!zcrypt_queue_compare(zq, pref_zq, wgt, pref_wgt)) continue; pref_zc = zc; pref_zq = zq; - pref_weight = weight; + pref_wgt = wgt; } } - pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight); + pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt); spin_unlock(&zcrypt_list_lock); if (!pref_zq) { @@ -1089,7 +1193,7 @@ static long zcrypt_rng(char *buffer) rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg); spin_lock(&zcrypt_list_lock); - zcrypt_drop_queue(pref_zc, pref_zq, mod, weight); + zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt); spin_unlock(&zcrypt_list_lock); out: @@ -1301,19 +1405,39 @@ static int zcrypt_requestq_count(void) static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg) { int rc; + struct zcrypt_track tr; struct ica_rsa_modexpo mex; struct ica_rsa_modexpo __user *umex = (void __user *) arg; + memset(&tr, 0, sizeof(tr)); if (copy_from_user(&mex, umex, sizeof(mex))) return -EFAULT; + +#ifdef CONFIG_ZCRYPT_DEBUG + if (mex.inputdatalength & (1U << 31)) { + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + tr.fi.cmd = (u16)(mex.inputdatalength >> 16); + } + mex.inputdatalength &= 0x0000FFFF; +#endif + do { - rc = zcrypt_rsa_modexpo(perms, &mex); - } while (rc == -EAGAIN); + rc = zcrypt_rsa_modexpo(perms, &tr, &mex); + if (rc == -EAGAIN) + tr.again_counter++; +#ifdef CONFIG_ZCRYPT_DEBUG + if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY)) + break; +#endif + } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); /* on failure: retry once again after a requested rescan */ if ((rc == -ENODEV) && (zcrypt_process_rescan())) do { - rc = zcrypt_rsa_modexpo(perms, &mex); - } while (rc == -EAGAIN); + rc = zcrypt_rsa_modexpo(perms, &tr, &mex); + if (rc == -EAGAIN) + tr.again_counter++; + } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); if (rc) { ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d\n", rc); return rc; @@ -1324,19 +1448,39 @@ static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg) static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg) { int rc; + struct zcrypt_track tr; struct ica_rsa_modexpo_crt crt; struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg; + memset(&tr, 0, sizeof(tr)); if (copy_from_user(&crt, ucrt, sizeof(crt))) return -EFAULT; + +#ifdef CONFIG_ZCRYPT_DEBUG + if (crt.inputdatalength & (1U << 31)) { + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + tr.fi.cmd = (u16)(crt.inputdatalength >> 16); + } + crt.inputdatalength &= 0x0000FFFF; +#endif + do { - rc = zcrypt_rsa_crt(perms, &crt); - } while (rc == -EAGAIN); + rc = zcrypt_rsa_crt(perms, &tr, &crt); + if (rc == -EAGAIN) + tr.again_counter++; +#ifdef CONFIG_ZCRYPT_DEBUG + if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY)) + break; +#endif + } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); /* on failure: retry once again after a requested rescan */ if ((rc == -ENODEV) && (zcrypt_process_rescan())) do { - rc = 
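/*
 * Sketch of the CONFIG_ZCRYPT_DEBUG failure-injection encoding used by
 * the ioctl handlers above: if bit 31 of the length field is set
 * (CAP_SYS_ADMIN only, hence the new <linux/capability.h> include), the
 * upper halfword is taken as the fi command and the low 16 bits keep the
 * real input data length.  decode_fi_cmd() is an assumed helper written
 * only to restate that encoding.
 */
static int decode_fi_cmd(u32 *inputdatalength, u16 *fi_cmd)
{
	if (*inputdatalength & (1U << 31)) {
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		*fi_cmd = (u16)(*inputdatalength >> 16);
	}
	*inputdatalength &= 0x0000FFFF;
	return 0;
}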
zcrypt_rsa_crt(perms, &crt); - } while (rc == -EAGAIN); + rc = zcrypt_rsa_crt(perms, &tr, &crt); + if (rc == -EAGAIN) + tr.again_counter++; + } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); if (rc) { ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d\n", rc); return rc; @@ -1348,18 +1492,38 @@ static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg) { int rc; struct ica_xcRB xcRB; + struct zcrypt_track tr; struct ica_xcRB __user *uxcRB = (void __user *) arg; + memset(&tr, 0, sizeof(tr)); if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB))) return -EFAULT; + +#ifdef CONFIG_ZCRYPT_DEBUG + if (xcRB.status & (1U << 31)) { + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + tr.fi.cmd = (u16)(xcRB.status >> 16); + } + xcRB.status &= 0x0000FFFF; +#endif + do { - rc = _zcrypt_send_cprb(perms, &xcRB); - } while (rc == -EAGAIN); + rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB); + if (rc == -EAGAIN) + tr.again_counter++; +#ifdef CONFIG_ZCRYPT_DEBUG + if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY)) + break; +#endif + } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); /* on failure: retry once again after a requested rescan */ if ((rc == -ENODEV) && (zcrypt_process_rescan())) do { - rc = _zcrypt_send_cprb(perms, &xcRB); - } while (rc == -EAGAIN); + rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB); + if (rc == -EAGAIN) + tr.again_counter++; + } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); if (rc) ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDCPRB rc=%d status=0x%x\n", rc, xcRB.status); @@ -1372,18 +1536,38 @@ static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg) { int rc; struct ep11_urb xcrb; + struct zcrypt_track tr; struct ep11_urb __user *uxcrb = (void __user *)arg; + memset(&tr, 0, sizeof(tr)); if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb))) return -EFAULT; + +#ifdef CONFIG_ZCRYPT_DEBUG + if (xcrb.req_len & (1ULL << 63)) { + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + tr.fi.cmd = (u16)(xcrb.req_len >> 48); + } + xcrb.req_len &= 0x0000FFFFFFFFFFFFULL; +#endif + do { - rc = _zcrypt_send_ep11_cprb(perms, &xcrb); - } while (rc == -EAGAIN); + rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb); + if (rc == -EAGAIN) + tr.again_counter++; +#ifdef CONFIG_ZCRYPT_DEBUG + if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY)) + break; +#endif + } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); /* on failure: retry once again after a requested rescan */ if ((rc == -ENODEV) && (zcrypt_process_rescan())) do { - rc = _zcrypt_send_ep11_cprb(perms, &xcrb); - } while (rc == -EAGAIN); + rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb); + if (rc == -EAGAIN) + tr.again_counter++; + } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); if (rc) ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc); if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb))) @@ -1536,8 +1720,10 @@ static long trans_modexpo32(struct ap_perms *perms, struct file *filp, struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg); struct compat_ica_rsa_modexpo mex32; struct ica_rsa_modexpo mex64; + struct zcrypt_track tr; long rc; + memset(&tr, 0, sizeof(tr)); if (copy_from_user(&mex32, umex32, sizeof(mex32))) return -EFAULT; mex64.inputdata = compat_ptr(mex32.inputdata); @@ -1547,13 +1733,17 @@ static long trans_modexpo32(struct ap_perms *perms, struct file *filp, mex64.b_key = compat_ptr(mex32.b_key); mex64.n_modulus = compat_ptr(mex32.n_modulus); do { - rc = zcrypt_rsa_modexpo(perms, &mex64); - } while (rc == -EAGAIN); + rc = zcrypt_rsa_modexpo(perms, 
&tr, &mex64); + if (rc == -EAGAIN) + tr.again_counter++; + } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); /* on failure: retry once again after a requested rescan */ if ((rc == -ENODEV) && (zcrypt_process_rescan())) do { - rc = zcrypt_rsa_modexpo(perms, &mex64); - } while (rc == -EAGAIN); + rc = zcrypt_rsa_modexpo(perms, &tr, &mex64); + if (rc == -EAGAIN) + tr.again_counter++; + } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); if (rc) return rc; return put_user(mex64.outputdatalength, @@ -1578,8 +1768,10 @@ static long trans_modexpo_crt32(struct ap_perms *perms, struct file *filp, struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg); struct compat_ica_rsa_modexpo_crt crt32; struct ica_rsa_modexpo_crt crt64; + struct zcrypt_track tr; long rc; + memset(&tr, 0, sizeof(tr)); if (copy_from_user(&crt32, ucrt32, sizeof(crt32))) return -EFAULT; crt64.inputdata = compat_ptr(crt32.inputdata); @@ -1592,13 +1784,17 @@ static long trans_modexpo_crt32(struct ap_perms *perms, struct file *filp, crt64.nq_prime = compat_ptr(crt32.nq_prime); crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv); do { - rc = zcrypt_rsa_crt(perms, &crt64); - } while (rc == -EAGAIN); + rc = zcrypt_rsa_crt(perms, &tr, &crt64); + if (rc == -EAGAIN) + tr.again_counter++; + } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); /* on failure: retry once again after a requested rescan */ if ((rc == -ENODEV) && (zcrypt_process_rescan())) do { - rc = zcrypt_rsa_crt(perms, &crt64); - } while (rc == -EAGAIN); + rc = zcrypt_rsa_crt(perms, &tr, &crt64); + if (rc == -EAGAIN) + tr.again_counter++; + } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); if (rc) return rc; return put_user(crt64.outputdatalength, @@ -1630,9 +1826,11 @@ static long trans_xcRB32(struct ap_perms *perms, struct file *filp, { struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg); struct compat_ica_xcRB xcRB32; + struct zcrypt_track tr; struct ica_xcRB xcRB64; long rc; + memset(&tr, 0, sizeof(tr)); if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32))) return -EFAULT; xcRB64.agent_ID = xcRB32.agent_ID; @@ -1656,13 +1854,17 @@ static long trans_xcRB32(struct ap_perms *perms, struct file *filp, xcRB64.priority_window = xcRB32.priority_window; xcRB64.status = xcRB32.status; do { - rc = _zcrypt_send_cprb(perms, &xcRB64); - } while (rc == -EAGAIN); + rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB64); + if (rc == -EAGAIN) + tr.again_counter++; + } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); /* on failure: retry once again after a requested rescan */ if ((rc == -ENODEV) && (zcrypt_process_rescan())) do { - rc = _zcrypt_send_cprb(perms, &xcRB64); - } while (rc == -EAGAIN); + rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB64); + if (rc == -EAGAIN) + tr.again_counter++; + } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length; xcRB32.reply_data_length = xcRB64.reply_data_length; xcRB32.status = xcRB64.status; diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h index 599e68bf53f7..51c0b8bdef50 100644 --- a/drivers/s390/crypto/zcrypt_api.h +++ b/drivers/s390/crypto/zcrypt_api.h @@ -55,13 +55,30 @@ enum crypto_ops { struct zcrypt_queue; +/* struct to hold tracking information for a userspace request/response */ +struct zcrypt_track { + int again_counter; /* retry attempts counter */ + int last_qid; /* last qid used */ + int last_rc; /* last return code */ +#ifdef CONFIG_ZCRYPT_DEBUG + struct ap_fi fi; /* 
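/*
 * Sketch of the retry pattern shared by the ioctl and compat paths above
 * (the wrapper name is an assumption): -EAGAIN is retried at most
 * TRACK_AGAIN_MAX times, and the zcrypt_track struct remembers the last
 * qid so the device selection can penalize it on the next attempt.
 */
static long modexpo_with_retry(struct ap_perms *perms,
			       struct ica_rsa_modexpo *mex)
{
	struct zcrypt_track tr;
	long rc;

	memset(&tr, 0, sizeof(tr));
	do {
		rc = zcrypt_rsa_modexpo(perms, &tr, mex);
		if (rc == -EAGAIN)
			tr.again_counter++;
	} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
	return rc;
}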
failure injection cmd */ +#endif +}; + +/* defines related to message tracking */ +#define TRACK_AGAIN_MAX 10 +#define TRACK_AGAIN_CARD_WEIGHT_PENALTY 1000 +#define TRACK_AGAIN_QUEUE_WEIGHT_PENALTY 10000 + struct zcrypt_ops { - long (*rsa_modexpo)(struct zcrypt_queue *, struct ica_rsa_modexpo *); + long (*rsa_modexpo)(struct zcrypt_queue *, struct ica_rsa_modexpo *, + struct ap_message *); long (*rsa_modexpo_crt)(struct zcrypt_queue *, - struct ica_rsa_modexpo_crt *); - long (*send_cprb)(struct zcrypt_queue *, struct ica_xcRB *, + struct ica_rsa_modexpo_crt *, + struct ap_message *); + long (*send_cprb)(bool userspace, struct zcrypt_queue *, struct ica_xcRB *, struct ap_message *); - long (*send_ep11_cprb)(struct zcrypt_queue *, struct ep11_urb *, + long (*send_ep11_cprb)(bool userspace, struct zcrypt_queue *, struct ep11_urb *, struct ap_message *); long (*rng)(struct zcrypt_queue *, char *, struct ap_message *); struct list_head list; /* zcrypt ops list. */ @@ -82,7 +99,7 @@ struct zcrypt_card { int min_mod_size; /* Min number of bits. */ int max_mod_size; /* Max number of bits. */ int max_exp_bit_length; - int speed_rating[NUM_OPS]; /* Speed idx of crypto ops. */ + const int *speed_rating; /* Speed idx of crypto ops. */ atomic_t load; /* Utilization of the crypto device */ int request_count; /* # current requests. */ @@ -145,4 +162,26 @@ void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus); int zcrypt_device_status_ext(int card, int queue, struct zcrypt_device_status_ext *devstatus); +static inline unsigned long z_copy_from_user(bool userspace, + void *to, + const void __user *from, + unsigned long n) +{ + if (likely(userspace)) + return copy_from_user(to, from, n); + memcpy(to, (void __force *) from, n); + return 0; +} + +static inline unsigned long z_copy_to_user(bool userspace, + void __user *to, + const void *from, + unsigned long n) +{ + if (likely(userspace)) + return copy_to_user(to, from, n); + memcpy((void __force *) to, from, n); + return 0; +} + #endif /* _ZCRYPT_API_H_ */ diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c index c53cab4b0c9e..e342eb86acd1 100644 --- a/drivers/s390/crypto/zcrypt_card.c +++ b/drivers/s390/crypto/zcrypt_card.c @@ -50,22 +50,28 @@ static ssize_t online_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct zcrypt_card *zc = to_ap_card(dev)->private; + struct ap_card *ac = to_ap_card(dev); + struct zcrypt_card *zc = ac->private; + int online = ac->config && zc->online ? 1 : 0; - return scnprintf(buf, PAGE_SIZE, "%d\n", zc->online); + return scnprintf(buf, PAGE_SIZE, "%d\n", online); } static ssize_t online_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct zcrypt_card *zc = to_ap_card(dev)->private; + struct ap_card *ac = to_ap_card(dev); + struct zcrypt_card *zc = ac->private; struct zcrypt_queue *zq; int online, id; if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1) return -EINVAL; + if (online && !ac->config) + return -ENODEV; + zc->online = online; id = zc->card->id; diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c index c793dcabd551..b1046811450f 100644 --- a/drivers/s390/crypto/zcrypt_ccamisc.c +++ b/drivers/s390/crypto/zcrypt_ccamisc.c @@ -173,6 +173,49 @@ int cca_check_secaescipherkey(debug_info_t *dbg, int dbflvl, EXPORT_SYMBOL(cca_check_secaescipherkey); /* + * Simple check if the token is a valid CCA secure ECC private + * key token. 
Returns 0 on success or errno value on failure. + */ +int cca_check_sececckeytoken(debug_info_t *dbg, int dbflvl, + const u8 *token, size_t keysize, + int checkcpacfexport) +{ + struct eccprivkeytoken *t = (struct eccprivkeytoken *) token; + +#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__) + + if (t->type != TOKTYPE_CCA_INTERNAL_PKA) { + if (dbg) + DBF("%s token check failed, type 0x%02x != 0x%02x\n", + __func__, (int) t->type, TOKTYPE_CCA_INTERNAL_PKA); + return -EINVAL; + } + if (t->len > keysize) { + if (dbg) + DBF("%s token check failed, len %d > keysize %zu\n", + __func__, (int) t->len, keysize); + return -EINVAL; + } + if (t->secid != 0x20) { + if (dbg) + DBF("%s token check failed, secid 0x%02x != 0x20\n", + __func__, (int) t->secid); + return -EINVAL; + } + if (checkcpacfexport && !(t->kutc & 0x01)) { + if (dbg) + DBF("%s token check failed, XPRTCPAC bit is 0\n", + __func__); + return -EINVAL; + } + +#undef DBF + + return 0; +} +EXPORT_SYMBOL(cca_check_sececckeytoken); + +/* * Allocate consecutive memory for request CPRB, request param * block, reply CPRB and reply param block and fill in values * for the common fields. Returns 0 on success or errno value @@ -249,24 +292,6 @@ static inline void prep_xcrb(struct ica_xcRB *pxcrb, } /* - * Helper function which calls zcrypt_send_cprb with - * memory management segment adjusted to kernel space - * so that the copy_from_user called within this - * function do in fact copy from kernel space. - */ -static inline int _zcrypt_send_cprb(struct ica_xcRB *xcrb) -{ - int rc; - mm_segment_t old_fs = get_fs(); - - set_fs(KERNEL_DS); - rc = zcrypt_send_cprb(xcrb); - set_fs(old_fs); - - return rc; -} - -/* * Generate (random) CCA AES DATA secure key. */ int cca_genseckey(u16 cardnr, u16 domain, @@ -359,7 +384,7 @@ int cca_genseckey(u16 cardnr, u16 domain, prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = _zcrypt_send_cprb(&xcrb); + rc = zcrypt_send_cprb(&xcrb); if (rc) { DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, errno %d\n", __func__, (int) cardnr, (int) domain, rc); @@ -497,7 +522,7 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize, prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = _zcrypt_send_cprb(&xcrb); + rc = zcrypt_send_cprb(&xcrb); if (rc) { DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", __func__, (int) cardnr, (int) domain, rc); @@ -624,7 +649,7 @@ int cca_sec2protkey(u16 cardnr, u16 domain, prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = _zcrypt_send_cprb(&xcrb); + rc = zcrypt_send_cprb(&xcrb); if (rc) { DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", __func__, (int) cardnr, (int) domain, rc); @@ -850,7 +875,7 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = _zcrypt_send_cprb(&xcrb); + rc = zcrypt_send_cprb(&xcrb); if (rc) { DEBUG_ERR( "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", @@ -1018,7 +1043,7 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain, prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = _zcrypt_send_cprb(&xcrb); + rc = zcrypt_send_cprb(&xcrb); if (rc) { DEBUG_ERR( "%s zcrypt_send_cprb (cardnr=%d 
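/*
 * Assumed usage sketch, not from the patch: a caller would typically run
 * the new cca_check_sececckeytoken() check (with CPACF export required)
 * before handing a CCA ECC private key token to cca_ecc2protkey(), as the
 * pkey code above does.  Passing NULL for the debug info is allowed since
 * the check only logs when a debug area is given.
 */
static int ecc_token_to_protkey(u16 card, u16 dom,
				const u8 *token, size_t toklen,
				u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{
	int rc;

	rc = cca_check_sececckeytoken(NULL, 0, token, toklen, 1);
	if (rc)
		return rc;	/* not a usable CCA ECC private key token */
	return cca_ecc2protkey(card, dom, token, protkey,
			       protkeylen, protkeytype);
}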
domain=%d) failed, rc=%d\n", @@ -1235,7 +1260,7 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey, prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = _zcrypt_send_cprb(&xcrb); + rc = zcrypt_send_cprb(&xcrb); if (rc) { DEBUG_ERR( "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", @@ -1316,6 +1341,156 @@ out: EXPORT_SYMBOL(cca_cipher2protkey); /* + * Derive protected key from CCA ECC secure private key. + */ +int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key, + u8 *protkey, u32 *protkeylen, u32 *protkeytype) +{ + int rc; + u8 *mem, *ptr; + struct CPRBX *preqcblk, *prepcblk; + struct ica_xcRB xcrb; + struct aureqparm { + u8 subfunc_code[2]; + u16 rule_array_len; + u8 rule_array[8]; + struct { + u16 len; + u16 tk_blob_len; + u16 tk_blob_tag; + u8 tk_blob[66]; + } vud; + struct { + u16 len; + u16 cca_key_token_len; + u16 cca_key_token_flags; + u8 cca_key_token[0]; + } kb; + } __packed * preqparm; + struct aurepparm { + u8 subfunc_code[2]; + u16 rule_array_len; + struct { + u16 len; + u16 sublen; + u16 tag; + struct cpacfkeyblock { + u8 version; /* version of this struct */ + u8 flags[2]; + u8 algo; + u8 form; + u8 pad1[3]; + u16 keylen; + u8 key[0]; /* the key (keylen bytes) */ + u16 keyattrlen; + u8 keyattr[32]; + u8 pad2[1]; + u8 vptype; + u8 vp[32]; /* verification pattern */ + } ckb; + } vud; + struct { + u16 len; + } kb; + } __packed * prepparm; + int keylen = ((struct eccprivkeytoken *)key)->len; + + /* get already prepared memory for 2 cprbs with param block each */ + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + if (rc) + return rc; + + /* fill request cprb struct */ + preqcblk->domain = domain; + + /* fill request cprb param block with AU request */ + preqparm = (struct aureqparm __force *) preqcblk->req_parmb; + memcpy(preqparm->subfunc_code, "AU", 2); + preqparm->rule_array_len = + sizeof(preqparm->rule_array_len) + + sizeof(preqparm->rule_array); + memcpy(preqparm->rule_array, "EXPT-SK ", 8); + /* vud, tk blob */ + preqparm->vud.len = sizeof(preqparm->vud); + preqparm->vud.tk_blob_len = sizeof(preqparm->vud.tk_blob) + + 2 * sizeof(uint16_t); + preqparm->vud.tk_blob_tag = 0x00C2; + /* kb, cca token */ + preqparm->kb.len = keylen + 3 * sizeof(uint16_t); + preqparm->kb.cca_key_token_len = keylen + 2 * sizeof(uint16_t); + memcpy(preqparm->kb.cca_key_token, key, keylen); + /* now fill length of param block into cprb */ + preqcblk->req_parml = sizeof(struct aureqparm) + keylen; + + /* fill xcrb struct */ + prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); + + /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ + rc = zcrypt_send_cprb(&xcrb); + if (rc) { + DEBUG_ERR( + "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", + __func__, (int) cardnr, (int) domain, rc); + goto out; + } + + /* check response returncode and reasoncode */ + if (prepcblk->ccp_rtcode != 0) { + DEBUG_ERR( + "%s unwrap secure key failure, card response %d/%d\n", + __func__, + (int) prepcblk->ccp_rtcode, + (int) prepcblk->ccp_rscode); + rc = -EIO; + goto out; + } + if (prepcblk->ccp_rscode != 0) { + DEBUG_WARN( + "%s unwrap secure key warning, card response %d/%d\n", + __func__, + (int) prepcblk->ccp_rtcode, + (int) prepcblk->ccp_rscode); + } + + /* process response cprb param block */ + ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX); + prepcblk->rpl_parmb = (u8 __user *) ptr; + prepparm = (struct aurepparm *) ptr; + + /* check the returned keyblock */ + if 
(prepparm->vud.ckb.version != 0x02) { + DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x != 0x02\n", + __func__, (int) prepparm->vud.ckb.version); + rc = -EIO; + goto out; + } + if (prepparm->vud.ckb.algo != 0x81) { + DEBUG_ERR( + "%s reply param keyblock algo mismatch 0x%02x != 0x81\n", + __func__, (int) prepparm->vud.ckb.algo); + rc = -EIO; + goto out; + } + + /* copy the translated protected key */ + if (prepparm->vud.ckb.keylen > *protkeylen) { + DEBUG_ERR("%s prot keylen mismatch %d > buffersize %u\n", + __func__, prepparm->vud.ckb.keylen, *protkeylen); + rc = -EIO; + goto out; + } + memcpy(protkey, prepparm->vud.ckb.key, prepparm->vud.ckb.keylen); + *protkeylen = prepparm->vud.ckb.keylen; + if (protkeytype) + *protkeytype = PKEY_KEYTYPE_ECC; + +out: + free_cprbmem(mem, PARMBSIZE, 0); + return rc; +} +EXPORT_SYMBOL(cca_ecc2protkey); + +/* * query cryptographic facility from CCA adapter */ int cca_query_crypto_facility(u16 cardnr, u16 domain, @@ -1366,7 +1541,7 @@ int cca_query_crypto_facility(u16 cardnr, u16 domain, prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = _zcrypt_send_cprb(&xcrb); + rc = zcrypt_send_cprb(&xcrb); if (rc) { DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", __func__, (int) cardnr, (int) domain, rc); @@ -1524,21 +1699,38 @@ static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci) rarray, &rlen, varray, &vlen); if (rc == 0 && rlen >= 10*8 && vlen >= 204) { memcpy(ci->serial, rarray, 8); - ci->new_mk_state = (char) rarray[7*8]; - ci->cur_mk_state = (char) rarray[8*8]; - ci->old_mk_state = (char) rarray[9*8]; - if (ci->old_mk_state == '2') - memcpy(&ci->old_mkvp, varray + 172, 8); - if (ci->cur_mk_state == '2') - memcpy(&ci->cur_mkvp, varray + 184, 8); - if (ci->new_mk_state == '3') - memcpy(&ci->new_mkvp, varray + 196, 8); - found = 1; + ci->new_aes_mk_state = (char) rarray[7*8]; + ci->cur_aes_mk_state = (char) rarray[8*8]; + ci->old_aes_mk_state = (char) rarray[9*8]; + if (ci->old_aes_mk_state == '2') + memcpy(&ci->old_aes_mkvp, varray + 172, 8); + if (ci->cur_aes_mk_state == '2') + memcpy(&ci->cur_aes_mkvp, varray + 184, 8); + if (ci->new_aes_mk_state == '3') + memcpy(&ci->new_aes_mkvp, varray + 196, 8); + found++; + } + if (!found) + goto out; + rlen = vlen = PAGE_SIZE/2; + rc = cca_query_crypto_facility(cardnr, domain, "STATICSB", + rarray, &rlen, varray, &vlen); + if (rc == 0 && rlen >= 10*8 && vlen >= 240) { + ci->new_apka_mk_state = (char) rarray[7*8]; + ci->cur_apka_mk_state = (char) rarray[8*8]; + ci->old_apka_mk_state = (char) rarray[9*8]; + if (ci->old_apka_mk_state == '2') + memcpy(&ci->old_apka_mkvp, varray + 208, 8); + if (ci->cur_apka_mk_state == '2') + memcpy(&ci->cur_apka_mkvp, varray + 220, 8); + if (ci->new_apka_mk_state == '3') + memcpy(&ci->new_apka_mkvp, varray + 232, 8); + found++; } +out: free_page((unsigned long) pg); - - return found ? 0 : -ENOENT; + return found == 2 ? 
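/*
 * Sketch (assumed caller): cca_ecc2protkey() treats *protkeylen as an
 * in/out parameter - buffer size on entry, actual protected key length on
 * return - as the bounds check above shows, so a caller presets it:
 */
static int unwrap_ecc_key(u16 card, u16 dom, const u8 *seckey,
			  u8 *buf, u32 bufsize, u32 *keytype)
{
	u32 len = bufsize;	/* tell the function how much room we have */
	int rc;

	rc = cca_ecc2protkey(card, dom, seckey, buf, &len, keytype);
	return rc ? rc : (int)len;	/* >= 0: protected key length */
}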
0 : -ENOENT; } /* @@ -1592,16 +1784,16 @@ static int findcard(u64 mkvp, u16 *pcardnr, u16 *pdomain, /* enabled CCA card, check current mkvp from cache */ if (cca_info_cache_fetch(card, dom, &ci) == 0 && ci.hwtype >= minhwtype && - ci.cur_mk_state == '2' && - ci.cur_mkvp == mkvp) { + ci.cur_aes_mk_state == '2' && + ci.cur_aes_mkvp == mkvp) { if (!verify) break; /* verify: refresh card info */ if (fetch_cca_info(card, dom, &ci) == 0) { cca_info_cache_update(card, dom, &ci); if (ci.hwtype >= minhwtype && - ci.cur_mk_state == '2' && - ci.cur_mkvp == mkvp) + ci.cur_aes_mk_state == '2' && + ci.cur_aes_mkvp == mkvp) break; } } @@ -1623,12 +1815,12 @@ static int findcard(u64 mkvp, u16 *pcardnr, u16 *pdomain, if (fetch_cca_info(card, dom, &ci) == 0) { cca_info_cache_update(card, dom, &ci); if (ci.hwtype >= minhwtype && - ci.cur_mk_state == '2' && - ci.cur_mkvp == mkvp) + ci.cur_aes_mk_state == '2' && + ci.cur_aes_mkvp == mkvp) break; if (ci.hwtype >= minhwtype && - ci.old_mk_state == '2' && - ci.old_mkvp == mkvp && + ci.old_aes_mk_state == '2' && + ci.old_aes_mkvp == mkvp && oi < 0) oi = i; } @@ -1682,15 +1874,14 @@ int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify) EXPORT_SYMBOL(cca_findcard); int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, - int minhwtype, u64 cur_mkvp, u64 old_mkvp, int verify) + int minhwtype, int mktype, u64 cur_mkvp, u64 old_mkvp, + int verify) { struct zcrypt_device_status_ext *device_status; - int i, n, card, dom, curmatch, oldmatch, rc = 0; + u32 *_apqns = NULL, _nr_apqns = 0; + int i, card, dom, curmatch, oldmatch, rc = 0; struct cca_info ci; - *apqns = NULL; - *nr_apqns = 0; - /* fetch status of all crypto cards */ device_status = kvmalloc_array(MAX_ZDEV_ENTRIES_EXT, sizeof(struct zcrypt_device_status_ext), @@ -1699,67 +1890,73 @@ int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, return -ENOMEM; zcrypt_device_status_mask_ext(device_status); - /* loop two times: first gather eligible apqns, then store them */ - while (1) { - n = 0; - /* walk through all the crypto cards */ - for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) { - card = AP_QID_CARD(device_status[i].qid); - dom = AP_QID_QUEUE(device_status[i].qid); - /* check online state */ - if (!device_status[i].online) - continue; - /* check for cca functions */ - if (!(device_status[i].functions & 0x04)) - continue; - /* check cardnr */ - if (cardnr != 0xFFFF && card != cardnr) - continue; - /* check domain */ - if (domain != 0xFFFF && dom != domain) - continue; - /* get cca info on this apqn */ - if (cca_get_info(card, dom, &ci, verify)) - continue; - /* current master key needs to be valid */ - if (ci.cur_mk_state != '2') - continue; - /* check min hardware type */ - if (minhwtype > 0 && minhwtype > ci.hwtype) - continue; - if (cur_mkvp || old_mkvp) { - /* check mkvps */ - curmatch = oldmatch = 0; - if (cur_mkvp && cur_mkvp == ci.cur_mkvp) + /* allocate 1k space for up to 256 apqns */ + _apqns = kmalloc_array(256, sizeof(u32), GFP_KERNEL); + if (!_apqns) { + kvfree(device_status); + return -ENOMEM; + } + + /* walk through all the crypto apqnss */ + for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) { + card = AP_QID_CARD(device_status[i].qid); + dom = AP_QID_QUEUE(device_status[i].qid); + /* check online state */ + if (!device_status[i].online) + continue; + /* check for cca functions */ + if (!(device_status[i].functions & 0x04)) + continue; + /* check cardnr */ + if (cardnr != 0xFFFF && card != cardnr) + continue; + /* check domain */ + if (domain != 0xFFFF && dom != 
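/*
 * Sketch (assumed caller, values for illustration): the extra mktype
 * argument of cca_findcard2() selects which master key set is checked,
 * so a search for APQNs whose current APKA master key matches a given
 * verification pattern now looks like this:
 */
static int find_apka_apqns(u64 cur_mkvp, u32 **apqns, u32 *nr_apqns)
{
	/* 0xFFFF wildcards card and domain; ZCRYPT_CEX7 mirrors the
	 * minimum hardware type the pkey code above requests */
	return cca_findcard2(apqns, nr_apqns, 0xFFFF, 0xFFFF,
			     ZCRYPT_CEX7, APKA_MK_SET, cur_mkvp, 0, 1);
}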
domain) + continue; + /* get cca info on this apqn */ + if (cca_get_info(card, dom, &ci, verify)) + continue; + /* current master key needs to be valid */ + if (mktype == AES_MK_SET && ci.cur_aes_mk_state != '2') + continue; + if (mktype == APKA_MK_SET && ci.cur_apka_mk_state != '2') + continue; + /* check min hardware type */ + if (minhwtype > 0 && minhwtype > ci.hwtype) + continue; + if (cur_mkvp || old_mkvp) { + /* check mkvps */ + curmatch = oldmatch = 0; + if (mktype == AES_MK_SET) { + if (cur_mkvp && cur_mkvp == ci.cur_aes_mkvp) + curmatch = 1; + if (old_mkvp && ci.old_aes_mk_state == '2' && + old_mkvp == ci.old_aes_mkvp) + oldmatch = 1; + } else { + if (cur_mkvp && cur_mkvp == ci.cur_apka_mkvp) curmatch = 1; - if (old_mkvp && ci.old_mk_state == '2' && - old_mkvp == ci.old_mkvp) + if (old_mkvp && ci.old_apka_mk_state == '2' && + old_mkvp == ci.old_apka_mkvp) oldmatch = 1; - if ((cur_mkvp || old_mkvp) && - (curmatch + oldmatch < 1)) - continue; } - /* apqn passed all filtering criterons */ - if (*apqns && n < *nr_apqns) - (*apqns)[n] = (((u16)card) << 16) | ((u16) dom); - n++; - } - /* loop 2nd time: array has been filled */ - if (*apqns) - break; - /* loop 1st time: have # of eligible apqns in n */ - if (!n) { - rc = -ENODEV; /* no eligible apqns found */ - break; - } - *nr_apqns = n; - /* allocate array to store n apqns into */ - *apqns = kmalloc_array(n, sizeof(u32), GFP_KERNEL); - if (!*apqns) { - rc = -ENOMEM; - break; + if (curmatch + oldmatch < 1) + continue; } - verify = 0; + /* apqn passed all filtering criterons, add to the array */ + if (_nr_apqns < 256) + _apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16) dom); + } + + /* nothing found ? */ + if (!_nr_apqns) { + kfree(_apqns); + rc = -ENODEV; + } else { + /* no re-allocation, simple return the _apqns array */ + *apqns = _apqns; + *nr_apqns = _nr_apqns; + rc = 0; } kvfree(device_status); diff --git a/drivers/s390/crypto/zcrypt_ccamisc.h b/drivers/s390/crypto/zcrypt_ccamisc.h index 8b7a641671c9..e7105443d5cb 100644 --- a/drivers/s390/crypto/zcrypt_ccamisc.h +++ b/drivers/s390/crypto/zcrypt_ccamisc.h @@ -14,8 +14,9 @@ #include <asm/pkey.h> /* Key token types */ -#define TOKTYPE_NON_CCA 0x00 /* Non-CCA key token */ -#define TOKTYPE_CCA_INTERNAL 0x01 /* CCA internal key token */ +#define TOKTYPE_NON_CCA 0x00 /* Non-CCA key token */ +#define TOKTYPE_CCA_INTERNAL 0x01 /* CCA internal sym key token */ +#define TOKTYPE_CCA_INTERNAL_PKA 0x1f /* CCA internal asym key token */ /* For TOKTYPE_NON_CCA: */ #define TOKVER_PROTECTED_KEY 0x01 /* Protected key token */ @@ -93,6 +94,31 @@ struct cipherkeytoken { u8 vdata[]; /* variable part data follows */ } __packed; +/* inside view of an CCA secure ECC private key */ +struct eccprivkeytoken { + u8 type; /* 0x1f for internal asym key token */ + u8 version; /* should be 0x00 */ + u16 len; /* total key token length in bytes */ + u8 res1[4]; + u8 secid; /* 0x20 for ECC priv key section marker */ + u8 secver; /* section version */ + u16 seclen; /* section length */ + u8 wtype; /* wrapping method, 0x00 clear, 0x01 AES */ + u8 htype; /* hash method, 0x02 for SHA-256 */ + u8 res2[2]; + u8 kutc; /* key usage and translation control */ + u8 ctype; /* curve type */ + u8 kfs; /* key format and security */ + u8 ksrc; /* key source */ + u16 pbitlen; /* length of prime p in bits */ + u16 ibmadlen; /* IBM associated data length in bytes */ + u64 mkvp; /* master key verification pattern */ + u8 opk[48]; /* encrypted object protection key data */ + u16 adatalen; /* associated data length in bytes */ + u16 
fseclen; /* formated section length in bytes */ + u8 more_data[]; /* more data follows */ +} __packed; + /* Some defines for the CCA AES cipherkeytoken kmf1 field */ #define KMF1_XPRT_SYM 0x8000 #define KMF1_XPRT_UASY 0x4000 @@ -123,6 +149,14 @@ int cca_check_secaescipherkey(debug_info_t *dbg, int dbflvl, int checkcpacfexport); /* + * Simple check if the token is a valid CCA secure ECC private + * key token. Returns 0 on success or errno value on failure. + */ +int cca_check_sececckeytoken(debug_info_t *dbg, int dbflvl, + const u8 *token, size_t keysize, + int checkcpacfexport); + +/* * Generate (random) CCA AES DATA secure key. */ int cca_genseckey(u16 cardnr, u16 domain, u32 keybitsize, u8 *seckey); @@ -159,6 +193,12 @@ int cca_clr2cipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, const u8 *clrkey, u8 *keybuf, size_t *keybufsize); /* + * Derive proteced key from CCA ECC secure private key. + */ +int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key, + u8 *protkey, u32 *protkeylen, u32 *protkeytype); + +/* * Query cryptographic facility from CCA adapter */ int cca_query_crypto_facility(u16 cardnr, u16 domain, @@ -186,6 +226,8 @@ int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify); * - if verify is enabled and a cur_mkvp and/or old_mkvp * value is given, then refetch the cca_info and make sure the current * cur_mkvp or old_mkvp values of the apqn are used. + * The mktype determines which set of master keys to use: + * 0 = AES_MK_SET - AES MK set, 1 = APKA MK_SET - APKA MK set * The array of apqn entries is allocated with kmalloc and returned in *apqns; * the number of apqns stored into the list is returned in *nr_apqns. One apqn * entry is simple a 32 bit value with 16 bit cardnr and 16 bit domain nr and @@ -194,18 +236,28 @@ int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify); * -ENODEV is returned. 
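/*
 * Illustrative sketch (hypothetical caller, not taken from the hunks
 * above): how the reworked cca_findcard2() might be used with the new
 * mktype selector. Assumes kernel context with zcrypt_ccamisc.h in
 * scope; the 0xFFFF wildcards and AES_MK_SET follow the function
 * documentation, the helper name is made up.
 */
static int demo_list_aes_apqns(void)
{
	u32 *apqns, nr_apqns, i;
	int rc;

	/* any card, any domain, any hw type, AES MK set, no mkvp filter */
	rc = cca_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
			   0, AES_MK_SET, 0, 0, 0);
	if (rc)
		return rc;	/* e.g. -ENODEV if no eligible APQN found */

	for (i = 0; i < nr_apqns; i++)
		pr_info("APQN %02x.%04x\n",
			(int)(apqns[i] >> 16), (int)(apqns[i] & 0xFFFF));

	kfree(apqns);		/* caller owns the kmalloc'ed array */
	return 0;
}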
*/ int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, - int minhwtype, u64 cur_mkvp, u64 old_mkvp, int verify); + int minhwtype, int mktype, u64 cur_mkvp, u64 old_mkvp, + int verify); + +#define AES_MK_SET 0 +#define APKA_MK_SET 1 /* struct to hold info for each CCA queue */ struct cca_info { - int hwtype; /* one of the defined AP_DEVICE_TYPE_* */ - char new_mk_state; /* '1' empty, '2' partially full, '3' full */ - char cur_mk_state; /* '1' invalid, '2' valid */ - char old_mk_state; /* '1' invalid, '2' valid */ - u64 new_mkvp; /* truncated sha256 hash of new master key */ - u64 cur_mkvp; /* truncated sha256 hash of current master key */ - u64 old_mkvp; /* truncated sha256 hash of old master key */ - char serial[9]; /* serial number string (8 ascii numbers + 0x00) */ + int hwtype; /* one of the defined AP_DEVICE_TYPE_* */ + char new_aes_mk_state; /* '1' empty, '2' partially full, '3' full */ + char cur_aes_mk_state; /* '1' invalid, '2' valid */ + char old_aes_mk_state; /* '1' invalid, '2' valid */ + char new_apka_mk_state; /* '1' empty, '2' partially full, '3' full */ + char cur_apka_mk_state; /* '1' invalid, '2' valid */ + char old_apka_mk_state; /* '1' invalid, '2' valid */ + u64 new_aes_mkvp; /* truncated sha256 of new aes master key */ + u64 cur_aes_mkvp; /* truncated sha256 of current aes master key */ + u64 old_aes_mkvp; /* truncated sha256 of old aes master key */ + u64 new_apka_mkvp; /* truncated sha256 of new apka master key */ + u64 cur_apka_mkvp; /* truncated sha256 of current apka mk */ + u64 old_apka_mkvp; /* truncated sha256 of old apka mk */ + char serial[9]; /* serial number (8 ascii numbers + 0x00) */ }; /* diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c index b447f3e9e4a2..226a5612e855 100644 --- a/drivers/s390/crypto/zcrypt_cex2a.c +++ b/drivers/s390/crypto/zcrypt_cex2a.c @@ -94,8 +94,7 @@ static int zcrypt_cex2a_card_probe(struct ap_device *ap_dev) if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A) { zc->min_mod_size = CEX2A_MIN_MOD_SIZE; zc->max_mod_size = CEX2A_MAX_MOD_SIZE; - memcpy(zc->speed_rating, CEX2A_SPEED_IDX, - sizeof(CEX2A_SPEED_IDX)); + zc->speed_rating = CEX2A_SPEED_IDX; zc->max_exp_bit_length = CEX2A_MAX_MOD_SIZE; zc->type_string = "CEX2A"; zc->user_space_type = ZCRYPT_CEX2A; @@ -108,8 +107,7 @@ static int zcrypt_cex2a_card_probe(struct ap_device *ap_dev) zc->max_mod_size = CEX3A_MAX_MOD_SIZE; zc->max_exp_bit_length = CEX3A_MAX_MOD_SIZE; } - memcpy(zc->speed_rating, CEX3A_SPEED_IDX, - sizeof(CEX3A_SPEED_IDX)); + zc->speed_rating = CEX3A_SPEED_IDX; zc->type_string = "CEX3A"; zc->user_space_type = ZCRYPT_CEX3A; } else { diff --git a/drivers/s390/crypto/zcrypt_cex2c.c b/drivers/s390/crypto/zcrypt_cex2c.c index f00127a78bab..7a8cbdbe4408 100644 --- a/drivers/s390/crypto/zcrypt_cex2c.c +++ b/drivers/s390/crypto/zcrypt_cex2c.c @@ -109,26 +109,53 @@ static ssize_t cca_mkvps_show(struct device *dev, AP_QID_QUEUE(zq->queue->qid), &ci, zq->online); - if (ci.new_mk_state >= '1' && ci.new_mk_state <= '3') + if (ci.new_aes_mk_state >= '1' && ci.new_aes_mk_state <= '3') n = scnprintf(buf, PAGE_SIZE, "AES NEW: %s 0x%016llx\n", - new_state[ci.new_mk_state - '1'], ci.new_mkvp); + new_state[ci.new_aes_mk_state - '1'], + ci.new_aes_mkvp); else n = scnprintf(buf, PAGE_SIZE, "AES NEW: - -\n"); - if (ci.cur_mk_state >= '1' && ci.cur_mk_state <= '2') + if (ci.cur_aes_mk_state >= '1' && ci.cur_aes_mk_state <= '2') n += scnprintf(buf + n, PAGE_SIZE - n, "AES CUR: %s 0x%016llx\n", - cao_state[ci.cur_mk_state - '1'], 
ci.cur_mkvp); + cao_state[ci.cur_aes_mk_state - '1'], + ci.cur_aes_mkvp); else n += scnprintf(buf + n, PAGE_SIZE - n, "AES CUR: - -\n"); - if (ci.old_mk_state >= '1' && ci.old_mk_state <= '2') + if (ci.old_aes_mk_state >= '1' && ci.old_aes_mk_state <= '2') n += scnprintf(buf + n, PAGE_SIZE - n, "AES OLD: %s 0x%016llx\n", - cao_state[ci.old_mk_state - '1'], ci.old_mkvp); + cao_state[ci.old_aes_mk_state - '1'], + ci.old_aes_mkvp); else n += scnprintf(buf + n, PAGE_SIZE - n, "AES OLD: - -\n"); + if (ci.new_apka_mk_state >= '1' && ci.new_apka_mk_state <= '3') + n += scnprintf(buf + n, PAGE_SIZE - n, + "APKA NEW: %s 0x%016llx\n", + new_state[ci.new_apka_mk_state - '1'], + ci.new_apka_mkvp); + else + n += scnprintf(buf + n, PAGE_SIZE - n, "APKA NEW: - -\n"); + + if (ci.cur_apka_mk_state >= '1' && ci.cur_apka_mk_state <= '2') + n += scnprintf(buf + n, PAGE_SIZE - n, + "APKA CUR: %s 0x%016llx\n", + cao_state[ci.cur_apka_mk_state - '1'], + ci.cur_apka_mkvp); + else + n += scnprintf(buf + n, PAGE_SIZE - n, "APKA CUR: - -\n"); + + if (ci.old_apka_mk_state >= '1' && ci.old_apka_mk_state <= '2') + n += scnprintf(buf + n, PAGE_SIZE - n, + "APKA OLD: %s 0x%016llx\n", + cao_state[ci.old_apka_mk_state - '1'], + ci.old_apka_mkvp); + else + n += scnprintf(buf + n, PAGE_SIZE - n, "APKA OLD: - -\n"); + return n; } @@ -239,8 +266,7 @@ static int zcrypt_cex2c_card_probe(struct ap_device *ap_dev) case AP_DEVICE_TYPE_CEX2C: zc->user_space_type = ZCRYPT_CEX2C; zc->type_string = "CEX2C"; - memcpy(zc->speed_rating, CEX2C_SPEED_IDX, - sizeof(CEX2C_SPEED_IDX)); + zc->speed_rating = CEX2C_SPEED_IDX; zc->min_mod_size = CEX2C_MIN_MOD_SIZE; zc->max_mod_size = CEX2C_MAX_MOD_SIZE; zc->max_exp_bit_length = CEX2C_MAX_MOD_SIZE; @@ -248,8 +274,7 @@ static int zcrypt_cex2c_card_probe(struct ap_device *ap_dev) case AP_DEVICE_TYPE_CEX3C: zc->user_space_type = ZCRYPT_CEX3C; zc->type_string = "CEX3C"; - memcpy(zc->speed_rating, CEX3C_SPEED_IDX, - sizeof(CEX3C_SPEED_IDX)); + zc->speed_rating = CEX3C_SPEED_IDX; zc->min_mod_size = CEX3C_MIN_MOD_SIZE; zc->max_mod_size = CEX3C_MAX_MOD_SIZE; zc->max_exp_bit_length = CEX3C_MAX_MOD_SIZE; diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c index dc20d983e468..f5195bca1d85 100644 --- a/drivers/s390/crypto/zcrypt_cex4.c +++ b/drivers/s390/crypto/zcrypt_cex4.c @@ -121,26 +121,53 @@ static ssize_t cca_mkvps_show(struct device *dev, AP_QID_QUEUE(zq->queue->qid), &ci, zq->online); - if (ci.new_mk_state >= '1' && ci.new_mk_state <= '3') + if (ci.new_aes_mk_state >= '1' && ci.new_aes_mk_state <= '3') n = scnprintf(buf, PAGE_SIZE, "AES NEW: %s 0x%016llx\n", - new_state[ci.new_mk_state - '1'], ci.new_mkvp); + new_state[ci.new_aes_mk_state - '1'], + ci.new_aes_mkvp); else n = scnprintf(buf, PAGE_SIZE, "AES NEW: - -\n"); - if (ci.cur_mk_state >= '1' && ci.cur_mk_state <= '2') + if (ci.cur_aes_mk_state >= '1' && ci.cur_aes_mk_state <= '2') n += scnprintf(buf + n, PAGE_SIZE - n, "AES CUR: %s 0x%016llx\n", - cao_state[ci.cur_mk_state - '1'], ci.cur_mkvp); + cao_state[ci.cur_aes_mk_state - '1'], + ci.cur_aes_mkvp); else n += scnprintf(buf + n, PAGE_SIZE - n, "AES CUR: - -\n"); - if (ci.old_mk_state >= '1' && ci.old_mk_state <= '2') + if (ci.old_aes_mk_state >= '1' && ci.old_aes_mk_state <= '2') n += scnprintf(buf + n, PAGE_SIZE - n, "AES OLD: %s 0x%016llx\n", - cao_state[ci.old_mk_state - '1'], ci.old_mkvp); + cao_state[ci.old_aes_mk_state - '1'], + ci.old_aes_mkvp); else n += scnprintf(buf + n, PAGE_SIZE - n, "AES OLD: - -\n"); + if (ci.new_apka_mk_state >= '1' && 
ci.new_apka_mk_state <= '3') + n += scnprintf(buf + n, PAGE_SIZE - n, + "APKA NEW: %s 0x%016llx\n", + new_state[ci.new_apka_mk_state - '1'], + ci.new_apka_mkvp); + else + n += scnprintf(buf + n, PAGE_SIZE - n, "APKA NEW: - -\n"); + + if (ci.cur_apka_mk_state >= '1' && ci.cur_apka_mk_state <= '2') + n += scnprintf(buf + n, PAGE_SIZE - n, + "APKA CUR: %s 0x%016llx\n", + cao_state[ci.cur_apka_mk_state - '1'], + ci.cur_apka_mkvp); + else + n += scnprintf(buf + n, PAGE_SIZE - n, "APKA CUR: - -\n"); + + if (ci.old_apka_mk_state >= '1' && ci.old_apka_mk_state <= '2') + n += scnprintf(buf + n, PAGE_SIZE - n, + "APKA OLD: %s 0x%016llx\n", + cao_state[ci.old_apka_mk_state - '1'], + ci.old_apka_mkvp); + else + n += scnprintf(buf + n, PAGE_SIZE - n, "APKA OLD: - -\n"); + return n; } @@ -382,31 +409,31 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev) * Normalized speed ratings per crypto adapter * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY */ - static const int CEX4A_SPEED_IDX[] = { + static const int CEX4A_SPEED_IDX[NUM_OPS] = { 14, 19, 249, 42, 228, 1458, 0, 0}; - static const int CEX5A_SPEED_IDX[] = { + static const int CEX5A_SPEED_IDX[NUM_OPS] = { 8, 9, 20, 18, 66, 458, 0, 0}; - static const int CEX6A_SPEED_IDX[] = { + static const int CEX6A_SPEED_IDX[NUM_OPS] = { 6, 9, 20, 17, 65, 438, 0, 0}; - static const int CEX7A_SPEED_IDX[] = { + static const int CEX7A_SPEED_IDX[NUM_OPS] = { 6, 8, 17, 15, 54, 362, 0, 0}; - static const int CEX4C_SPEED_IDX[] = { + static const int CEX4C_SPEED_IDX[NUM_OPS] = { 59, 69, 308, 83, 278, 2204, 209, 40}; static const int CEX5C_SPEED_IDX[] = { 24, 31, 50, 37, 90, 479, 27, 10}; - static const int CEX6C_SPEED_IDX[] = { + static const int CEX6C_SPEED_IDX[NUM_OPS] = { 16, 20, 32, 27, 77, 455, 24, 9}; - static const int CEX7C_SPEED_IDX[] = { + static const int CEX7C_SPEED_IDX[NUM_OPS] = { 14, 16, 26, 23, 64, 376, 23, 8}; - static const int CEX4P_SPEED_IDX[] = { + static const int CEX4P_SPEED_IDX[NUM_OPS] = { 0, 0, 0, 0, 0, 0, 0, 50}; - static const int CEX5P_SPEED_IDX[] = { + static const int CEX5P_SPEED_IDX[NUM_OPS] = { 0, 0, 0, 0, 0, 0, 0, 10}; - static const int CEX6P_SPEED_IDX[] = { + static const int CEX6P_SPEED_IDX[NUM_OPS] = { 0, 0, 0, 0, 0, 0, 0, 9}; - static const int CEX7P_SPEED_IDX[] = { + static const int CEX7P_SPEED_IDX[NUM_OPS] = { 0, 0, 0, 0, 0, 0, 0, 8}; struct ap_card *ac = to_ap_card(&ap_dev->device); @@ -422,26 +449,22 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev) if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) { zc->type_string = "CEX4A"; zc->user_space_type = ZCRYPT_CEX4; - memcpy(zc->speed_rating, CEX4A_SPEED_IDX, - sizeof(CEX4A_SPEED_IDX)); + zc->speed_rating = CEX4A_SPEED_IDX; } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) { zc->type_string = "CEX5A"; zc->user_space_type = ZCRYPT_CEX5; - memcpy(zc->speed_rating, CEX5A_SPEED_IDX, - sizeof(CEX5A_SPEED_IDX)); + zc->speed_rating = CEX5A_SPEED_IDX; } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX6) { zc->type_string = "CEX6A"; zc->user_space_type = ZCRYPT_CEX6; - memcpy(zc->speed_rating, CEX6A_SPEED_IDX, - sizeof(CEX6A_SPEED_IDX)); + zc->speed_rating = CEX6A_SPEED_IDX; } else { zc->type_string = "CEX7A"; /* wrong user space type, just for compatibility * with the ZCRYPT_STATUS_MASK ioctl. 
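/*
 * Reading note, sketched assumption (the zcrypt_api.h hunk is not part
 * of this excerpt): replacing the memcpy() calls with plain pointer
 * assignments only works if the speed_rating member of struct
 * zcrypt_card became a pointer to const data, roughly as below. The
 * per-type CEXxy_SPEED_IDX tables are static const and sized NUM_OPS,
 * so each probed card can simply reference them without copying.
 */
struct zcrypt_card_sketch {		/* hypothetical, for illustration only */
	const int *speed_rating;	/* was: int speed_rating[NUM_OPS] */
	/* ... remaining members unchanged ... */
};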
*/ zc->user_space_type = ZCRYPT_CEX6; - memcpy(zc->speed_rating, CEX7A_SPEED_IDX, - sizeof(CEX7A_SPEED_IDX)); + zc->speed_rating = CEX7A_SPEED_IDX; } zc->min_mod_size = CEX4A_MIN_MOD_SIZE; if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) && @@ -461,32 +484,28 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev) * just keep it for cca compatibility */ zc->user_space_type = ZCRYPT_CEX3C; - memcpy(zc->speed_rating, CEX4C_SPEED_IDX, - sizeof(CEX4C_SPEED_IDX)); + zc->speed_rating = CEX4C_SPEED_IDX; } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) { zc->type_string = "CEX5C"; /* wrong user space type, must be CEX5 * just keep it for cca compatibility */ zc->user_space_type = ZCRYPT_CEX3C; - memcpy(zc->speed_rating, CEX5C_SPEED_IDX, - sizeof(CEX5C_SPEED_IDX)); + zc->speed_rating = CEX5C_SPEED_IDX; } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX6) { zc->type_string = "CEX6C"; /* wrong user space type, must be CEX6 * just keep it for cca compatibility */ zc->user_space_type = ZCRYPT_CEX3C; - memcpy(zc->speed_rating, CEX6C_SPEED_IDX, - sizeof(CEX6C_SPEED_IDX)); + zc->speed_rating = CEX6C_SPEED_IDX; } else { zc->type_string = "CEX7C"; /* wrong user space type, must be CEX7 * just keep it for cca compatibility */ zc->user_space_type = ZCRYPT_CEX3C; - memcpy(zc->speed_rating, CEX7C_SPEED_IDX, - sizeof(CEX7C_SPEED_IDX)); + zc->speed_rating = CEX7C_SPEED_IDX; } zc->min_mod_size = CEX4C_MIN_MOD_SIZE; zc->max_mod_size = CEX4C_MAX_MOD_SIZE; @@ -495,26 +514,22 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev) if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) { zc->type_string = "CEX4P"; zc->user_space_type = ZCRYPT_CEX4; - memcpy(zc->speed_rating, CEX4P_SPEED_IDX, - sizeof(CEX4P_SPEED_IDX)); + zc->speed_rating = CEX4P_SPEED_IDX; } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) { zc->type_string = "CEX5P"; zc->user_space_type = ZCRYPT_CEX5; - memcpy(zc->speed_rating, CEX5P_SPEED_IDX, - sizeof(CEX5P_SPEED_IDX)); + zc->speed_rating = CEX5P_SPEED_IDX; } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX6) { zc->type_string = "CEX6P"; zc->user_space_type = ZCRYPT_CEX6; - memcpy(zc->speed_rating, CEX6P_SPEED_IDX, - sizeof(CEX6P_SPEED_IDX)); + zc->speed_rating = CEX6P_SPEED_IDX; } else { zc->type_string = "CEX7P"; /* wrong user space type, just for compatibility * with the ZCRYPT_STATUS_MASK ioctl. */ zc->user_space_type = ZCRYPT_CEX6; - memcpy(zc->speed_rating, CEX7P_SPEED_IDX, - sizeof(CEX7P_SPEED_IDX)); + zc->speed_rating = CEX7P_SPEED_IDX; } zc->min_mod_size = CEX4C_MIN_MOD_SIZE; zc->max_mod_size = CEX4C_MAX_MOD_SIZE; diff --git a/drivers/s390/crypto/zcrypt_debug.h b/drivers/s390/crypto/zcrypt_debug.h index 241dbb5f75bf..3225489a1c41 100644 --- a/drivers/s390/crypto/zcrypt_debug.h +++ b/drivers/s390/crypto/zcrypt_debug.h @@ -21,6 +21,14 @@ #define ZCRYPT_DBF(...) \ debug_sprintf_event(zcrypt_dbf_info, ##__VA_ARGS__) +#define ZCRYPT_DBF_ERR(...) \ + debug_sprintf_event(zcrypt_dbf_info, DBF_ERR, ##__VA_ARGS__) +#define ZCRYPT_DBF_WARN(...) \ + debug_sprintf_event(zcrypt_dbf_info, DBF_WARN, ##__VA_ARGS__) +#define ZCRYPT_DBF_INFO(...) \ + debug_sprintf_event(zcrypt_dbf_info, DBF_INFO, ##__VA_ARGS__) +#define ZCRYPT_DBF_DBG(...) 
\ + debug_sprintf_event(zcrypt_dbf_info, DBF_DEBUG, ##__VA_ARGS__) extern debug_info_t *zcrypt_dbf_info; diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c index 3c3d403abe92..9ce5a71da69b 100644 --- a/drivers/s390/crypto/zcrypt_ep11misc.c +++ b/drivers/s390/crypto/zcrypt_ep11misc.c @@ -15,6 +15,7 @@ #include <linux/random.h> #include <asm/zcrypt.h> #include <asm/pkey.h> +#include <crypto/aes.h> #include "ap_bus.h" #include "zcrypt_api.h" @@ -113,79 +114,199 @@ static void __exit card_cache_free(void) } /* - * Simple check if the key blob is a valid EP11 secure AES key. + * Simple check if the key blob is a valid EP11 AES key blob with header. */ -int ep11_check_aeskeyblob(debug_info_t *dbg, int dbflvl, - const u8 *key, int keybitsize, - int checkcpacfexport) +int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl, + const u8 *key, size_t keylen, int checkcpacfexp) { - struct ep11keyblob *kb = (struct ep11keyblob *) key; + struct ep11kblob_header *hdr = (struct ep11kblob_header *) key; + struct ep11keyblob *kb = (struct ep11keyblob *) (key + sizeof(*hdr)); #define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__) - if (kb->head.type != TOKTYPE_NON_CCA) { + if (keylen < sizeof(*hdr) + sizeof(*kb)) { + DBF("%s key check failed, keylen %zu < %zu\n", + __func__, keylen, sizeof(*hdr) + sizeof(*kb)); + return -EINVAL; + } + + if (hdr->type != TOKTYPE_NON_CCA) { if (dbg) DBF("%s key check failed, type 0x%02x != 0x%02x\n", - __func__, (int) kb->head.type, TOKTYPE_NON_CCA); + __func__, (int) hdr->type, TOKTYPE_NON_CCA); return -EINVAL; } - if (kb->head.version != TOKVER_EP11_AES) { + if (hdr->hver != 0x00) { + if (dbg) + DBF("%s key check failed, header version 0x%02x != 0x00\n", + __func__, (int) hdr->hver); + return -EINVAL; + } + if (hdr->version != TOKVER_EP11_AES_WITH_HEADER) { if (dbg) DBF("%s key check failed, version 0x%02x != 0x%02x\n", - __func__, (int) kb->head.version, TOKVER_EP11_AES); + __func__, (int) hdr->version, TOKVER_EP11_AES_WITH_HEADER); + return -EINVAL; + } + if (hdr->len > keylen) { + if (dbg) + DBF("%s key check failed, header len %d keylen %zu mismatch\n", + __func__, (int) hdr->len, keylen); + return -EINVAL; + } + if (hdr->len < sizeof(*hdr) + sizeof(*kb)) { + if (dbg) + DBF("%s key check failed, header len %d < %zu\n", + __func__, (int) hdr->len, sizeof(*hdr) + sizeof(*kb)); return -EINVAL; } + if (kb->version != EP11_STRUCT_MAGIC) { if (dbg) - DBF("%s key check failed, magic 0x%04x != 0x%04x\n", + DBF("%s key check failed, blob magic 0x%04x != 0x%04x\n", __func__, (int) kb->version, EP11_STRUCT_MAGIC); return -EINVAL; } - switch (kb->head.keybitlen) { - case 128: - case 192: - case 256: - break; - default: + if (checkcpacfexp && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) { if (dbg) - DBF("%s key check failed, keybitlen %d invalid\n", - __func__, (int) kb->head.keybitlen); + DBF("%s key check failed, PKEY_EXTRACTABLE is off\n", + __func__); return -EINVAL; } - if (keybitsize > 0 && keybitsize != (int) kb->head.keybitlen) { - DBF("%s key check failed, keybitsize %d\n", - __func__, keybitsize); + +#undef DBF + + return 0; +} +EXPORT_SYMBOL(ep11_check_aes_key_with_hdr); + +/* + * Simple check if the key blob is a valid EP11 ECC key blob with header. 
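/*
 * Illustrative sketch (hypothetical wrapper, not taken from the hunks
 * above): validating an EP11 AES key blob that carries the new header,
 * assuming kernel context with zcrypt_ep11misc.h in scope. A NULL
 * debug area keeps the check silent; the final argument additionally
 * requires the PKEY_EXTRACTABLE attribute so the blob can later be
 * turned into a CPACF protected key.
 */
static int demo_check_ep11_aes_hdr_blob(const u8 *blob, size_t bloblen)
{
	/* 0 on success, -EINVAL if type/version/length/magic checks fail */
	return ep11_check_aes_key_with_hdr(NULL, DBF_ERR, blob, bloblen, 1);
}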
+ */ +int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl, + const u8 *key, size_t keylen, int checkcpacfexp) +{ + struct ep11kblob_header *hdr = (struct ep11kblob_header *) key; + struct ep11keyblob *kb = (struct ep11keyblob *) (key + sizeof(*hdr)); + +#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__) + + if (keylen < sizeof(*hdr) + sizeof(*kb)) { + DBF("%s key check failed, keylen %zu < %zu\n", + __func__, keylen, sizeof(*hdr) + sizeof(*kb)); + return -EINVAL; + } + + if (hdr->type != TOKTYPE_NON_CCA) { + if (dbg) + DBF("%s key check failed, type 0x%02x != 0x%02x\n", + __func__, (int) hdr->type, TOKTYPE_NON_CCA); + return -EINVAL; + } + if (hdr->hver != 0x00) { + if (dbg) + DBF("%s key check failed, header version 0x%02x != 0x00\n", + __func__, (int) hdr->hver); + return -EINVAL; + } + if (hdr->version != TOKVER_EP11_ECC_WITH_HEADER) { + if (dbg) + DBF("%s key check failed, version 0x%02x != 0x%02x\n", + __func__, (int) hdr->version, TOKVER_EP11_ECC_WITH_HEADER); return -EINVAL; } - if (checkcpacfexport && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) { + if (hdr->len > keylen) { if (dbg) - DBF("%s key check failed, PKEY_EXTRACTABLE is 0\n", + DBF("%s key check failed, header len %d keylen %zu mismatch\n", + __func__, (int) hdr->len, keylen); + return -EINVAL; + } + if (hdr->len < sizeof(*hdr) + sizeof(*kb)) { + if (dbg) + DBF("%s key check failed, header len %d < %zu\n", + __func__, (int) hdr->len, sizeof(*hdr) + sizeof(*kb)); + return -EINVAL; + } + + if (kb->version != EP11_STRUCT_MAGIC) { + if (dbg) + DBF("%s key check failed, blob magic 0x%04x != 0x%04x\n", + __func__, (int) kb->version, EP11_STRUCT_MAGIC); + return -EINVAL; + } + if (checkcpacfexp && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) { + if (dbg) + DBF("%s key check failed, PKEY_EXTRACTABLE is off\n", __func__); return -EINVAL; } + #undef DBF return 0; } -EXPORT_SYMBOL(ep11_check_aeskeyblob); +EXPORT_SYMBOL(ep11_check_ecc_key_with_hdr); /* - * Helper function which calls zcrypt_send_ep11_cprb with - * memory management segment adjusted to kernel space - * so that the copy_from_user called within this - * function do in fact copy from kernel space. + * Simple check if the key blob is a valid EP11 AES key blob with + * the header in the session field (old style EP11 AES key). */ -static inline int _zcrypt_send_ep11_cprb(struct ep11_urb *urb) +int ep11_check_aes_key(debug_info_t *dbg, int dbflvl, + const u8 *key, size_t keylen, int checkcpacfexp) { - int rc; - mm_segment_t old_fs = get_fs(); + struct ep11keyblob *kb = (struct ep11keyblob *) key; - set_fs(KERNEL_DS); - rc = zcrypt_send_ep11_cprb(urb); - set_fs(old_fs); +#define DBF(...) 
debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__) - return rc; + if (keylen < sizeof(*kb)) { + DBF("%s key check failed, keylen %zu < %zu\n", + __func__, keylen, sizeof(*kb)); + return -EINVAL; + } + + if (kb->head.type != TOKTYPE_NON_CCA) { + if (dbg) + DBF("%s key check failed, type 0x%02x != 0x%02x\n", + __func__, (int) kb->head.type, TOKTYPE_NON_CCA); + return -EINVAL; + } + if (kb->head.version != TOKVER_EP11_AES) { + if (dbg) + DBF("%s key check failed, version 0x%02x != 0x%02x\n", + __func__, (int) kb->head.version, TOKVER_EP11_AES); + return -EINVAL; + } + if (kb->head.len > keylen) { + if (dbg) + DBF("%s key check failed, header len %d keylen %zu mismatch\n", + __func__, (int) kb->head.len, keylen); + return -EINVAL; + } + if (kb->head.len < sizeof(*kb)) { + if (dbg) + DBF("%s key check failed, header len %d < %zu\n", + __func__, (int) kb->head.len, sizeof(*kb)); + return -EINVAL; + } + + if (kb->version != EP11_STRUCT_MAGIC) { + if (dbg) + DBF("%s key check failed, blob magic 0x%04x != 0x%04x\n", + __func__, (int) kb->version, EP11_STRUCT_MAGIC); + return -EINVAL; + } + if (checkcpacfexp && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) { + if (dbg) + DBF("%s key check failed, PKEY_EXTRACTABLE is off\n", + __func__); + return -EINVAL; + } + +#undef DBF + + return 0; } +EXPORT_SYMBOL(ep11_check_aes_key); /* * Allocate and prepare ep11 cprb plus additional payload. @@ -399,7 +520,7 @@ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type, req, sizeof(*req) + sizeof(*req_pl), rep, sizeof(*rep) + sizeof(*rep_pl) + buflen); - rc = _zcrypt_send_ep11_cprb(urb); + rc = zcrypt_send_ep11_cprb(urb); if (rc) { DEBUG_ERR( "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", @@ -637,7 +758,7 @@ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, req, sizeof(*req) + sizeof(*req_pl), rep, sizeof(*rep) + sizeof(*rep_pl)); - rc = _zcrypt_send_ep11_cprb(urb); + rc = zcrypt_send_ep11_cprb(urb); if (rc) { DEBUG_ERR( "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", @@ -757,7 +878,7 @@ static int ep11_cryptsingle(u16 card, u16 domain, req, sizeof(*req) + req_pl_size, rep, sizeof(*rep) + rep_pl_size); - rc = _zcrypt_send_ep11_cprb(urb); + rc = zcrypt_send_ep11_cprb(urb); if (rc) { DEBUG_ERR( "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", @@ -905,7 +1026,7 @@ static int ep11_unwrapkey(u16 card, u16 domain, req, sizeof(*req) + req_pl_size, rep, sizeof(*rep) + sizeof(*rep_pl)); - rc = _zcrypt_send_ep11_cprb(urb); + rc = zcrypt_send_ep11_cprb(urb); if (rc) { DEBUG_ERR( "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", @@ -972,7 +1093,7 @@ static int ep11_wrapkey(u16 card, u16 domain, u8 data_tag; u8 data_lenfmt; u16 data_len; - u8 data[512]; + u8 data[1024]; } __packed * rep_pl; struct ep11_cprb *req = NULL, *rep = NULL; struct ep11_target_dev target; @@ -980,8 +1101,17 @@ static int ep11_wrapkey(u16 card, u16 domain, struct ep11keyblob *kb; size_t req_pl_size; int api, rc = -ENOMEM; + bool has_header = false; u8 *p; + /* maybe the session field holds a header with key info */ + kb = (struct ep11keyblob *) key; + if (kb->head.type == TOKTYPE_NON_CCA && + kb->head.version == TOKVER_EP11_AES) { + has_header = true; + keysize = kb->head.len < keysize ? kb->head.len : keysize; + } + /* request cprb and payload */ req_pl_size = sizeof(struct wk_req_pl) + (iv ? 
16 : 0) + ASN1TAGLEN(keysize) + 4; @@ -1007,9 +1137,10 @@ static int ep11_wrapkey(u16 card, u16 domain, /* key blob */ p += asn1tag_write(p, 0x04, key, keysize); /* maybe the key argument needs the head data cleaned out */ - kb = (struct ep11keyblob *)(p - keysize); - if (kb->head.version == TOKVER_EP11_AES) + if (has_header) { + kb = (struct ep11keyblob *)(p - keysize); memset(&kb->head, 0, sizeof(kb->head)); + } /* empty kek tag */ *p++ = 0x04; *p++ = 0; @@ -1033,7 +1164,7 @@ static int ep11_wrapkey(u16 card, u16 domain, req, sizeof(*req) + req_pl_size, rep, sizeof(*rep) + sizeof(*rep_pl)); - rc = _zcrypt_send_ep11_cprb(urb); + rc = zcrypt_send_ep11_cprb(urb); if (rc) { DEBUG_ERR( "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", @@ -1132,12 +1263,12 @@ out: } EXPORT_SYMBOL(ep11_clr2keyblob); -int ep11_key2protkey(u16 card, u16 dom, const u8 *key, size_t keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) +int ep11_kblob2protkey(u16 card, u16 dom, const u8 *keyblob, size_t keybloblen, + u8 *protkey, u32 *protkeylen, u32 *protkeytype) { int rc = -EIO; u8 *wkbuf = NULL; - size_t wkbuflen = 256; + size_t wkbuflen, keylen; struct wk_info { u16 version; u8 res1[16]; @@ -1147,8 +1278,33 @@ int ep11_key2protkey(u16 card, u16 dom, const u8 *key, size_t keylen, u8 res2[8]; u8 pkey[0]; } __packed * wki; + const u8 *key; + struct ep11kblob_header *hdr; + + /* key with or without header ? */ + hdr = (struct ep11kblob_header *) keyblob; + if (hdr->type == TOKTYPE_NON_CCA + && (hdr->version == TOKVER_EP11_AES_WITH_HEADER + || hdr->version == TOKVER_EP11_ECC_WITH_HEADER) + && is_ep11_keyblob(keyblob + sizeof(struct ep11kblob_header))) { + /* EP11 AES or ECC key with header */ + key = keyblob + sizeof(struct ep11kblob_header); + keylen = hdr->len - sizeof(struct ep11kblob_header); + } else if (hdr->type == TOKTYPE_NON_CCA + && hdr->version == TOKVER_EP11_AES + && is_ep11_keyblob(keyblob)) { + /* EP11 AES key (old style) */ + key = keyblob; + keylen = hdr->len; + } else if (is_ep11_keyblob(keyblob)) { + /* raw EP11 key blob */ + key = keyblob; + keylen = keybloblen; + } else + return -EINVAL; /* alloc temp working buffer */ + wkbuflen = (keylen + AES_BLOCK_SIZE) & (~(AES_BLOCK_SIZE - 1)); wkbuf = kmalloc(wkbuflen, GFP_ATOMIC); if (!wkbuf) return -ENOMEM; @@ -1165,46 +1321,68 @@ int ep11_key2protkey(u16 card, u16 dom, const u8 *key, size_t keylen, wki = (struct wk_info *) wkbuf; /* check struct version and pkey type */ - if (wki->version != 1 || wki->pkeytype != 1) { + if (wki->version != 1 || wki->pkeytype < 1 || wki->pkeytype > 5) { DEBUG_ERR("%s wk info version %d or pkeytype %d mismatch.\n", __func__, (int) wki->version, (int) wki->pkeytype); rc = -EIO; goto out; } - /* copy the tanslated protected key */ - switch (wki->pkeysize) { - case 16+32: - /* AES 128 protected key */ - if (protkeytype) - *protkeytype = PKEY_KEYTYPE_AES_128; - break; - case 24+32: - /* AES 192 protected key */ - if (protkeytype) - *protkeytype = PKEY_KEYTYPE_AES_192; + /* check protected key type field */ + switch (wki->pkeytype) { + case 1: /* AES */ + switch (wki->pkeysize) { + case 16+32: + /* AES 128 protected key */ + if (protkeytype) + *protkeytype = PKEY_KEYTYPE_AES_128; + break; + case 24+32: + /* AES 192 protected key */ + if (protkeytype) + *protkeytype = PKEY_KEYTYPE_AES_192; + break; + case 32+32: + /* AES 256 protected key */ + if (protkeytype) + *protkeytype = PKEY_KEYTYPE_AES_256; + break; + default: + DEBUG_ERR("%s unknown/unsupported AES pkeysize %d\n", + __func__, (int) wki->pkeysize); + rc = 
-EIO; + goto out; + } break; - case 32+32: - /* AES 256 protected key */ + case 3: /* EC-P */ + case 4: /* EC-ED */ + case 5: /* EC-BP */ if (protkeytype) - *protkeytype = PKEY_KEYTYPE_AES_256; + *protkeytype = PKEY_KEYTYPE_ECC; break; + case 2: /* TDES */ default: - DEBUG_ERR("%s unknown/unsupported pkeysize %d\n", - __func__, (int) wki->pkeysize); + DEBUG_ERR("%s unknown/unsupported key type %d\n", + __func__, (int) wki->pkeytype); rc = -EIO; goto out; } + + /* copy the tanslated protected key */ + if (wki->pkeysize > *protkeylen) { + DEBUG_ERR("%s wk info pkeysize %llu > protkeysize %u\n", + __func__, wki->pkeysize, *protkeylen); + rc = -EINVAL; + goto out; + } memcpy(protkey, wki->pkey, wki->pkeysize); - if (protkeylen) - *protkeylen = (u32) wki->pkeysize; - rc = 0; + *protkeylen = wki->pkeysize; out: kfree(wkbuf); return rc; } -EXPORT_SYMBOL(ep11_key2protkey); +EXPORT_SYMBOL(ep11_kblob2protkey); int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, int minhwtype, int minapi, const u8 *wkvp) diff --git a/drivers/s390/crypto/zcrypt_ep11misc.h b/drivers/s390/crypto/zcrypt_ep11misc.h index e3ed5ed1de86..1e02b197c003 100644 --- a/drivers/s390/crypto/zcrypt_ep11misc.h +++ b/drivers/s390/crypto/zcrypt_ep11misc.h @@ -12,22 +12,28 @@ #include <asm/zcrypt.h> #include <asm/pkey.h> -#define TOKVER_EP11_AES 0x03 /* EP11 AES key blob */ - #define EP11_API_V 4 /* highest known and supported EP11 API version */ - #define EP11_STRUCT_MAGIC 0x1234 -#define EP11_BLOB_PKEY_EXTRACTABLE 0x200000 +#define EP11_BLOB_PKEY_EXTRACTABLE 0x00200000 + +/* + * Internal used values for the version field of the key header. + * Should match to the enum pkey_key_type in pkey.h. + */ +#define TOKVER_EP11_AES 0x03 /* EP11 AES key blob (old style) */ +#define TOKVER_EP11_AES_WITH_HEADER 0x06 /* EP11 AES key blob with header */ +#define TOKVER_EP11_ECC_WITH_HEADER 0x07 /* EP11 ECC key blob with header */ /* inside view of an EP11 secure key blob */ struct ep11keyblob { union { u8 session[32]; + /* only used for PKEY_TYPE_EP11: */ struct { u8 type; /* 0x00 (TOKTYPE_NON_CCA) */ u8 res0; /* unused */ u16 len; /* total length in bytes of this blob */ - u8 version; /* 0x06 (TOKVER_EP11_AES) */ + u8 version; /* 0x03 (TOKVER_EP11_AES) */ u8 res1; /* unused */ u16 keybitlen; /* clear key bit len, 0 for unknown */ } head; @@ -41,16 +47,41 @@ struct ep11keyblob { u8 mac[32]; } __packed; +/* check ep11 key magic to find out if this is an ep11 key blob */ +static inline bool is_ep11_keyblob(const u8 *key) +{ + struct ep11keyblob *kb = (struct ep11keyblob *) key; + + return (kb->version == EP11_STRUCT_MAGIC); +} + +/* + * Simple check if the key blob is a valid EP11 AES key blob with header. + * If checkcpacfexport is enabled, the key is also checked for the + * attributes needed to export this key for CPACF use. + * Returns 0 on success or errno value on failure. + */ +int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl, + const u8 *key, size_t keylen, int checkcpacfexp); + /* - * Simple check if the key blob is a valid EP11 secure AES key. - * If keybitsize is given, the bitsize of the key is also checked. + * Simple check if the key blob is a valid EP11 ECC key blob with header. * If checkcpacfexport is enabled, the key is also checked for the * attributes needed to export this key for CPACF use. * Returns 0 on success or errno value on failure. 
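/*
 * The wk_info handling in ep11_kblob2protkey() above boils down to the
 * mapping sketched here (hypothetical helper that merely restates the
 * patch logic; the PKEY_KEYTYPE_* constants come from asm/pkey.h):
 */
static int demo_wki_to_protkeytype(u32 pkeytype, u64 pkeysize, u32 *ptype)
{
	switch (pkeytype) {
	case 1:				/* AES */
		if (pkeysize == 16 + 32)
			*ptype = PKEY_KEYTYPE_AES_128;
		else if (pkeysize == 24 + 32)
			*ptype = PKEY_KEYTYPE_AES_192;
		else if (pkeysize == 32 + 32)
			*ptype = PKEY_KEYTYPE_AES_256;
		else
			return -EIO;	/* unknown AES wrapping size */
		return 0;
	case 3:				/* EC-P  */
	case 4:				/* EC-ED */
	case 5:				/* EC-BP */
		*ptype = PKEY_KEYTYPE_ECC;
		return 0;
	case 2:				/* TDES is not supported here */
	default:
		return -EIO;
	}
}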
*/ -int ep11_check_aeskeyblob(debug_info_t *dbg, int dbflvl, - const u8 *key, int keybitsize, - int checkcpacfexport); +int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl, + const u8 *key, size_t keylen, int checkcpacfexp); + +/* + * Simple check if the key blob is a valid EP11 AES key blob with + * the header in the session field (old style EP11 AES key). + * If checkcpacfexport is enabled, the key is also checked for the + * attributes needed to export this key for CPACF use. + * Returns 0 on success or errno value on failure. + */ +int ep11_check_aes_key(debug_info_t *dbg, int dbflvl, + const u8 *key, size_t keylen, int checkcpacfexp); /* EP11 card info struct */ struct ep11_card_info { @@ -92,12 +123,6 @@ int ep11_clr2keyblob(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, const u8 *clrkey, u8 *keybuf, size_t *keybufsize); /* - * Derive proteced key from EP11 AES secure key blob. - */ -int ep11_key2protkey(u16 cardnr, u16 domain, const u8 *key, size_t keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype); - -/* * Build a list of ep11 apqns meeting the following constrains: * - apqn is online and is in fact an EP11 apqn * - if cardnr is not FFFF only apqns with this cardnr @@ -119,6 +144,12 @@ int ep11_key2protkey(u16 cardnr, u16 domain, const u8 *key, size_t keylen, int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, int minhwtype, int minapi, const u8 *wkvp); +/* + * Derive proteced key from EP11 key blob (AES and ECC keys). + */ +int ep11_kblob2protkey(u16 card, u16 dom, const u8 *key, size_t keylen, + u8 *protkey, u32 *protkeylen, u32 *protkeytype); + void zcrypt_ep11misc_exit(void); #endif /* _ZCRYPT_EP11MISC_H_ */ diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h index 54a04f8c38ef..39e626e3a379 100644 --- a/drivers/s390/crypto/zcrypt_error.h +++ b/drivers/s390/crypto/zcrypt_error.h @@ -52,7 +52,6 @@ struct error_hdr { #define REP82_ERROR_INVALID_COMMAND 0x30 #define REP82_ERROR_MALFORMED_MSG 0x40 #define REP82_ERROR_INVALID_SPECIAL_CMD 0x41 -#define REP82_ERROR_INVALID_DOMAIN_PRECHECK 0x42 #define REP82_ERROR_RESERVED_FIELDO 0x50 /* old value */ #define REP82_ERROR_WORD_ALIGNMENT 0x60 #define REP82_ERROR_MESSAGE_LENGTH 0x80 @@ -67,7 +66,6 @@ struct error_hdr { #define REP82_ERROR_ZERO_BUFFER_LEN 0xB0 #define REP88_ERROR_MODULE_FAILURE 0x10 - #define REP88_ERROR_MESSAGE_TYPE 0x20 #define REP88_ERROR_MESSAGE_MALFORMD 0x22 #define REP88_ERROR_MESSAGE_LENGTH 0x23 @@ -85,78 +83,56 @@ static inline int convert_error(struct zcrypt_queue *zq, int queue = AP_QID_QUEUE(zq->queue->qid); switch (ehdr->reply_code) { - case REP82_ERROR_OPERAND_INVALID: - case REP82_ERROR_OPERAND_SIZE: - case REP82_ERROR_EVEN_MOD_IN_OPND: - case REP88_ERROR_MESSAGE_MALFORMD: - case REP82_ERROR_INVALID_DOMAIN_PRECHECK: - case REP82_ERROR_INVALID_DOMAIN_PENDING: - case REP82_ERROR_INVALID_SPECIAL_CMD: - case REP82_ERROR_FILTERED_BY_HYPERVISOR: - // REP88_ERROR_INVALID_KEY // '82' CEX2A - // REP88_ERROR_OPERAND // '84' CEX2A - // REP88_ERROR_OPERAND_EVEN_MOD // '85' CEX2A - /* Invalid input data. 
*/ + case REP82_ERROR_INVALID_MSG_LEN: /* 0x23 */ + case REP82_ERROR_RESERVD_FIELD: /* 0x24 */ + case REP82_ERROR_FORMAT_FIELD: /* 0x29 */ + case REP82_ERROR_MALFORMED_MSG: /* 0x40 */ + case REP82_ERROR_INVALID_SPECIAL_CMD: /* 0x41 */ + case REP82_ERROR_MESSAGE_LENGTH: /* 0x80 */ + case REP82_ERROR_OPERAND_INVALID: /* 0x82 */ + case REP82_ERROR_OPERAND_SIZE: /* 0x84 */ + case REP82_ERROR_EVEN_MOD_IN_OPND: /* 0x85 */ + case REP82_ERROR_INVALID_DOMAIN_PENDING: /* 0x8A */ + case REP82_ERROR_FILTERED_BY_HYPERVISOR: /* 0x8B */ + case REP82_ERROR_PACKET_TRUNCATED: /* 0xA0 */ + case REP88_ERROR_MESSAGE_MALFORMD: /* 0x22 */ + case REP88_ERROR_KEY_TYPE: /* 0x34 */ + /* RY indicates malformed request */ ZCRYPT_DBF(DBF_WARN, - "device=%02x.%04x reply=0x%02x => rc=EINVAL\n", + "dev=%02x.%04x RY=0x%02x => rc=EINVAL\n", card, queue, ehdr->reply_code); return -EINVAL; - case REP82_ERROR_MESSAGE_TYPE: - // REP88_ERROR_MESSAGE_TYPE // '20' CEX2A + case REP82_ERROR_MACHINE_FAILURE: /* 0x10 */ + case REP82_ERROR_MESSAGE_TYPE: /* 0x20 */ + case REP82_ERROR_TRANSPORT_FAIL: /* 0x90 */ /* - * To sent a message of the wrong type is a bug in the - * device driver. Send error msg, disable the device - * and then repeat the request. + * Msg to wrong type or card/infrastructure failure. + * Trigger rescan of the ap bus, trigger retry request. */ atomic_set(&zcrypt_rescan_req, 1); - zq->online = 0; - pr_err("Cryptographic device %02x.%04x failed and was set offline\n", - card, queue); - ZCRYPT_DBF(DBF_ERR, - "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n", - card, queue, ehdr->reply_code); - return -EAGAIN; - case REP82_ERROR_TRANSPORT_FAIL: - /* Card or infrastructure failure, disable card */ - atomic_set(&zcrypt_rescan_req, 1); - zq->online = 0; - pr_err("Cryptographic device %02x.%04x failed and was set offline\n", - card, queue); /* For type 86 response show the apfs value (failure reason) */ - if (ehdr->type == TYPE86_RSP_CODE) { + if (ehdr->reply_code == REP82_ERROR_TRANSPORT_FAIL && + ehdr->type == TYPE86_RSP_CODE) { struct { struct type86_hdr hdr; struct type86_fmt2_ext fmt2; } __packed * head = reply->msg; unsigned int apfs = *((u32 *)head->fmt2.apfs); - ZCRYPT_DBF(DBF_ERR, - "device=%02x.%04x reply=0x%02x apfs=0x%x => online=0 rc=EAGAIN\n", - card, queue, apfs, ehdr->reply_code); + ZCRYPT_DBF(DBF_WARN, + "dev=%02x.%04x RY=0x%02x apfs=0x%x => bus rescan, rc=EAGAIN\n", + card, queue, ehdr->reply_code, apfs); } else - ZCRYPT_DBF(DBF_ERR, - "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n", + ZCRYPT_DBF(DBF_WARN, + "dev=%02x.%04x RY=0x%02x => bus rescan, rc=EAGAIN\n", card, queue, ehdr->reply_code); return -EAGAIN; - case REP82_ERROR_MACHINE_FAILURE: - // REP88_ERROR_MODULE_FAILURE // '10' CEX2A - /* If a card fails disable it and repeat the request. */ - atomic_set(&zcrypt_rescan_req, 1); - zq->online = 0; - pr_err("Cryptographic device %02x.%04x failed and was set offline\n", - card, queue); - ZCRYPT_DBF(DBF_ERR, - "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n", - card, queue, ehdr->reply_code); - return -EAGAIN; default: - zq->online = 0; - pr_err("Cryptographic device %02x.%04x failed and was set offline\n", - card, queue); - ZCRYPT_DBF(DBF_ERR, - "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n", + /* Assume request is valid and a retry will be worth it */ + ZCRYPT_DBF(DBF_WARN, + "dev=%02x.%04x RY=0x%02x => rc=EAGAIN\n", card, queue, ehdr->reply_code); - return -EAGAIN; /* repeat the request on a different device. 
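/*
 * Reading note with an illustrative sketch (retry loop and bound are
 * made up, not from the patch): the reworked convert_error() maps the
 * malformed-request reply codes to -EINVAL and everything else to
 * -EAGAIN, in the failure cases also requesting an AP bus rescan
 * instead of forcing the queue offline, so the request distributor can
 * retry the message, possibly on another device.
 */
static long demo_retry_on_eagain(long (*send_fn)(void *arg), void *arg)
{
	long rc = -EAGAIN;
	int tries;

	for (tries = 0; tries < 5 && rc == -EAGAIN; tries++)
		rc = send_fn(arg);	/* -EAGAIN: try again / other APQN */
	return rc;
}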
*/ + return -EAGAIN; } } diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c index 7aedc338b445..bf14ee445f89 100644 --- a/drivers/s390/crypto/zcrypt_msgtype50.c +++ b/drivers/s390/crypto/zcrypt_msgtype50.c @@ -246,6 +246,12 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq, copy_from_user(exp, mex->b_key, mod_len) || copy_from_user(inp, mex->inputdata, mod_len)) return -EFAULT; + +#ifdef CONFIG_ZCRYPT_DEBUG + if (ap_msg->fi.flags & AP_FI_FLAG_TOGGLE_SPECIAL) + ap_msg->flags ^= AP_MSG_FLAG_SPECIAL; +#endif + return 0; } @@ -332,6 +338,11 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_queue *zq, copy_from_user(inp, crt->inputdata, mod_len)) return -EFAULT; +#ifdef CONFIG_ZCRYPT_DEBUG + if (ap_msg->fi.flags & AP_FI_FLAG_TOGGLE_SPECIAL) + ap_msg->flags ^= AP_MSG_FLAG_SPECIAL; +#endif + return 0; } @@ -356,15 +367,15 @@ static int convert_type80(struct zcrypt_queue *zq, if (t80h->len < sizeof(*t80h) + outputdatalength) { /* The result is too short, the CEXxA card may not do that.. */ zq->online = 0; - pr_err("Cryptographic device %02x.%04x failed and was set offline\n", + pr_err("Crypto dev=%02x.%04x code=0x%02x => online=0 rc=EAGAIN\n", AP_QID_CARD(zq->queue->qid), - AP_QID_QUEUE(zq->queue->qid)); - ZCRYPT_DBF(DBF_ERR, - "device=%02x.%04x code=0x%02x => online=0 rc=EAGAIN\n", - AP_QID_CARD(zq->queue->qid), - AP_QID_QUEUE(zq->queue->qid), - t80h->code); - return -EAGAIN; /* repeat the request on a different device. */ + AP_QID_QUEUE(zq->queue->qid), + t80h->code); + ZCRYPT_DBF_ERR("dev=%02x.%04x code=0x%02x => online=0 rc=EAGAIN\n", + AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid), + t80h->code); + return -EAGAIN; } if (zq->zcard->user_space_type == ZCRYPT_CEX2A) BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE); @@ -376,10 +387,10 @@ static int convert_type80(struct zcrypt_queue *zq, return 0; } -static int convert_response(struct zcrypt_queue *zq, - struct ap_message *reply, - char __user *outputdata, - unsigned int outputdatalength) +static int convert_response_cex2a(struct zcrypt_queue *zq, + struct ap_message *reply, + char __user *outputdata, + unsigned int outputdatalength) { /* Response type byte is the second byte in the response. */ unsigned char rtype = ((unsigned char *) reply->msg)[1]; @@ -393,15 +404,15 @@ static int convert_response(struct zcrypt_queue *zq, outputdata, outputdatalength); default: /* Unknown response type, this should NEVER EVER happen */ zq->online = 0; - pr_err("Cryptographic device %02x.%04x failed and was set offline\n", + pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n", AP_QID_CARD(zq->queue->qid), - AP_QID_QUEUE(zq->queue->qid)); - ZCRYPT_DBF(DBF_ERR, - "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n", - AP_QID_CARD(zq->queue->qid), - AP_QID_QUEUE(zq->queue->qid), - (unsigned int) rtype); - return -EAGAIN; /* repeat the request on a different device. 
*/ + AP_QID_QUEUE(zq->queue->qid), + (int) rtype); + ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n", + AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid), + (int) rtype); + return -EAGAIN; } } @@ -450,39 +461,41 @@ static atomic_t zcrypt_step = ATOMIC_INIT(0); * @mex: pointer to the modexpo request buffer */ static long zcrypt_cex2a_modexpo(struct zcrypt_queue *zq, - struct ica_rsa_modexpo *mex) + struct ica_rsa_modexpo *mex, + struct ap_message *ap_msg) { - struct ap_message ap_msg; struct completion work; int rc; - ap_init_message(&ap_msg); if (zq->zcard->user_space_type == ZCRYPT_CEX2A) - ap_msg.msg = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE, GFP_KERNEL); + ap_msg->msg = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE, GFP_KERNEL); else - ap_msg.msg = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE, GFP_KERNEL); - if (!ap_msg.msg) + ap_msg->msg = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE, GFP_KERNEL); + if (!ap_msg->msg) return -ENOMEM; - ap_msg.receive = zcrypt_cex2a_receive; - ap_msg.psmid = (((unsigned long long) current->pid) << 32) + - atomic_inc_return(&zcrypt_step); - ap_msg.private = &work; - rc = ICAMEX_msg_to_type50MEX_msg(zq, &ap_msg, mex); + ap_msg->receive = zcrypt_cex2a_receive; + ap_msg->psmid = (((unsigned long long) current->pid) << 32) + + atomic_inc_return(&zcrypt_step); + ap_msg->private = &work; + rc = ICAMEX_msg_to_type50MEX_msg(zq, ap_msg, mex); if (rc) - goto out_free; + goto out; init_completion(&work); - ap_queue_message(zq->queue, &ap_msg); + rc = ap_queue_message(zq->queue, ap_msg); + if (rc) + goto out; rc = wait_for_completion_interruptible(&work); if (rc == 0) { - rc = ap_msg.rc; + rc = ap_msg->rc; if (rc == 0) - rc = convert_response(zq, &ap_msg, mex->outputdata, - mex->outputdatalength); + rc = convert_response_cex2a(zq, ap_msg, + mex->outputdata, + mex->outputdatalength); } else /* Signal pending. 
*/ - ap_cancel_message(zq->queue, &ap_msg); -out_free: - kfree(ap_msg.msg); + ap_cancel_message(zq->queue, ap_msg); +out: + ap_msg->private = NULL; return rc; } @@ -494,39 +507,41 @@ out_free: * @crt: pointer to the modexpoc_crt request buffer */ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_queue *zq, - struct ica_rsa_modexpo_crt *crt) + struct ica_rsa_modexpo_crt *crt, + struct ap_message *ap_msg) { - struct ap_message ap_msg; struct completion work; int rc; - ap_init_message(&ap_msg); if (zq->zcard->user_space_type == ZCRYPT_CEX2A) - ap_msg.msg = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE, GFP_KERNEL); + ap_msg->msg = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE, GFP_KERNEL); else - ap_msg.msg = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE, GFP_KERNEL); - if (!ap_msg.msg) + ap_msg->msg = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE, GFP_KERNEL); + if (!ap_msg->msg) return -ENOMEM; - ap_msg.receive = zcrypt_cex2a_receive; - ap_msg.psmid = (((unsigned long long) current->pid) << 32) + - atomic_inc_return(&zcrypt_step); - ap_msg.private = &work; - rc = ICACRT_msg_to_type50CRT_msg(zq, &ap_msg, crt); + ap_msg->receive = zcrypt_cex2a_receive; + ap_msg->psmid = (((unsigned long long) current->pid) << 32) + + atomic_inc_return(&zcrypt_step); + ap_msg->private = &work; + rc = ICACRT_msg_to_type50CRT_msg(zq, ap_msg, crt); if (rc) - goto out_free; + goto out; init_completion(&work); - ap_queue_message(zq->queue, &ap_msg); + rc = ap_queue_message(zq->queue, ap_msg); + if (rc) + goto out; rc = wait_for_completion_interruptible(&work); if (rc == 0) { - rc = ap_msg.rc; + rc = ap_msg->rc; if (rc == 0) - rc = convert_response(zq, &ap_msg, crt->outputdata, - crt->outputdatalength); + rc = convert_response_cex2a(zq, ap_msg, + crt->outputdata, + crt->outputdatalength); } else /* Signal pending. 
*/ - ap_cancel_message(zq->queue, &ap_msg); -out_free: - kfree(ap_msg.msg); + ap_cancel_message(zq->queue, ap_msg); +out: + ap_msg->private = NULL; return rc; } diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c index d77991c74c25..307f90657d1d 100644 --- a/drivers/s390/crypto/zcrypt_msgtype6.c +++ b/drivers/s390/crypto/zcrypt_msgtype6.c @@ -388,7 +388,7 @@ struct type86_fmt2_msg { struct type86_fmt2_ext fmt2; } __packed; -static int XCRB_msg_to_type6CPRB_msgX(struct ap_message *ap_msg, +static int XCRB_msg_to_type6CPRB_msgX(bool userspace, struct ap_message *ap_msg, struct ica_xcRB *xcRB, unsigned int *fcode, unsigned short **dom) @@ -465,8 +465,8 @@ static int XCRB_msg_to_type6CPRB_msgX(struct ap_message *ap_msg, msg->hdr.FromCardLen2 = xcRB->reply_data_length; /* prepare CPRB */ - if (copy_from_user(&(msg->cprbx), xcRB->request_control_blk_addr, - xcRB->request_control_blk_length)) + if (z_copy_from_user(userspace, &(msg->cprbx), xcRB->request_control_blk_addr, + xcRB->request_control_blk_length)) return -EFAULT; if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) > xcRB->request_control_blk_length) @@ -482,18 +482,23 @@ static int XCRB_msg_to_type6CPRB_msgX(struct ap_message *ap_msg, || memcmp(function_code, "AU", 2) == 0) ap_msg->flags |= AP_MSG_FLAG_SPECIAL; +#ifdef CONFIG_ZCRYPT_DEBUG + if (ap_msg->fi.flags & AP_FI_FLAG_TOGGLE_SPECIAL) + ap_msg->flags ^= AP_MSG_FLAG_SPECIAL; +#endif + /* copy data block */ if (xcRB->request_data_length && - copy_from_user(req_data, xcRB->request_data_address, - xcRB->request_data_length)) + z_copy_from_user(userspace, req_data, xcRB->request_data_address, + xcRB->request_data_length)) return -EFAULT; return 0; } -static int xcrb_msg_to_type6_ep11cprb_msgx(struct ap_message *ap_msg, - struct ep11_urb *xcRB, - unsigned int *fcode) +static int xcrb_msg_to_type6_ep11cprb_msgx(bool userspace, struct ap_message *ap_msg, + struct ep11_urb *xcRB, + unsigned int *fcode) { unsigned int lfmt; static struct type6_hdr static_type6_ep11_hdr = { @@ -543,8 +548,8 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(struct ap_message *ap_msg, msg->hdr.FromCardLen1 = xcRB->resp_len; /* Import CPRB data from the ioctl input parameter */ - if (copy_from_user(&(msg->cprbx.cprb_len), - (char __force __user *)xcRB->req, xcRB->req_len)) { + if (z_copy_from_user(userspace, &(msg->cprbx.cprb_len), + (char __force __user *)xcRB->req, xcRB->req_len)) { return -EFAULT; } @@ -569,6 +574,11 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(struct ap_message *ap_msg, if (msg->cprbx.flags & 0x20) ap_msg->flags |= AP_MSG_FLAG_SPECIAL; +#ifdef CONFIG_ZCRYPT_DEBUG + if (ap_msg->fi.flags & AP_FI_FLAG_TOGGLE_SPECIAL) + ap_msg->flags ^= AP_MSG_FLAG_SPECIAL; +#endif + return 0; } @@ -650,23 +660,22 @@ static int convert_type86_ica(struct zcrypt_queue *zq, (service_rc == 8 && service_rs == 72) || (service_rc == 8 && service_rs == 770) || (service_rc == 12 && service_rs == 769)) { - ZCRYPT_DBF(DBF_DEBUG, - "device=%02x.%04x rc/rs=%d/%d => rc=EINVAL\n", - AP_QID_CARD(zq->queue->qid), - AP_QID_QUEUE(zq->queue->qid), - (int) service_rc, (int) service_rs); + ZCRYPT_DBF_WARN("dev=%02x.%04x rc/rs=%d/%d => rc=EINVAL\n", + AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid), + (int) service_rc, (int) service_rs); return -EINVAL; } zq->online = 0; - pr_err("Cryptographic device %02x.%04x failed and was set offline\n", + pr_err("Crypto dev=%02x.%04x rc/rs=%d/%d online=0 rc=EAGAIN\n", AP_QID_CARD(zq->queue->qid), - AP_QID_QUEUE(zq->queue->qid)); - 
ZCRYPT_DBF(DBF_ERR, - "device=%02x.%04x rc/rs=%d/%d => online=0 rc=EAGAIN\n", - AP_QID_CARD(zq->queue->qid), - AP_QID_QUEUE(zq->queue->qid), - (int) service_rc, (int) service_rs); - return -EAGAIN; /* repeat the request on a different device. */ + AP_QID_QUEUE(zq->queue->qid), + (int) service_rc, (int) service_rs); + ZCRYPT_DBF_ERR("dev=%02x.%04x rc/rs=%d/%d => online=0 rc=EAGAIN\n", + AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid), + (int) service_rc, (int) service_rs); + return -EAGAIN; } data = msg->text; reply_len = msg->length - 2; @@ -707,7 +716,7 @@ static int convert_type86_ica(struct zcrypt_queue *zq, * * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error. */ -static int convert_type86_xcrb(struct zcrypt_queue *zq, +static int convert_type86_xcrb(bool userspace, struct zcrypt_queue *zq, struct ap_message *reply, struct ica_xcRB *xcRB) { @@ -715,15 +724,15 @@ static int convert_type86_xcrb(struct zcrypt_queue *zq, char *data = reply->msg; /* Copy CPRB to user */ - if (copy_to_user(xcRB->reply_control_blk_addr, - data + msg->fmt2.offset1, msg->fmt2.count1)) + if (z_copy_to_user(userspace, xcRB->reply_control_blk_addr, + data + msg->fmt2.offset1, msg->fmt2.count1)) return -EFAULT; xcRB->reply_control_blk_length = msg->fmt2.count1; /* Copy data buffer to user */ if (msg->fmt2.count2) - if (copy_to_user(xcRB->reply_data_addr, - data + msg->fmt2.offset2, msg->fmt2.count2)) + if (z_copy_to_user(userspace, xcRB->reply_data_addr, + data + msg->fmt2.offset2, msg->fmt2.count2)) return -EFAULT; xcRB->reply_data_length = msg->fmt2.count2; return 0; @@ -738,7 +747,7 @@ static int convert_type86_xcrb(struct zcrypt_queue *zq, * * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error. */ -static int convert_type86_ep11_xcrb(struct zcrypt_queue *zq, +static int convert_type86_ep11_xcrb(bool userspace, struct zcrypt_queue *zq, struct ap_message *reply, struct ep11_urb *xcRB) { @@ -749,8 +758,8 @@ static int convert_type86_ep11_xcrb(struct zcrypt_queue *zq, return -EINVAL; /* Copy response CPRB to user */ - if (copy_to_user((char __force __user *)xcRB->resp, - data + msg->fmt2.offset1, msg->fmt2.count1)) + if (z_copy_to_user(userspace, (char __force __user *)xcRB->resp, + data + msg->fmt2.offset1, msg->fmt2.count1)) return -EFAULT; xcRB->resp_len = msg->fmt2.count1; return 0; @@ -800,23 +809,24 @@ static int convert_response_ica(struct zcrypt_queue *zq, return convert_type86_ica(zq, reply, outputdata, outputdatalength); fallthrough; /* wrong cprb version is an unknown response */ - default: /* Unknown response type, this should NEVER EVER happen */ + default: + /* Unknown response type, this should NEVER EVER happen */ zq->online = 0; - pr_err("Cryptographic device %02x.%04x failed and was set offline\n", + pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n", AP_QID_CARD(zq->queue->qid), - AP_QID_QUEUE(zq->queue->qid)); - ZCRYPT_DBF(DBF_ERR, - "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n", - AP_QID_CARD(zq->queue->qid), - AP_QID_QUEUE(zq->queue->qid), - (int) msg->hdr.type); - return -EAGAIN; /* repeat the request on a different device. 
*/
+		       AP_QID_QUEUE(zq->queue->qid),
+		       (int) msg->hdr.type);
+		ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+			       AP_QID_CARD(zq->queue->qid),
+			       AP_QID_QUEUE(zq->queue->qid),
+			       (int) msg->hdr.type);
+		return -EAGAIN;
 	}
 }
 
-static int convert_response_xcrb(struct zcrypt_queue *zq,
-				 struct ap_message *reply,
-				 struct ica_xcRB *xcRB)
+static int convert_response_xcrb(bool userspace, struct zcrypt_queue *zq,
+				 struct ap_message *reply,
+				 struct ica_xcRB *xcRB)
 {
 	struct type86x_reply *msg = reply->msg;
@@ -831,25 +841,25 @@ static int convert_response_xcrb(struct zcrypt_queue *zq,
 			return convert_error(zq, reply);
 		}
 		if (msg->cprbx.cprb_ver_id == 0x02)
-			return convert_type86_xcrb(zq, reply, xcRB);
+			return convert_type86_xcrb(userspace, zq, reply, xcRB);
 		fallthrough;	/* wrong cprb version is an unknown response */
 	default: /* Unknown response type, this should NEVER EVER happen */
 		xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
 		zq->online = 0;
-		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+		pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
 		       AP_QID_CARD(zq->queue->qid),
-		       AP_QID_QUEUE(zq->queue->qid));
-		ZCRYPT_DBF(DBF_ERR,
-			   "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
-			   AP_QID_CARD(zq->queue->qid),
-			   AP_QID_QUEUE(zq->queue->qid),
-			   (int) msg->hdr.type);
-		return -EAGAIN;	/* repeat the request on a different device. */
+		       AP_QID_QUEUE(zq->queue->qid),
+		       (int) msg->hdr.type);
+		ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+			       AP_QID_CARD(zq->queue->qid),
+			       AP_QID_QUEUE(zq->queue->qid),
+			       (int) msg->hdr.type);
+		return -EAGAIN;
 	}
 }
 
-static int convert_response_ep11_xcrb(struct zcrypt_queue *zq,
-				      struct ap_message *reply, struct ep11_urb *xcRB)
+static int convert_response_ep11_xcrb(bool userspace, struct zcrypt_queue *zq,
+				      struct ap_message *reply, struct ep11_urb *xcRB)
 {
 	struct type86_ep11_reply *msg = reply->msg;
@@ -861,19 +871,19 @@ static int convert_response_ep11_xcrb(struct zcrypt_queue *zq,
 		if (msg->hdr.reply_code)
 			return convert_error(zq, reply);
 		if (msg->cprbx.cprb_ver_id == 0x04)
-			return convert_type86_ep11_xcrb(zq, reply, xcRB);
+			return convert_type86_ep11_xcrb(userspace, zq, reply, xcRB);
 		fallthrough;	/* wrong cprb version is an unknown resp */
 	default: /* Unknown response type, this should NEVER EVER happen */
 		zq->online = 0;
-		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+		pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
 		       AP_QID_CARD(zq->queue->qid),
-		       AP_QID_QUEUE(zq->queue->qid));
-		ZCRYPT_DBF(DBF_ERR,
-			   "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
-			   AP_QID_CARD(zq->queue->qid),
-			   AP_QID_QUEUE(zq->queue->qid),
-			   (int) msg->hdr.type);
-		return -EAGAIN;	/* repeat the request on a different device. */
+		       AP_QID_QUEUE(zq->queue->qid),
+		       (int) msg->hdr.type);
+		ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+			       AP_QID_CARD(zq->queue->qid),
+			       AP_QID_QUEUE(zq->queue->qid),
+			       (int) msg->hdr.type);
+		return -EAGAIN;
 	}
 }
@@ -895,15 +905,15 @@ static int convert_response_rng(struct zcrypt_queue *zq,
 		fallthrough;	/* wrong cprb version is an unknown response */
 	default: /* Unknown response type, this should NEVER EVER happen */
 		zq->online = 0;
-		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+		pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
 		       AP_QID_CARD(zq->queue->qid),
-		       AP_QID_QUEUE(zq->queue->qid));
-		ZCRYPT_DBF(DBF_ERR,
-			   "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
-			   AP_QID_CARD(zq->queue->qid),
-			   AP_QID_QUEUE(zq->queue->qid),
-			   (int) msg->hdr.type);
-		return -EAGAIN;	/* repeat the request on a different device. */
+		       AP_QID_QUEUE(zq->queue->qid),
+		       (int) msg->hdr.type);
+		ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+			       AP_QID_CARD(zq->queue->qid),
+			       AP_QID_QUEUE(zq->queue->qid),
+			       (int) msg->hdr.type);
+		return -EAGAIN;
 	}
 }
@@ -1007,39 +1017,42 @@ static atomic_t zcrypt_step = ATOMIC_INIT(0);
  * @mex: pointer to the modexpo request buffer
  */
 static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq,
-				    struct ica_rsa_modexpo *mex)
+				    struct ica_rsa_modexpo *mex,
+				    struct ap_message *ap_msg)
 {
-	struct ap_message ap_msg;
 	struct response_type resp_type = {
 		.type = CEXXC_RESPONSE_TYPE_ICA,
 	};
 	int rc;
 
-	ap_init_message(&ap_msg);
-	ap_msg.msg = (void *) get_zeroed_page(GFP_KERNEL);
-	if (!ap_msg.msg)
+	ap_msg->msg = (void *) get_zeroed_page(GFP_KERNEL);
+	if (!ap_msg->msg)
 		return -ENOMEM;
-	ap_msg.receive = zcrypt_msgtype6_receive;
-	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
-		atomic_inc_return(&zcrypt_step);
-	ap_msg.private = &resp_type;
-	rc = ICAMEX_msg_to_type6MEX_msgX(zq, &ap_msg, mex);
+	ap_msg->receive = zcrypt_msgtype6_receive;
+	ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+		atomic_inc_return(&zcrypt_step);
+	ap_msg->private = &resp_type;
+	rc = ICAMEX_msg_to_type6MEX_msgX(zq, ap_msg, mex);
 	if (rc)
 		goto out_free;
 	init_completion(&resp_type.work);
-	ap_queue_message(zq->queue, &ap_msg);
+	rc = ap_queue_message(zq->queue, ap_msg);
+	if (rc)
+		goto out_free;
 	rc = wait_for_completion_interruptible(&resp_type.work);
 	if (rc == 0) {
-		rc = ap_msg.rc;
+		rc = ap_msg->rc;
 		if (rc == 0)
-			rc = convert_response_ica(zq, &ap_msg,
+			rc = convert_response_ica(zq, ap_msg,
						  mex->outputdata,
						  mex->outputdatalength);
 	} else
 		/* Signal pending. */
-		ap_cancel_message(zq->queue, &ap_msg);
+		ap_cancel_message(zq->queue, ap_msg);
 out_free:
-	free_page((unsigned long) ap_msg.msg);
+	free_page((unsigned long) ap_msg->msg);
+	ap_msg->private = NULL;
+	ap_msg->msg = NULL;
 	return rc;
 }
@@ -1051,40 +1064,43 @@ out_free:
  * @crt: pointer to the modexpoc_crt request buffer
  */
 static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq,
-					struct ica_rsa_modexpo_crt *crt)
+					struct ica_rsa_modexpo_crt *crt,
+					struct ap_message *ap_msg)
 {
-	struct ap_message ap_msg;
 	struct response_type resp_type = {
 		.type = CEXXC_RESPONSE_TYPE_ICA,
 	};
 	int rc;
 
-	ap_init_message(&ap_msg);
-	ap_msg.msg = (void *) get_zeroed_page(GFP_KERNEL);
-	if (!ap_msg.msg)
+	ap_msg->msg = (void *) get_zeroed_page(GFP_KERNEL);
+	if (!ap_msg->msg)
 		return -ENOMEM;
-	ap_msg.receive = zcrypt_msgtype6_receive;
-	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
-		atomic_inc_return(&zcrypt_step);
-	ap_msg.private = &resp_type;
-	rc = ICACRT_msg_to_type6CRT_msgX(zq, &ap_msg, crt);
+	ap_msg->receive = zcrypt_msgtype6_receive;
+	ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+		atomic_inc_return(&zcrypt_step);
+	ap_msg->private = &resp_type;
+	rc = ICACRT_msg_to_type6CRT_msgX(zq, ap_msg, crt);
 	if (rc)
 		goto out_free;
 	init_completion(&resp_type.work);
-	ap_queue_message(zq->queue, &ap_msg);
+	rc = ap_queue_message(zq->queue, ap_msg);
+	if (rc)
+		goto out_free;
 	rc = wait_for_completion_interruptible(&resp_type.work);
 	if (rc == 0) {
-		rc = ap_msg.rc;
+		rc = ap_msg->rc;
 		if (rc == 0)
-			rc = convert_response_ica(zq, &ap_msg,
+			rc = convert_response_ica(zq, ap_msg,
						  crt->outputdata,
						  crt->outputdatalength);
 	} else {
 		/* Signal pending. */
-		ap_cancel_message(zq->queue, &ap_msg);
+		ap_cancel_message(zq->queue, ap_msg);
 	}
 out_free:
-	free_page((unsigned long) ap_msg.msg);
+	free_page((unsigned long) ap_msg->msg);
+	ap_msg->private = NULL;
+	ap_msg->msg = NULL;
 	return rc;
 }
@@ -1095,9 +1111,9 @@ out_free:
  * by the caller with ap_init_message(). Also the caller has to
 * make sure ap_release_message() is always called even on failure.
 */
-unsigned int get_cprb_fc(struct ica_xcRB *xcRB,
-			 struct ap_message *ap_msg,
-			 unsigned int *func_code, unsigned short **dom)
+unsigned int get_cprb_fc(bool userspace, struct ica_xcRB *xcRB,
+			 struct ap_message *ap_msg,
+			 unsigned int *func_code, unsigned short **dom)
 {
 	struct response_type resp_type = {
 		.type = CEXXC_RESPONSE_TYPE_XCRB,
@@ -1112,7 +1128,7 @@ unsigned int get_cprb_fc(struct ica_xcRB *xcRB,
 	ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL);
 	if (!ap_msg->private)
 		return -ENOMEM;
-	return XCRB_msg_to_type6CPRB_msgX(ap_msg, xcRB, func_code, dom);
+	return XCRB_msg_to_type6CPRB_msgX(userspace, ap_msg, xcRB, func_code, dom);
 }
 
 /**
@@ -1122,24 +1138,26 @@ unsigned int get_cprb_fc(struct ica_xcRB *xcRB,
  * CEXxC device to the request distributor
  * @xcRB: pointer to the send_cprb request buffer
 */
-static long zcrypt_msgtype6_send_cprb(struct zcrypt_queue *zq,
-				      struct ica_xcRB *xcRB,
-				      struct ap_message *ap_msg)
+static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq,
+				      struct ica_xcRB *xcRB,
+				      struct ap_message *ap_msg)
 {
 	int rc;
 	struct response_type *rtype = (struct response_type *)(ap_msg->private);
 
 	init_completion(&rtype->work);
-	ap_queue_message(zq->queue, ap_msg);
+	rc = ap_queue_message(zq->queue, ap_msg);
+	if (rc)
+		goto out;
 	rc = wait_for_completion_interruptible(&rtype->work);
 	if (rc == 0) {
 		rc = ap_msg->rc;
 		if (rc == 0)
-			rc = convert_response_xcrb(zq, ap_msg, xcRB);
+			rc = convert_response_xcrb(userspace, zq, ap_msg, xcRB);
 	} else
 		/* Signal pending. */
 		ap_cancel_message(zq->queue, ap_msg);
-
+out:
 	return rc;
 }
@@ -1150,9 +1168,9 @@ static long zcrypt_msgtype6_send_cprb(struct zcrypt_queue *zq,
 * by the caller with ap_init_message(). Also the caller has to
 * make sure ap_release_message() is always called even on failure.
 */
-unsigned int get_ep11cprb_fc(struct ep11_urb *xcrb,
-			     struct ap_message *ap_msg,
-			     unsigned int *func_code)
+unsigned int get_ep11cprb_fc(bool userspace, struct ep11_urb *xcrb,
-			     struct ap_message *ap_msg,
+			     struct ap_message *ap_msg,
+			     unsigned int *func_code)
 {
 	struct response_type resp_type = {
 		.type = CEXXC_RESPONSE_TYPE_EP11,
@@ -1167,7 +1185,7 @@ unsigned int get_ep11cprb_fc(struct ep11_urb *xcrb,
 	ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL);
 	if (!ap_msg->private)
 		return -ENOMEM;
-	return xcrb_msg_to_type6_ep11cprb_msgx(ap_msg, xcrb, func_code);
+	return xcrb_msg_to_type6_ep11cprb_msgx(userspace, ap_msg, xcrb, func_code);
 }
 
 /**
@@ -1177,7 +1195,7 @@ unsigned int get_ep11cprb_fc(struct ep11_urb *xcrb,
 * CEX4P device to the request distributor
 * @xcRB: pointer to the ep11 user request block
 */
-static long zcrypt_msgtype6_send_ep11_cprb(struct zcrypt_queue *zq,
+static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue *zq,
					   struct ep11_urb *xcrb,
					   struct ap_message *ap_msg)
 {
@@ -1232,16 +1250,18 @@ static long zcrypt_msgtype6_send_ep11_cprb(struct zcrypt_queue *zq,
 	}
 
 	init_completion(&rtype->work);
-	ap_queue_message(zq->queue, ap_msg);
+	rc = ap_queue_message(zq->queue, ap_msg);
+	if (rc)
+		goto out;
 	rc = wait_for_completion_interruptible(&rtype->work);
 	if (rc == 0) {
 		rc = ap_msg->rc;
 		if (rc == 0)
-			rc = convert_response_ep11_xcrb(zq, ap_msg, xcrb);
+			rc = convert_response_ep11_xcrb(userspace, zq, ap_msg, xcrb);
 	} else
 		/* Signal pending. */
 		ap_cancel_message(zq->queue, ap_msg);
-
+out:
 	return rc;
 }
@@ -1293,7 +1313,9 @@ static long zcrypt_msgtype6_rng(struct zcrypt_queue *zq,
 	msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
 
 	init_completion(&rtype->work);
-	ap_queue_message(zq->queue, ap_msg);
+	rc = ap_queue_message(zq->queue, ap_msg);
+	if (rc)
+		goto out;
 	rc = wait_for_completion_interruptible(&rtype->work);
 	if (rc == 0) {
 		rc = ap_msg->rc;
@@ -1302,7 +1324,7 @@ static long zcrypt_msgtype6_rng(struct zcrypt_queue *zq,
 	} else
 		/* Signal pending. */
 		ap_cancel_message(zq->queue, ap_msg);
-
+out:
 	return rc;
 }
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.h b/drivers/s390/crypto/zcrypt_msgtype6.h
index 0de280a81dd4..0a0bf074206b 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.h
+++ b/drivers/s390/crypto/zcrypt_msgtype6.h
@@ -96,9 +96,9 @@ struct type86_fmt2_ext {
 	unsigned int	  offset4;	/* 0x00000000 */
 } __packed;
 
-unsigned int get_cprb_fc(struct ica_xcRB *, struct ap_message *,
+unsigned int get_cprb_fc(bool userspace, struct ica_xcRB *, struct ap_message *,
			 unsigned int *, unsigned short **);
-unsigned int get_ep11cprb_fc(struct ep11_urb *, struct ap_message *,
+unsigned int get_ep11cprb_fc(bool userspace, struct ep11_urb *, struct ap_message *,
			     unsigned int *);
 unsigned int get_rng_fc(struct ap_message *, int *, unsigned int *);
diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c
index 8bae6ad159a7..3c207066313c 100644
--- a/drivers/s390/crypto/zcrypt_queue.c
+++ b/drivers/s390/crypto/zcrypt_queue.c
@@ -40,22 +40,27 @@ static ssize_t online_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
 {
-	struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+	struct ap_queue *aq = to_ap_queue(dev);
+	struct zcrypt_queue *zq = aq->private;
+	int online = aq->config && zq->online ? 1 : 0;
 
-	return scnprintf(buf, PAGE_SIZE, "%d\n", zq->online);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", online);
 }
 
 static ssize_t online_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
 {
-	struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+	struct ap_queue *aq = to_ap_queue(dev);
+	struct zcrypt_queue *zq = aq->private;
 	struct zcrypt_card *zc = zq->zcard;
 	int online;
 
 	if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
 		return -EINVAL;
+	if (online && (!aq->config || !aq->card->config))
+		return -ENODEV;
 	if (online && !zc->online)
 		return -EINVAL;
 	zq->online = online;
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index e78d65bd46b1..a8a514074084 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -380,8 +380,6 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
		 &qdio->adapter->status);
 
 	init_data.q_format = QDIO_ZFCP_QFMT;
-	memcpy(init_data.adapter_name, dev_name(&cdev->dev), 8);
-	ASCEBC(init_data.adapter_name, 8);
 	init_data.qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
 	if (enable_multibuffer)
 		init_data.qdr_ac |= QDR_AC_MULTI_BUFFER_ENABLE;