| author | Linus Torvalds | 2017-02-24 14:13:34 -0800 |
|---|---|---|
| committer | Linus Torvalds | 2017-02-24 14:13:34 -0800 |
| commit | 1802979ab1ee8ec5a72987ad518f5a91bf41cd89 (patch) | |
| tree | 4b1bfc68a180040d713e8b1f106ca12f2461b8ec /block | |
| parent | f1ef09fde17f9b77ca1435a5b53a28b203afb81c (diff) | |
| parent | 61febef40bfe8ab68259d8545257686e8a0d91d1 (diff) | |
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block updates and fixes from Jens Axboe:
- NVMe updates and fixes that missed the first pull request. This
includes bug fixes and support for autonomous power management.
- Fix from Christoph for a missing clear of the request payload, which
caused a problem with (at least) the storvsc driver.
- Further fixes for the queue/bdi lifetime issues from Jan.
- The Kconfig mq scheduler update from me.
- Fix for a use-after-free in dm-rq, introduced in this merge window
and spotted by Bart.
- Three fixes for nbd from Josef.
- Bug fix from Omar for the sas transport code, which oopsed when bsg
ioctls were used.
- Improvements to the queue restart and tag wait handling from Omar
(see the tag-wait sketch after this list).
- Set of fixes for the sed/opal code from Scott.
- Three trivial patches to cciss from Tobin.
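
One of the changes above is worth a short illustration: when a driver tag allocation fails, blk-mq now registers the hardware queue on the tag bitmap's wait queue and then retries once, closing the race with a tag being freed in between, instead of relying only on the restart flag. Below is a minimal userspace sketch of that register-then-recheck pattern, using C11 atomics and made-up helper names; the real code uses sbitmap wait queues and the BLK_MQ_S_TAG_WAITING hctx state bit.

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Count of free driver tags; starts at zero so the first attempt fails. */
static atomic_int free_tags;
/* Rough stand-in for the BLK_MQ_S_TAG_WAITING hctx state bit. */
static atomic_bool tag_waiting;

/* Try to take one tag; returns false if none are free. */
static bool get_tag(void)
{
	int old = atomic_load(&free_tags);

	while (old > 0) {
		if (atomic_compare_exchange_weak(&free_tags, &old, old - 1))
			return true;
	}
	return false;
}

/* Completion path: return a tag and wake a registered waiter, if any. */
static void put_tag(void)
{
	atomic_fetch_add(&free_tags, 1);
	if (atomic_exchange(&tag_waiting, false))
		printf("wake dispatcher, rerun the hardware queue\n");
}

/*
 * Dispatch path: on allocation failure, register as a waiter first and
 * only then retry, so a tag freed between the failed attempt and the
 * registration cannot be missed.
 */
static bool dispatch_get_tag(void)
{
	if (get_tag())
		return true;

	if (atomic_exchange(&tag_waiting, true))
		return false;	/* another context is already waiting */

	return get_tag();	/* re-check after registering */
}

int main(void)
{
	printf("first attempt:  %s\n", dispatch_get_tag() ? "got tag" : "must wait");
	put_tag();	/* a request completes and frees a tag */
	printf("second attempt: %s\n", dispatch_get_tag() ? "got tag" : "must wait");
	return 0;
}
```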
* 'for-linus' of git://git.kernel.dk/linux-block: (41 commits)
dm-rq: don't dereference request payload after ending request
blk-mq-sched: separate mark hctx and queue restart operations
blk-mq: use sbq wait queues instead of restart for driver tags
block/sed-opal: Propagate original error message to userland.
nvme/pci: re-check security protocol support after reset
block/sed-opal: Introduce free_opal_dev to free the structure and clean up state
nvme: detect NVMe controller in recent MacBooks
nvme-rdma: add support for host_traddr
nvmet-rdma: Fix error handling
nvmet-rdma: use nvme cm status helper
nvme-rdma: move nvme cm status helper to .h file
nvme-fc: don't bother to validate ioccsz and iorcsz
nvme/pci: No special case for queue busy on IO
nvme/core: Fix race kicking freed request_queue
nvme/pci: Disable on removal when disconnected
nvme: Enable autonomous power state transitions
nvme: Add a quirk mechanism that uses identify_ctrl
nvme: make nvmf_register_transport require a create_ctrl callback
nvme: Use CNS as 8-bit field and avoid endianness conversion
nvme: add semicolon in nvme_command setting
...
Diffstat (limited to 'block')
-rw-r--r-- | block/Kconfig.iosched | 44
-rw-r--r-- | block/blk-mq-sched.c | 29
-rw-r--r-- | block/blk-mq-sched.h | 26
-rw-r--r-- | block/blk-mq.c | 64
-rw-r--r-- | block/elevator.c | 19
-rw-r--r-- | block/genhd.c | 4
-rw-r--r-- | block/sed-opal.c | 577
7 files changed, 353 insertions, 410 deletions
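
Much of the sed-opal churn in the diff below comes from replacing a bare function-pointer array plus a parallel func_data[] indexed by dev->state with a table of { fn, data } pairs that next() walks. A minimal sketch of that table-driven pattern follows (plain C, invented step names, not the driver's actual types):

```c
#include <stdio.h>
#include <stddef.h>

struct dev;				/* stand-in for struct opal_dev */

/* Each step pairs a callback with the data it needs. */
struct step {
	int (*fn)(struct dev *dev, void *data);
	void *data;
};

static int discovery(struct dev *dev, void *data)
{
	(void)dev; (void)data;
	printf("discovery\n");
	return 0;
}

static int start_session(struct dev *dev, void *data)
{
	(void)dev;
	printf("start session with key \"%s\"\n", (const char *)data);
	return 0;
}

static int end_session(struct dev *dev, void *data)
{
	(void)dev; (void)data;
	printf("end session\n");
	return 0;
}

/* Walk the NULL-terminated step table, stopping at the first error. */
static int run_steps(struct dev *dev, const struct step *steps)
{
	int error = 0;

	for (size_t i = 0; steps[i].fn && !error; i++)
		error = steps[i].fn(dev, steps[i].data);
	return error;
}

int main(void)
{
	char key[] = "example-key";	/* per-step payload */
	const struct step steps[] = {
		{ discovery, },
		{ start_session, key },
		{ end_session, },
		{ NULL, }
	};

	return run_steps(NULL, steps);
}
```

Carrying the data pointer inside each step removes the need to keep a separate state counter in sync with a second array, which is what the original func_data[dev->state] lookups required.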
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched index 0715ce93daef..58fc8684788d 100644 --- a/block/Kconfig.iosched +++ b/block/Kconfig.iosched @@ -69,50 +69,6 @@ config MQ_IOSCHED_DEADLINE ---help--- MQ version of the deadline IO scheduler. -config MQ_IOSCHED_NONE - bool - default y - -choice - prompt "Default single-queue blk-mq I/O scheduler" - default DEFAULT_SQ_NONE - help - Select the I/O scheduler which will be used by default for blk-mq - managed block devices with a single queue. - - config DEFAULT_SQ_DEADLINE - bool "MQ Deadline" if MQ_IOSCHED_DEADLINE=y - - config DEFAULT_SQ_NONE - bool "None" - -endchoice - -config DEFAULT_SQ_IOSCHED - string - default "mq-deadline" if DEFAULT_SQ_DEADLINE - default "none" if DEFAULT_SQ_NONE - -choice - prompt "Default multi-queue blk-mq I/O scheduler" - default DEFAULT_MQ_NONE - help - Select the I/O scheduler which will be used by default for blk-mq - managed block devices with multiple queues. - - config DEFAULT_MQ_DEADLINE - bool "MQ Deadline" if MQ_IOSCHED_DEADLINE=y - - config DEFAULT_MQ_NONE - bool "None" - -endchoice - -config DEFAULT_MQ_IOSCHED - string - default "mq-deadline" if DEFAULT_MQ_DEADLINE - default "none" if DEFAULT_MQ_NONE - endmenu endif diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 9e8d6795a8c1..98c7b061781e 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -205,7 +205,7 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) * needing a restart in that case. */ if (!list_empty(&rq_list)) { - blk_mq_sched_mark_restart(hctx); + blk_mq_sched_mark_restart_hctx(hctx); did_work = blk_mq_dispatch_rq_list(hctx, &rq_list); } else if (!has_sched_dispatch) { blk_mq_flush_busy_ctxs(hctx, &rq_list); @@ -331,20 +331,16 @@ static void blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx) void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx) { + struct request_queue *q = hctx->queue; unsigned int i; - if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) + if (test_bit(QUEUE_FLAG_RESTART, &q->queue_flags)) { + if (test_and_clear_bit(QUEUE_FLAG_RESTART, &q->queue_flags)) { + queue_for_each_hw_ctx(q, hctx, i) + blk_mq_sched_restart_hctx(hctx); + } + } else { blk_mq_sched_restart_hctx(hctx); - else { - struct request_queue *q = hctx->queue; - - if (!test_bit(QUEUE_FLAG_RESTART, &q->queue_flags)) - return; - - clear_bit(QUEUE_FLAG_RESTART, &q->queue_flags); - - queue_for_each_hw_ctx(q, hctx, i) - blk_mq_sched_restart_hctx(hctx); } } @@ -498,15 +494,6 @@ int blk_mq_sched_init(struct request_queue *q) { int ret; -#if defined(CONFIG_DEFAULT_SQ_NONE) - if (q->nr_hw_queues == 1) - return 0; -#endif -#if defined(CONFIG_DEFAULT_MQ_NONE) - if (q->nr_hw_queues > 1) - return 0; -#endif - mutex_lock(&q->sysfs_lock); ret = elevator_init(q, NULL); mutex_unlock(&q->sysfs_lock); diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h index 7b5f3b95c78e..a75b16b123f7 100644 --- a/block/blk-mq-sched.h +++ b/block/blk-mq-sched.h @@ -122,17 +122,27 @@ static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx) return false; } -static inline void blk_mq_sched_mark_restart(struct blk_mq_hw_ctx *hctx) +/* + * Mark a hardware queue as needing a restart. 
+ */ +static inline void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx) { - if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) { + if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); - if (hctx->flags & BLK_MQ_F_TAG_SHARED) { - struct request_queue *q = hctx->queue; +} + +/* + * Mark a hardware queue and the request queue it belongs to as needing a + * restart. + */ +static inline void blk_mq_sched_mark_restart_queue(struct blk_mq_hw_ctx *hctx) +{ + struct request_queue *q = hctx->queue; - if (!test_bit(QUEUE_FLAG_RESTART, &q->queue_flags)) - set_bit(QUEUE_FLAG_RESTART, &q->queue_flags); - } - } + if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) + set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); + if (!test_bit(QUEUE_FLAG_RESTART, &q->queue_flags)) + set_bit(QUEUE_FLAG_RESTART, &q->queue_flags); } static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx) diff --git a/block/blk-mq.c b/block/blk-mq.c index b29e7dc7b309..9e6b064e5339 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -904,6 +904,44 @@ static bool reorder_tags_to_front(struct list_head *list) return first != NULL; } +static int blk_mq_dispatch_wake(wait_queue_t *wait, unsigned mode, int flags, + void *key) +{ + struct blk_mq_hw_ctx *hctx; + + hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait); + + list_del(&wait->task_list); + clear_bit_unlock(BLK_MQ_S_TAG_WAITING, &hctx->state); + blk_mq_run_hw_queue(hctx, true); + return 1; +} + +static bool blk_mq_dispatch_wait_add(struct blk_mq_hw_ctx *hctx) +{ + struct sbq_wait_state *ws; + + /* + * The TAG_WAITING bit serves as a lock protecting hctx->dispatch_wait. + * The thread which wins the race to grab this bit adds the hardware + * queue to the wait queue. + */ + if (test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state) || + test_and_set_bit_lock(BLK_MQ_S_TAG_WAITING, &hctx->state)) + return false; + + init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake); + ws = bt_wait_ptr(&hctx->tags->bitmap_tags, hctx); + + /* + * As soon as this returns, it's no longer safe to fiddle with + * hctx->dispatch_wait, since a completion can wake up the wait queue + * and unlock the bit. + */ + add_wait_queue(&ws->wait, &hctx->dispatch_wait); + return true; +} + bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list) { struct request_queue *q = hctx->queue; @@ -931,15 +969,22 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list) continue; /* - * We failed getting a driver tag. Mark the queue(s) - * as needing a restart. Retry getting a tag again, - * in case the needed IO completed right before we - * marked the queue as needing a restart. + * The initial allocation attempt failed, so we need to + * rerun the hardware queue when a tag is freed. */ - blk_mq_sched_mark_restart(hctx); - if (!blk_mq_get_driver_tag(rq, &hctx, false)) + if (blk_mq_dispatch_wait_add(hctx)) { + /* + * It's possible that a tag was freed in the + * window between the allocation failure and + * adding the hardware queue to the wait queue. + */ + if (!blk_mq_get_driver_tag(rq, &hctx, false)) + break; + } else { break; + } } + list_del_init(&rq->queuelist); bd.rq = rq; @@ -995,10 +1040,11 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list) * * blk_mq_run_hw_queue() already checks the STOPPED bit * - * If RESTART is set, then let completion restart the queue - * instead of potentially looping here. 
+ * If RESTART or TAG_WAITING is set, then let completion restart + * the queue instead of potentially looping here. */ - if (!blk_mq_sched_needs_restart(hctx)) + if (!blk_mq_sched_needs_restart(hctx) && + !test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state)) blk_mq_run_hw_queue(hctx, true); } diff --git a/block/elevator.c b/block/elevator.c index 699d10f71a2c..ac1c9f481a98 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -220,17 +220,24 @@ int elevator_init(struct request_queue *q, char *name) } if (!e) { - if (q->mq_ops && q->nr_hw_queues == 1) - e = elevator_get(CONFIG_DEFAULT_SQ_IOSCHED, false); - else if (q->mq_ops) - e = elevator_get(CONFIG_DEFAULT_MQ_IOSCHED, false); - else + /* + * For blk-mq devices, we default to using mq-deadline, + * if available, for single queue devices. If deadline + * isn't available OR we have multiple queues, default + * to "none". + */ + if (q->mq_ops) { + if (q->nr_hw_queues == 1) + e = elevator_get("mq-deadline", false); + if (!e) + return 0; + } else e = elevator_get(CONFIG_DEFAULT_IOSCHED, false); if (!e) { printk(KERN_ERR "Default I/O scheduler not found. " \ - "Using noop/none.\n"); + "Using noop.\n"); e = elevator_get("noop", false); } } diff --git a/block/genhd.c b/block/genhd.c index 3631cd480295..2f444b87a5f2 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -669,14 +669,14 @@ void del_gendisk(struct gendisk *disk) disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE); while ((part = disk_part_iter_next(&piter))) { - bdev_unhash_inode(MKDEV(disk->major, - disk->first_minor + part->partno)); invalidate_partition(disk, part->partno); + bdev_unhash_inode(part_devt(part)); delete_partition(disk, part->partno); } disk_part_iter_exit(&piter); invalidate_partition(disk, 0); + bdev_unhash_inode(disk_devt(disk)); set_capacity(disk, 0); disk->flags &= ~GENHD_FL_UP; diff --git a/block/sed-opal.c b/block/sed-opal.c index d1c52ba4d62d..1e18dca360fc 100644 --- a/block/sed-opal.c +++ b/block/sed-opal.c @@ -34,7 +34,11 @@ #define IO_BUFFER_LENGTH 2048 #define MAX_TOKS 64 -typedef int (*opal_step)(struct opal_dev *dev); +struct opal_step { + int (*fn)(struct opal_dev *dev, void *data); + void *data; +}; +typedef int (cont_fn)(struct opal_dev *dev); enum opal_atom_width { OPAL_WIDTH_TINY, @@ -80,9 +84,7 @@ struct opal_dev { void *data; sec_send_recv *send_recv; - const opal_step *funcs; - void **func_data; - int state; + const struct opal_step *steps; struct mutex dev_lock; u16 comid; u32 hsn; @@ -213,8 +215,6 @@ static const u8 opalmethod[][OPAL_UID_LENGTH] = { { 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x08, 0x03 }, }; -typedef int (cont_fn)(struct opal_dev *dev); - static int end_opal_session_error(struct opal_dev *dev); struct opal_suspend_data { @@ -375,18 +375,18 @@ static void check_geometry(struct opal_dev *dev, const void *data) static int next(struct opal_dev *dev) { - opal_step func; - int error = 0; + const struct opal_step *step; + int state = 0, error = 0; do { - func = dev->funcs[dev->state]; - if (!func) + step = &dev->steps[state]; + if (!step->fn) break; - error = func(dev); + error = step->fn(dev, step->data); if (error) { pr_err("Error on step function: %d with error %d: %s\n", - dev->state, error, + state, error, opal_error_to_human(error)); /* For each OPAL command we do a discovery0 then we @@ -396,10 +396,13 @@ static int next(struct opal_dev *dev) * session. Therefore we shouldn't attempt to terminate * a session, as one has not yet been created. 
*/ - if (dev->state > 1) - return end_opal_session_error(dev); + if (state > 1) { + end_opal_session_error(dev); + return error; + } + } - dev->state++; + state++; } while (!error); return error; @@ -411,10 +414,17 @@ static int opal_discovery0_end(struct opal_dev *dev) const struct d0_header *hdr = (struct d0_header *)dev->resp; const u8 *epos = dev->resp, *cpos = dev->resp; u16 comid = 0; + u32 hlen = be32_to_cpu(hdr->length); + + print_buffer(dev->resp, hlen); - print_buffer(dev->resp, be32_to_cpu(hdr->length)); + if (hlen > IO_BUFFER_LENGTH - sizeof(*hdr)) { + pr_warn("Discovery length overflows buffer (%zu+%u)/%u\n", + sizeof(*hdr), hlen, IO_BUFFER_LENGTH); + return -EFAULT; + } - epos += be32_to_cpu(hdr->length); /* end of buffer */ + epos += hlen; /* end of buffer */ cpos += sizeof(*hdr); /* current position on buffer */ while (cpos < epos && supported) { @@ -476,7 +486,7 @@ static int opal_discovery0_end(struct opal_dev *dev) return 0; } -static int opal_discovery0(struct opal_dev *dev) +static int opal_discovery0(struct opal_dev *dev, void *data) { int ret; @@ -662,52 +672,29 @@ static int cmd_finalize(struct opal_dev *cmd, u32 hsn, u32 tsn) return 0; } -static enum opal_response_token token_type(const struct parsed_resp *resp, - int n) -{ - const struct opal_resp_tok *tok; - - if (n >= resp->num) { - pr_err("Token number doesn't exist: %d, resp: %d\n", - n, resp->num); - return OPAL_DTA_TOKENID_INVALID; - } - - tok = &resp->toks[n]; - if (tok->len == 0) { - pr_err("Token length must be non-zero\n"); - return OPAL_DTA_TOKENID_INVALID; - } - - return tok->type; -} - -/* - * This function returns 0 in case of invalid token. One should call - * token_type() first to find out if the token is valid or not. - */ -static enum opal_token response_get_token(const struct parsed_resp *resp, - int n) +static const struct opal_resp_tok *response_get_token( + const struct parsed_resp *resp, + int n) { const struct opal_resp_tok *tok; if (n >= resp->num) { pr_err("Token number doesn't exist: %d, resp: %d\n", n, resp->num); - return 0; + return ERR_PTR(-EINVAL); } tok = &resp->toks[n]; if (tok->len == 0) { pr_err("Token length must be non-zero\n"); - return 0; + return ERR_PTR(-EINVAL); } - return tok->pos[0]; + return tok; } -static size_t response_parse_tiny(struct opal_resp_tok *tok, - const u8 *pos) +static ssize_t response_parse_tiny(struct opal_resp_tok *tok, + const u8 *pos) { tok->pos = pos; tok->len = 1; @@ -723,8 +710,8 @@ static size_t response_parse_tiny(struct opal_resp_tok *tok, return tok->len; } -static size_t response_parse_short(struct opal_resp_tok *tok, - const u8 *pos) +static ssize_t response_parse_short(struct opal_resp_tok *tok, + const u8 *pos) { tok->pos = pos; tok->len = (pos[0] & SHORT_ATOM_LEN_MASK) + 1; @@ -736,7 +723,7 @@ static size_t response_parse_short(struct opal_resp_tok *tok, tok->type = OPAL_DTA_TOKENID_SINT; } else { u64 u_integer = 0; - int i, b = 0; + ssize_t i, b = 0; tok->type = OPAL_DTA_TOKENID_UINT; if (tok->len > 9) { @@ -753,8 +740,8 @@ static size_t response_parse_short(struct opal_resp_tok *tok, return tok->len; } -static size_t response_parse_medium(struct opal_resp_tok *tok, - const u8 *pos) +static ssize_t response_parse_medium(struct opal_resp_tok *tok, + const u8 *pos) { tok->pos = pos; tok->len = (((pos[0] & MEDIUM_ATOM_LEN_MASK) << 8) | pos[1]) + 2; @@ -770,8 +757,8 @@ static size_t response_parse_medium(struct opal_resp_tok *tok, return tok->len; } -static size_t response_parse_long(struct opal_resp_tok *tok, - const u8 *pos) +static 
ssize_t response_parse_long(struct opal_resp_tok *tok, + const u8 *pos) { tok->pos = pos; tok->len = ((pos[1] << 16) | (pos[2] << 8) | pos[3]) + 4; @@ -787,8 +774,8 @@ static size_t response_parse_long(struct opal_resp_tok *tok, return tok->len; } -static size_t response_parse_token(struct opal_resp_tok *tok, - const u8 *pos) +static ssize_t response_parse_token(struct opal_resp_tok *tok, + const u8 *pos) { tok->pos = pos; tok->len = 1; @@ -805,8 +792,9 @@ static int response_parse(const u8 *buf, size_t length, struct opal_resp_tok *iter; int num_entries = 0; int total; - size_t token_length; + ssize_t token_length; const u8 *pos; + u32 clen, plen, slen; if (!buf) return -EFAULT; @@ -818,17 +806,16 @@ static int response_parse(const u8 *buf, size_t length, pos = buf; pos += sizeof(*hdr); - pr_debug("Response size: cp: %d, pkt: %d, subpkt: %d\n", - be32_to_cpu(hdr->cp.length), - be32_to_cpu(hdr->pkt.length), - be32_to_cpu(hdr->subpkt.length)); - - if (hdr->cp.length == 0 || hdr->pkt.length == 0 || - hdr->subpkt.length == 0) { - pr_err("Bad header length. cp: %d, pkt: %d, subpkt: %d\n", - be32_to_cpu(hdr->cp.length), - be32_to_cpu(hdr->pkt.length), - be32_to_cpu(hdr->subpkt.length)); + clen = be32_to_cpu(hdr->cp.length); + plen = be32_to_cpu(hdr->pkt.length); + slen = be32_to_cpu(hdr->subpkt.length); + pr_debug("Response size: cp: %u, pkt: %u, subpkt: %u\n", + clen, plen, slen); + + if (clen == 0 || plen == 0 || slen == 0 || + slen > IO_BUFFER_LENGTH - sizeof(*hdr)) { + pr_err("Bad header length. cp: %u, pkt: %u, subpkt: %u\n", + clen, plen, slen); print_buffer(pos, sizeof(*hdr)); return -EINVAL; } @@ -837,7 +824,7 @@ static int response_parse(const u8 *buf, size_t length, return -EFAULT; iter = resp->toks; - total = be32_to_cpu(hdr->subpkt.length); + total = slen; print_buffer(pos, total); while (total > 0) { if (pos[0] <= TINY_ATOM_BYTE) /* tiny atom */ @@ -851,8 +838,8 @@ static int response_parse(const u8 *buf, size_t length, else /* TOKEN */ token_length = response_parse_token(iter, pos); - if (token_length == -EINVAL) - return -EINVAL; + if (token_length < 0) + return token_length; pos += token_length; total -= token_length; @@ -922,20 +909,32 @@ static u64 response_get_u64(const struct parsed_resp *resp, int n) return resp->toks[n].stored.u; } +static bool response_token_matches(const struct opal_resp_tok *token, u8 match) +{ + if (IS_ERR(token) || + token->type != OPAL_DTA_TOKENID_TOKEN || + token->pos[0] != match) + return false; + return true; +} + static u8 response_status(const struct parsed_resp *resp) { - if (token_type(resp, 0) == OPAL_DTA_TOKENID_TOKEN && - response_get_token(resp, 0) == OPAL_ENDOFSESSION) { + const struct opal_resp_tok *tok; + + tok = response_get_token(resp, 0); + if (response_token_matches(tok, OPAL_ENDOFSESSION)) return 0; - } if (resp->num < 5) return DTAERROR_NO_METHOD_STATUS; - if (token_type(resp, resp->num - 1) != OPAL_DTA_TOKENID_TOKEN || - token_type(resp, resp->num - 5) != OPAL_DTA_TOKENID_TOKEN || - response_get_token(resp, resp->num - 1) != OPAL_ENDLIST || - response_get_token(resp, resp->num - 5) != OPAL_STARTLIST) + tok = response_get_token(resp, resp->num - 5); + if (!response_token_matches(tok, OPAL_STARTLIST)) + return DTAERROR_NO_METHOD_STATUS; + + tok = response_get_token(resp, resp->num - 1); + if (!response_token_matches(tok, OPAL_ENDLIST)) return DTAERROR_NO_METHOD_STATUS; return response_get_u64(resp, resp->num - 4); @@ -1022,7 +1021,7 @@ static int finalize_and_send(struct opal_dev *dev, cont_fn cont) return opal_send_recv(dev, 
cont); } -static int gen_key(struct opal_dev *dev) +static int gen_key(struct opal_dev *dev, void *data) { const u8 *method; u8 uid[OPAL_UID_LENGTH]; @@ -1076,15 +1075,14 @@ static int get_active_key_cont(struct opal_dev *dev) return 0; } -static int get_active_key(struct opal_dev *dev) +static int get_active_key(struct opal_dev *dev, void *data) { u8 uid[OPAL_UID_LENGTH]; int err = 0; - u8 *lr; + u8 *lr = data; clear_opal_cmd(dev); set_comid(dev, dev->comid); - lr = dev->func_data[dev->state]; err = build_locking_range(uid, sizeof(uid), *lr); if (err) @@ -1167,17 +1165,16 @@ static inline int enable_global_lr(struct opal_dev *dev, u8 *uid, return err; } -static int setup_locking_range(struct opal_dev *dev) +static int setup_locking_range(struct opal_dev *dev, void *data) { u8 uid[OPAL_UID_LENGTH]; - struct opal_user_lr_setup *setup; + struct opal_user_lr_setup *setup = data; u8 lr; int err = 0; clear_opal_cmd(dev); set_comid(dev, dev->comid); - setup = dev->func_data[dev->state]; lr = setup->session.opal_key.lr; err = build_locking_range(uid, sizeof(uid), lr); if (err) @@ -1290,20 +1287,19 @@ static int start_generic_opal_session(struct opal_dev *dev, return finalize_and_send(dev, start_opal_session_cont); } -static int start_anybodyASP_opal_session(struct opal_dev *dev) +static int start_anybodyASP_opal_session(struct opal_dev *dev, void *data) { return start_generic_opal_session(dev, OPAL_ANYBODY_UID, OPAL_ADMINSP_UID, NULL, 0); } -static int start_SIDASP_opal_session(struct opal_dev *dev) +static int start_SIDASP_opal_session(struct opal_dev *dev, void *data) { int ret; const u8 *key = dev->prev_data; - struct opal_key *okey; if (!key) { - okey = dev->func_data[dev->state]; + const struct opal_key *okey = data; ret = start_generic_opal_session(dev, OPAL_SID_UID, OPAL_ADMINSP_UID, okey->key, @@ -1318,22 +1314,21 @@ static int start_SIDASP_opal_session(struct opal_dev *dev) return ret; } -static inline int start_admin1LSP_opal_session(struct opal_dev *dev) +static int start_admin1LSP_opal_session(struct opal_dev *dev, void *data) { - struct opal_key *key = dev->func_data[dev->state]; - + struct opal_key *key = data; return start_generic_opal_session(dev, OPAL_ADMIN1_UID, OPAL_LOCKINGSP_UID, key->key, key->key_len); } -static int start_auth_opal_session(struct opal_dev *dev) +static int start_auth_opal_session(struct opal_dev *dev, void *data) { + struct opal_session_info *session = data; u8 lk_ul_user[OPAL_UID_LENGTH]; + size_t keylen = session->opal_key.key_len; int err = 0; - struct opal_session_info *session = dev->func_data[dev->state]; - size_t keylen = session->opal_key.key_len; u8 *key = session->opal_key.key; u32 hsn = GENERIC_HOST_SESSION_NUM; @@ -1383,7 +1378,7 @@ static int start_auth_opal_session(struct opal_dev *dev) return finalize_and_send(dev, start_opal_session_cont); } -static int revert_tper(struct opal_dev *dev) +static int revert_tper(struct opal_dev *dev, void *data) { int err = 0; @@ -1405,9 +1400,9 @@ static int revert_tper(struct opal_dev *dev) return finalize_and_send(dev, parse_and_check_status); } -static int internal_activate_user(struct opal_dev *dev) +static int internal_activate_user(struct opal_dev *dev, void *data) { - struct opal_session_info *session = dev->func_data[dev->state]; + struct opal_session_info *session = data; u8 uid[OPAL_UID_LENGTH]; int err = 0; @@ -1440,15 +1435,14 @@ static int internal_activate_user(struct opal_dev *dev) return finalize_and_send(dev, parse_and_check_status); } -static int erase_locking_range(struct opal_dev *dev) 
+static int erase_locking_range(struct opal_dev *dev, void *data) { - struct opal_session_info *session; + struct opal_session_info *session = data; u8 uid[OPAL_UID_LENGTH]; int err = 0; clear_opal_cmd(dev); set_comid(dev, dev->comid); - session = dev->func_data[dev->state]; if (build_locking_range(uid, sizeof(uid), session->opal_key.lr) < 0) return -ERANGE; @@ -1467,9 +1461,9 @@ static int erase_locking_range(struct opal_dev *dev) return finalize_and_send(dev, parse_and_check_status); } -static int set_mbr_done(struct opal_dev *dev) +static int set_mbr_done(struct opal_dev *dev, void *data) { - u8 mbr_done_tf = *(u8 *)dev->func_data[dev->state]; + u8 *mbr_done_tf = data; int err = 0; clear_opal_cmd(dev); @@ -1485,7 +1479,7 @@ static int set_mbr_done(struct opal_dev *dev) add_token_u8(&err, dev, OPAL_STARTLIST); add_token_u8(&err, dev, OPAL_STARTNAME); add_token_u8(&err, dev, 2); /* Done */ - add_token_u8(&err, dev, mbr_done_tf); /* Done T or F */ + add_token_u8(&err, dev, *mbr_done_tf); /* Done T or F */ add_token_u8(&err, dev, OPAL_ENDNAME); add_token_u8(&err, dev, OPAL_ENDLIST); add_token_u8(&err, dev, OPAL_ENDNAME); @@ -1499,9 +1493,9 @@ static int set_mbr_done(struct opal_dev *dev) return finalize_and_send(dev, parse_and_check_status); } -static int set_mbr_enable_disable(struct opal_dev *dev) +static int set_mbr_enable_disable(struct opal_dev *dev, void *data) { - u8 mbr_en_dis = *(u8 *)dev->func_data[dev->state]; + u8 *mbr_en_dis = data; int err = 0; clear_opal_cmd(dev); @@ -1517,7 +1511,7 @@ static int set_mbr_enable_disable(struct opal_dev *dev) add_token_u8(&err, dev, OPAL_STARTLIST); add_token_u8(&err, dev, OPAL_STARTNAME); add_token_u8(&err, dev, 1); - add_token_u8(&err, dev, mbr_en_dis); + add_token_u8(&err, dev, *mbr_en_dis); add_token_u8(&err, dev, OPAL_ENDNAME); add_token_u8(&err, dev, OPAL_ENDLIST); add_token_u8(&err, dev, OPAL_ENDNAME); @@ -1558,11 +1552,10 @@ static int generic_pw_cmd(u8 *key, size_t key_len, u8 *cpin_uid, return err; } -static int set_new_pw(struct opal_dev *dev) +static int set_new_pw(struct opal_dev *dev, void *data) { u8 cpin_uid[OPAL_UID_LENGTH]; - struct opal_session_info *usr = dev->func_data[dev->state]; - + struct opal_session_info *usr = data; memcpy(cpin_uid, opaluid[OPAL_C_PIN_ADMIN1], OPAL_UID_LENGTH); @@ -1583,10 +1576,10 @@ static int set_new_pw(struct opal_dev *dev) return finalize_and_send(dev, parse_and_check_status); } -static int set_sid_cpin_pin(struct opal_dev *dev) +static int set_sid_cpin_pin(struct opal_dev *dev, void *data) { u8 cpin_uid[OPAL_UID_LENGTH]; - struct opal_key *key = dev->func_data[dev->state]; + struct opal_key *key = data; memcpy(cpin_uid, opaluid[OPAL_C_PIN_SID], OPAL_UID_LENGTH); @@ -1597,18 +1590,16 @@ static int set_sid_cpin_pin(struct opal_dev *dev) return finalize_and_send(dev, parse_and_check_status); } -static int add_user_to_lr(struct opal_dev *dev) +static int add_user_to_lr(struct opal_dev *dev, void *data) { u8 lr_buffer[OPAL_UID_LENGTH]; u8 user_uid[OPAL_UID_LENGTH]; - struct opal_lock_unlock *lkul; + struct opal_lock_unlock *lkul = data; int err = 0; clear_opal_cmd(dev); set_comid(dev, dev->comid); - lkul = dev->func_data[dev->state]; - memcpy(lr_buffer, opaluid[OPAL_LOCKINGRANGE_ACE_RDLOCKED], OPAL_UID_LENGTH); @@ -1675,11 +1666,11 @@ static int add_user_to_lr(struct opal_dev *dev) return finalize_and_send(dev, parse_and_check_status); } -static int lock_unlock_locking_range(struct opal_dev *dev) +static int lock_unlock_locking_range(struct opal_dev *dev, void *data) { u8 lr_buffer[OPAL_UID_LENGTH]; 
const u8 *method; - struct opal_lock_unlock *lkul; + struct opal_lock_unlock *lkul = data; u8 read_locked = 1, write_locked = 1; int err = 0; @@ -1687,7 +1678,6 @@ static int lock_unlock_locking_range(struct opal_dev *dev) set_comid(dev, dev->comid); method = opalmethod[OPAL_SET]; - lkul = dev->func_data[dev->state]; if (build_locking_range(lr_buffer, sizeof(lr_buffer), lkul->session.opal_key.lr) < 0) return -ERANGE; @@ -1739,19 +1729,18 @@ static int lock_unlock_locking_range(struct opal_dev *dev) } -static int lock_unlock_locking_range_sum(struct opal_dev *dev) +static int lock_unlock_locking_range_sum(struct opal_dev *dev, void *data) { u8 lr_buffer[OPAL_UID_LENGTH]; u8 read_locked = 1, write_locked = 1; const u8 *method; - struct opal_lock_unlock *lkul; + struct opal_lock_unlock *lkul = data; int ret; clear_opal_cmd(dev); set_comid(dev, dev->comid); method = opalmethod[OPAL_SET]; - lkul = dev->func_data[dev->state]; if (build_locking_range(lr_buffer, sizeof(lr_buffer), lkul->session.opal_key.lr) < 0) return -ERANGE; @@ -1782,9 +1771,9 @@ static int lock_unlock_locking_range_sum(struct opal_dev *dev) return finalize_and_send(dev, parse_and_check_status); } -static int activate_lsp(struct opal_dev *dev) +static int activate_lsp(struct opal_dev *dev, void *data) { - struct opal_lr_act *opal_act; + struct opal_lr_act *opal_act = data; u8 user_lr[OPAL_UID_LENGTH]; u8 uint_3 = 0x83; int err = 0, i; @@ -1792,8 +1781,6 @@ static int activate_lsp(struct opal_dev *dev) clear_opal_cmd(dev); set_comid(dev, dev->comid); - opal_act = dev->func_data[dev->state]; - add_token_u8(&err, dev, OPAL_CALL); add_token_bytestring(&err, dev, opaluid[OPAL_LOCKINGSP_UID], OPAL_UID_LENGTH); @@ -1858,7 +1845,7 @@ static int get_lsp_lifecycle_cont(struct opal_dev *dev) } /* Determine if we're in the Manufactured Inactive or Active state */ -static int get_lsp_lifecycle(struct opal_dev *dev) +static int get_lsp_lifecycle(struct opal_dev *dev, void *data) { int err = 0; @@ -1919,14 +1906,13 @@ static int get_msid_cpin_pin_cont(struct opal_dev *dev) return 0; } -static int get_msid_cpin_pin(struct opal_dev *dev) +static int get_msid_cpin_pin(struct opal_dev *dev, void *data) { int err = 0; clear_opal_cmd(dev); set_comid(dev, dev->comid); - add_token_u8(&err, dev, OPAL_CALL); add_token_bytestring(&err, dev, opaluid[OPAL_C_PIN_MSID], OPAL_UID_LENGTH); @@ -1956,64 +1942,76 @@ static int get_msid_cpin_pin(struct opal_dev *dev) return finalize_and_send(dev, get_msid_cpin_pin_cont); } -static int build_end_opal_session(struct opal_dev *dev) +static int end_opal_session(struct opal_dev *dev, void *data) { int err = 0; clear_opal_cmd(dev); - set_comid(dev, dev->comid); add_token_u8(&err, dev, OPAL_ENDOFSESSION); - return err; -} -static int end_opal_session(struct opal_dev *dev) -{ - int ret = build_end_opal_session(dev); - - if (ret < 0) - return ret; + if (err < 0) + return err; return finalize_and_send(dev, end_session_cont); } static int end_opal_session_error(struct opal_dev *dev) { - const opal_step error_end_session[] = { - end_opal_session, - NULL, + const struct opal_step error_end_session[] = { + { end_opal_session, }, + { NULL, } }; - dev->funcs = error_end_session; - dev->state = 0; + dev->steps = error_end_session; return next(dev); } static inline void setup_opal_dev(struct opal_dev *dev, - const opal_step *funcs) + const struct opal_step *steps) { - dev->state = 0; - dev->funcs = funcs; + dev->steps = steps; dev->tsn = 0; dev->hsn = 0; - dev->func_data = NULL; dev->prev_data = NULL; } static int 
check_opal_support(struct opal_dev *dev) { - static const opal_step funcs[] = { - opal_discovery0, - NULL + const struct opal_step steps[] = { + { opal_discovery0, }, + { NULL, } }; int ret; mutex_lock(&dev->dev_lock); - setup_opal_dev(dev, funcs); + setup_opal_dev(dev, steps); ret = next(dev); dev->supported = !ret; mutex_unlock(&dev->dev_lock); return ret; } +static void clean_opal_dev(struct opal_dev *dev) +{ + + struct opal_suspend_data *suspend, *next; + + mutex_lock(&dev->dev_lock); + list_for_each_entry_safe(suspend, next, &dev->unlk_lst, node) { + list_del(&suspend->node); + kfree(suspend); + } + mutex_unlock(&dev->dev_lock); +} + +void free_opal_dev(struct opal_dev *dev) +{ + if (!dev) + return; + clean_opal_dev(dev); + kfree(dev); +} +EXPORT_SYMBOL(free_opal_dev); + struct opal_dev *init_opal_dev(void *data, sec_send_recv *send_recv) { struct opal_dev *dev; @@ -2038,24 +2036,18 @@ EXPORT_SYMBOL(init_opal_dev); static int opal_secure_erase_locking_range(struct opal_dev *dev, struct opal_session_info *opal_session) { - void *data[3] = { NULL }; - static const opal_step erase_funcs[] = { - opal_discovery0, - start_auth_opal_session, - get_active_key, - gen_key, - end_opal_session, - NULL, + const struct opal_step erase_steps[] = { + { opal_discovery0, }, + { start_auth_opal_session, opal_session }, + { get_active_key, &opal_session->opal_key.lr }, + { gen_key, }, + { end_opal_session, }, + { NULL, } }; int ret; mutex_lock(&dev->dev_lock); - setup_opal_dev(dev, erase_funcs); - - dev->func_data = data; - dev->func_data[1] = opal_session; - dev->func_data[2] = &opal_session->opal_key.lr; - + setup_opal_dev(dev, erase_steps); ret = next(dev); mutex_unlock(&dev->dev_lock); return ret; @@ -2064,23 +2056,17 @@ static int opal_secure_erase_locking_range(struct opal_dev *dev, static int opal_erase_locking_range(struct opal_dev *dev, struct opal_session_info *opal_session) { - void *data[3] = { NULL }; - static const opal_step erase_funcs[] = { - opal_discovery0, - start_auth_opal_session, - erase_locking_range, - end_opal_session, - NULL, + const struct opal_step erase_steps[] = { + { opal_discovery0, }, + { start_auth_opal_session, opal_session }, + { erase_locking_range, opal_session }, + { end_opal_session, }, + { NULL, } }; int ret; mutex_lock(&dev->dev_lock); - setup_opal_dev(dev, erase_funcs); - - dev->func_data = data; - dev->func_data[1] = opal_session; - dev->func_data[2] = opal_session; - + setup_opal_dev(dev, erase_steps); ret = next(dev); mutex_unlock(&dev->dev_lock); return ret; @@ -2089,16 +2075,15 @@ static int opal_erase_locking_range(struct opal_dev *dev, static int opal_enable_disable_shadow_mbr(struct opal_dev *dev, struct opal_mbr_data *opal_mbr) { - void *func_data[6] = { NULL }; - static const opal_step mbr_funcs[] = { - opal_discovery0, - start_admin1LSP_opal_session, - set_mbr_done, - end_opal_session, - start_admin1LSP_opal_session, - set_mbr_enable_disable, - end_opal_session, - NULL, + const struct opal_step mbr_steps[] = { + { opal_discovery0, }, + { start_admin1LSP_opal_session, &opal_mbr->key }, + { set_mbr_done, &opal_mbr->enable_disable }, + { end_opal_session, }, + { start_admin1LSP_opal_session, &opal_mbr->key }, + { set_mbr_enable_disable, &opal_mbr->enable_disable }, + { end_opal_session, }, + { NULL, } }; int ret; @@ -2107,12 +2092,7 @@ static int opal_enable_disable_shadow_mbr(struct opal_dev *dev, return -EINVAL; mutex_lock(&dev->dev_lock); - setup_opal_dev(dev, mbr_funcs); - dev->func_data = func_data; - dev->func_data[1] = &opal_mbr->key; - 
dev->func_data[2] = &opal_mbr->enable_disable; - dev->func_data[4] = &opal_mbr->key; - dev->func_data[5] = &opal_mbr->enable_disable; + setup_opal_dev(dev, mbr_steps); ret = next(dev); mutex_unlock(&dev->dev_lock); return ret; @@ -2139,13 +2119,12 @@ static int opal_save(struct opal_dev *dev, struct opal_lock_unlock *lk_unlk) static int opal_add_user_to_lr(struct opal_dev *dev, struct opal_lock_unlock *lk_unlk) { - void *func_data[3] = { NULL }; - static const opal_step funcs[] = { - opal_discovery0, - start_admin1LSP_opal_session, - add_user_to_lr, - end_opal_session, - NULL + const struct opal_step steps[] = { + { opal_discovery0, }, + { start_admin1LSP_opal_session, &lk_unlk->session.opal_key }, + { add_user_to_lr, lk_unlk }, + { end_opal_session, }, + { NULL, } }; int ret; @@ -2167,10 +2146,7 @@ static int opal_add_user_to_lr(struct opal_dev *dev, } mutex_lock(&dev->dev_lock); - setup_opal_dev(dev, funcs); - dev->func_data = func_data; - dev->func_data[1] = &lk_unlk->session.opal_key; - dev->func_data[2] = lk_unlk; + setup_opal_dev(dev, steps); ret = next(dev); mutex_unlock(&dev->dev_lock); return ret; @@ -2178,55 +2154,54 @@ static int opal_add_user_to_lr(struct opal_dev *dev, static int opal_reverttper(struct opal_dev *dev, struct opal_key *opal) { - void *data[2] = { NULL }; - static const opal_step revert_funcs[] = { - opal_discovery0, - start_SIDASP_opal_session, - revert_tper, /* controller will terminate session */ - NULL, + const struct opal_step revert_steps[] = { + { opal_discovery0, }, + { start_SIDASP_opal_session, opal }, + { revert_tper, }, /* controller will terminate session */ + { NULL, } }; int ret; mutex_lock(&dev->dev_lock); - setup_opal_dev(dev, revert_funcs); - dev->func_data = data; - dev->func_data[1] = opal; + setup_opal_dev(dev, revert_steps); ret = next(dev); mutex_unlock(&dev->dev_lock); - return ret; -} -static int __opal_lock_unlock_sum(struct opal_dev *dev) -{ - static const opal_step ulk_funcs_sum[] = { - opal_discovery0, - start_auth_opal_session, - lock_unlock_locking_range_sum, - end_opal_session, - NULL - }; + /* + * If we successfully reverted lets clean + * any saved locking ranges. + */ + if (!ret) + clean_opal_dev(dev); - dev->funcs = ulk_funcs_sum; - return next(dev); + return ret; } -static int __opal_lock_unlock(struct opal_dev *dev) +static int __opal_lock_unlock(struct opal_dev *dev, + struct opal_lock_unlock *lk_unlk) { - static const opal_step _unlock_funcs[] = { - opal_discovery0, - start_auth_opal_session, - lock_unlock_locking_range, - end_opal_session, - NULL + const struct opal_step unlock_steps[] = { + { opal_discovery0, }, + { start_auth_opal_session, &lk_unlk->session }, + { lock_unlock_locking_range, lk_unlk }, + { end_opal_session, }, + { NULL, } + }; + const struct opal_step unlock_sum_steps[] = { + { opal_discovery0, }, + { start_auth_opal_session, &lk_unlk->session }, + { lock_unlock_locking_range_sum, lk_unlk }, + { end_opal_session, }, + { NULL, } }; - dev->funcs = _unlock_funcs; + dev->steps = lk_unlk->session.sum ? 
unlock_sum_steps : unlock_steps; return next(dev); } -static int opal_lock_unlock(struct opal_dev *dev, struct opal_lock_unlock *lk_unlk) +static int opal_lock_unlock(struct opal_dev *dev, + struct opal_lock_unlock *lk_unlk) { - void *func_data[3] = { NULL }; int ret; if (lk_unlk->session.who < OPAL_ADMIN1 || @@ -2234,43 +2209,30 @@ static int opal_lock_unlock(struct opal_dev *dev, struct opal_lock_unlock *lk_un return -EINVAL; mutex_lock(&dev->dev_lock); - setup_opal_dev(dev, NULL); - dev->func_data = func_data; - dev->func_data[1] = &lk_unlk->session; - dev->func_data[2] = lk_unlk; - - if (lk_unlk->session.sum) - ret = __opal_lock_unlock_sum(dev); - else - ret = __opal_lock_unlock(dev); - + ret = __opal_lock_unlock(dev, lk_unlk); mutex_unlock(&dev->dev_lock); return ret; } static int opal_take_ownership(struct opal_dev *dev, struct opal_key *opal) { - static const opal_step owner_funcs[] = { - opal_discovery0, - start_anybodyASP_opal_session, - get_msid_cpin_pin, - end_opal_session, - start_SIDASP_opal_session, - set_sid_cpin_pin, - end_opal_session, - NULL + const struct opal_step owner_steps[] = { + { opal_discovery0, }, + { start_anybodyASP_opal_session, }, + { get_msid_cpin_pin, }, + { end_opal_session, }, + { start_SIDASP_opal_session, opal }, + { set_sid_cpin_pin, opal }, + { end_opal_session, }, + { NULL, } }; - void *data[6] = { NULL }; int ret; if (!dev) return -ENODEV; mutex_lock(&dev->dev_lock); - setup_opal_dev(dev, owner_funcs); - dev->func_data = data; - dev->func_data[4] = opal; - dev->func_data[5] = opal; + setup_opal_dev(dev, owner_steps); ret = next(dev); mutex_unlock(&dev->dev_lock); return ret; @@ -2278,14 +2240,13 @@ static int opal_take_ownership(struct opal_dev *dev, struct opal_key *opal) static int opal_activate_lsp(struct opal_dev *dev, struct opal_lr_act *opal_lr_act) { - void *data[4] = { NULL }; - static const opal_step active_funcs[] = { - opal_discovery0, - start_SIDASP_opal_session, /* Open session as SID auth */ - get_lsp_lifecycle, - activate_lsp, - end_opal_session, - NULL + const struct opal_step active_steps[] = { + { opal_discovery0, }, + { start_SIDASP_opal_session, &opal_lr_act->key }, + { get_lsp_lifecycle, }, + { activate_lsp, opal_lr_act }, + { end_opal_session, }, + { NULL, } }; int ret; @@ -2293,10 +2254,7 @@ static int opal_activate_lsp(struct opal_dev *dev, struct opal_lr_act *opal_lr_a return -EINVAL; mutex_lock(&dev->dev_lock); - setup_opal_dev(dev, active_funcs); - dev->func_data = data; - dev->func_data[1] = &opal_lr_act->key; - dev->func_data[3] = opal_lr_act; + setup_opal_dev(dev, active_steps); ret = next(dev); mutex_unlock(&dev->dev_lock); return ret; @@ -2305,21 +2263,17 @@ static int opal_activate_lsp(struct opal_dev *dev, struct opal_lr_act *opal_lr_a static int opal_setup_locking_range(struct opal_dev *dev, struct opal_user_lr_setup *opal_lrs) { - void *data[3] = { NULL }; - static const opal_step lr_funcs[] = { - opal_discovery0, - start_auth_opal_session, - setup_locking_range, - end_opal_session, - NULL, + const struct opal_step lr_steps[] = { + { opal_discovery0, }, + { start_auth_opal_session, &opal_lrs->session }, + { setup_locking_range, opal_lrs }, + { end_opal_session, }, + { NULL, } }; int ret; mutex_lock(&dev->dev_lock); - setup_opal_dev(dev, lr_funcs); - dev->func_data = data; - dev->func_data[1] = &opal_lrs->session; - dev->func_data[2] = opal_lrs; + setup_opal_dev(dev, lr_steps); ret = next(dev); mutex_unlock(&dev->dev_lock); return ret; @@ -2327,14 +2281,13 @@ static int opal_setup_locking_range(struct opal_dev 
*dev, static int opal_set_new_pw(struct opal_dev *dev, struct opal_new_pw *opal_pw) { - static const opal_step pw_funcs[] = { - opal_discovery0, - start_auth_opal_session, - set_new_pw, - end_opal_session, - NULL + const struct opal_step pw_steps[] = { + { opal_discovery0, }, + { start_auth_opal_session, &opal_pw->session }, + { set_new_pw, &opal_pw->new_user_pw }, + { end_opal_session, }, + { NULL } }; - void *data[3] = { NULL }; int ret; if (opal_pw->session.who < OPAL_ADMIN1 || @@ -2344,11 +2297,7 @@ static int opal_set_new_pw(struct opal_dev *dev, struct opal_new_pw *opal_pw) return -EINVAL; mutex_lock(&dev->dev_lock); - setup_opal_dev(dev, pw_funcs); - dev->func_data = data; - dev->func_data[1] = (void *) &opal_pw->session; - dev->func_data[2] = (void *) &opal_pw->new_user_pw; - + setup_opal_dev(dev, pw_steps); ret = next(dev); mutex_unlock(&dev->dev_lock); return ret; @@ -2357,14 +2306,13 @@ static int opal_set_new_pw(struct opal_dev *dev, struct opal_new_pw *opal_pw) static int opal_activate_user(struct opal_dev *dev, struct opal_session_info *opal_session) { - static const opal_step act_funcs[] = { - opal_discovery0, - start_admin1LSP_opal_session, - internal_activate_user, - end_opal_session, - NULL + const struct opal_step act_steps[] = { + { opal_discovery0, }, + { start_admin1LSP_opal_session, &opal_session->opal_key }, + { internal_activate_user, opal_session }, + { end_opal_session, }, + { NULL, } }; - void *data[3] = { NULL }; int ret; /* We can't activate Admin1 it's active as manufactured */ @@ -2375,10 +2323,7 @@ static int opal_activate_user(struct opal_dev *dev, } mutex_lock(&dev->dev_lock); - setup_opal_dev(dev, act_funcs); - dev->func_data = data; - dev->func_data[1] = &opal_session->opal_key; - dev->func_data[2] = opal_session; + setup_opal_dev(dev, act_steps); ret = next(dev); mutex_unlock(&dev->dev_lock); return ret; @@ -2387,7 +2332,6 @@ static int opal_activate_user(struct opal_dev *dev, bool opal_unlock_from_suspend(struct opal_dev *dev) { struct opal_suspend_data *suspend; - void *func_data[3] = { NULL }; bool was_failure = false; int ret = 0; @@ -2398,19 +2342,12 @@ bool opal_unlock_from_suspend(struct opal_dev *dev) mutex_lock(&dev->dev_lock); setup_opal_dev(dev, NULL); - dev->func_data = func_data; list_for_each_entry(suspend, &dev->unlk_lst, node) { - dev->state = 0; - dev->func_data[1] = &suspend->unlk.session; - dev->func_data[2] = &suspend->unlk; dev->tsn = 0; dev->hsn = 0; - if (suspend->unlk.session.sum) - ret = __opal_lock_unlock_sum(dev); - else - ret = __opal_lock_unlock(dev); + ret = __opal_lock_unlock(dev, &suspend->unlk); if (ret) { pr_warn("Failed to unlock LR %hhu with sum %d\n", suspend->unlk.session.opal_key.lr, @@ -2437,7 +2374,7 @@ int sed_ioctl(struct opal_dev *dev, unsigned int cmd, void __user *arg) return -ENOTSUPP; } - p = memdup_user(arg, _IOC_SIZE(cmd)); + p = memdup_user(arg, _IOC_SIZE(cmd)); if (IS_ERR(p)) return PTR_ERR(p); |
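
A small but instructive detail in the parsing changes above: the response_parse_* helpers switch their return type from size_t to ssize_t so that a negative error code survives the return and the caller can simply test for a value below zero. A tiny standalone sketch of why the unsigned return type loses the error (hypothetical parser names, standard C):

```c
#include <stdio.h>
#include <errno.h>
#include <sys/types.h>	/* ssize_t */

/*
 * Old style: a parser that returns its length as size_t cannot signal
 * failure with a negative value; -EINVAL converted to size_t becomes a
 * huge positive "length", so a `token_length < 0` check can never fire.
 */
static size_t parse_token_bad(const unsigned char *pos)
{
	if (pos[0] == 0xff)
		return -EINVAL;		/* wraps to SIZE_MAX - 21 */
	return 1;
}

/* New style: signed return type, errors stay negative. */
static ssize_t parse_token_good(const unsigned char *pos)
{
	if (pos[0] == 0xff)
		return -EINVAL;
	return 1;
}

int main(void)
{
	const unsigned char bad_atom[] = { 0xff };

	printf("size_t result:  %zu (looks like a valid length)\n",
	       parse_token_bad(bad_atom));
	printf("ssize_t result: %zd (clearly an error)\n",
	       parse_token_good(bad_atom));
	return 0;
}
```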