author    | Len Brown | 2008-10-22 23:57:26 -0400
committer | Len Brown | 2008-10-23 00:11:07 -0400
commit    | 057316cc6a5b521b332a1d7ccc871cd60c904c74 (patch)
tree      | 4333e608da237c73ff69b10878025cca96dcb4c8 /drivers/ata/libata-eh.c
parent    | 3e2dab9a1c2deb03c311eb3f83466009147ed4d3 (diff)
parent    | 2515ddc6db8eb49a79f0fe5e67ff09ac7c81eab4 (diff)
Merge branch 'linus' into test
Conflicts:
MAINTAINERS
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/acpi/sleep.c
drivers/acpi/Kconfig
drivers/pnp/Makefile
drivers/pnp/quirks.c
Signed-off-by: Len Brown <len.brown@intel.com>
Diffstat (limited to 'drivers/ata/libata-eh.c')
-rw-r--r-- | drivers/ata/libata-eh.c | 375
1 file changed, 342 insertions, 33 deletions
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index c1db2f234d2e..a93247cc395a 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -33,6 +33,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/blkdev.h>
 #include <linux/pci.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
@@ -79,6 +80,8 @@ enum {
	 */
	ATA_EH_PRERESET_TIMEOUT		= 10000,
	ATA_EH_FASTDRAIN_INTERVAL	= 3000,
+
+	ATA_EH_UA_TRIES			= 5,
 };
 
 /* The following table determines how we sequence resets.  Each entry
@@ -457,29 +460,29 @@ static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
  * RETURNS:
  * EH_HANDLED or EH_NOT_HANDLED
  */
-enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
+enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
 {
	struct Scsi_Host *host = cmd->device->host;
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	struct ata_queued_cmd *qc;
-	enum scsi_eh_timer_return ret;
+	enum blk_eh_timer_return ret;
 
	DPRINTK("ENTER\n");
 
	if (ap->ops->error_handler) {
-		ret = EH_NOT_HANDLED;
+		ret = BLK_EH_NOT_HANDLED;
		goto out;
	}
 
-	ret = EH_HANDLED;
+	ret = BLK_EH_HANDLED;
	spin_lock_irqsave(ap->lock, flags);
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc) {
		WARN_ON(qc->scsicmd != cmd);
		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
		qc->err_mask |= AC_ERR_TIMEOUT;
-		ret = EH_NOT_HANDLED;
+		ret = BLK_EH_NOT_HANDLED;
	}
	spin_unlock_irqrestore(ap->lock, flags);
 
@@ -831,7 +834,7 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
	 * this function completes.
	 */
-	scsi_req_abort_cmd(qc->scsicmd);
+	blk_abort_request(qc->scsicmd->request);
 }
 
 /**
@@ -1357,6 +1360,37 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
 }
 
 /**
+ * atapi_eh_tur - perform ATAPI TEST_UNIT_READY
+ * @dev: target ATAPI device
+ * @r_sense_key: out parameter for sense_key
+ *
+ * Perform ATAPI TEST_UNIT_READY.
+ *
+ * LOCKING:
+ * EH context (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, AC_ERR_* mask on failure.
+ */
+static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
+{
+	u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
+	struct ata_taskfile tf;
+	unsigned int err_mask;
+
+	ata_tf_init(dev, &tf);
+
+	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+	tf.command = ATA_CMD_PACKET;
+	tf.protocol = ATAPI_PROT_NODATA;
+
+	err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
+	if (err_mask == AC_ERR_DEV)
+		*r_sense_key = tf.feature >> 4;
+	return err_mask;
+}
+
+/**
  * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
  * @dev: device to perform REQUEST_SENSE to
  * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
@@ -1756,7 +1790,7 @@ static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
 static unsigned int ata_eh_speed_down(struct ata_device *dev,
				unsigned int eflags, unsigned int err_mask)
 {
-	struct ata_link *link = dev->link;
+	struct ata_link *link = ata_dev_phys_link(dev);
	int xfer_ok = 0;
	unsigned int verdict;
	unsigned int action = 0;
@@ -1880,7 +1914,8 @@ static void ata_eh_link_autopsy(struct ata_link *link)
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
 
-		if (!(qc->flags & ATA_QCFLAG_FAILED) || qc->dev->link != link)
+		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
+		    ata_dev_phys_link(qc->dev) != link)
			continue;
 
		/* inherit upper level err_mask */
@@ -1967,6 +2002,23 @@ void ata_eh_autopsy(struct ata_port *ap)
	ata_port_for_each_link(link, ap)
		ata_eh_link_autopsy(link);
 
+	/* Handle the frigging slave link.  Autopsy is done similarly
+	 * but actions and flags are transferred over to the master
+	 * link and handled from there.
+	 */
+	if (ap->slave_link) {
+		struct ata_eh_context *mehc = &ap->link.eh_context;
+		struct ata_eh_context *sehc = &ap->slave_link->eh_context;
+
+		ata_eh_link_autopsy(ap->slave_link);
+
+		ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
+		mehc->i.action |= sehc->i.action;
+		mehc->i.dev_action[1] |= sehc->i.dev_action[1];
+		mehc->i.flags |= sehc->i.flags;
+		ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
+	}
+
	/* Autopsy of fanout ports can affect host link autopsy.
	 * Perform host link autopsy last.
	 */
@@ -2001,7 +2053,8 @@ static void ata_eh_link_report(struct ata_link *link)
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
 
-		if (!(qc->flags & ATA_QCFLAG_FAILED) || qc->dev->link != link ||
+		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
+		    ata_dev_phys_link(qc->dev) != link ||
		    ((qc->flags & ATA_QCFLAG_QUIET) && qc->err_mask == AC_ERR_DEV))
			continue;
 
@@ -2068,7 +2121,7 @@ static void ata_eh_link_report(struct ata_link *link)
		char cdb_buf[70] = "";
 
		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
-		    qc->dev->link != link || !qc->err_mask)
+		    ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
			continue;
 
		if (qc->dma_dir != DMA_NONE) {
@@ -2160,12 +2213,14 @@ void ata_eh_report(struct ata_port *ap)
 }
 
 static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
-			unsigned int *classes, unsigned long deadline)
+			unsigned int *classes, unsigned long deadline,
+			bool clear_classes)
 {
	struct ata_device *dev;
 
-	ata_link_for_each_dev(dev, link)
-		classes[dev->devno] = ATA_DEV_UNKNOWN;
+	if (clear_classes)
+		ata_link_for_each_dev(dev, link)
+			classes[dev->devno] = ATA_DEV_UNKNOWN;
 
	return reset(link, classes, deadline);
 }
@@ -2187,17 +2242,20 @@ int ata_eh_reset(struct ata_link *link, int classify,
		 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
 {
	struct ata_port *ap = link->ap;
+	struct ata_link *slave = ap->slave_link;
	struct ata_eh_context *ehc = &link->eh_context;
+	struct ata_eh_context *sehc = &slave->eh_context;
	unsigned int *classes = ehc->classes;
	unsigned int lflags = link->flags;
	int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
	int max_tries = 0, try = 0;
+	struct ata_link *failed_link;
	struct ata_device *dev;
	unsigned long deadline, now;
	ata_reset_fn_t reset;
	unsigned long flags;
	u32 sstatus;
-	int nr_known, rc;
+	int nr_unknown, rc;
 
	/*
	 * Prepare to reset
@@ -2252,8 +2310,30 @@ int ata_eh_reset(struct ata_link *link, int classify,
	}
 
	if (prereset) {
-		rc = prereset(link,
-			      ata_deadline(jiffies, ATA_EH_PRERESET_TIMEOUT));
+		unsigned long deadline = ata_deadline(jiffies,
+						      ATA_EH_PRERESET_TIMEOUT);
+
+		if (slave) {
+			sehc->i.action &= ~ATA_EH_RESET;
+			sehc->i.action |= ehc->i.action;
+		}
+
+		rc = prereset(link, deadline);
+
+		/* If present, do prereset on slave link too.  Reset
+		 * is skipped iff both master and slave links report
+		 * -ENOENT or clear ATA_EH_RESET.
+		 */
+		if (slave && (rc == 0 || rc == -ENOENT)) {
+			int tmp;
+
+			tmp = prereset(slave, deadline);
+			if (tmp != -ENOENT)
+				rc = tmp;
+
+			ehc->i.action |= sehc->i.action;
+		}
+
		if (rc) {
			if (rc == -ENOENT) {
				ata_link_printk(link, KERN_DEBUG,
@@ -2302,25 +2382,51 @@ int ata_eh_reset(struct ata_link *link, int classify,
		else
			ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
 
-		rc = ata_do_reset(link, reset, classes, deadline);
-		if (rc && rc != -EAGAIN)
+		rc = ata_do_reset(link, reset, classes, deadline, true);
+		if (rc && rc != -EAGAIN) {
+			failed_link = link;
			goto fail;
+		}
+
+		/* hardreset slave link if existent */
+		if (slave && reset == hardreset) {
+			int tmp;
+
+			if (verbose)
+				ata_link_printk(slave, KERN_INFO,
+						"hard resetting link\n");
+
+			ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
+			tmp = ata_do_reset(slave, reset, classes, deadline,
+					   false);
+			switch (tmp) {
+			case -EAGAIN:
+				rc = -EAGAIN;
+			case 0:
+				break;
+			default:
+				failed_link = slave;
+				rc = tmp;
+				goto fail;
+			}
+		}
+
+		/* perform follow-up SRST if necessary */
		if (reset == hardreset &&
		    ata_eh_followup_srst_needed(link, rc, classes)) {
-			/* okay, let's do follow-up softreset */
			reset = softreset;
 
			if (!reset) {
				ata_link_printk(link, KERN_ERR,
						"follow-up softreset required "
						"but no softreset avaliable\n");
+				failed_link = link;
				rc = -EINVAL;
				goto fail;
			}
 
			ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
-			rc = ata_do_reset(link, reset, classes, deadline);
+			rc = ata_do_reset(link, reset, classes, deadline, true);
		}
	} else {
		if (verbose)
@@ -2341,7 +2447,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
		dev->pio_mode = XFER_PIO_0;
		dev->flags &= ~ATA_DFLAG_SLEEPING;
 
-		if (ata_link_offline(link))
+		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
			continue;
 
		/* apply class override */
@@ -2354,6 +2460,8 @@ int ata_eh_reset(struct ata_link *link, int classify,
	/* record current link speed */
	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
		link->sata_spd = (sstatus >> 4) & 0xf;
+	if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
+		slave->sata_spd = (sstatus >> 4) & 0xf;
 
	/* thaw the port */
	if (ata_is_host_link(link))
@@ -2366,12 +2474,17 @@ int ata_eh_reset(struct ata_link *link, int classify,
	 * reset and here.  This race is mediated by cross checking
	 * link onlineness and classification result later.
	 */
-	if (postreset)
+	if (postreset) {
		postreset(link, classes);
+		if (slave)
+			postreset(slave, classes);
+	}
 
	/* clear cached SError */
	spin_lock_irqsave(link->ap->lock, flags);
	link->eh_info.serror = 0;
+	if (slave)
+		slave->eh_info.serror = 0;
	spin_unlock_irqrestore(link->ap->lock, flags);
 
	/* Make sure onlineness and classification result correspond.
@@ -2381,19 +2494,21 @@ int ata_eh_reset(struct ata_link *link, int classify,
	 * link onlineness and classification result, those conditions
	 * can be reliably detected and retried.
	 */
-	nr_known = 0;
+	nr_unknown = 0;
	ata_link_for_each_dev(dev, link) {
		/* convert all ATA_DEV_UNKNOWN to ATA_DEV_NONE */
-		if (classes[dev->devno] == ATA_DEV_UNKNOWN)
+		if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
			classes[dev->devno] = ATA_DEV_NONE;
-		else
-			nr_known++;
+			if (ata_phys_link_online(ata_dev_phys_link(dev)))
+				nr_unknown++;
+		}
	}
 
-	if (classify && !nr_known && ata_link_online(link)) {
+	if (classify && nr_unknown) {
		if (try < max_tries) {
			ata_link_printk(link, KERN_WARNING, "link online but "
				       "device misclassified, retrying\n");
+			failed_link = link;
			rc = -EAGAIN;
			goto fail;
		}
@@ -2404,6 +2519,8 @@ int ata_eh_reset(struct ata_link *link, int classify,
	/* reset successful, schedule revalidation */
	ata_eh_done(link, NULL, ATA_EH_RESET);
+	if (slave)
+		ata_eh_done(slave, NULL, ATA_EH_RESET);
	ehc->last_reset = jiffies;
	ehc->i.action |= ATA_EH_REVALIDATE;
 
@@ -2411,6 +2528,8 @@ int ata_eh_reset(struct ata_link *link, int classify,
 out:
	/* clear hotplug flag */
	ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
+	if (slave)
+		sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
 
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~ATA_PFLAG_RESETTING;
@@ -2431,7 +2550,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
		if (time_before(now, deadline)) {
			unsigned long delta = deadline - now;
 
-			ata_link_printk(link, KERN_WARNING,
+			ata_link_printk(failed_link, KERN_WARNING,
				"reset failed (errno=%d), retrying in %u secs\n",
				rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
 
@@ -2439,13 +2558,92 @@ int ata_eh_reset(struct ata_link *link, int classify,
			delta = schedule_timeout_uninterruptible(delta);
	}
 
-	if (rc == -EPIPE || try == max_tries - 1)
+	if (try == max_tries - 1) {
		sata_down_spd_limit(link);
+		if (slave)
+			sata_down_spd_limit(slave);
+	} else if (rc == -EPIPE)
+		sata_down_spd_limit(failed_link);
+
	if (hardreset)
		reset = hardreset;
	goto retry;
 }
 
+static inline void ata_eh_pull_park_action(struct ata_port *ap)
+{
+	struct ata_link *link;
+	struct ata_device *dev;
+	unsigned long flags;
+
+	/*
+	 * This function can be thought of as an extended version of
+	 * ata_eh_about_to_do() specially crafted to accommodate the
+	 * requirements of ATA_EH_PARK handling. Since the EH thread
+	 * does not leave the do {} while () loop in ata_eh_recover as
+	 * long as the timeout for a park request to *one* device on
+	 * the port has not expired, and since we still want to pick
+	 * up park requests to other devices on the same port or
+	 * timeout updates for the same device, we have to pull
+	 * ATA_EH_PARK actions from eh_info into eh_context.i
+	 * ourselves at the beginning of each pass over the loop.
+	 *
+	 * Additionally, all write accesses to &ap->park_req_pending
+	 * through INIT_COMPLETION() (see below) or complete_all()
+	 * (see ata_scsi_park_store()) are protected by the host lock.
+	 * As a result we have that park_req_pending.done is zero on
+	 * exit from this function, i.e. when ATA_EH_PARK actions for
+	 * *all* devices on port ap have been pulled into the
+	 * respective eh_context structs. If, and only if,
+	 * park_req_pending.done is non-zero by the time we reach
+	 * wait_for_completion_timeout(), another ATA_EH_PARK action
+	 * has been scheduled for at least one of the devices on port
+	 * ap and we have to cycle over the do {} while () loop in
+	 * ata_eh_recover() again.
+	 */
+
+	spin_lock_irqsave(ap->lock, flags);
+	INIT_COMPLETION(ap->park_req_pending);
+	ata_port_for_each_link(link, ap) {
+		ata_link_for_each_dev(dev, link) {
+			struct ata_eh_info *ehi = &link->eh_info;
+
+			link->eh_context.i.dev_action[dev->devno] |=
+				ehi->dev_action[dev->devno] & ATA_EH_PARK;
+			ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
+		}
+	}
+	spin_unlock_irqrestore(ap->lock, flags);
+}
+
+static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
+{
+	struct ata_eh_context *ehc = &dev->link->eh_context;
+	struct ata_taskfile tf;
+	unsigned int err_mask;
+
+	ata_tf_init(dev, &tf);
+	if (park) {
+		ehc->unloaded_mask |= 1 << dev->devno;
+		tf.command = ATA_CMD_IDLEIMMEDIATE;
+		tf.feature = 0x44;
+		tf.lbal = 0x4c;
+		tf.lbam = 0x4e;
+		tf.lbah = 0x55;
+	} else {
+		ehc->unloaded_mask &= ~(1 << dev->devno);
+		tf.command = ATA_CMD_CHK_POWER;
+	}
+
+	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
+	tf.protocol |= ATA_PROT_NODATA;
+	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+	if (park && (err_mask || tf.lbal != 0xc4)) {
+		ata_dev_printk(dev, KERN_ERR, "head unload failed!\n");
+		ehc->unloaded_mask &= ~(1 << dev->devno);
+	}
+}
+
 static int ata_eh_revalidate_and_attach(struct ata_link *link,
					struct ata_device **r_failed_dev)
 {
@@ -2472,7 +2670,7 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
		if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
			WARN_ON(dev->class == ATA_DEV_PMP);
 
-			if (ata_link_offline(link)) {
+			if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
				rc = -EIO;
				goto err;
			}
@@ -2610,6 +2808,53 @@ int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
	return rc;
 }
 
+/**
+ * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
+ * @dev: ATAPI device to clear UA for
+ *
+ * Resets and other operations can make an ATAPI device raise
+ * UNIT ATTENTION which causes the next operation to fail.  This
+ * function clears UA.
+ *
+ * LOCKING:
+ * EH context (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+static int atapi_eh_clear_ua(struct ata_device *dev)
+{
+	int i;
+
+	for (i = 0; i < ATA_EH_UA_TRIES; i++) {
+		u8 sense_buffer[SCSI_SENSE_BUFFERSIZE];
+		u8 sense_key = 0;
+		unsigned int err_mask;
+
+		err_mask = atapi_eh_tur(dev, &sense_key);
+		if (err_mask != 0 && err_mask != AC_ERR_DEV) {
+			ata_dev_printk(dev, KERN_WARNING, "TEST_UNIT_READY "
				"failed (err_mask=0x%x)\n", err_mask);
+			return -EIO;
+		}
+
+		if (!err_mask || sense_key != UNIT_ATTENTION)
+			return 0;
+
+		err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
+		if (err_mask) {
+			ata_dev_printk(dev, KERN_WARNING, "failed to clear "
				"UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
+			return -EIO;
+		}
+	}
+
+	ata_dev_printk(dev, KERN_WARNING,
		"UNIT ATTENTION persists after %d tries\n", ATA_EH_UA_TRIES);
+
+	return 0;
+}
+
 static int ata_link_nr_enabled(struct ata_link *link)
 {
	struct ata_device *dev;
@@ -2697,7 +2942,7 @@ static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
-			sata_down_spd_limit(dev->link);
+			sata_down_spd_limit(ata_dev_phys_link(dev));
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}
@@ -2707,7 +2952,7 @@ static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
		ata_dev_disable(dev);
 
		/* detach if offline */
-		if (ata_link_offline(dev->link))
+		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
			ata_eh_detach_dev(dev);
 
		/* schedule probe if necessary */
@@ -2755,7 +3000,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
	struct ata_device *dev;
	int nr_failed_devs;
	int rc;
-	unsigned long flags;
+	unsigned long flags, deadline;
 
	DPRINTK("ENTER\n");
 
@@ -2829,6 +3074,56 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
		}
	}
 
+	do {
+		unsigned long now;
+
+		/*
+		 * clears ATA_EH_PARK in eh_info and resets
+		 * ap->park_req_pending
+		 */
+		ata_eh_pull_park_action(ap);
+
+		deadline = jiffies;
+		ata_port_for_each_link(link, ap) {
+			ata_link_for_each_dev(dev, link) {
+				struct ata_eh_context *ehc = &link->eh_context;
+				unsigned long tmp;
+
+				if (dev->class != ATA_DEV_ATA)
+					continue;
+				if (!(ehc->i.dev_action[dev->devno] &
+				      ATA_EH_PARK))
+					continue;
+				tmp = dev->unpark_deadline;
+				if (time_before(deadline, tmp))
+					deadline = tmp;
+				else if (time_before_eq(tmp, jiffies))
+					continue;
+				if (ehc->unloaded_mask & (1 << dev->devno))
+					continue;
+
+				ata_eh_park_issue_cmd(dev, 1);
+			}
+		}
+
+		now = jiffies;
+		if (time_before_eq(deadline, now))
+			break;
+
+		deadline = wait_for_completion_timeout(&ap->park_req_pending,
+						       deadline - now);
+	} while (deadline);
+	ata_port_for_each_link(link, ap) {
+		ata_link_for_each_dev(dev, link) {
+			if (!(link->eh_context.unloaded_mask &
+			      (1 << dev->devno)))
+				continue;
+
+			ata_eh_park_issue_cmd(dev, 0);
+			ata_eh_done(link, dev, ATA_EH_PARK);
+		}
+	}
+
	/* the rest */
	ata_port_for_each_link(link, ap) {
		struct ata_eh_context *ehc = &link->eh_context;
@@ -2852,6 +3147,20 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
			ehc->i.flags &= ~ATA_EHI_SETMODE;
		}
 
+		/* If reset has been issued, clear UA to avoid
+		 * disrupting the current users of the device.
+		 */
+		if (ehc->i.flags & ATA_EHI_DID_RESET) {
+			ata_link_for_each_dev(dev, link) {
+				if (dev->class != ATA_DEV_ATAPI)
+					continue;
+				rc = atapi_eh_clear_ua(dev);
+				if (rc)
+					goto dev_fail;
+			}
+		}
+
+		/* configure link power saving */
		if (ehc->i.action & ATA_EH_LPM)
			ata_link_for_each_dev(dev, link)
				ata_dev_enable_pm(dev, ap->pm_policy);
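
Editor's note: the EH code above only consumes ap->slave_link; a low-level driver has to create that link for it to come into play. The sketch below is a hypothetical, minimal illustration of how a driver might do that in its port setup path. It assumes the ata_slave_link_init() helper introduced alongside these EH changes; the example_port_start() name and surrounding error handling are illustrative only and are not part of this commit.

```c
#include <linux/libata.h>

/* Hypothetical port_start() hook for a controller whose single port
 * carries two devices with separate PHY status.  Allocating the slave
 * link here is what makes ata_eh_autopsy()/ata_eh_reset() above fold
 * the slave's EH actions back into the master link.
 */
static int example_port_start(struct ata_port *ap)
{
	/* sets up ap->slave_link; returns 0 or -errno */
	return ata_slave_link_init(ap);
}
```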