author     Paul Mackerras    2007-12-10 15:41:22 +1100
committer  Paul Mackerras    2007-12-10 15:41:22 +1100
commit     b242a60206881559bb3102110048762422e6b74e (patch)
tree       86459efa47b9c3f69d865b4495beede9c4184003 /drivers
parent     e1fd18656c2963e383d67b7006c0e06c9c1d9c79 (diff)
parent     94545baded0bfbabdc30a3a4cb48b3db479dd6ef (diff)
Merge branch 'linux-2.6'
Diffstat (limited to 'drivers')
120 files changed, 2143 insertions, 1584 deletions
diff --git a/drivers/acpi/dispatcher/dsobject.c b/drivers/acpi/dispatcher/dsobject.c index a474ca2334d5..954ac8ce958a 100644 --- a/drivers/acpi/dispatcher/dsobject.c +++ b/drivers/acpi/dispatcher/dsobject.c @@ -137,6 +137,71 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state, return_ACPI_STATUS(status); } } + + /* Special object resolution for elements of a package */ + + if ((op->common.parent->common.aml_opcode == AML_PACKAGE_OP) || + (op->common.parent->common.aml_opcode == + AML_VAR_PACKAGE_OP)) { + /* + * Attempt to resolve the node to a value before we insert it into + * the package. If this is a reference to a common data type, + * resolve it immediately. According to the ACPI spec, package + * elements can only be "data objects" or method references. + * Attempt to resolve to an Integer, Buffer, String or Package. + * If cannot, return the named reference (for things like Devices, + * Methods, etc.) Buffer Fields and Fields will resolve to simple + * objects (int/buf/str/pkg). + * + * NOTE: References to things like Devices, Methods, Mutexes, etc. + * will remain as named references. This behavior is not described + * in the ACPI spec, but it appears to be an oversight. + */ + obj_desc = (union acpi_operand_object *)op->common.node; + + status = + acpi_ex_resolve_node_to_value(ACPI_CAST_INDIRECT_PTR + (struct + acpi_namespace_node, + &obj_desc), + walk_state); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + + switch (op->common.node->type) { + /* + * For these types, we need the actual node, not the subobject. + * However, the subobject got an extra reference count above. + */ + case ACPI_TYPE_MUTEX: + case ACPI_TYPE_METHOD: + case ACPI_TYPE_POWER: + case ACPI_TYPE_PROCESSOR: + case ACPI_TYPE_EVENT: + case ACPI_TYPE_REGION: + case ACPI_TYPE_DEVICE: + case ACPI_TYPE_THERMAL: + + obj_desc = + (union acpi_operand_object *)op->common. + node; + break; + + default: + break; + } + + /* + * If above resolved to an operand object, we are done. Otherwise, + * we have a NS node, we must create the package entry as a named + * reference. + */ + if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) != + ACPI_DESC_TYPE_NAMED) { + goto exit; + } + } } /* Create and init a new internal ACPI object */ @@ -156,6 +221,7 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state, return_ACPI_STATUS(status); } + exit: *obj_desc_ptr = obj_desc; return_ACPI_STATUS(AE_OK); } @@ -356,12 +422,25 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state, arg = arg->common.next; for (i = 0; arg && (i < element_count); i++) { if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) { - - /* This package element is already built, just get it */ - - obj_desc->package.elements[i] = - ACPI_CAST_PTR(union acpi_operand_object, - arg->common.node); + if (arg->common.node->type == ACPI_TYPE_METHOD) { + /* + * A method reference "looks" to the parser to be a method + * invocation, so we special case it here + */ + arg->common.aml_opcode = AML_INT_NAMEPATH_OP; + status = + acpi_ds_build_internal_object(walk_state, + arg, + &obj_desc-> + package. 
+ elements[i]); + } else { + /* This package element is already built, just get it */ + + obj_desc->package.elements[i] = + ACPI_CAST_PTR(union acpi_operand_object, + arg->common.node); + } } else { status = acpi_ds_build_internal_object(walk_state, arg, &obj_desc-> diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index b1fbee3f7fe1..2fe34cc73c13 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -531,6 +531,11 @@ static void acpi_processor_idle(void) case ACPI_STATE_C3: /* + * Must be done before busmaster disable as we might + * need to access HPET ! + */ + acpi_state_timer_broadcast(pr, cx, 1); + /* * disable bus master * bm_check implies we need ARB_DIS * !bm_check implies we need cache flush @@ -557,7 +562,6 @@ static void acpi_processor_idle(void) /* Get start time (ticks) */ t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); /* Invoke C3 */ - acpi_state_timer_broadcast(pr, cx, 1); /* Tell the scheduler that we are going deep-idle: */ sched_clock_idle_sleep_event(); acpi_cstate_enter(cx); @@ -1401,9 +1405,6 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, if (acpi_idle_suspend) return(acpi_idle_enter_c1(dev, state)); - if (pr->flags.bm_check) - acpi_idle_update_bm_rld(pr, cx); - local_irq_disable(); current_thread_info()->status &= ~TS_POLLING; /* @@ -1418,13 +1419,21 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, return 0; } + /* + * Must be done before busmaster disable as we might need to + * access HPET ! + */ + acpi_state_timer_broadcast(pr, cx, 1); + + if (pr->flags.bm_check) + acpi_idle_update_bm_rld(pr, cx); + if (cx->type == ACPI_STATE_C3) ACPI_FLUSH_CPU_CACHE(); t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); /* Tell the scheduler that we are going deep-idle: */ sched_clock_idle_sleep_event(); - acpi_state_timer_broadcast(pr, cx, 1); acpi_idle_do_entry(cx); t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c index c26c61fb36c3..6742d7bc4777 100644 --- a/drivers/acpi/processor_throttling.c +++ b/drivers/acpi/processor_throttling.c @@ -29,6 +29,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> +#include <linux/sched.h> #include <linux/cpufreq.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> @@ -413,7 +414,7 @@ static int acpi_throttling_rdmsr(struct acpi_processor *pr, } else { msr_low = 0; msr_high = 0; - rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, + rdmsr_safe(MSR_IA32_THERM_CONTROL, (u32 *)&msr_low , (u32 *) &msr_high); msr = (msr_high << 32) | msr_low; *value = (acpi_integer) msr; @@ -438,7 +439,7 @@ static int acpi_throttling_wrmsr(struct acpi_processor *pr, acpi_integer value) "HARDWARE addr space,NOT supported yet\n"); } else { msr = value; - wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, + wrmsr_safe(MSR_IA32_THERM_CONTROL, msr & 0xffffffff, msr >> 32); ret = 0; } @@ -572,21 +573,32 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr) return -ENODEV; pr->throttling.state = 0; - local_irq_disable(); + value = 0; ret = acpi_read_throttling_status(pr, &value); if (ret >= 0) { state = acpi_get_throttling_state(pr, value); pr->throttling.state = state; } - local_irq_enable(); return 0; } static int acpi_processor_get_throttling(struct acpi_processor *pr) { - return pr->throttling.acpi_processor_get_throttling(pr); + cpumask_t saved_mask; + int ret; + + /* + * Migrate task to the cpu pointed by pr. 
+ */ + saved_mask = current->cpus_allowed; + set_cpus_allowed(current, cpumask_of_cpu(pr->id)); + ret = pr->throttling.acpi_processor_get_throttling(pr); + /* restore the previous state */ + set_cpus_allowed(current, saved_mask); + + return ret; } static int acpi_processor_get_fadt_info(struct acpi_processor *pr) @@ -717,21 +729,29 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr, if (state < pr->throttling_platform_limit) return -EPERM; - local_irq_disable(); value = 0; ret = acpi_get_throttling_value(pr, state, &value); if (ret >= 0) { acpi_write_throttling_state(pr, value); pr->throttling.state = state; } - local_irq_enable(); return 0; } int acpi_processor_set_throttling(struct acpi_processor *pr, int state) { - return pr->throttling.acpi_processor_set_throttling(pr, state); + cpumask_t saved_mask; + int ret; + /* + * Migrate task to the cpu pointed by pr. + */ + saved_mask = current->cpus_allowed; + set_cpus_allowed(current, cpumask_of_cpu(pr->id)); + ret = pr->throttling.acpi_processor_set_throttling(pr, state); + /* restore the previous state */ + set_cpus_allowed(current, saved_mask); + return ret; } int acpi_processor_get_throttling_info(struct acpi_processor *pr) diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index ed9b407e42d4..54f38c21dd95 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -193,6 +193,8 @@ enum { ATA_FLAG_ACPI_SATA | ATA_FLAG_AN | ATA_FLAG_IPM, AHCI_LFLAG_COMMON = ATA_LFLAG_SKIP_D2H_BSY, + + ICH_MAP = 0x90, /* ICH MAP register */ }; struct ahci_cmd_hdr { @@ -536,6 +538,10 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */ { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */ { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */ + { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci }, /* MCP79 */ + { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci }, /* MCP79 */ + { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci }, /* MCP79 */ + { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci }, /* MCP79 */ { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */ { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */ { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */ @@ -1267,9 +1273,9 @@ static int ahci_do_softreset(struct ata_link *link, unsigned int *class, /* prepare for SRST (AHCI-1.1 10.4.1) */ rc = ahci_kick_engine(ap, 1); - if (rc) + if (rc && rc != -EOPNOTSUPP) ata_link_printk(link, KERN_WARNING, - "failed to reset engine (errno=%d)", rc); + "failed to reset engine (errno=%d)\n", rc); ata_tf_init(link->device, &tf); @@ -1634,7 +1640,7 @@ static void ahci_port_intr(struct ata_port *ap) struct ahci_host_priv *hpriv = ap->host->private_data; int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING); u32 status, qc_active; - int rc, known_irq = 0; + int rc; status = readl(port_mmio + PORT_IRQ_STAT); writel(status, port_mmio + PORT_IRQ_STAT); @@ -1692,80 +1698,12 @@ static void ahci_port_intr(struct ata_port *ap) rc = ata_qc_complete_multiple(ap, qc_active, NULL); - /* If resetting, spurious or invalid completions are expected, - * return unconditionally. - */ - if (resetting) - return; - - if (rc > 0) - return; - if (rc < 0) { + /* while resetting, invalid completions are expected */ + if (unlikely(rc < 0 && !resetting)) { ehi->err_mask |= AC_ERR_HSM; ehi->action |= ATA_EH_SOFTRESET; ata_port_freeze(ap); - return; } - - /* hmmm... a spurious interrupt */ - - /* if !NCQ, ignore. No modern ATA device has broken HSM - * implementation for non-NCQ commands. 
- */ - if (!ap->link.sactive) - return; - - if (status & PORT_IRQ_D2H_REG_FIS) { - if (!pp->ncq_saw_d2h) - ata_port_printk(ap, KERN_INFO, - "D2H reg with I during NCQ, " - "this message won't be printed again\n"); - pp->ncq_saw_d2h = 1; - known_irq = 1; - } - - if (status & PORT_IRQ_DMAS_FIS) { - if (!pp->ncq_saw_dmas) - ata_port_printk(ap, KERN_INFO, - "DMAS FIS during NCQ, " - "this message won't be printed again\n"); - pp->ncq_saw_dmas = 1; - known_irq = 1; - } - - if (status & PORT_IRQ_SDB_FIS) { - const __le32 *f = pp->rx_fis + RX_FIS_SDB; - - if (le32_to_cpu(f[1])) { - /* SDB FIS containing spurious completions - * might be dangerous, whine and fail commands - * with HSM violation. EH will turn off NCQ - * after several such failures. - */ - ata_ehi_push_desc(ehi, - "spurious completions during NCQ " - "issue=0x%x SAct=0x%x FIS=%08x:%08x", - readl(port_mmio + PORT_CMD_ISSUE), - readl(port_mmio + PORT_SCR_ACT), - le32_to_cpu(f[0]), le32_to_cpu(f[1])); - ehi->err_mask |= AC_ERR_HSM; - ehi->action |= ATA_EH_SOFTRESET; - ata_port_freeze(ap); - } else { - if (!pp->ncq_saw_sdb) - ata_port_printk(ap, KERN_INFO, - "spurious SDB FIS %08x:%08x during NCQ, " - "this message won't be printed again\n", - le32_to_cpu(f[0]), le32_to_cpu(f[1])); - pp->ncq_saw_sdb = 1; - } - known_irq = 1; - } - - if (!known_irq) - ata_port_printk(ap, KERN_INFO, "spurious interrupt " - "(irq_stat 0x%x active_tag 0x%x sactive 0x%x)\n", - status, ap->link.active_tag, ap->link.sactive); } static void ahci_irq_clear(struct ata_port *ap) @@ -2269,6 +2207,22 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) return rc; + if (pdev->vendor == PCI_VENDOR_ID_INTEL && + (pdev->device == 0x2652 || pdev->device == 0x2653)) { + u8 map; + + /* ICH6s share the same PCI ID for both piix and ahci + * modes. Enabling ahci mode while MAP indicates + * combined mode is a bad idea. Yield to ata_piix. 
+ */ + pci_read_config_byte(pdev, ICH_MAP, &map); + if (map & 0x3) { + dev_printk(KERN_INFO, &pdev->dev, "controller is in " + "combined mode, can't enable AHCI mode\n"); + return -ENODEV; + } + } + hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); if (!hpriv) return -ENOMEM; diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index 483269db2c7d..bb62a588f489 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c @@ -967,6 +967,20 @@ static int piix_broken_suspend(void) }, }, { + .ident = "TECRA M3", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "Tecra M3"), + }, + }, + { + .ident = "TECRA M4", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "Tecra M4"), + }, + }, + { .ident = "TECRA M5", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), @@ -981,6 +995,20 @@ static int piix_broken_suspend(void) }, }, { + .ident = "TECRA A8", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A8"), + }, + }, + { + .ident = "Satellite R25", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "Satellite R25"), + }, + }, + { .ident = "Satellite U200", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), @@ -988,6 +1016,13 @@ static int piix_broken_suspend(void) }, }, { + .ident = "Satellite U200", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE U200"), + }, + }, + { .ident = "Satellite Pro U200", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 33f06277b3be..e4dea8623a71 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4140,6 +4140,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { /* Devices where NCQ should be avoided */ /* NCQ is slow */ { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ }, + { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, }, /* http://thread.gmane.org/gmane.linux.ide/14907 */ { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ }, /* NCQ is broken */ @@ -4154,23 +4155,6 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, }, { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, }, { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, }, - /* Drives which do spurious command completion */ - { "HTS541680J9SA00", "SB2IC7EP", ATA_HORKAGE_NONCQ, }, - { "HTS541612J9SA00", "SBDIC7JP", ATA_HORKAGE_NONCQ, }, - { "HDT722516DLA380", "V43OA96A", ATA_HORKAGE_NONCQ, }, - { "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, }, - { "Hitachi HTS542525K9SA00", "BBFOC31P", ATA_HORKAGE_NONCQ, }, - { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, }, - { "WDC WD3200AAJS-00RYA0", "12.01B01", ATA_HORKAGE_NONCQ, }, - { "FUJITSU MHV2080BH", "00840028", ATA_HORKAGE_NONCQ, }, - { "ST9120822AS", "3.CLF", ATA_HORKAGE_NONCQ, }, - { "ST9160821AS", "3.CLF", ATA_HORKAGE_NONCQ, }, - { "ST9160821AS", "3.ALD", ATA_HORKAGE_NONCQ, }, - { "ST9160821AS", "3.CCD", ATA_HORKAGE_NONCQ, }, - { "ST3160812AS", "3.ADJ", ATA_HORKAGE_NONCQ, }, - { "ST980813AS", "3.ADB", ATA_HORKAGE_NONCQ, }, - { "SAMSUNG HD401LJ", "ZZ100-15", ATA_HORKAGE_NONCQ, }, - { "Maxtor 7V300F0", "VA111900", ATA_HORKAGE_NONCQ, }, /* devices which puke on READ_NATIVE_MAX */ { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, @@ -4185,6 +4169,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { /* Devices which get the IVB wrong */ { 
"QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, }, { "TSSTcorp CDDVDW SH-S202J", "SB00", ATA_HORKAGE_IVB, }, + { "TSSTcorp CDDVDW SH-S202J", "SB01", ATA_HORKAGE_IVB, }, + { "TSSTcorp CDDVDW SH-S202N", "SB00", ATA_HORKAGE_IVB, }, + { "TSSTcorp CDDVDW SH-S202N", "SB01", ATA_HORKAGE_IVB, }, /* End Marker */ { } @@ -6964,12 +6951,11 @@ int ata_host_start(struct ata_host *host) if (ap->ops->port_start) { rc = ap->ops->port_start(ap); if (rc) { - ata_port_printk(ap, KERN_ERR, "failed to " - "start port (errno=%d)\n", rc); + if (rc != -ENODEV) + dev_printk(KERN_ERR, host->dev, "failed to start port %d (errno=%d)\n", i, rc); goto err_out; } } - ata_eh_freeze_port(ap); } diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 0dac69db1fdf..e6605f038647 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -1850,30 +1850,54 @@ static void ata_eh_link_report(struct ata_link *link) ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : ""); for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { - static const char *dma_str[] = { - [DMA_BIDIRECTIONAL] = "bidi", - [DMA_TO_DEVICE] = "out", - [DMA_FROM_DEVICE] = "in", - [DMA_NONE] = "", - }; struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf; + const u8 *cdb = qc->cdb; + char data_buf[20] = ""; + char cdb_buf[70] = ""; if (!(qc->flags & ATA_QCFLAG_FAILED) || qc->dev->link != link || !qc->err_mask) continue; + if (qc->dma_dir != DMA_NONE) { + static const char *dma_str[] = { + [DMA_BIDIRECTIONAL] = "bidi", + [DMA_TO_DEVICE] = "out", + [DMA_FROM_DEVICE] = "in", + }; + static const char *prot_str[] = { + [ATA_PROT_PIO] = "pio", + [ATA_PROT_DMA] = "dma", + [ATA_PROT_NCQ] = "ncq", + [ATA_PROT_ATAPI] = "pio", + [ATA_PROT_ATAPI_DMA] = "dma", + }; + + snprintf(data_buf, sizeof(data_buf), " %s %u %s", + prot_str[qc->tf.protocol], qc->nbytes, + dma_str[qc->dma_dir]); + } + + if (is_atapi_taskfile(&qc->tf)) + snprintf(cdb_buf, sizeof(cdb_buf), + "cdb %02x %02x %02x %02x %02x %02x %02x %02x " + "%02x %02x %02x %02x %02x %02x %02x %02x\n ", + cdb[0], cdb[1], cdb[2], cdb[3], + cdb[4], cdb[5], cdb[6], cdb[7], + cdb[8], cdb[9], cdb[10], cdb[11], + cdb[12], cdb[13], cdb[14], cdb[15]); + ata_dev_printk(qc->dev, KERN_ERR, "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " - "tag %d cdb 0x%x data %u %s\n " + "tag %d%s\n %s" "res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " "Emask 0x%x (%s)%s\n", cmd->command, cmd->feature, cmd->nsect, cmd->lbal, cmd->lbam, cmd->lbah, cmd->hob_feature, cmd->hob_nsect, cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah, - cmd->device, qc->tag, qc->cdb[0], qc->nbytes, - dma_str[qc->dma_dir], + cmd->device, qc->tag, data_buf, cdb_buf, res->command, res->feature, res->nsect, res->lbal, res->lbam, res->lbah, res->hob_feature, res->hob_nsect, diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c index c5779ad4abca..3cc27b514654 100644 --- a/drivers/ata/pata_amd.c +++ b/drivers/ata/pata_amd.c @@ -25,7 +25,7 @@ #include <linux/libata.h> #define DRV_NAME "pata_amd" -#define DRV_VERSION "0.3.9" +#define DRV_VERSION "0.3.10" /** * timing_setup - shared timing computation and load @@ -115,7 +115,8 @@ static void timing_setup(struct ata_port *ap, struct ata_device *adev, int offse } /* UDMA timing */ - pci_write_config_byte(pdev, offset + 0x10 + (3 - dn), t); + if (at.udma) + pci_write_config_byte(pdev, offset + 0x10 + (3 - dn), t); } /** diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c index bb250a48e27c..67e574de31e8 100644 --- 
a/drivers/ata/pata_at32.c +++ b/drivers/ata/pata_at32.c @@ -28,7 +28,7 @@ #include <asm/arch/smc.h> #define DRV_NAME "pata_at32" -#define DRV_VERSION "0.0.2" +#define DRV_VERSION "0.0.3" /* * CompactFlash controller memory layout relative to the base address: @@ -64,6 +64,8 @@ * Mode 2 | 8.3 | 240 ns | 0x07 * Mode 3 | 11.1 | 180 ns | 0x0f * Mode 4 | 16.7 | 120 ns | 0x1f + * + * Alter PIO_MASK below according to table to set maximal PIO mode. */ #define PIO_MASK (0x1f) @@ -85,36 +87,40 @@ struct at32_ide_info { */ static int pata_at32_setup_timing(struct device *dev, struct at32_ide_info *info, - const struct ata_timing *timing) + const struct ata_timing *ata) { - /* These two values are found through testing */ - const int min_recover = 25; - const int ncs_hold = 15; - struct smc_config *smc = &info->smc; + struct smc_timing timing; int active; int recover; + memset(&timing, 0, sizeof(struct smc_timing)); + /* Total cycle time */ - smc->read_cycle = timing->cyc8b; + timing.read_cycle = ata->cyc8b; /* DIOR <= CFIOR timings */ - smc->nrd_setup = timing->setup; - smc->nrd_pulse = timing->act8b; + timing.nrd_setup = ata->setup; + timing.nrd_pulse = ata->act8b; + timing.nrd_recover = ata->rec8b; + + /* Convert nanosecond timing to clock cycles */ + smc_set_timing(smc, &timing); - /* Compute recover, extend total cycle if needed */ - active = smc->nrd_setup + smc->nrd_pulse; + /* Add one extra cycle setup due to signal ring */ + smc->nrd_setup = smc->nrd_setup + 1; + + active = smc->nrd_setup + smc->nrd_pulse; recover = smc->read_cycle - active; - if (recover < min_recover) { - smc->read_cycle = active + min_recover; - recover = min_recover; - } + /* Need at least two cycles recovery */ + if (recover < 2) + smc->read_cycle = active + 2; /* (CS0, CS1, DIR, OE) <= (CFCE1, CFCE2, CFRNW, NCSX) timings */ - smc->ncs_read_setup = 0; - smc->ncs_read_pulse = active + ncs_hold; + smc->ncs_read_setup = 1; + smc->ncs_read_pulse = smc->read_cycle - 2; /* Write timings same as read timings */ smc->write_cycle = smc->read_cycle; @@ -123,11 +129,13 @@ static int pata_at32_setup_timing(struct device *dev, smc->ncs_write_setup = smc->ncs_read_setup; smc->ncs_write_pulse = smc->ncs_read_pulse; - /* Do some debugging output */ - dev_dbg(dev, "SMC: C=%d S=%d P=%d R=%d NCSS=%d NCSP=%d NCSR=%d\n", + /* Do some debugging output of ATA and SMC timings */ + dev_dbg(dev, "ATA: C=%d S=%d P=%d R=%d\n", + ata->cyc8b, ata->setup, ata->act8b, ata->rec8b); + + dev_dbg(dev, "SMC: C=%d S=%d P=%d NS=%d NP=%d\n", smc->read_cycle, smc->nrd_setup, smc->nrd_pulse, - recover, smc->ncs_read_setup, smc->ncs_read_pulse, - smc->read_cycle - smc->ncs_read_pulse); + smc->ncs_read_setup, smc->ncs_read_pulse); /* Finally, configure the SMC */ return smc_set_configuration(info->cs, smc); @@ -182,7 +190,6 @@ static struct scsi_host_template at32_sht = { }; static struct ata_port_operations at32_port_ops = { - .port_disable = ata_port_disable, .set_piomode = pata_at32_set_piomode, .tf_load = ata_tf_load, .tf_read = ata_tf_read, @@ -203,7 +210,6 @@ static struct ata_port_operations at32_port_ops = { .irq_clear = pata_at32_irq_clear, .irq_on = ata_irq_on, - .irq_ack = ata_irq_ack, .port_start = ata_sff_port_start, }; @@ -223,8 +229,7 @@ static int __init pata_at32_init_one(struct device *dev, /* Setup ATA bindings */ ap->ops = &at32_port_ops; ap->pio_mask = PIO_MASK; - ap->flags = ATA_FLAG_MMIO | ATA_FLAG_SLAVE_POSS - | ATA_FLAG_PIO_POLLING; + ap->flags |= ATA_FLAG_MMIO | ATA_FLAG_SLAVE_POSS; /* * Since all 8-bit taskfile transfers has to go on 
the lower @@ -357,12 +362,12 @@ static int __init pata_at32_probe(struct platform_device *pdev) info->smc.tdf_mode = 0; /* TDF optimization disabled */ info->smc.tdf_cycles = 0; /* No TDF wait cycles */ - /* Setup ATA timing */ + /* Setup SMC to ATA timing */ ret = pata_at32_setup_timing(dev, info, &initial_timing); if (ret) goto err_setup_timing; - /* Setup ATA addresses */ + /* Map ATA address space */ ret = -ENOMEM; info->ide_addr = devm_ioremap(dev, info->res_ide.start, 16); info->alt_addr = devm_ioremap(dev, info->res_alt.start, 16); @@ -373,7 +378,7 @@ static int __init pata_at32_probe(struct platform_device *pdev) pata_at32_debug_bus(dev, info); #endif - /* Register ATA device */ + /* Setup and register ATA device */ ret = pata_at32_init_one(dev, info); if (ret) goto err_ata_device; diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c index 81db405a5445..088a41f4e656 100644 --- a/drivers/ata/pata_bf54x.c +++ b/drivers/ata/pata_bf54x.c @@ -1489,6 +1489,8 @@ static int __devinit bfin_atapi_probe(struct platform_device *pdev) int board_idx = 0; struct resource *res; struct ata_host *host; + unsigned int fsclk = get_sclk(); + int udma_mode = 5; const struct ata_port_info *ppi[] = { &bfin_port_info[board_idx], NULL }; @@ -1507,6 +1509,11 @@ static int __devinit bfin_atapi_probe(struct platform_device *pdev) if (res == NULL) return -EINVAL; + while (bfin_port_info[board_idx].udma_mask>0 && udma_fsclk[udma_mode] > fsclk) { + udma_mode--; + bfin_port_info[board_idx].udma_mask >>= 1; + } + /* * Now that that's out of the way, wire up the port.. */ diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c index a4175fbdd170..453d72bf2598 100644 --- a/drivers/ata/pata_via.c +++ b/drivers/ata/pata_via.c @@ -63,7 +63,7 @@ #include <linux/dmi.h> #define DRV_NAME "pata_via" -#define DRV_VERSION "0.3.2" +#define DRV_VERSION "0.3.3" /* * The following comes directly from Vojtech Pavlik's ide/pci/via82cxxx @@ -296,7 +296,7 @@ static void via_do_set_mode(struct ata_port *ap, struct ata_device *adev, int mo } /* Set UDMA unless device is not UDMA capable */ - if (udma_type) { + if (udma_type && t.udma) { u8 cable80_status; /* Get 80-wire cable detection bit */ diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index a43f64d2775b..fe0105d35bae 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -164,10 +164,14 @@ enum { MV_PCI_ERR_ATTRIBUTE = 0x1d48, MV_PCI_ERR_COMMAND = 0x1d50, - PCI_IRQ_CAUSE_OFS = 0x1d58, - PCI_IRQ_MASK_OFS = 0x1d5c, + PCI_IRQ_CAUSE_OFS = 0x1d58, + PCI_IRQ_MASK_OFS = 0x1d5c, PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */ + PCIE_IRQ_CAUSE_OFS = 0x1900, + PCIE_IRQ_MASK_OFS = 0x1910, + PCIE_UNMASK_ALL_IRQS = 0x70a, /* assorted bits */ + HC_MAIN_IRQ_CAUSE_OFS = 0x1d60, HC_MAIN_IRQ_MASK_OFS = 0x1d64, PORT0_ERR = (1 << 0), /* shift by port # */ @@ -303,6 +307,7 @@ enum { MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */ MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */ MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */ + MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */ /* Port private flags (pp_flags) */ MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? 
*/ @@ -388,7 +393,15 @@ struct mv_port_signal { u32 pre; }; -struct mv_host_priv; +struct mv_host_priv { + u32 hp_flags; + struct mv_port_signal signal[8]; + const struct mv_hw_ops *ops; + u32 irq_cause_ofs; + u32 irq_mask_ofs; + u32 unmask_all_irqs; +}; + struct mv_hw_ops { void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int port); @@ -401,12 +414,6 @@ struct mv_hw_ops { void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio); }; -struct mv_host_priv { - u32 hp_flags; - struct mv_port_signal signal[8]; - const struct mv_hw_ops *ops; -}; - static void mv_irq_clear(struct ata_port *ap); static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val); static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); @@ -631,11 +638,13 @@ static const struct pci_device_id mv_pci_tbl[] = { /* Adaptec 1430SA */ { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 }, - { PCI_VDEVICE(TTI, 0x2310), chip_7042 }, - - /* add Marvell 7042 support */ + /* Marvell 7042 support */ { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 }, + /* Highpoint RocketRAID PCIe series */ + { PCI_VDEVICE(TTI, 0x2300), chip_7042 }, + { PCI_VDEVICE(TTI, 0x2310), chip_7042 }, + { } /* terminate list */ }; @@ -1648,13 +1657,14 @@ static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc) static void mv_pci_error(struct ata_host *host, void __iomem *mmio) { + struct mv_host_priv *hpriv = host->private_data; struct ata_port *ap; struct ata_queued_cmd *qc; struct ata_eh_info *ehi; unsigned int i, err_mask, printed = 0; u32 err_cause; - err_cause = readl(mmio + PCI_IRQ_CAUSE_OFS); + err_cause = readl(mmio + hpriv->irq_cause_ofs); dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", err_cause); @@ -1662,7 +1672,7 @@ static void mv_pci_error(struct ata_host *host, void __iomem *mmio) DPRINTK("All regs @ PCI error\n"); mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev)); - writelfl(0, mmio + PCI_IRQ_CAUSE_OFS); + writelfl(0, mmio + hpriv->irq_cause_ofs); for (i = 0; i < host->n_ports; i++) { ap = host->ports[i]; @@ -1926,6 +1936,8 @@ static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, #define ZERO(reg) writel(0, mmio + (reg)) static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio) { + struct ata_host *host = dev_get_drvdata(&pdev->dev); + struct mv_host_priv *hpriv = host->private_data; u32 tmp; tmp = readl(mmio + MV_PCI_MODE); @@ -1937,8 +1949,8 @@ static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio) writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT); ZERO(HC_MAIN_IRQ_MASK_OFS); ZERO(MV_PCI_SERR_MASK); - ZERO(PCI_IRQ_CAUSE_OFS); - ZERO(PCI_IRQ_MASK_OFS); + ZERO(hpriv->irq_cause_ofs); + ZERO(hpriv->irq_mask_ofs); ZERO(MV_PCI_ERR_LOW_ADDRESS); ZERO(MV_PCI_ERR_HIGH_ADDRESS); ZERO(MV_PCI_ERR_ATTRIBUTE); @@ -2170,7 +2182,7 @@ static void mv_phy_reset(struct ata_port *ap, unsigned int *class, mv_scr_read(ap, SCR_ERROR, &serror); mv_scr_read(ap, SCR_CONTROL, &scontrol); DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x " - "SCtrl 0x%08x\n", status, serror, scontrol); + "SCtrl 0x%08x\n", sstatus, serror, scontrol); } #endif @@ -2490,6 +2502,16 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx) break; case chip_7042: + hp_flags |= MV_HP_PCIE; + if (pdev->vendor == PCI_VENDOR_ID_TTI && + (pdev->device == 0x2300 || pdev->device == 0x2310)) + { + printk(KERN_WARNING "sata_mv: Highpoint RocketRAID BIOS" + " will CORRUPT DATA on attached drives when" + " configured as \"Legacy\". 
BEWARE!\n"); + printk(KERN_WARNING "sata_mv: Use BIOS \"JBOD\" volumes" + " instead for safety.\n"); + } case chip_6042: hpriv->ops = &mv6xxx_ops; hp_flags |= MV_HP_GEN_IIE; @@ -2516,6 +2538,15 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx) } hpriv->hp_flags = hp_flags; + if (hp_flags & MV_HP_PCIE) { + hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS; + hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS; + hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS; + } else { + hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS; + hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS; + hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS; + } return 0; } @@ -2595,10 +2626,10 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx) } /* Clear any currently outstanding host interrupt conditions */ - writelfl(0, mmio + PCI_IRQ_CAUSE_OFS); + writelfl(0, mmio + hpriv->irq_cause_ofs); /* and unmask interrupt generation for host regs */ - writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS); + writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs); if (IS_GEN_I(hpriv)) writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS); @@ -2609,8 +2640,8 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx) "PCI int cause/mask=0x%08x/0x%08x\n", readl(mmio + HC_MAIN_IRQ_CAUSE_OFS), readl(mmio + HC_MAIN_IRQ_MASK_OFS), - readl(mmio + PCI_IRQ_CAUSE_OFS), - readl(mmio + PCI_IRQ_MASK_OFS)); + readl(mmio + hpriv->irq_cause_ofs), + readl(mmio + hpriv->irq_mask_ofs)); done: return rc; diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index 44f9e5d9e362..ed5dc7cb50cd 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c @@ -791,11 +791,13 @@ static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc) static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf) { - /* Since commands where a result TF is requested are not - executed in ADMA mode, the only time this function will be called - in ADMA mode will be if a command fails. In this case we - don't care about going into register mode with ADMA commands - pending, as the commands will all shortly be aborted anyway. */ + /* Other than when internal or pass-through commands are executed, + the only time this function will be called in ADMA mode will be + if a command fails. In the failure case we don't care about going + into register mode with ADMA commands pending, as the commands will + all shortly be aborted anyway. We assume that NCQ commands are not + issued via passthrough, which is the only way that switching into + ADMA mode could abort outstanding commands. */ nv_adma_register_mode(ap); ata_tf_read(ap, tf); @@ -1359,11 +1361,9 @@ static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc) struct nv_adma_port_priv *pp = qc->ap->private_data; /* ADMA engine can only be used for non-ATAPI DMA commands, - or interrupt-driven no-data commands, where a result taskfile - is not required. */ + or interrupt-driven no-data commands. 
*/ if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) || - (qc->tf.flags & ATA_TFLAG_POLLING) || - (qc->flags & ATA_QCFLAG_RESULT_TF)) + (qc->tf.flags & ATA_TFLAG_POLLING)) return 1; if ((qc->flags & ATA_QCFLAG_DMAMAP) || @@ -1381,6 +1381,8 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc) NV_CPB_CTL_IEN; if (nv_adma_use_reg_mode(qc)) { + BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) && + (qc->flags & ATA_QCFLAG_DMAMAP)); nv_adma_register_mode(qc->ap); ata_qc_prep(qc); return; @@ -1425,9 +1427,21 @@ static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc) VPRINTK("ENTER\n"); + /* We can't handle result taskfile with NCQ commands, since + retrieving the taskfile switches us out of ADMA mode and would abort + existing commands. */ + if (unlikely(qc->tf.protocol == ATA_PROT_NCQ && + (qc->flags & ATA_QCFLAG_RESULT_TF))) { + ata_dev_printk(qc->dev, KERN_ERR, + "NCQ w/ RESULT_TF not allowed\n"); + return AC_ERR_SYSTEM; + } + if (nv_adma_use_reg_mode(qc)) { /* use ATA register mode */ VPRINTK("using ATA register mode: 0x%lx\n", qc->flags); + BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) && + (qc->flags & ATA_QCFLAG_DMAMAP)); nv_adma_register_mode(qc->ap); return ata_qc_issue_prot(qc); } else diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c index c99e43b837f5..17d54315e146 100644 --- a/drivers/char/apm-emulation.c +++ b/drivers/char/apm-emulation.c @@ -295,7 +295,6 @@ static int apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg) { struct apm_user *as = filp->private_data; - unsigned long flags; int err = -EINVAL; if (!as->suser || !as->writer) @@ -331,10 +330,16 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg) * Wait for the suspend/resume to complete. If there * are pending acknowledges, we wait here for them. */ - flags = current->flags; + freezer_do_not_count(); wait_event(apm_suspend_waitqueue, as->suspend_state == SUSPEND_DONE); + + /* + * Since we are waiting until the suspend is done, the + * try_to_freeze() in freezer_count() will not trigger + */ + freezer_count(); } else { as->suspend_state = SUSPEND_WAIT; mutex_unlock(&state_lock); @@ -362,14 +367,10 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg) * Wait for the suspend/resume to complete. If there * are pending acknowledges, we wait here for them. 
*/ - flags = current->flags; - - wait_event_interruptible(apm_suspend_waitqueue, + wait_event_freezable(apm_suspend_waitqueue, as->suspend_state == SUSPEND_DONE); } - current->flags = flags; - mutex_lock(&state_lock); err = as->suspend_result; as->suspend_state = SUSPEND_NONE; diff --git a/drivers/char/cs5535_gpio.c b/drivers/char/cs5535_gpio.c index fe6d2407baed..c2d23cae9515 100644 --- a/drivers/char/cs5535_gpio.c +++ b/drivers/char/cs5535_gpio.c @@ -104,6 +104,11 @@ static ssize_t cs5535_gpio_write(struct file *file, const char __user *data, for (j = 0; j < ARRAY_SIZE(rm); j++) { if (c == rm[j].on) { outl(m1, base + rm[j].wr_offset); + /* If enabling output, turn off AUX 1 and AUX 2 */ + if (c == 'O') { + outl(m0, base + 0x10); + outl(m0, base + 0x14); + } break; } else if (c == rm[j].off) { outl(m0, base + rm[j].wr_offset); diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 5fd6688a444a..ddd3a259cea1 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -12,7 +12,7 @@ if CRYPTO_HW config CRYPTO_DEV_PADLOCK tristate "Support for VIA PadLock ACE" - depends on X86_32 + depends on X86_32 && !UML select CRYPTO_ALGAPI help Some VIA processors come with an integrated crypto engine diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index 5c82ec7f8bbd..3ee60d26e3a2 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -337,11 +337,10 @@ static int coretemp_cpu_callback(struct notifier_block *nfb, switch (action) { case CPU_ONLINE: - case CPU_ONLINE_FROZEN: + case CPU_DOWN_FAILED: coretemp_device_add(cpu); break; - case CPU_DEAD: - case CPU_DEAD_FROZEN: + case CPU_DOWN_PREPARE: coretemp_device_remove(cpu); break; } diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index f59aecf5ec15..fd9c5d51870a 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c @@ -267,13 +267,12 @@ static int ads7846_read12_ser(struct device *dev, unsigned command) ts->irq_disabled = 0; enable_irq(spi->irq); - if (req->msg.status) - status = req->msg.status; - - /* on-wire is a must-ignore bit, a BE12 value, then padding */ - sample = be16_to_cpu(req->sample); - sample = sample >> 3; - sample &= 0x0fff; + if (status == 0) { + /* on-wire is a must-ignore bit, a BE12 value, then padding */ + sample = be16_to_cpu(req->sample); + sample = sample >> 3; + sample &= 0x0fff; + } kfree(req); return status ? status : sample; diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c index c6df2925ebd0..d6952959d72a 100644 --- a/drivers/isdn/i4l/isdn_common.c +++ b/drivers/isdn/i4l/isdn_common.c @@ -1515,6 +1515,7 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg) if (copy_from_user(&iocts, argp, sizeof(isdn_ioctl_struct))) return -EFAULT; + iocts.drvid[sizeof(iocts.drvid)-1] = 0; if (strlen(iocts.drvid)) { if ((p = strchr(iocts.drvid, ','))) *p = 0; @@ -1599,6 +1600,7 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg) if (copy_from_user(&iocts, argp, sizeof(isdn_ioctl_struct))) return -EFAULT; + iocts.drvid[sizeof(iocts.drvid)-1] = 0; if (strlen(iocts.drvid)) { drvidx = -1; for (i = 0; i < ISDN_MAX_DRIVERS; i++) @@ -1643,7 +1645,7 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg) } else { p = (char __user *) iocts.arg; for (i = 0; i < 10; i++) { - sprintf(bname, "%s%s", + snprintf(bname, sizeof(bname), "%s%s", strlen(dev->drv[drvidx]->msn2eaz[i]) ? dev->drv[drvidx]->msn2eaz[i] : "_", (i < 9) ? 
"," : "\0"); @@ -1673,6 +1675,7 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg) char *p; if (copy_from_user(&iocts, argp, sizeof(isdn_ioctl_struct))) return -EFAULT; + iocts.drvid[sizeof(iocts.drvid)-1] = 0; if (strlen(iocts.drvid)) { if ((p = strchr(iocts.drvid, ','))) *p = 0; diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c index 4211293ce862..ba8b04b03b9f 100644 --- a/drivers/leds/led-class.c +++ b/drivers/leds/led-class.c @@ -111,7 +111,7 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev) write_unlock(&leds_list_lock); #ifdef CONFIG_LEDS_TRIGGERS - rwlock_init(&led_cdev->trigger_lock); + init_rwsem(&led_cdev->trigger_lock); rc = device_create_file(led_cdev->dev, &dev_attr_trigger); if (rc) @@ -147,10 +147,10 @@ void led_classdev_unregister(struct led_classdev *led_cdev) device_remove_file(led_cdev->dev, &dev_attr_brightness); #ifdef CONFIG_LEDS_TRIGGERS device_remove_file(led_cdev->dev, &dev_attr_trigger); - write_lock(&led_cdev->trigger_lock); + down_write(&led_cdev->trigger_lock); if (led_cdev->trigger) led_trigger_set(led_cdev, NULL); - write_unlock(&led_cdev->trigger_lock); + up_write(&led_cdev->trigger_lock); #endif device_unregister(led_cdev->dev); diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c index 575368c2b100..0bdb786210b1 100644 --- a/drivers/leds/led-triggers.c +++ b/drivers/leds/led-triggers.c @@ -19,13 +19,14 @@ #include <linux/device.h> #include <linux/sysdev.h> #include <linux/timer.h> +#include <linux/rwsem.h> #include <linux/leds.h> #include "leds.h" /* * Nests outside led_cdev->trigger_lock */ -static DEFINE_RWLOCK(triggers_list_lock); +static DECLARE_RWSEM(triggers_list_lock); static LIST_HEAD(trigger_list); ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr, @@ -44,24 +45,24 @@ ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr, trigger_name[len - 1] = '\0'; if (!strcmp(trigger_name, "none")) { - write_lock(&led_cdev->trigger_lock); + down_write(&led_cdev->trigger_lock); led_trigger_set(led_cdev, NULL); - write_unlock(&led_cdev->trigger_lock); + up_write(&led_cdev->trigger_lock); return count; } - read_lock(&triggers_list_lock); + down_read(&triggers_list_lock); list_for_each_entry(trig, &trigger_list, next_trig) { if (!strcmp(trigger_name, trig->name)) { - write_lock(&led_cdev->trigger_lock); + down_write(&led_cdev->trigger_lock); led_trigger_set(led_cdev, trig); - write_unlock(&led_cdev->trigger_lock); + up_write(&led_cdev->trigger_lock); - read_unlock(&triggers_list_lock); + up_read(&triggers_list_lock); return count; } } - read_unlock(&triggers_list_lock); + up_read(&triggers_list_lock); return -EINVAL; } @@ -74,8 +75,8 @@ ssize_t led_trigger_show(struct device *dev, struct device_attribute *attr, struct led_trigger *trig; int len = 0; - read_lock(&triggers_list_lock); - read_lock(&led_cdev->trigger_lock); + down_read(&triggers_list_lock); + down_read(&led_cdev->trigger_lock); if (!led_cdev->trigger) len += sprintf(buf+len, "[none] "); @@ -89,8 +90,8 @@ ssize_t led_trigger_show(struct device *dev, struct device_attribute *attr, else len += sprintf(buf+len, "%s ", trig->name); } - read_unlock(&led_cdev->trigger_lock); - read_unlock(&triggers_list_lock); + up_read(&led_cdev->trigger_lock); + up_read(&triggers_list_lock); len += sprintf(len+buf, "\n"); return len; @@ -145,14 +146,14 @@ void led_trigger_set_default(struct led_classdev *led_cdev) if (!led_cdev->default_trigger) return; - read_lock(&triggers_list_lock); 
- write_lock(&led_cdev->trigger_lock); + down_read(&triggers_list_lock); + down_write(&led_cdev->trigger_lock); list_for_each_entry(trig, &trigger_list, next_trig) { if (!strcmp(led_cdev->default_trigger, trig->name)) led_trigger_set(led_cdev, trig); } - write_unlock(&led_cdev->trigger_lock); - read_unlock(&triggers_list_lock); + up_write(&led_cdev->trigger_lock); + up_read(&triggers_list_lock); } int led_trigger_register(struct led_trigger *trigger) @@ -163,18 +164,18 @@ int led_trigger_register(struct led_trigger *trigger) INIT_LIST_HEAD(&trigger->led_cdevs); /* Add to the list of led triggers */ - write_lock(&triggers_list_lock); + down_write(&triggers_list_lock); list_add_tail(&trigger->next_trig, &trigger_list); - write_unlock(&triggers_list_lock); + up_write(&triggers_list_lock); /* Register with any LEDs that have this as a default trigger */ read_lock(&leds_list_lock); list_for_each_entry(led_cdev, &leds_list, node) { - write_lock(&led_cdev->trigger_lock); + down_write(&led_cdev->trigger_lock); if (!led_cdev->trigger && led_cdev->default_trigger && !strcmp(led_cdev->default_trigger, trigger->name)) led_trigger_set(led_cdev, trigger); - write_unlock(&led_cdev->trigger_lock); + up_write(&led_cdev->trigger_lock); } read_unlock(&leds_list_lock); @@ -206,17 +207,17 @@ void led_trigger_unregister(struct led_trigger *trigger) struct led_classdev *led_cdev; /* Remove from the list of led triggers */ - write_lock(&triggers_list_lock); + down_write(&triggers_list_lock); list_del(&trigger->next_trig); - write_unlock(&triggers_list_lock); + up_write(&triggers_list_lock); /* Remove anyone actively using this trigger */ read_lock(&leds_list_lock); list_for_each_entry(led_cdev, &leds_list, node) { - write_lock(&led_cdev->trigger_lock); + down_write(&led_cdev->trigger_lock); if (led_cdev->trigger == trigger) led_trigger_set(led_cdev, NULL); - write_unlock(&led_cdev->trigger_lock); + up_write(&led_cdev->trigger_lock); } read_unlock(&leds_list_lock); } diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index a6469218f194..365024b83d3d 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -176,8 +176,6 @@ mmc_spi_readbytes(struct mmc_spi_host *host, unsigned len) DMA_FROM_DEVICE); status = spi_sync(host->spi, &host->readback); - if (status == 0) - status = host->readback.status; if (host->dma_dev) dma_sync_single_for_cpu(host->dma_dev, @@ -480,8 +478,6 @@ mmc_spi_command_send(struct mmc_spi_host *host, DMA_BIDIRECTIONAL); } status = spi_sync(host->spi, &host->m); - if (status == 0) - status = host->m.status; if (host->dma_dev) dma_sync_single_for_cpu(host->dma_dev, @@ -624,8 +620,6 @@ mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t) DMA_BIDIRECTIONAL); status = spi_sync(spi, &host->m); - if (status == 0) - status = host->m.status; if (status != 0) { dev_dbg(&spi->dev, "write error (%d)\n", status); @@ -726,8 +720,6 @@ mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t) } status = spi_sync(spi, &host->m); - if (status == 0) - status = host->m.status; if (host->dma_dev) { dma_sync_single_for_cpu(host->dma_dev, @@ -905,8 +897,6 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd, DMA_BIDIRECTIONAL); tmp = spi_sync(spi, &host->m); - if (tmp == 0) - tmp = host->m.status; if (host->dma_dev) dma_sync_single_for_cpu(host->dma_dev, diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index e8d69b0adf90..6cde4edc846b 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -888,7 +888,7 @@ config SMC91X 
tristate "SMC 91C9x/91C1xxx support" select CRC32 select MII - depends on ARM || REDWOOD_5 || REDWOOD_6 || M32R || SUPERH || SOC_AU1X00 || BFIN + depends on ARM || REDWOOD_5 || REDWOOD_6 || M32R || SUPERH || SOC_AU1X00 || BLACKFIN help This is a driver for SMC's 91x series of Ethernet chipsets, including the SMC91C94 and the SMC91C111. Say Y if you want it @@ -926,7 +926,7 @@ config SMC911X tristate "SMSC LAN911[5678] support" select CRC32 select MII - depends on ARCH_PXA || SUPERH + depends on ARCH_PXA || SH_MAGIC_PANEL_R2 help This is a driver for SMSC's LAN911x series of Ethernet chipsets including the new LAN9115, LAN9116, LAN9117, and LAN9118. @@ -2588,6 +2588,7 @@ config MLX4_DEBUG config TEHUTI tristate "Tehuti Networks 10G Ethernet" depends on PCI + select ZLIB_INFLATE help Tehuti Networks 10G Ethernet NIC diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c index eebf5bb2b03a..e7fdd81919bd 100644 --- a/drivers/net/amd8111e.c +++ b/drivers/net/amd8111e.c @@ -1340,7 +1340,9 @@ static int amd8111e_close(struct net_device * dev) struct amd8111e_priv *lp = netdev_priv(dev); netif_stop_queue(dev); +#ifdef CONFIG_AMD8111E_NAPI napi_disable(&lp->napi); +#endif spin_lock_irq(&lp->lock); @@ -1372,7 +1374,9 @@ static int amd8111e_open(struct net_device * dev ) dev->name, dev)) return -EAGAIN; +#ifdef CONFIG_AMD8111E_NAPI napi_enable(&lp->napi); +#endif spin_lock_irq(&lp->lock); @@ -1380,7 +1384,9 @@ static int amd8111e_open(struct net_device * dev ) if(amd8111e_restart(dev)){ spin_unlock_irq(&lp->lock); +#ifdef CONFIG_AMD8111E_NAPI napi_disable(&lp->napi); +#endif if (dev->irq) free_irq(dev->irq, dev); return -ENOMEM; diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c index 0b99b5549295..eb971755a3ff 100644 --- a/drivers/net/bfin_mac.c +++ b/drivers/net/bfin_mac.c @@ -676,7 +676,7 @@ static void bf537mac_rx(struct net_device *dev) skb->protocol = eth_type_trans(skb, dev); #if defined(BFIN_MAC_CSUM_OFFLOAD) skb->csum = current_rx_ptr->status.ip_payload_csum; - skb->ip_summed = CHECKSUM_PARTIAL; + skb->ip_summed = CHECKSUM_COMPLETE; #endif netif_rx(skb); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 423298c84a1d..b0b26036266b 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -74,6 +74,7 @@ #include <linux/ethtool.h> #include <linux/if_vlan.h> #include <linux/if_bonding.h> +#include <linux/jiffies.h> #include <net/route.h> #include <net/net_namespace.h> #include "bonding.h" @@ -174,6 +175,7 @@ struct bond_parm_tbl bond_mode_tbl[] = { struct bond_parm_tbl xmit_hashtype_tbl[] = { { "layer2", BOND_XMIT_POLICY_LAYER2}, { "layer3+4", BOND_XMIT_POLICY_LAYER34}, +{ "layer2+3", BOND_XMIT_POLICY_LAYER23}, { NULL, -1}, }; @@ -2722,8 +2724,8 @@ void bond_loadbalance_arp_mon(struct work_struct *work) */ bond_for_each_slave(bond, slave, i) { if (slave->link != BOND_LINK_UP) { - if (((jiffies - slave->dev->trans_start) <= delta_in_ticks) && - ((jiffies - slave->dev->last_rx) <= delta_in_ticks)) { + if (time_before_eq(jiffies, slave->dev->trans_start + delta_in_ticks) && + time_before_eq(jiffies, slave->dev->last_rx + delta_in_ticks)) { slave->link = BOND_LINK_UP; slave->state = BOND_STATE_ACTIVE; @@ -2754,8 +2756,8 @@ void bond_loadbalance_arp_mon(struct work_struct *work) * when the source ip is 0, so don't take the link down * if we don't know our ip yet */ - if (((jiffies - slave->dev->trans_start) >= (2*delta_in_ticks)) || - (((jiffies - slave->dev->last_rx) >= (2*delta_in_ticks)) && + if (time_after_eq(jiffies, 
slave->dev->trans_start + 2*delta_in_ticks) || + (time_after_eq(jiffies, slave->dev->last_rx + 2*delta_in_ticks) && bond_has_ip(bond))) { slave->link = BOND_LINK_DOWN; @@ -2848,8 +2850,8 @@ void bond_activebackup_arp_mon(struct work_struct *work) */ bond_for_each_slave(bond, slave, i) { if (slave->link != BOND_LINK_UP) { - if ((jiffies - slave_last_rx(bond, slave)) <= - delta_in_ticks) { + if (time_before_eq(jiffies, + slave_last_rx(bond, slave) + delta_in_ticks)) { slave->link = BOND_LINK_UP; @@ -2858,7 +2860,7 @@ void bond_activebackup_arp_mon(struct work_struct *work) write_lock_bh(&bond->curr_slave_lock); if ((!bond->curr_active_slave) && - ((jiffies - slave->dev->trans_start) <= delta_in_ticks)) { + time_before_eq(jiffies, slave->dev->trans_start + delta_in_ticks)) { bond_change_active_slave(bond, slave); bond->current_arp_slave = NULL; } else if (bond->curr_active_slave != slave) { @@ -2897,7 +2899,7 @@ void bond_activebackup_arp_mon(struct work_struct *work) if ((slave != bond->curr_active_slave) && (!bond->current_arp_slave) && - (((jiffies - slave_last_rx(bond, slave)) >= 3*delta_in_ticks) && + (time_after_eq(jiffies, slave_last_rx(bond, slave) + 3*delta_in_ticks) && bond_has_ip(bond))) { /* a backup slave has gone down; three times * the delta allows the current slave to be @@ -2943,10 +2945,10 @@ void bond_activebackup_arp_mon(struct work_struct *work) * before being taken out. if a primary is being used, check * if it is up and needs to take over as the curr_active_slave */ - if ((((jiffies - slave->dev->trans_start) >= (2*delta_in_ticks)) || - (((jiffies - slave_last_rx(bond, slave)) >= (2*delta_in_ticks)) && - bond_has_ip(bond))) && - ((jiffies - slave->jiffies) >= 2*delta_in_ticks)) { + if ((time_after_eq(jiffies, slave->dev->trans_start + 2*delta_in_ticks) || + (time_after_eq(jiffies, slave_last_rx(bond, slave) + 2*delta_in_ticks) && + bond_has_ip(bond))) && + time_after_eq(jiffies, slave->jiffies + 2*delta_in_ticks)) { slave->link = BOND_LINK_DOWN; @@ -3604,6 +3606,24 @@ void bond_unregister_arp(struct bonding *bond) /*---------------------------- Hashing Policies -----------------------------*/ /* + * Hash for the output device based upon layer 2 and layer 3 data. If + * the packet is not IP mimic bond_xmit_hash_policy_l2() + */ +static int bond_xmit_hash_policy_l23(struct sk_buff *skb, + struct net_device *bond_dev, int count) +{ + struct ethhdr *data = (struct ethhdr *)skb->data; + struct iphdr *iph = ip_hdr(skb); + + if (skb->protocol == __constant_htons(ETH_P_IP)) { + return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^ + (data->h_dest[5] ^ bond_dev->dev_addr[5])) % count; + } + + return (data->h_dest[5] ^ bond_dev->dev_addr[5]) % count; +} + +/* * Hash for the output device based upon layer 3 and layer 4 data. If * the packet is a frag or not TCP or UDP, just use layer 3 data. 
If it is * altogether not IP, mimic bond_xmit_hash_policy_l2() @@ -4305,6 +4325,22 @@ out: /*------------------------- Device initialization ---------------------------*/ +static void bond_set_xmit_hash_policy(struct bonding *bond) +{ + switch (bond->params.xmit_policy) { + case BOND_XMIT_POLICY_LAYER23: + bond->xmit_hash_policy = bond_xmit_hash_policy_l23; + break; + case BOND_XMIT_POLICY_LAYER34: + bond->xmit_hash_policy = bond_xmit_hash_policy_l34; + break; + case BOND_XMIT_POLICY_LAYER2: + default: + bond->xmit_hash_policy = bond_xmit_hash_policy_l2; + break; + } +} + /* * set bond mode specific net device operations */ @@ -4321,10 +4357,7 @@ void bond_set_mode_ops(struct bonding *bond, int mode) break; case BOND_MODE_XOR: bond_dev->hard_start_xmit = bond_xmit_xor; - if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34) - bond->xmit_hash_policy = bond_xmit_hash_policy_l34; - else - bond->xmit_hash_policy = bond_xmit_hash_policy_l2; + bond_set_xmit_hash_policy(bond); break; case BOND_MODE_BROADCAST: bond_dev->hard_start_xmit = bond_xmit_broadcast; @@ -4332,10 +4365,7 @@ void bond_set_mode_ops(struct bonding *bond, int mode) case BOND_MODE_8023AD: bond_set_master_3ad_flags(bond); bond_dev->hard_start_xmit = bond_3ad_xmit_xor; - if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34) - bond->xmit_hash_policy = bond_xmit_hash_policy_l34; - else - bond->xmit_hash_policy = bond_xmit_hash_policy_l2; + bond_set_xmit_hash_policy(bond); break; case BOND_MODE_ALB: bond_set_master_alb_flags(bond); @@ -4462,6 +4492,27 @@ static void bond_deinit(struct net_device *bond_dev) #endif } +static void bond_work_cancel_all(struct bonding *bond) +{ + write_lock_bh(&bond->lock); + bond->kill_timers = 1; + write_unlock_bh(&bond->lock); + + if (bond->params.miimon && delayed_work_pending(&bond->mii_work)) + cancel_delayed_work(&bond->mii_work); + + if (bond->params.arp_interval && delayed_work_pending(&bond->arp_work)) + cancel_delayed_work(&bond->arp_work); + + if (bond->params.mode == BOND_MODE_ALB && + delayed_work_pending(&bond->alb_work)) + cancel_delayed_work(&bond->alb_work); + + if (bond->params.mode == BOND_MODE_8023AD && + delayed_work_pending(&bond->ad_work)) + cancel_delayed_work(&bond->ad_work); +} + /* Unregister and free all bond devices. * Caller must hold rtnl_lock. 
*/ @@ -4472,6 +4523,7 @@ static void bond_free_all(void) list_for_each_entry_safe(bond, nxt, &bond_dev_list, bond_list) { struct net_device *bond_dev = bond->dev; + bond_work_cancel_all(bond); bond_mc_list_destroy(bond); /* Release the bonded slaves */ bond_release_all(bond_dev); @@ -4497,8 +4549,7 @@ int bond_parse_parm(char *mode_arg, struct bond_parm_tbl *tbl) for (i = 0; tbl[i].modename; i++) { if ((isdigit(*mode_arg) && tbl[i].mode == simple_strtol(mode_arg, NULL, 0)) || - (strncmp(mode_arg, tbl[i].modename, - strlen(tbl[i].modename)) == 0)) { + (strcmp(mode_arg, tbl[i].modename) == 0)) { return tbl[i].mode; } } @@ -4873,27 +4924,6 @@ out_rtnl: return res; } -static void bond_work_cancel_all(struct bonding *bond) -{ - write_lock_bh(&bond->lock); - bond->kill_timers = 1; - write_unlock_bh(&bond->lock); - - if (bond->params.miimon && delayed_work_pending(&bond->mii_work)) - cancel_delayed_work(&bond->mii_work); - - if (bond->params.arp_interval && delayed_work_pending(&bond->arp_work)) - cancel_delayed_work(&bond->arp_work); - - if (bond->params.mode == BOND_MODE_ALB && - delayed_work_pending(&bond->alb_work)) - cancel_delayed_work(&bond->alb_work); - - if (bond->params.mode == BOND_MODE_8023AD && - delayed_work_pending(&bond->ad_work)) - cancel_delayed_work(&bond->ad_work); -} - static int __init bonding_init(void) { int i; diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index b29330d8e309..11b76b352415 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -74,7 +74,7 @@ struct rw_semaphore bonding_rwsem; * "show" function for the bond_masters attribute. * The class parameter is ignored. */ -static ssize_t bonding_show_bonds(struct class *cls, char *buffer) +static ssize_t bonding_show_bonds(struct class *cls, char *buf) { int res = 0; struct bonding *bond; @@ -86,14 +86,13 @@ static ssize_t bonding_show_bonds(struct class *cls, char *buffer) /* not enough space for another interface name */ if ((PAGE_SIZE - res) > 10) res = PAGE_SIZE - 10; - res += sprintf(buffer + res, "++more++"); + res += sprintf(buf + res, "++more++ "); break; } - res += sprintf(buffer + res, "%s ", - bond->dev->name); + res += sprintf(buf + res, "%s ", bond->dev->name); } - res += sprintf(buffer + res, "\n"); - res++; + if (res) + buf[res-1] = '\n'; /* eat the leftover space */ up_read(&(bonding_rwsem)); return res; } @@ -235,14 +234,14 @@ static ssize_t bonding_show_slaves(struct device *d, /* not enough space for another interface name */ if ((PAGE_SIZE - res) > 10) res = PAGE_SIZE - 10; - res += sprintf(buf + res, "++more++"); + res += sprintf(buf + res, "++more++ "); break; } res += sprintf(buf + res, "%s ", slave->dev->name); } read_unlock(&bond->lock); - res += sprintf(buf + res, "\n"); - res++; + if (res) + buf[res-1] = '\n'; /* eat the leftover space */ return res; } @@ -406,7 +405,7 @@ static ssize_t bonding_show_mode(struct device *d, return sprintf(buf, "%s %d\n", bond_mode_tbl[bond->params.mode].modename, - bond->params.mode) + 1; + bond->params.mode); } static ssize_t bonding_store_mode(struct device *d, @@ -457,20 +456,11 @@ static ssize_t bonding_show_xmit_hash(struct device *d, struct device_attribute *attr, char *buf) { - int count; struct bonding *bond = to_bond(d); - if ((bond->params.mode != BOND_MODE_XOR) && - (bond->params.mode != BOND_MODE_8023AD)) { - // Not Applicable - count = sprintf(buf, "NA\n") + 1; - } else { - count = sprintf(buf, "%s %d\n", - xmit_hashtype_tbl[bond->params.xmit_policy].modename, - 
bond->params.xmit_policy) + 1; - } - - return count; + return sprintf(buf, "%s %d\n", + xmit_hashtype_tbl[bond->params.xmit_policy].modename, + bond->params.xmit_policy); } static ssize_t bonding_store_xmit_hash(struct device *d, @@ -488,15 +478,6 @@ static ssize_t bonding_store_xmit_hash(struct device *d, goto out; } - if ((bond->params.mode != BOND_MODE_XOR) && - (bond->params.mode != BOND_MODE_8023AD)) { - printk(KERN_ERR DRV_NAME - "%s: Transmit hash policy is irrelevant in this mode.\n", - bond->dev->name); - ret = -EPERM; - goto out; - } - new_value = bond_parse_parm((char *)buf, xmit_hashtype_tbl); if (new_value < 0) { printk(KERN_ERR DRV_NAME @@ -527,7 +508,7 @@ static ssize_t bonding_show_arp_validate(struct device *d, return sprintf(buf, "%s %d\n", arp_validate_tbl[bond->params.arp_validate].modename, - bond->params.arp_validate) + 1; + bond->params.arp_validate); } static ssize_t bonding_store_arp_validate(struct device *d, @@ -627,7 +608,7 @@ static ssize_t bonding_show_arp_interval(struct device *d, { struct bonding *bond = to_bond(d); - return sprintf(buf, "%d\n", bond->params.arp_interval) + 1; + return sprintf(buf, "%d\n", bond->params.arp_interval); } static ssize_t bonding_store_arp_interval(struct device *d, @@ -712,9 +693,7 @@ static ssize_t bonding_show_arp_targets(struct device *d, NIPQUAD(bond->params.arp_targets[i])); } if (res) - res--; /* eat the leftover space */ - res += sprintf(buf + res, "\n"); - res++; + buf[res-1] = '\n'; /* eat the leftover space */ return res; } @@ -815,7 +794,7 @@ static ssize_t bonding_show_downdelay(struct device *d, { struct bonding *bond = to_bond(d); - return sprintf(buf, "%d\n", bond->params.downdelay * bond->params.miimon) + 1; + return sprintf(buf, "%d\n", bond->params.downdelay * bond->params.miimon); } static ssize_t bonding_store_downdelay(struct device *d, @@ -872,7 +851,7 @@ static ssize_t bonding_show_updelay(struct device *d, { struct bonding *bond = to_bond(d); - return sprintf(buf, "%d\n", bond->params.updelay * bond->params.miimon) + 1; + return sprintf(buf, "%d\n", bond->params.updelay * bond->params.miimon); } @@ -936,7 +915,7 @@ static ssize_t bonding_show_lacp(struct device *d, return sprintf(buf, "%s %d\n", bond_lacp_tbl[bond->params.lacp_fast].modename, - bond->params.lacp_fast) + 1; + bond->params.lacp_fast); } static ssize_t bonding_store_lacp(struct device *d, @@ -992,7 +971,7 @@ static ssize_t bonding_show_miimon(struct device *d, { struct bonding *bond = to_bond(d); - return sprintf(buf, "%d\n", bond->params.miimon) + 1; + return sprintf(buf, "%d\n", bond->params.miimon); } static ssize_t bonding_store_miimon(struct device *d, @@ -1083,9 +1062,7 @@ static ssize_t bonding_show_primary(struct device *d, struct bonding *bond = to_bond(d); if (bond->primary_slave) - count = sprintf(buf, "%s\n", bond->primary_slave->dev->name) + 1; - else - count = sprintf(buf, "\n") + 1; + count = sprintf(buf, "%s\n", bond->primary_slave->dev->name); return count; } @@ -1149,7 +1126,7 @@ static ssize_t bonding_show_carrier(struct device *d, { struct bonding *bond = to_bond(d); - return sprintf(buf, "%d\n", bond->params.use_carrier) + 1; + return sprintf(buf, "%d\n", bond->params.use_carrier); } static ssize_t bonding_store_carrier(struct device *d, @@ -1191,16 +1168,14 @@ static ssize_t bonding_show_active_slave(struct device *d, { struct slave *curr; struct bonding *bond = to_bond(d); - int count; + int count = 0; read_lock(&bond->curr_slave_lock); curr = bond->curr_active_slave; read_unlock(&bond->curr_slave_lock); if 
(USES_PRIMARY(bond->params.mode) && curr) - count = sprintf(buf, "%s\n", curr->dev->name) + 1; - else - count = sprintf(buf, "\n") + 1; + count = sprintf(buf, "%s\n", curr->dev->name); return count; } @@ -1295,7 +1270,7 @@ static ssize_t bonding_show_mii_status(struct device *d, curr = bond->curr_active_slave; read_unlock(&bond->curr_slave_lock); - return sprintf(buf, "%s\n", (curr) ? "up" : "down") + 1; + return sprintf(buf, "%s\n", (curr) ? "up" : "down"); } static DEVICE_ATTR(mii_status, S_IRUGO, bonding_show_mii_status, NULL); @@ -1312,10 +1287,8 @@ static ssize_t bonding_show_ad_aggregator(struct device *d, if (bond->params.mode == BOND_MODE_8023AD) { struct ad_info ad_info; - count = sprintf(buf, "%d\n", (bond_3ad_get_active_agg_info(bond, &ad_info)) ? 0 : ad_info.aggregator_id) + 1; + count = sprintf(buf, "%d\n", (bond_3ad_get_active_agg_info(bond, &ad_info)) ? 0 : ad_info.aggregator_id); } - else - count = sprintf(buf, "\n") + 1; return count; } @@ -1334,10 +1307,8 @@ static ssize_t bonding_show_ad_num_ports(struct device *d, if (bond->params.mode == BOND_MODE_8023AD) { struct ad_info ad_info; - count = sprintf(buf, "%d\n", (bond_3ad_get_active_agg_info(bond, &ad_info)) ? 0: ad_info.ports) + 1; + count = sprintf(buf, "%d\n", (bond_3ad_get_active_agg_info(bond, &ad_info)) ? 0: ad_info.ports); } - else - count = sprintf(buf, "\n") + 1; return count; } @@ -1356,10 +1327,8 @@ static ssize_t bonding_show_ad_actor_key(struct device *d, if (bond->params.mode == BOND_MODE_8023AD) { struct ad_info ad_info; - count = sprintf(buf, "%d\n", (bond_3ad_get_active_agg_info(bond, &ad_info)) ? 0 : ad_info.actor_key) + 1; + count = sprintf(buf, "%d\n", (bond_3ad_get_active_agg_info(bond, &ad_info)) ? 0 : ad_info.actor_key); } - else - count = sprintf(buf, "\n") + 1; return count; } @@ -1378,10 +1347,8 @@ static ssize_t bonding_show_ad_partner_key(struct device *d, if (bond->params.mode == BOND_MODE_8023AD) { struct ad_info ad_info; - count = sprintf(buf, "%d\n", (bond_3ad_get_active_agg_info(bond, &ad_info)) ? 0 : ad_info.partner_key) + 1; + count = sprintf(buf, "%d\n", (bond_3ad_get_active_agg_info(bond, &ad_info)) ? 
0 : ad_info.partner_key); } - else - count = sprintf(buf, "\n") + 1; return count; } @@ -1403,12 +1370,9 @@ static ssize_t bonding_show_ad_partner_mac(struct device *d, struct ad_info ad_info; if (!bond_3ad_get_active_agg_info(bond, &ad_info)) { count = sprintf(buf,"%s\n", - print_mac(mac, ad_info.partner_system)) - + 1; + print_mac(mac, ad_info.partner_system)); } } - else - count = sprintf(buf, "\n") + 1; return count; } diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index 61c1b4536d34..e1e4734e23ce 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h @@ -22,8 +22,8 @@ #include "bond_3ad.h" #include "bond_alb.h" -#define DRV_VERSION "3.2.1" -#define DRV_RELDATE "October 15, 2007" +#define DRV_VERSION "3.2.3" +#define DRV_RELDATE "December 6, 2007" #define DRV_NAME "bonding" #define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c index 2dbf8dc116c6..c5975047c89b 100644 --- a/drivers/net/chelsio/cxgb2.c +++ b/drivers/net/chelsio/cxgb2.c @@ -374,7 +374,9 @@ static char stats_strings[][ETH_GSTRING_LEN] = { "TxInternalMACXmitError", "TxFramesWithExcessiveDeferral", "TxFCSErrors", - + "TxJumboFramesOk", + "TxJumboOctetsOk", + "RxOctetsOK", "RxOctetsBad", "RxUnicastFramesOK", @@ -392,16 +394,17 @@ static char stats_strings[][ETH_GSTRING_LEN] = { "RxInRangeLengthErrors", "RxOutOfRangeLengthField", "RxFrameTooLongErrors", + "RxJumboFramesOk", + "RxJumboOctetsOk", /* Port stats */ - "RxPackets", "RxCsumGood", - "TxPackets", "TxCsumOffload", "TxTso", "RxVlan", "TxVlan", - + "TxNeedHeadroom", + /* Interrupt stats */ "rx drops", "pure_rsps", @@ -463,23 +466,56 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats, const struct cmac_statistics *s; const struct sge_intr_counts *t; struct sge_port_stats ss; - unsigned int len; s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL); - - len = sizeof(u64)*(&s->TxFCSErrors + 1 - &s->TxOctetsOK); - memcpy(data, &s->TxOctetsOK, len); - data += len; - - len = sizeof(u64)*(&s->RxFrameTooLongErrors + 1 - &s->RxOctetsOK); - memcpy(data, &s->RxOctetsOK, len); - data += len; - + t = t1_sge_get_intr_counts(adapter->sge); t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss); - memcpy(data, &ss, sizeof(ss)); - data += sizeof(ss); - t = t1_sge_get_intr_counts(adapter->sge); + *data++ = s->TxOctetsOK; + *data++ = s->TxOctetsBad; + *data++ = s->TxUnicastFramesOK; + *data++ = s->TxMulticastFramesOK; + *data++ = s->TxBroadcastFramesOK; + *data++ = s->TxPauseFrames; + *data++ = s->TxFramesWithDeferredXmissions; + *data++ = s->TxLateCollisions; + *data++ = s->TxTotalCollisions; + *data++ = s->TxFramesAbortedDueToXSCollisions; + *data++ = s->TxUnderrun; + *data++ = s->TxLengthErrors; + *data++ = s->TxInternalMACXmitError; + *data++ = s->TxFramesWithExcessiveDeferral; + *data++ = s->TxFCSErrors; + *data++ = s->TxJumboFramesOK; + *data++ = s->TxJumboOctetsOK; + + *data++ = s->RxOctetsOK; + *data++ = s->RxOctetsBad; + *data++ = s->RxUnicastFramesOK; + *data++ = s->RxMulticastFramesOK; + *data++ = s->RxBroadcastFramesOK; + *data++ = s->RxPauseFrames; + *data++ = s->RxFCSErrors; + *data++ = s->RxAlignErrors; + *data++ = s->RxSymbolErrors; + *data++ = s->RxDataErrors; + *data++ = s->RxSequenceErrors; + *data++ = s->RxRuntErrors; + *data++ = s->RxJabberErrors; + *data++ = s->RxInternalMACRcvError; + *data++ = s->RxInRangeLengthErrors; + *data++ = s->RxOutOfRangeLengthField; + *data++ = s->RxFrameTooLongErrors; + *data++ = 
s->RxJumboFramesOK; + *data++ = s->RxJumboOctetsOK; + + *data++ = ss.rx_cso_good; + *data++ = ss.tx_cso; + *data++ = ss.tx_tso; + *data++ = ss.vlan_xtract; + *data++ = ss.vlan_insert; + *data++ = ss.tx_need_hdrroom; + *data++ = t->rx_drops; *data++ = t->pure_rsps; *data++ = t->unhandled_irqs; diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/chelsio/pm3393.c index 678778a8d133..2117c4fbb107 100644 --- a/drivers/net/chelsio/pm3393.c +++ b/drivers/net/chelsio/pm3393.c @@ -45,7 +45,7 @@ #include <linux/crc32.h> -#define OFFSET(REG_ADDR) (REG_ADDR << 2) +#define OFFSET(REG_ADDR) ((REG_ADDR) << 2) /* Max frame size PM3393 can handle. Includes Ethernet header and CRC. */ #define MAX_FRAME_SIZE 9600 @@ -428,69 +428,26 @@ static int pm3393_set_speed_duplex_fc(struct cmac *cmac, int speed, int duplex, return 0; } -static void pm3393_rmon_update(struct adapter *adapter, u32 offs, u64 *val, - int over) -{ - u32 val0, val1, val2; - - t1_tpi_read(adapter, offs, &val0); - t1_tpi_read(adapter, offs + 4, &val1); - t1_tpi_read(adapter, offs + 8, &val2); - - *val &= ~0ull << 40; - *val |= val0 & 0xffff; - *val |= (val1 & 0xffff) << 16; - *val |= (u64)(val2 & 0xff) << 32; - - if (over) - *val += 1ull << 40; +#define RMON_UPDATE(mac, name, stat_name) \ +{ \ + t1_tpi_read((mac)->adapter, OFFSET(name), &val0); \ + t1_tpi_read((mac)->adapter, OFFSET((name)+1), &val1); \ + t1_tpi_read((mac)->adapter, OFFSET((name)+2), &val2); \ + (mac)->stats.stat_name = (u64)(val0 & 0xffff) | \ + ((u64)(val1 & 0xffff) << 16) | \ + ((u64)(val2 & 0xff) << 32) | \ + ((mac)->stats.stat_name & \ + 0xffffff0000000000ULL); \ + if (ro & \ + (1ULL << ((name - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW) >> 2))) \ + (mac)->stats.stat_name += 1ULL << 40; \ } static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac, int flag) { - static struct { - unsigned int reg; - unsigned int offset; - } hw_stats [] = { - -#define HW_STAT(name, stat_name) \ - { name, (&((struct cmac_statistics *)NULL)->stat_name) - (u64 *)NULL } - - /* Rx stats */ - HW_STAT(RxOctetsReceivedOK, RxOctetsOK), - HW_STAT(RxUnicastFramesReceivedOK, RxUnicastFramesOK), - HW_STAT(RxMulticastFramesReceivedOK, RxMulticastFramesOK), - HW_STAT(RxBroadcastFramesReceivedOK, RxBroadcastFramesOK), - HW_STAT(RxPAUSEMACCtrlFramesReceived, RxPauseFrames), - HW_STAT(RxFrameCheckSequenceErrors, RxFCSErrors), - HW_STAT(RxFramesLostDueToInternalMACErrors, - RxInternalMACRcvError), - HW_STAT(RxSymbolErrors, RxSymbolErrors), - HW_STAT(RxInRangeLengthErrors, RxInRangeLengthErrors), - HW_STAT(RxFramesTooLongErrors , RxFrameTooLongErrors), - HW_STAT(RxJabbers, RxJabberErrors), - HW_STAT(RxFragments, RxRuntErrors), - HW_STAT(RxUndersizedFrames, RxRuntErrors), - HW_STAT(RxJumboFramesReceivedOK, RxJumboFramesOK), - HW_STAT(RxJumboOctetsReceivedOK, RxJumboOctetsOK), - - /* Tx stats */ - HW_STAT(TxOctetsTransmittedOK, TxOctetsOK), - HW_STAT(TxFramesLostDueToInternalMACTransmissionError, - TxInternalMACXmitError), - HW_STAT(TxTransmitSystemError, TxFCSErrors), - HW_STAT(TxUnicastFramesTransmittedOK, TxUnicastFramesOK), - HW_STAT(TxMulticastFramesTransmittedOK, TxMulticastFramesOK), - HW_STAT(TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK), - HW_STAT(TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames), - HW_STAT(TxJumboFramesReceivedOK, TxJumboFramesOK), - HW_STAT(TxJumboOctetsReceivedOK, TxJumboOctetsOK) - }, *p = hw_stats; - u64 ro; - u32 val0, val1, val2, val3; - u64 *stats = (u64 *) &mac->stats; - unsigned int i; + u64 ro; + u32 val0, val1, val2, val3; /* Snap the counters */ 
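The RMON_UPDATE() macro introduced above stitches a 40-bit hardware counter back together from three register reads and extends it in software when the rollover snapshot flags that counter. A standalone sketch of the same merge, with the TPI reads replaced by plain parameters and the values in main() purely illustrative:

#include <stdio.h>
#include <stdint.h>

/*
 * Merge a 40-bit hardware count (delivered as low 16, middle 16 and high
 * 8 bits) into a 64-bit software counter, preserving the software-extended
 * upper 24 bits, and bump bit 40 when the hardware reports a rollover.
 */
static void rmon_update(uint64_t *counter, uint32_t lo16, uint32_t mid16,
                        uint32_t hi8, int rolled_over)
{
    uint64_t hw = (uint64_t)(lo16 & 0xffff) |
                  ((uint64_t)(mid16 & 0xffff) << 16) |
                  ((uint64_t)(hi8 & 0xff) << 32);

    *counter = hw | (*counter & 0xffffff0000000000ULL);
    if (rolled_over)
        *counter += 1ULL << 40;
}

int main(void)
{
    uint64_t rx_octets = 0xffffffffffULL;   /* hardware count about to wrap */

    /* Next snapshot: hardware wrapped to a small value, rollover flagged. */
    rmon_update(&rx_octets, 0x0010, 0x0000, 0x00, 1);
    printf("RxOctetsOK = %llu\n", (unsigned long long)rx_octets);
    return 0;
}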
pmwrite(mac, SUNI1x10GEXP_REG_MSTAT_CONTROL, @@ -504,14 +461,35 @@ static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac, ro = ((u64)val0 & 0xffff) | (((u64)val1 & 0xffff) << 16) | (((u64)val2 & 0xffff) << 32) | (((u64)val3 & 0xffff) << 48); - for (i = 0; i < ARRAY_SIZE(hw_stats); i++) { - unsigned reg = p->reg - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW; - - pm3393_rmon_update((mac)->adapter, OFFSET(p->reg), - stats + p->offset, ro & (reg >> 2)); - } - - + /* Rx stats */ + RMON_UPDATE(mac, RxOctetsReceivedOK, RxOctetsOK); + RMON_UPDATE(mac, RxUnicastFramesReceivedOK, RxUnicastFramesOK); + RMON_UPDATE(mac, RxMulticastFramesReceivedOK, RxMulticastFramesOK); + RMON_UPDATE(mac, RxBroadcastFramesReceivedOK, RxBroadcastFramesOK); + RMON_UPDATE(mac, RxPAUSEMACCtrlFramesReceived, RxPauseFrames); + RMON_UPDATE(mac, RxFrameCheckSequenceErrors, RxFCSErrors); + RMON_UPDATE(mac, RxFramesLostDueToInternalMACErrors, + RxInternalMACRcvError); + RMON_UPDATE(mac, RxSymbolErrors, RxSymbolErrors); + RMON_UPDATE(mac, RxInRangeLengthErrors, RxInRangeLengthErrors); + RMON_UPDATE(mac, RxFramesTooLongErrors , RxFrameTooLongErrors); + RMON_UPDATE(mac, RxJabbers, RxJabberErrors); + RMON_UPDATE(mac, RxFragments, RxRuntErrors); + RMON_UPDATE(mac, RxUndersizedFrames, RxRuntErrors); + RMON_UPDATE(mac, RxJumboFramesReceivedOK, RxJumboFramesOK); + RMON_UPDATE(mac, RxJumboOctetsReceivedOK, RxJumboOctetsOK); + + /* Tx stats */ + RMON_UPDATE(mac, TxOctetsTransmittedOK, TxOctetsOK); + RMON_UPDATE(mac, TxFramesLostDueToInternalMACTransmissionError, + TxInternalMACXmitError); + RMON_UPDATE(mac, TxTransmitSystemError, TxFCSErrors); + RMON_UPDATE(mac, TxUnicastFramesTransmittedOK, TxUnicastFramesOK); + RMON_UPDATE(mac, TxMulticastFramesTransmittedOK, TxMulticastFramesOK); + RMON_UPDATE(mac, TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK); + RMON_UPDATE(mac, TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames); + RMON_UPDATE(mac, TxJumboFramesReceivedOK, TxJumboFramesOK); + RMON_UPDATE(mac, TxJumboOctetsReceivedOK, TxJumboOctetsOK); return &mac->stats; } diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c index 443666292a5c..b301c0428ae0 100644 --- a/drivers/net/chelsio/sge.c +++ b/drivers/net/chelsio/sge.c @@ -986,11 +986,10 @@ void t1_sge_get_port_stats(const struct sge *sge, int port, for_each_possible_cpu(cpu) { struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu); - ss->rx_packets += st->rx_packets; ss->rx_cso_good += st->rx_cso_good; - ss->tx_packets += st->tx_packets; ss->tx_cso += st->tx_cso; ss->tx_tso += st->tx_tso; + ss->tx_need_hdrroom += st->tx_need_hdrroom; ss->vlan_xtract += st->vlan_xtract; ss->vlan_insert += st->vlan_insert; } @@ -1380,7 +1379,6 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len) __skb_pull(skb, sizeof(*p)); st = per_cpu_ptr(sge->port_stats[p->iff], smp_processor_id()); - st->rx_packets++; skb->protocol = eth_type_trans(skb, adapter->port[p->iff].dev); skb->dev->last_rx = jiffies; @@ -1624,11 +1622,9 @@ int t1_poll(struct napi_struct *napi, int budget) { struct adapter *adapter = container_of(napi, struct adapter, napi); struct net_device *dev = adapter->port[0].dev; - int work_done; - - work_done = process_responses(adapter, budget); + int work_done = process_responses(adapter, budget); - if (likely(!responses_pending(adapter))) { + if (likely(work_done < budget)) { netif_rx_complete(dev, napi); writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); @@ -1848,7 +1844,8 @@ int t1_start_xmit(struct sk_buff *skb, 
struct net_device *dev) { struct adapter *adapter = dev->priv; struct sge *sge = adapter->sge; - struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port], smp_processor_id()); + struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port], + smp_processor_id()); struct cpl_tx_pkt *cpl; struct sk_buff *orig_skb = skb; int ret; @@ -1856,6 +1853,18 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) if (skb->protocol == htons(ETH_P_CPL5)) goto send; + /* + * We are using a non-standard hard_header_len. + * Allocate more header room in the rare cases it is not big enough. + */ + if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) { + skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso)); + ++st->tx_need_hdrroom; + dev_kfree_skb_any(orig_skb); + if (!skb) + return NETDEV_TX_OK; + } + if (skb_shinfo(skb)->gso_size) { int eth_type; struct cpl_tx_pkt_lso *hdr; @@ -1889,24 +1898,6 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } - /* - * We are using a non-standard hard_header_len and some kernel - * components, such as pktgen, do not handle it right. - * Complain when this happens but try to fix things up. - */ - if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) { - pr_debug("%s: headroom %d header_len %d\n", dev->name, - skb_headroom(skb), dev->hard_header_len); - - if (net_ratelimit()) - printk(KERN_ERR "%s: inadequate headroom in " - "Tx packet\n", dev->name); - skb = skb_realloc_headroom(skb, sizeof(*cpl)); - dev_kfree_skb_any(orig_skb); - if (!skb) - return NETDEV_TX_OK; - } - if (!(adapter->flags & UDP_CSUM_CAPABLE) && skb->ip_summed == CHECKSUM_PARTIAL && ip_hdr(skb)->protocol == IPPROTO_UDP) { @@ -1952,7 +1943,6 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) cpl->vlan_valid = 0; send: - st->tx_packets++; dev->trans_start = jiffies; ret = t1_sge_tx(skb, adapter, 0, dev); diff --git a/drivers/net/chelsio/sge.h b/drivers/net/chelsio/sge.h index 713d9c55f24d..cced9dff91c5 100644 --- a/drivers/net/chelsio/sge.h +++ b/drivers/net/chelsio/sge.h @@ -57,13 +57,12 @@ struct sge_intr_counts { }; struct sge_port_stats { - u64 rx_packets; /* # of Ethernet packets received */ u64 rx_cso_good; /* # of successful RX csum offloads */ - u64 tx_packets; /* # of TX packets */ u64 tx_cso; /* # of TX checksum offloads */ u64 tx_tso; /* # of TSO requests */ u64 vlan_xtract; /* # of VLAN tag extractions */ u64 vlan_insert; /* # of VLAN tag insertions */ + u64 tx_need_hdrroom; /* # of TX skbs in need of more header room */ }; struct sk_buff; diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h index 5e1bc0dec5f1..6e12bf4bc6cf 100644 --- a/drivers/net/cxgb3/regs.h +++ b/drivers/net/cxgb3/regs.h @@ -1937,6 +1937,10 @@ #define A_XGM_RXFIFO_CFG 0x884 +#define S_RXFIFO_EMPTY 31 +#define V_RXFIFO_EMPTY(x) ((x) << S_RXFIFO_EMPTY) +#define F_RXFIFO_EMPTY V_RXFIFO_EMPTY(1U) + #define S_RXFIFOPAUSEHWM 17 #define M_RXFIFOPAUSEHWM 0xfff @@ -1961,6 +1965,10 @@ #define A_XGM_TXFIFO_CFG 0x888 +#define S_UNDERUNFIX 22 +#define V_UNDERUNFIX(x) ((x) << S_UNDERUNFIX) +#define F_UNDERUNFIX V_UNDERUNFIX(1U) + #define S_TXIPG 13 #define M_TXIPG 0xff #define V_TXIPG(x) ((x) << S_TXIPG) @@ -2034,10 +2042,27 @@ #define V_XAUIIMP(x) ((x) << S_XAUIIMP) #define A_XGM_RX_MAX_PKT_SIZE 0x8a8 -#define A_XGM_RX_MAX_PKT_SIZE_ERR_CNT 0x9a4 + +#define S_RXMAXFRAMERSIZE 17 +#define M_RXMAXFRAMERSIZE 0x3fff +#define V_RXMAXFRAMERSIZE(x) ((x) << S_RXMAXFRAMERSIZE) +#define G_RXMAXFRAMERSIZE(x) (((x) >> 
S_RXMAXFRAMERSIZE) & M_RXMAXFRAMERSIZE) + +#define S_RXENFRAMER 14 +#define V_RXENFRAMER(x) ((x) << S_RXENFRAMER) +#define F_RXENFRAMER V_RXENFRAMER(1U) + +#define S_RXMAXPKTSIZE 0 +#define M_RXMAXPKTSIZE 0x3fff +#define V_RXMAXPKTSIZE(x) ((x) << S_RXMAXPKTSIZE) +#define G_RXMAXPKTSIZE(x) (((x) >> S_RXMAXPKTSIZE) & M_RXMAXPKTSIZE) #define A_XGM_RESET_CTRL 0x8ac +#define S_XGMAC_STOP_EN 4 +#define V_XGMAC_STOP_EN(x) ((x) << S_XGMAC_STOP_EN) +#define F_XGMAC_STOP_EN V_XGMAC_STOP_EN(1U) + #define S_XG2G_RESET_ 3 #define V_XG2G_RESET_(x) ((x) << S_XG2G_RESET_) #define F_XG2G_RESET_ V_XG2G_RESET_(1U) diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c index d4ee00d32219..522834c42ae7 100644 --- a/drivers/net/cxgb3/t3_hw.c +++ b/drivers/net/cxgb3/t3_hw.c @@ -447,8 +447,8 @@ static const struct adapter_info t3_adap_info[] = { &mi1_mdio_ops, "Chelsio T302"}, {1, 0, 0, 0, F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN | - F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0, - SUPPORTED_10000baseT_Full | SUPPORTED_AUI, + F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, + 0, SUPPORTED_10000baseT_Full | SUPPORTED_AUI, &mi1_mdio_ext_ops, "Chelsio T310"}, {2, 0, 0, 0, F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN | @@ -2613,7 +2613,7 @@ static void __devinit init_mtus(unsigned short mtus[]) * it can accomodate max size TCP/IP headers when SACK and timestamps * are enabled and still have at least 8 bytes of payload. */ - mtus[1] = 88; + mtus[0] = 88; mtus[1] = 88; mtus[2] = 256; mtus[3] = 512; diff --git a/drivers/net/cxgb3/xgmac.c b/drivers/net/cxgb3/xgmac.c index eeb766aeced9..efcf09a709cf 100644 --- a/drivers/net/cxgb3/xgmac.c +++ b/drivers/net/cxgb3/xgmac.c @@ -106,6 +106,7 @@ int t3_mac_reset(struct cmac *mac) t3_set_reg_field(adap, A_XGM_RXFIFO_CFG + oft, F_RXSTRFRWRD | F_DISERRFRAMES, uses_xaui(adap) ? 0 : F_RXSTRFRWRD); + t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + oft, 0, F_UNDERUNFIX); if (uses_xaui(adap)) { if (adap->params.rev == 0) { @@ -124,7 +125,11 @@ int t3_mac_reset(struct cmac *mac) xaui_serdes_reset(mac); } - val = F_MAC_RESET_; + t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + oft, + V_RXMAXFRAMERSIZE(M_RXMAXFRAMERSIZE), + V_RXMAXFRAMERSIZE(MAX_FRAME_SIZE) | F_RXENFRAMER); + val = F_MAC_RESET_ | F_XGMAC_STOP_EN; + if (is_10G(adap)) val |= F_PCS_RESET_; else if (uses_xaui(adap)) @@ -313,8 +318,9 @@ static int rx_fifo_hwm(int mtu) int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu) { - int hwm, lwm; - unsigned int thres, v; + int hwm, lwm, divisor; + int ipg; + unsigned int thres, v, reg; struct adapter *adap = mac->adapter; /* @@ -335,27 +341,32 @@ int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu) hwm = min(hwm, MAC_RXFIFO_SIZE - 8192); lwm = min(3 * (int)mtu, MAC_RXFIFO_SIZE / 4); - if (adap->params.rev == T3_REV_B2 && + if (adap->params.rev >= T3_REV_B2 && (t3_read_reg(adap, A_XGM_RX_CTRL + mac->offset) & F_RXEN)) { disable_exact_filters(mac); v = t3_read_reg(adap, A_XGM_RX_CFG + mac->offset); t3_set_reg_field(adap, A_XGM_RX_CFG + mac->offset, F_ENHASHMCAST | F_COPYALLFRAMES, F_DISBCAST); - /* drain rx FIFO */ - if (t3_wait_op_done(adap, - A_XGM_RX_MAX_PKT_SIZE_ERR_CNT + - mac->offset, - 1 << 31, 1, 20, 5)) { + reg = adap->params.rev == T3_REV_B2 ? 
+ A_XGM_RX_MAX_PKT_SIZE_ERR_CNT : A_XGM_RXFIFO_CFG; + + /* drain RX FIFO */ + if (t3_wait_op_done(adap, reg + mac->offset, + F_RXFIFO_EMPTY, 1, 20, 5)) { t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v); enable_exact_filters(mac); return -EIO; } - t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu); + t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, + V_RXMAXPKTSIZE(M_RXMAXPKTSIZE), + V_RXMAXPKTSIZE(mtu)); t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v); enable_exact_filters(mac); } else - t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu); + t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, + V_RXMAXPKTSIZE(M_RXMAXPKTSIZE), + V_RXMAXPKTSIZE(mtu)); /* * Adjust the PAUSE frame watermarks. We always set the LWM, and the @@ -379,13 +390,16 @@ int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu) thres /= 10; thres = mtu > thres ? (mtu - thres + 7) / 8 : 0; thres = max(thres, 8U); /* need at least 8 */ + ipg = (adap->params.rev == T3_REV_C) ? 0 : 1; t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + mac->offset, V_TXFIFOTHRESH(M_TXFIFOTHRESH) | V_TXIPG(M_TXIPG), - V_TXFIFOTHRESH(thres) | V_TXIPG(1)); + V_TXFIFOTHRESH(thres) | V_TXIPG(ipg)); - if (adap->params.rev > 0) + if (adap->params.rev > 0) { + divisor = (adap->params.rev == T3_REV_C) ? 64 : 8; t3_write_reg(adap, A_XGM_PAUSE_TIMER + mac->offset, - (hwm - lwm) * 4 / 8); + (hwm - lwm) * 4 / divisor); + } t3_write_reg(adap, A_XGM_TX_PAUSE_QUANTA + mac->offset, MAC_RXFIFO_SIZE * 4 * 8 / 512); return 0; @@ -522,7 +536,7 @@ int t3b2_mac_watchdog_task(struct cmac *mac) goto rxcheck; } - if ((tx_tcnt != mac->tx_tcnt) && (mac->tx_xcnt == 0)) { + if ((tx_tcnt != mac->tx_tcnt) && (mac->tx_xcnt == 0)) { if (mac->toggle_cnt > 4) { status = 2; goto out; diff --git a/drivers/net/e100.c b/drivers/net/e100.c index 3dbaec680b46..e1c8a0d023ea 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c @@ -2214,13 +2214,11 @@ static void e100_get_drvinfo(struct net_device *netdev, strcpy(info->bus_info, pci_name(nic->pdev)); } +#define E100_PHY_REGS 0x1C static int e100_get_regs_len(struct net_device *netdev) { struct nic *nic = netdev_priv(netdev); -#define E100_PHY_REGS 0x1C -#define E100_REGS_LEN 1 + E100_PHY_REGS + \ - sizeof(nic->mem->dump_buf) / sizeof(u32) - return E100_REGS_LEN * sizeof(u32); + return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf); } static void e100_get_regs(struct net_device *netdev, diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c index 667f18bcc172..b83ccce8a9b7 100644 --- a/drivers/net/e1000/e1000_ethtool.c +++ b/drivers/net/e1000/e1000_ethtool.c @@ -1923,7 +1923,7 @@ e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data) switch (stringset) { case ETH_SS_TEST: memcpy(data, *e1000_gstrings_test, - E1000_TEST_LEN*ETH_GSTRING_LEN); + sizeof(e1000_gstrings_test)); break; case ETH_SS_STATS: for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index cf39473ef90a..4f37506ad374 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -3942,7 +3942,7 @@ e1000_clean(struct napi_struct *napi, int budget) &work_done, budget); /* If no Tx and not enough Rx work done, exit the polling mode */ - if ((!tx_cleaned && (work_done < budget)) || + if ((!tx_cleaned && (work_done == 0)) || !netif_running(poll_dev)) { quit_polling: if (likely(adapter->itr_setting & 3)) diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c index 
6a39784e7ee2..87f9da1b6b4e 100644 --- a/drivers/net/e1000e/ethtool.c +++ b/drivers/net/e1000e/ethtool.c @@ -1739,7 +1739,7 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset, switch (stringset) { case ETH_SS_TEST: memcpy(data, *e1000_gstrings_test, - E1000_TEST_LEN*ETH_GSTRING_LEN); + sizeof(e1000_gstrings_test)); break; case ETH_SS_STATS: for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h index f78e5bf7cb33..5f82a4647eee 100644 --- a/drivers/net/ehea/ehea.h +++ b/drivers/net/ehea/ehea.h @@ -40,7 +40,7 @@ #include <asm/io.h> #define DRV_NAME "ehea" -#define DRV_VERSION "EHEA_0080" +#define DRV_VERSION "EHEA_0083" /* eHEA capability flags */ #define DLPAR_PORT_ADD_REM 1 diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index f0319f1e8e05..869e1604b16e 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c @@ -136,7 +136,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev) struct ehea_port *port = netdev_priv(dev); struct net_device_stats *stats = &port->stats; struct hcp_ehea_port_cb2 *cb2; - u64 hret, rx_packets; + u64 hret, rx_packets, tx_packets; int i; memset(stats, 0, sizeof(*stats)); @@ -162,7 +162,11 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev) for (i = 0; i < port->num_def_qps; i++) rx_packets += port->port_res[i].rx_packets; - stats->tx_packets = cb2->txucp + cb2->txmcp + cb2->txbcp; + tx_packets = 0; + for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) + tx_packets += port->port_res[i].tx_packets; + + stats->tx_packets = tx_packets; stats->multicast = cb2->rxmcp; stats->rx_errors = cb2->rxuerr; stats->rx_bytes = cb2->rxo; @@ -406,11 +410,6 @@ static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq, if (cqe->status & EHEA_CQE_STAT_ERR_CRC) pr->p_stats.err_frame_crc++; - if (netif_msg_rx_err(pr->port)) { - ehea_error("CQE Error for QP %d", pr->qp->init_attr.qp_nr); - ehea_dump(cqe, sizeof(*cqe), "CQE"); - } - if (rq == 2) { *processed_rq2 += 1; skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe); @@ -422,7 +421,11 @@ static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq, } if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) { - ehea_error("Critical receive error. Resetting port."); + if (netif_msg_rx_err(pr->port)) { + ehea_error("Critical receive error for QP %d. 
" + "Resetting port.", pr->qp->init_attr.qp_nr); + ehea_dump(cqe, sizeof(*cqe), "CQE"); + } schedule_work(&pr->port->reset_task); return 1; } @@ -2000,6 +2003,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev) } ehea_post_swqe(pr->qp, swqe); + pr->tx_packets++; if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) { spin_lock_irqsave(&pr->netif_queue, flags); diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h index 562de0ebdd85..bc62d389c166 100644 --- a/drivers/net/ehea/ehea_qmr.h +++ b/drivers/net/ehea/ehea_qmr.h @@ -145,8 +145,8 @@ struct ehea_rwqe { #define EHEA_CQE_VLAN_TAG_XTRACT 0x0400 #define EHEA_CQE_TYPE_RQ 0x60 -#define EHEA_CQE_STAT_ERR_MASK 0x720F -#define EHEA_CQE_STAT_FAT_ERR_MASK 0x1F +#define EHEA_CQE_STAT_ERR_MASK 0x700F +#define EHEA_CQE_STAT_FAT_ERR_MASK 0xF #define EHEA_CQE_STAT_ERR_TCP 0x4000 #define EHEA_CQE_STAT_ERR_IP 0x2000 #define EHEA_CQE_STAT_ERR_CRC 0x1000 diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c index a8a0ee220da6..79f7eade4773 100644 --- a/drivers/net/fec_mpc52xx.c +++ b/drivers/net/fec_mpc52xx.c @@ -422,7 +422,7 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id) rskb = bcom_retrieve_buffer(priv->rx_dmatsk, &status, (struct bcom_bd **)&bd); - dma_unmap_single(&dev->dev, bd->skb_pa, skb->len, DMA_FROM_DEVICE); + dma_unmap_single(&dev->dev, bd->skb_pa, rskb->len, DMA_FROM_DEVICE); /* Test for errors in received frame */ if (status & BCOM_FEC_RX_BD_ERRORS) { @@ -467,7 +467,7 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id) bcom_prepare_next_buffer(priv->rx_dmatsk); bd->status = FEC_RX_BUFFER_SIZE; - bd->skb_pa = dma_map_single(&dev->dev, rskb->data, + bd->skb_pa = dma_map_single(&dev->dev, skb->data, FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE); bcom_submit_next_buffer(priv->rx_dmatsk, skb); @@ -971,6 +971,8 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match) mpc52xx_fec_reset_stats(ndev); + SET_NETDEV_DEV(ndev, &op->dev); + /* Register the new network device */ rv = register_netdev(ndev); if (rv < 0) diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index 92ce2e38f0d5..a96583cceb5e 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c @@ -5286,19 +5286,15 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) { np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST; dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n", pci_name(pci_dev), np->mac_in_use); - for (i = 0; i < 5000; i++) { - msleep(1); - if (nv_mgmt_acquire_sema(dev)) { - /* management unit setup the phy already? */ - if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) == - NVREG_XMITCTL_SYNC_PHY_INIT) { - /* phy is inited by mgmt unit */ - phyinitialized = 1; - dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev)); - } else { - /* we need to init the phy */ - } - break; + if (nv_mgmt_acquire_sema(dev)) { + /* management unit setup the phy already? 
*/ + if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) == + NVREG_XMITCTL_SYNC_PHY_INIT) { + /* phy is inited by mgmt unit */ + phyinitialized = 1; + dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev)); + } else { + /* we need to init the phy */ } } } @@ -5613,6 +5609,22 @@ static struct pci_device_id pci_tbl[] = { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35), .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, }, + { /* MCP79 Ethernet Controller */ + PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36), + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + }, + { /* MCP79 Ethernet Controller */ + PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37), + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + }, + { /* MCP79 Ethernet Controller */ + PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38), + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + }, + { /* MCP79 Ethernet Controller */ + PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39), + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + }, {0,}, }; diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 38268d7335a8..0431e9ed0fac 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -696,7 +696,7 @@ int startup_gfar(struct net_device *dev) { struct txbd8 *txbdp; struct rxbd8 *rxbdp; - dma_addr_t addr; + dma_addr_t addr = 0; unsigned long vaddr; int i; struct gfar_private *priv = netdev_priv(dev); diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c index 0de3aa2a2e44..cb06280dced5 100644 --- a/drivers/net/ibm_newemac/core.c +++ b/drivers/net/ibm_newemac/core.c @@ -3,6 +3,11 @@ * * Driver for PowerPC 4xx on-chip ethernet controller. * + * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. + * <benh@kernel.crashing.org> + * + * Based on the arch/ppc version of the driver: + * * Copyright (c) 2004, 2005 Zultys Technologies. 
* Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> * @@ -402,7 +407,7 @@ static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_s static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size) { u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR | - EMAC4_MR1_OBCI(dev->opb_bus_freq); + EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000); DBG2(dev, "__emac4_calc_base_mr1" NL); @@ -464,26 +469,34 @@ static int emac_configure(struct emac_instance *dev) { struct emac_regs __iomem *p = dev->emacp; struct net_device *ndev = dev->ndev; - int tx_size, rx_size; + int tx_size, rx_size, link = netif_carrier_ok(dev->ndev); u32 r, mr1 = 0; DBG(dev, "configure" NL); - if (emac_reset(dev) < 0) + if (!link) { + out_be32(&p->mr1, in_be32(&p->mr1) + | EMAC_MR1_FDE | EMAC_MR1_ILE); + udelay(100); + } else if (emac_reset(dev) < 0) return -ETIMEDOUT; if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) tah_reset(dev->tah_dev); - DBG(dev, " duplex = %d, pause = %d, asym_pause = %d\n", - dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause); + DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n", + link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause); /* Default fifo sizes */ tx_size = dev->tx_fifo_size; rx_size = dev->rx_fifo_size; + /* No link, force loopback */ + if (!link) + mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE; + /* Check for full duplex */ - if (dev->phy.duplex == DUPLEX_FULL) + else if (dev->phy.duplex == DUPLEX_FULL) mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001; /* Adjust fifo sizes, mr1 and timeouts based on link speed */ @@ -642,9 +655,11 @@ static void emac_reset_work(struct work_struct *work) DBG(dev, "reset_work" NL); mutex_lock(&dev->link_lock); - emac_netif_stop(dev); - emac_full_tx_reset(dev); - emac_netif_start(dev); + if (dev->opened) { + emac_netif_stop(dev); + emac_full_tx_reset(dev); + emac_netif_start(dev); + } mutex_unlock(&dev->link_lock); } @@ -701,7 +716,7 @@ static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg) r = EMAC_STACR_BASE(dev->opb_bus_freq); if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT)) r |= EMAC_STACR_OC; - if (emac_has_feature(dev, EMAC_FTR_HAS_AXON_STACR)) + if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR)) r |= EMACX_STACR_STAC_READ; else r |= EMAC_STACR_STAC_READ; @@ -773,7 +788,7 @@ static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg, r = EMAC_STACR_BASE(dev->opb_bus_freq); if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT)) r |= EMAC_STACR_OC; - if (emac_has_feature(dev, EMAC_FTR_HAS_AXON_STACR)) + if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR)) r |= EMACX_STACR_STAC_WRITE; else r |= EMAC_STACR_STAC_WRITE; @@ -1063,10 +1078,9 @@ static int emac_open(struct net_device *ndev) dev->rx_sg_skb = NULL; mutex_lock(&dev->link_lock); + dev->opened = 1; - /* XXX Start PHY polling now. Shouldn't wr do like sungem instead and - * always poll the PHY even when the iface is down ? That would allow - * things like laptop-net to work. --BenH + /* Start PHY polling now. 
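The newemac changes above gate both reset_work and the link timer on dev->opened, taken under link_lock, so deferred work that fires after close becomes a no-op. A minimal userspace sketch of that guard, with a pthread mutex standing in for the driver's lock and the work body reduced to a print:

#include <pthread.h>
#include <stdio.h>

/* Hypothetical device state: 'opened' is only touched under 'lock'. */
struct emac_dev {
    pthread_mutex_t lock;
    int opened;
};

/* Deferred reset handler: does nothing once the interface is closed. */
static void reset_work(struct emac_dev *dev)
{
    pthread_mutex_lock(&dev->lock);
    if (dev->opened)
        printf("resetting open interface\n");   /* stands in for stop/reset/start */
    pthread_mutex_unlock(&dev->lock);
}

static void emac_close(struct emac_dev *dev)
{
    pthread_mutex_lock(&dev->lock);
    dev->opened = 0;    /* any work item that fires late is now a no-op */
    pthread_mutex_unlock(&dev->lock);
}

int main(void)
{
    struct emac_dev dev = { .opened = 1 };

    pthread_mutex_init(&dev.lock, NULL);
    reset_work(&dev);   /* runs: interface still open */
    emac_close(&dev);
    reset_work(&dev);   /* skipped: interface closed */
    return 0;
}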
*/ if (dev->phy.address >= 0) { int link_poll_interval; @@ -1145,9 +1159,11 @@ static void emac_link_timer(struct work_struct *work) int link_poll_interval; mutex_lock(&dev->link_lock); - DBG2(dev, "link timer" NL); + if (!dev->opened) + goto bail; + if (dev->phy.def->ops->poll_link(&dev->phy)) { if (!netif_carrier_ok(dev->ndev)) { /* Get new link parameters */ @@ -1162,21 +1178,22 @@ static void emac_link_timer(struct work_struct *work) link_poll_interval = PHY_POLL_LINK_ON; } else { if (netif_carrier_ok(dev->ndev)) { - emac_reinitialize(dev); netif_carrier_off(dev->ndev); netif_tx_disable(dev->ndev); + emac_reinitialize(dev); emac_print_link_status(dev); } link_poll_interval = PHY_POLL_LINK_OFF; } schedule_delayed_work(&dev->link_work, link_poll_interval); - + bail: mutex_unlock(&dev->link_lock); } static void emac_force_link_update(struct emac_instance *dev) { netif_carrier_off(dev->ndev); + smp_rmb(); if (dev->link_polling) { cancel_rearming_delayed_work(&dev->link_work); if (dev->link_polling) @@ -1191,11 +1208,14 @@ static int emac_close(struct net_device *ndev) DBG(dev, "close" NL); - if (dev->phy.address >= 0) + if (dev->phy.address >= 0) { + dev->link_polling = 0; cancel_rearming_delayed_work(&dev->link_work); - + } + mutex_lock(&dev->link_lock); emac_netif_stop(dev); - flush_scheduled_work(); + dev->opened = 0; + mutex_unlock(&dev->link_lock); emac_rx_disable(dev); emac_tx_disable(dev); @@ -2427,7 +2447,7 @@ static int __devinit emac_init_config(struct emac_instance *dev) if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0)) dev->tah_ph = 0; if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0)) - dev->tah_ph = 0; + dev->tah_port = 0; if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0)) dev->mdio_ph = 0; if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0)) @@ -2465,16 +2485,19 @@ static int __devinit emac_init_config(struct emac_instance *dev) /* Check EMAC version */ if (of_device_is_compatible(np, "ibm,emac4")) dev->features |= EMAC_FTR_EMAC4; - if (of_device_is_compatible(np, "ibm,emac-axon") - || of_device_is_compatible(np, "ibm,emac-440epx")) - dev->features |= EMAC_FTR_HAS_AXON_STACR - | EMAC_FTR_STACR_OC_INVERT; - if (of_device_is_compatible(np, "ibm,emac-440spe")) + + /* Fixup some feature bits based on the device tree */ + if (of_get_property(np, "has-inverted-stacr-oc", NULL)) dev->features |= EMAC_FTR_STACR_OC_INVERT; + if (of_get_property(np, "has-new-stacr-staopc", NULL)) + dev->features |= EMAC_FTR_HAS_NEW_STACR; - /* Fixup some feature bits based on the device tree and verify - * we have support for them compiled in - */ + /* CAB lacks the appropriate properties */ + if (of_device_is_compatible(np, "ibm,emac-axon")) + dev->features |= EMAC_FTR_HAS_NEW_STACR | + EMAC_FTR_STACR_OC_INVERT; + + /* Enable TAH/ZMII/RGMII features as found */ if (dev->tah_ph != 0) { #ifdef CONFIG_IBM_NEW_EMAC_TAH dev->features |= EMAC_FTR_HAS_TAH; @@ -2532,6 +2555,10 @@ static int __devinit emac_probe(struct of_device *ofdev, struct device_node **blist = NULL; int err, i; + /* Skip unused/unwired EMACS */ + if (of_get_property(np, "unused", NULL)) + return -ENODEV; + /* Find ourselves in the bootlist if we are there */ for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++) if (emac_boot_list[i] == np) @@ -2756,6 +2783,8 @@ static int __devexit emac_remove(struct of_device *ofdev) unregister_netdev(dev->ndev); + flush_scheduled_work(); + if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) tah_detach(dev->tah_dev, dev->tah_port); if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) 
diff --git a/drivers/net/ibm_newemac/core.h b/drivers/net/ibm_newemac/core.h index 4011803117ca..4e74d8287c65 100644 --- a/drivers/net/ibm_newemac/core.h +++ b/drivers/net/ibm_newemac/core.h @@ -3,6 +3,11 @@ * * Driver for PowerPC 4xx on-chip ethernet controller. * + * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. + * <benh@kernel.crashing.org> + * + * Based on the arch/ppc version of the driver: + * * Copyright (c) 2004, 2005 Zultys Technologies. * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> * @@ -258,6 +263,7 @@ struct emac_instance { int stop_timeout; /* in us */ int no_mcast; int mcast_pending; + int opened; struct work_struct reset_work; spinlock_t lock; }; @@ -292,9 +298,9 @@ struct emac_instance { */ #define EMAC_FTR_HAS_RGMII 0x00000020 /* - * Set if we have axon-type STACR + * Set if we have new type STACR with STAOPC */ -#define EMAC_FTR_HAS_AXON_STACR 0x00000040 +#define EMAC_FTR_HAS_NEW_STACR 0x00000040 /* Right now, we don't quite handle the always/possible masks on the @@ -306,7 +312,7 @@ enum { EMAC_FTRS_POSSIBLE = #ifdef CONFIG_IBM_NEW_EMAC_EMAC4 - EMAC_FTR_EMAC4 | EMAC_FTR_HAS_AXON_STACR | + EMAC_FTR_EMAC4 | EMAC_FTR_HAS_NEW_STACR | EMAC_FTR_STACR_OC_INVERT | #endif #ifdef CONFIG_IBM_NEW_EMAC_TAH diff --git a/drivers/net/ibm_newemac/debug.c b/drivers/net/ibm_newemac/debug.c index 170524ee0f19..a2fc660ca5d4 100644 --- a/drivers/net/ibm_newemac/debug.c +++ b/drivers/net/ibm_newemac/debug.c @@ -3,6 +3,11 @@ * * Driver for PowerPC 4xx on-chip ethernet controller, debug print routines. * + * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. + * <benh@kernel.crashing.org> + * + * Based on the arch/ppc version of the driver: + * * Copyright (c) 2004, 2005 Zultys Technologies * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> * diff --git a/drivers/net/ibm_newemac/debug.h b/drivers/net/ibm_newemac/debug.h index 1dd2dcbc157f..b631842ec8d0 100644 --- a/drivers/net/ibm_newemac/debug.h +++ b/drivers/net/ibm_newemac/debug.h @@ -3,6 +3,11 @@ * * Driver for PowerPC 4xx on-chip ethernet controller, debug print routines. * + * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. + * <benh@kernel.crashing.org> + * + * Based on the arch/ppc version of the driver: + * * Copyright (c) 2004, 2005 Zultys Technologies * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> * diff --git a/drivers/net/ibm_newemac/emac.h b/drivers/net/ibm_newemac/emac.h index bef92efeeadc..91cb096ab405 100644 --- a/drivers/net/ibm_newemac/emac.h +++ b/drivers/net/ibm_newemac/emac.h @@ -3,6 +3,11 @@ * * Register definitions for PowerPC 4xx on-chip ethernet contoller * + * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. + * <benh@kernel.crashing.org> + * + * Based on the arch/ppc version of the driver: + * * Copyright (c) 2004, 2005 Zultys Technologies. * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> * diff --git a/drivers/net/ibm_newemac/mal.c b/drivers/net/ibm_newemac/mal.c index 9a88f71db004..6869f08c9dcb 100644 --- a/drivers/net/ibm_newemac/mal.c +++ b/drivers/net/ibm_newemac/mal.c @@ -3,6 +3,11 @@ * * Memory Access Layer (MAL) support * + * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. + * <benh@kernel.crashing.org> + * + * Based on the arch/ppc version of the driver: + * * Copyright (c) 2004, 2005 Zultys Technologies. 
* Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> * diff --git a/drivers/net/ibm_newemac/mal.h b/drivers/net/ibm_newemac/mal.h index 784edb8ea822..eaa7262dc079 100644 --- a/drivers/net/ibm_newemac/mal.h +++ b/drivers/net/ibm_newemac/mal.h @@ -3,6 +3,11 @@ * * Memory Access Layer (MAL) support * + * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. + * <benh@kernel.crashing.org> + * + * Based on the arch/ppc version of the driver: + * * Copyright (c) 2004, 2005 Zultys Technologies. * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> * diff --git a/drivers/net/ibm_newemac/phy.c b/drivers/net/ibm_newemac/phy.c index aa1f0ddf1e3e..37bfeea8788a 100644 --- a/drivers/net/ibm_newemac/phy.c +++ b/drivers/net/ibm_newemac/phy.c @@ -8,6 +8,11 @@ * This file should be shared with other drivers or eventually * merged as the "low level" part of miilib * + * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. + * <benh@kernel.crashing.org> + * + * Based on the arch/ppc version of the driver: + * * (c) 2003, Benjamin Herrenscmidt (benh@kernel.crashing.org) * (c) 2004-2005, Eugene Surovegin <ebs@ebshome.net> * @@ -306,8 +311,84 @@ static struct mii_phy_def cis8201_phy_def = { .ops = &cis8201_phy_ops }; +static struct mii_phy_def bcm5248_phy_def = { + + .phy_id = 0x0143bc00, + .phy_id_mask = 0x0ffffff0, + .name = "BCM5248 10/100 SMII Ethernet", + .ops = &generic_phy_ops +}; + +static int m88e1111_init(struct mii_phy *phy) +{ + pr_debug("%s: Marvell 88E1111 Ethernet\n", __FUNCTION__); + phy_write(phy, 0x14, 0x0ce3); + phy_write(phy, 0x18, 0x4101); + phy_write(phy, 0x09, 0x0e00); + phy_write(phy, 0x04, 0x01e1); + phy_write(phy, 0x00, 0x9140); + phy_write(phy, 0x00, 0x1140); + + return 0; +} + +static int et1011c_init(struct mii_phy *phy) +{ + u16 reg_short; + + reg_short = (u16)(phy_read(phy, 0x16)); + reg_short &= ~(0x7); + reg_short |= 0x6; /* RGMII Trace Delay*/ + phy_write(phy, 0x16, reg_short); + + reg_short = (u16)(phy_read(phy, 0x17)); + reg_short &= ~(0x40); + phy_write(phy, 0x17, reg_short); + + phy_write(phy, 0x1c, 0x74f0); + return 0; +} + +static struct mii_phy_ops et1011c_phy_ops = { + .init = et1011c_init, + .setup_aneg = genmii_setup_aneg, + .setup_forced = genmii_setup_forced, + .poll_link = genmii_poll_link, + .read_link = genmii_read_link +}; + +static struct mii_phy_def et1011c_phy_def = { + .phy_id = 0x0282f000, + .phy_id_mask = 0x0fffff00, + .name = "ET1011C Gigabit Ethernet", + .ops = &et1011c_phy_ops +}; + + + + + +static struct mii_phy_ops m88e1111_phy_ops = { + .init = m88e1111_init, + .setup_aneg = genmii_setup_aneg, + .setup_forced = genmii_setup_forced, + .poll_link = genmii_poll_link, + .read_link = genmii_read_link +}; + +static struct mii_phy_def m88e1111_phy_def = { + + .phy_id = 0x01410CC0, + .phy_id_mask = 0x0ffffff0, + .name = "Marvell 88E1111 Ethernet", + .ops = &m88e1111_phy_ops, +}; + static struct mii_phy_def *mii_phy_table[] = { + &et1011c_phy_def, &cis8201_phy_def, + &bcm5248_phy_def, + &m88e1111_phy_def, &genmii_phy_def, NULL }; diff --git a/drivers/net/ibm_newemac/phy.h b/drivers/net/ibm_newemac/phy.h index 6feca26afedb..1b65c81f6557 100644 --- a/drivers/net/ibm_newemac/phy.h +++ b/drivers/net/ibm_newemac/phy.h @@ -3,6 +3,11 @@ * * Driver for PowerPC 4xx on-chip ethernet controller, PHY support * + * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. 
+ * <benh@kernel.crashing.org> + * + * Based on the arch/ppc version of the driver: + * * Benjamin Herrenschmidt <benh@kernel.crashing.org> * February 2003 * diff --git a/drivers/net/ibm_newemac/rgmii.c b/drivers/net/ibm_newemac/rgmii.c index de416951a435..9bc1132fa788 100644 --- a/drivers/net/ibm_newemac/rgmii.c +++ b/drivers/net/ibm_newemac/rgmii.c @@ -3,6 +3,11 @@ * * Driver for PowerPC 4xx on-chip ethernet controller, RGMII bridge support. * + * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. + * <benh@kernel.crashing.org> + * + * Based on the arch/ppc version of the driver: + * * Copyright (c) 2004, 2005 Zultys Technologies. * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> * @@ -140,7 +145,7 @@ void rgmii_get_mdio(struct of_device *ofdev, int input) RGMII_DBG2(dev, "get_mdio(%d)" NL, input); - if (dev->type != RGMII_AXON) + if (!(dev->flags & EMAC_RGMII_FLAG_HAS_MDIO)) return; mutex_lock(&dev->lock); @@ -161,7 +166,7 @@ void rgmii_put_mdio(struct of_device *ofdev, int input) RGMII_DBG2(dev, "put_mdio(%d)" NL, input); - if (dev->type != RGMII_AXON) + if (!(dev->flags & EMAC_RGMII_FLAG_HAS_MDIO)) return; fer = in_be32(&p->fer); @@ -250,11 +255,13 @@ static int __devinit rgmii_probe(struct of_device *ofdev, goto err_free; } - /* Check for RGMII type */ + /* Check for RGMII flags */ + if (of_get_property(ofdev->node, "has-mdio", NULL)) + dev->flags |= EMAC_RGMII_FLAG_HAS_MDIO; + + /* CAB lacks the right properties, fix this up */ if (of_device_is_compatible(ofdev->node, "ibm,rgmii-axon")) - dev->type = RGMII_AXON; - else - dev->type = RGMII_STANDARD; + dev->flags |= EMAC_RGMII_FLAG_HAS_MDIO; DBG2(dev, " Boot FER = 0x%08x, SSR = 0x%08x\n", in_be32(&dev->base->fer), in_be32(&dev->base->ssr)); @@ -263,9 +270,9 @@ static int __devinit rgmii_probe(struct of_device *ofdev, out_be32(&dev->base->fer, 0); printk(KERN_INFO - "RGMII %s %s initialized\n", - dev->type == RGMII_STANDARD ? "standard" : "axon", - ofdev->node->full_name); + "RGMII %s initialized with%s MDIO support\n", + ofdev->node->full_name, + (dev->flags & EMAC_RGMII_FLAG_HAS_MDIO) ? "" : "out"); wmb(); dev_set_drvdata(&ofdev->dev, dev); diff --git a/drivers/net/ibm_newemac/rgmii.h b/drivers/net/ibm_newemac/rgmii.h index 57806833121e..c4a4b358a270 100644 --- a/drivers/net/ibm_newemac/rgmii.h +++ b/drivers/net/ibm_newemac/rgmii.h @@ -3,6 +3,11 @@ * * Driver for PowerPC 4xx on-chip ethernet controller, RGMII bridge support. * + * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. + * <benh@kernel.crashing.org> + * + * Based on the arch/ppc version of the driver: + * * Based on ocp_zmii.h/ibm_emac_zmii.h * Armin Kuster akuster@mvista.com * @@ -35,8 +40,9 @@ struct rgmii_regs { struct rgmii_instance { struct rgmii_regs __iomem *base; - /* Type of RGMII bridge */ - int type; + /* RGMII bridge flags */ + int flags; +#define EMAC_RGMII_FLAG_HAS_MDIO 0x00000001 /* Only one EMAC whacks us at a time */ struct mutex lock; diff --git a/drivers/net/ibm_newemac/tah.c b/drivers/net/ibm_newemac/tah.c index f161fb100e8e..96417adec326 100644 --- a/drivers/net/ibm_newemac/tah.c +++ b/drivers/net/ibm_newemac/tah.c @@ -3,6 +3,11 @@ * * Driver for PowerPC 4xx on-chip ethernet controller, TAH support. * + * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. + * <benh@kernel.crashing.org> + * + * Based on the arch/ppc version of the driver: + * * Copyright 2004 MontaVista Software, Inc. 
* Matt Porter <mporter@kernel.crashing.org> * @@ -116,13 +121,14 @@ static int __devinit tah_probe(struct of_device *ofdev, goto err_free; } + dev_set_drvdata(&ofdev->dev, dev); + /* Initialize TAH and enable IPv4 checksum verification, no TSO yet */ tah_reset(ofdev); printk(KERN_INFO "TAH %s initialized\n", ofdev->node->full_name); wmb(); - dev_set_drvdata(&ofdev->dev, dev); return 0; diff --git a/drivers/net/ibm_newemac/tah.h b/drivers/net/ibm_newemac/tah.h index bc41853b6e26..a068b5658dad 100644 --- a/drivers/net/ibm_newemac/tah.h +++ b/drivers/net/ibm_newemac/tah.h @@ -3,6 +3,11 @@ * * Driver for PowerPC 4xx on-chip ethernet controller, TAH support. * + * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. + * <benh@kernel.crashing.org> + * + * Based on the arch/ppc version of the driver: + * * Copyright 2004 MontaVista Software, Inc. * Matt Porter <mporter@kernel.crashing.org> * diff --git a/drivers/net/ibm_newemac/zmii.c b/drivers/net/ibm_newemac/zmii.c index 2219ec2740e0..2ea472aeab06 100644 --- a/drivers/net/ibm_newemac/zmii.c +++ b/drivers/net/ibm_newemac/zmii.c @@ -3,6 +3,11 @@ * * Driver for PowerPC 4xx on-chip ethernet controller, ZMII bridge support. * + * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. + * <benh@kernel.crashing.org> + * + * Based on the arch/ppc version of the driver: + * * Copyright (c) 2004, 2005 Zultys Technologies. * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> * @@ -83,12 +88,14 @@ int __devinit zmii_attach(struct of_device *ofdev, int input, int *mode) ZMII_DBG(dev, "init(%d, %d)" NL, input, *mode); - if (!zmii_valid_mode(*mode)) + if (!zmii_valid_mode(*mode)) { /* Probably an EMAC connected to RGMII, * but it still may need ZMII for MDIO so * we don't fail here. */ + dev->users++; return 0; + } mutex_lock(&dev->lock); diff --git a/drivers/net/ibm_newemac/zmii.h b/drivers/net/ibm_newemac/zmii.h index 82a9968b1f74..6c9beba0c4b6 100644 --- a/drivers/net/ibm_newemac/zmii.h +++ b/drivers/net/ibm_newemac/zmii.h @@ -3,6 +3,11 @@ * * Driver for PowerPC 4xx on-chip ethernet controller, ZMII bridge support. * + * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. + * <benh@kernel.crashing.org> + * + * Based on the arch/ppc version of the driver: + * * Copyright (c) 2004, 2005 Zultys Technologies. * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> * diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c index 9a855e512147..b59f442bbf36 100644 --- a/drivers/net/lib82596.c +++ b/drivers/net/lib82596.c @@ -176,8 +176,8 @@ struct i596_reg { struct i596_tbd { unsigned short size; unsigned short pad; - dma_addr_t next; - dma_addr_t data; + u32 next; + u32 data; u32 cache_pad[5]; /* Total 32 bytes... 
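The lib82596 hunk above switches the descriptor fields from dma_addr_t to u32 because these structures are read directly by the 82596, which expects 32-bit bus addresses no matter how wide the host's dma_addr_t is. A sketch of that rule with a compile-time size check; the layout echoes the hardware part of the i596_rfd shown above but is only illustrative:

#include <stdio.h>
#include <stdint.h>

/*
 * Hardware-visible descriptor header: fixed-width types only, so the
 * layout is identical on 32-bit and 64-bit hosts.
 */
struct i596_rfd_hw {
    uint16_t stat;
    uint16_t cmd;
    uint32_t b_next;    /* bus address of next RFD, from the chip's view */
    uint32_t rbd;       /* bus address of the first RBD */
    uint16_t count;
    uint16_t size;
};

/* Compile-time check: the hardware-visible header must stay 16 bytes. */
typedef char i596_rfd_hw_size_check[(sizeof(struct i596_rfd_hw) == 16) ? 1 : -1];

int main(void)
{
    printf("i596_rfd_hw is %zu bytes\n", sizeof(struct i596_rfd_hw));
    return 0;
}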
*/ }; @@ -195,12 +195,12 @@ struct i596_cmd { struct i596_cmd *v_next; /* Address from CPUs viewpoint */ unsigned short status; unsigned short command; - dma_addr_t b_next; /* Address from i596 viewpoint */ + u32 b_next; /* Address from i596 viewpoint */ }; struct tx_cmd { struct i596_cmd cmd; - dma_addr_t tbd; + u32 tbd; unsigned short size; unsigned short pad; struct sk_buff *skb; /* So we can free it after tx */ @@ -237,8 +237,8 @@ struct cf_cmd { struct i596_rfd { unsigned short stat; unsigned short cmd; - dma_addr_t b_next; /* Address from i596 viewpoint */ - dma_addr_t rbd; + u32 b_next; /* Address from i596 viewpoint */ + u32 rbd; unsigned short count; unsigned short size; struct i596_rfd *v_next; /* Address from CPUs viewpoint */ @@ -249,18 +249,18 @@ struct i596_rfd { }; struct i596_rbd { - /* hardware data */ - unsigned short count; - unsigned short zero1; - dma_addr_t b_next; - dma_addr_t b_data; /* Address from i596 viewpoint */ - unsigned short size; - unsigned short zero2; - /* driver data */ - struct sk_buff *skb; - struct i596_rbd *v_next; - dma_addr_t b_addr; /* This rbd addr from i596 view */ - unsigned char *v_data; /* Address from CPUs viewpoint */ + /* hardware data */ + unsigned short count; + unsigned short zero1; + u32 b_next; + u32 b_data; /* Address from i596 viewpoint */ + unsigned short size; + unsigned short zero2; + /* driver data */ + struct sk_buff *skb; + struct i596_rbd *v_next; + u32 b_addr; /* This rbd addr from i596 view */ + unsigned char *v_data; /* Address from CPUs viewpoint */ /* Total 32 bytes... */ #ifdef __LP64__ u32 cache_pad[4]; @@ -275,8 +275,8 @@ struct i596_rbd { struct i596_scb { unsigned short status; unsigned short command; - dma_addr_t cmd; - dma_addr_t rfd; + u32 cmd; + u32 rfd; u32 crc_err; u32 align_err; u32 resource_err; @@ -288,14 +288,14 @@ struct i596_scb { }; struct i596_iscp { - u32 stat; - dma_addr_t scb; + u32 stat; + u32 scb; }; struct i596_scp { - u32 sysbus; - u32 pad; - dma_addr_t iscp; + u32 sysbus; + u32 pad; + u32 iscp; }; struct i596_dma { diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index 0f306ddb5630..8def8657251f 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c @@ -1979,6 +1979,7 @@ static int myri10ge_open(struct net_device *dev) lro_mgr->lro_arr = mgp->rx_done.lro_desc; lro_mgr->get_frag_header = myri10ge_get_frag_header; lro_mgr->max_aggr = myri10ge_lro_max_pkts; + lro_mgr->frag_align_pad = 2; if (lro_mgr->max_aggr > MAX_SKB_FRAGS) lro_mgr->max_aggr = MAX_SKB_FRAGS; diff --git a/drivers/net/niu.c b/drivers/net/niu.c index 112ab079ce7d..abfc61c3a38c 100644 --- a/drivers/net/niu.c +++ b/drivers/net/niu.c @@ -1045,6 +1045,7 @@ static int niu_serdes_init(struct niu *np) } static void niu_init_xif(struct niu *); +static void niu_handle_led(struct niu *, int status); static int niu_link_status_common(struct niu *np, int link_up) { @@ -1066,11 +1067,15 @@ static int niu_link_status_common(struct niu *np, int link_up) spin_lock_irqsave(&np->lock, flags); niu_init_xif(np); + niu_handle_led(np, 1); spin_unlock_irqrestore(&np->lock, flags); netif_carrier_on(dev); } else if (netif_carrier_ok(dev) && !link_up) { niuwarn(LINK, "%s: Link is down\n", dev->name); + spin_lock_irqsave(&np->lock, flags); + niu_handle_led(np, 0); + spin_unlock_irqrestore(&np->lock, flags); netif_carrier_off(dev); } @@ -3915,16 +3920,14 @@ static int niu_init_ipp(struct niu *np) return 0; } -static void niu_init_xif_xmac(struct niu *np) +static void niu_handle_led(struct niu *np, int 
status) { - struct niu_link_config *lp = &np->link_config; u64 val; - val = nr64_mac(XMAC_CONFIG); if ((np->flags & NIU_FLAGS_10G) != 0 && (np->flags & NIU_FLAGS_FIBER) != 0) { - if (netif_carrier_ok(np->dev)) { + if (status) { val |= XMAC_CONFIG_LED_POLARITY; val &= ~XMAC_CONFIG_FORCE_LED_ON; } else { @@ -3933,6 +3936,15 @@ static void niu_init_xif_xmac(struct niu *np) } } + nw64_mac(XMAC_CONFIG, val); +} + +static void niu_init_xif_xmac(struct niu *np) +{ + struct niu_link_config *lp = &np->link_config; + u64 val; + + val = nr64_mac(XMAC_CONFIG); val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC; val |= XMAC_CONFIG_TX_OUTPUT_EN; @@ -4776,6 +4788,8 @@ static int niu_close(struct net_device *dev) niu_free_channels(np); + niu_handle_led(np, 0); + return 0; } diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c index 09b4fde8d924..816a59e801b2 100644 --- a/drivers/net/pasemi_mac.c +++ b/drivers/net/pasemi_mac.c @@ -586,7 +586,7 @@ static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit) /* CRC error flagged */ mac->netdev->stats.rx_errors++; mac->netdev->stats.rx_crc_errors++; - dev_kfree_skb_irq(skb); + /* No need to free skb, it'll be reused */ goto next; } @@ -1362,7 +1362,7 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netif_napi_add(dev, &mac->napi, pasemi_mac_poll, 64); - dev->features = NETIF_F_HW_CSUM | NETIF_F_LLTX | NETIF_F_SG; + dev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX | NETIF_F_SG; /* These should come out of the device tree eventually */ mac->dma_txch = index; diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 035fd41fb61f..f0574073a2a3 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -143,21 +143,29 @@ static int m88e1111_config_init(struct phy_device *phydev) int err; if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) || - (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)) { + (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) || + (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) || + (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) { int temp; - if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) { - temp = phy_read(phydev, MII_M1111_PHY_EXT_CR); - if (temp < 0) - return temp; + temp = phy_read(phydev, MII_M1111_PHY_EXT_CR); + if (temp < 0) + return temp; + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) { temp |= (MII_M1111_RX_DELAY | MII_M1111_TX_DELAY); - - err = phy_write(phydev, MII_M1111_PHY_EXT_CR, temp); - if (err < 0) - return err; + } else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) { + temp &= ~MII_M1111_TX_DELAY; + temp |= MII_M1111_RX_DELAY; + } else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) { + temp &= ~MII_M1111_RX_DELAY; + temp |= MII_M1111_TX_DELAY; } + err = phy_write(phydev, MII_M1111_PHY_EXT_CR, temp); + if (err < 0) + return err; + temp = phy_read(phydev, MII_M1111_PHY_EXT_SR); if (temp < 0) return temp; diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index fc2f0e695a13..c30196d0ad16 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -91,9 +91,12 @@ int mdiobus_register(struct mii_bus *bus) err = device_register(&phydev->dev); - if (err) + if (err) { printk(KERN_ERR "phy %d failed to register\n", i); + phy_device_free(phydev); + phydev = NULL; + } } bus->phy_map[i] = phydev; @@ -110,10 +113,8 @@ void mdiobus_unregister(struct mii_bus *bus) int i; for (i = 0; i < PHY_MAX_ADDR; i++) { - if (bus->phy_map[i]) { + if (bus->phy_map[i]) device_unregister(&bus->phy_map[i]->dev); - 
kfree(bus->phy_map[i]); - } } } EXPORT_SYMBOL(mdiobus_unregister); diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 9bc11773705b..7c9e6e349503 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -406,6 +406,9 @@ int phy_mii_ioctl(struct phy_device *phydev, && phydev->drv->config_init) phydev->drv->config_init(phydev); break; + + default: + return -ENOTTY; } return 0; diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index f6e484812a98..5b9e1751e1b4 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -44,6 +44,16 @@ static struct phy_driver genphy_driver; extern int mdio_bus_init(void); extern void mdio_bus_exit(void); +void phy_device_free(struct phy_device *phydev) +{ + kfree(phydev); +} + +static void phy_device_release(struct device *dev) +{ + phy_device_free(to_phy_device(dev)); +} + struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id) { struct phy_device *dev; @@ -54,6 +64,8 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id) if (NULL == dev) return (struct phy_device*) PTR_ERR((void*)-ENOMEM); + dev->dev.release = phy_device_release; + dev->speed = 0; dev->duplex = -1; dev->pause = dev->asym_pause = 0; diff --git a/drivers/net/s2io-regs.h b/drivers/net/s2io-regs.h index 01f08d726ace..f25264f2638e 100644 --- a/drivers/net/s2io-regs.h +++ b/drivers/net/s2io-regs.h @@ -66,6 +66,7 @@ struct XENA_dev_config { #define ADAPTER_STATUS_RC_PRC_QUIESCENT vBIT(0xFF,16,8) #define ADAPTER_STATUS_MC_DRAM_READY s2BIT(24) #define ADAPTER_STATUS_MC_QUEUES_READY s2BIT(25) +#define ADAPTER_STATUS_RIC_RUNNING s2BIT(26) #define ADAPTER_STATUS_M_PLL_LOCK s2BIT(30) #define ADAPTER_STATUS_P_PLL_LOCK s2BIT(31) diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index 632666706247..121cb100f93a 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c @@ -84,7 +84,7 @@ #include "s2io.h" #include "s2io-regs.h" -#define DRV_VERSION "2.0.26.6" +#define DRV_VERSION "2.0.26.10" /* S2io Driver name & version. */ static char s2io_driver_name[] = "Neterion"; @@ -1081,7 +1081,7 @@ static int init_nic(struct s2io_nic *nic) /* to set the swapper controle on the card */ if(s2io_set_swapper(nic)) { DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n"); - return -1; + return -EIO; } /* @@ -1100,6 +1100,20 @@ static int init_nic(struct s2io_nic *nic) msleep(500); val64 = readq(&bar0->sw_reset); + /* Ensure that it's safe to access registers by checking + * RIC_RUNNING bit is reset. Check is valid only for XframeII. 
+ */ + if (nic->device_type == XFRAME_II_DEVICE) { + for (i = 0; i < 50; i++) { + val64 = readq(&bar0->adapter_status); + if (!(val64 & ADAPTER_STATUS_RIC_RUNNING)) + break; + msleep(10); + } + if (i == 50) + return -ENODEV; + } + /* Enable Receiving broadcasts */ add = &bar0->mac_cfg; val64 = readq(&bar0->mac_cfg); @@ -1503,7 +1517,7 @@ static int init_nic(struct s2io_nic *nic) DBG_PRINT(ERR_DBG, "%s: failed rts ds steering", dev->name); DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i); - return FAILURE; + return -ENODEV; } } @@ -1570,7 +1584,7 @@ static int init_nic(struct s2io_nic *nic) if (time > 10) { DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n", dev->name); - return -1; + return -ENODEV; } msleep(50); time++; @@ -1623,7 +1637,7 @@ static int init_nic(struct s2io_nic *nic) if (time > 10) { DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n", dev->name); - return -1; + return -ENODEV; } time++; msleep(50); @@ -3914,6 +3928,12 @@ static int s2io_close(struct net_device *dev) { struct s2io_nic *sp = dev->priv; + /* Return if the device is already closed * + * Can happen when s2io_card_up failed in change_mtu * + */ + if (!is_s2io_card_up(sp)) + return 0; + netif_stop_queue(dev); napi_disable(&sp->napi); /* Reset card, kill tasklet and free Tx and Rx buffers. */ @@ -6355,6 +6375,7 @@ static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) static int s2io_change_mtu(struct net_device *dev, int new_mtu) { struct s2io_nic *sp = dev->priv; + int ret = 0; if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) { DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n", @@ -6366,9 +6387,11 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu) if (netif_running(dev)) { s2io_card_down(sp); netif_stop_queue(dev); - if (s2io_card_up(sp)) { + ret = s2io_card_up(sp); + if (ret) { DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", __FUNCTION__); + return ret; } if (netif_queue_stopped(dev)) netif_wake_queue(dev); @@ -6379,7 +6402,7 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu) writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len); } - return 0; + return ret; } /** @@ -6777,6 +6800,9 @@ static void do_s2io_card_down(struct s2io_nic * sp, int do_io) unsigned long flags; register u64 val64 = 0; + if (!is_s2io_card_up(sp)) + return; + del_timer_sync(&sp->alarm_timer); /* If s2io_set_link task is executing, wait till it completes. 
*/ while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state))) { @@ -6850,11 +6876,13 @@ static int s2io_card_up(struct s2io_nic * sp) u16 interruptible; /* Initialize the H/W I/O registers */ - if (init_nic(sp) != 0) { + ret = init_nic(sp); + if (ret != 0) { DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n", dev->name); - s2io_reset(sp); - return -ENODEV; + if (ret != -EIO) + s2io_reset(sp); + return ret; } /* diff --git a/drivers/net/skge.c b/drivers/net/skge.c index 6d62250fba07..186eb8ebfda6 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c @@ -44,7 +44,7 @@ #include "skge.h" #define DRV_NAME "skge" -#define DRV_VERSION "1.12" +#define DRV_VERSION "1.13" #define PFX DRV_NAME " " #define DEFAULT_TX_RING_SIZE 128 @@ -1095,16 +1095,9 @@ static void xm_link_down(struct skge_hw *hw, int port) { struct net_device *dev = hw->dev[port]; struct skge_port *skge = netdev_priv(dev); - u16 cmd = xm_read16(hw, port, XM_MMU_CMD); xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE); - cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX); - xm_write16(hw, port, XM_MMU_CMD, cmd); - - /* dummy read to ensure writing */ - xm_read16(hw, port, XM_MMU_CMD); - if (netif_carrier_ok(dev)) skge_link_down(skge); } @@ -1194,6 +1187,7 @@ static void genesis_init(struct skge_hw *hw) static void genesis_reset(struct skge_hw *hw, int port) { const u8 zero[8] = { 0 }; + u32 reg; skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0); @@ -1209,6 +1203,11 @@ static void genesis_reset(struct skge_hw *hw, int port) xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff); xm_outhash(hw, port, XM_HSM, zero); + + /* Flush TX and RX fifo */ + reg = xm_read32(hw, port, XM_MODE); + xm_write32(hw, port, XM_MODE, reg | XM_MD_FTF); + xm_write32(hw, port, XM_MODE, reg | XM_MD_FRF); } @@ -1634,15 +1633,14 @@ static void genesis_mac_init(struct skge_hw *hw, int port) } xm_write16(hw, port, XM_RX_CMD, r); - /* We want short frames padded to 60 bytes. */ xm_write16(hw, port, XM_TX_CMD, XM_TX_AUTO_PAD); - /* - * Bump up the transmit threshold. This helps hold off transmit - * underruns when we're blasting traffic from both ports at once. - */ - xm_write16(hw, port, XM_TX_THR, 512); + /* Increase threshold for jumbo frames on dual port */ + if (hw->ports > 1 && jumbo) + xm_write16(hw, port, XM_TX_THR, 1020); + else + xm_write16(hw, port, XM_TX_THR, 512); /* * Enable the reception of all error frames. This is is @@ -1713,7 +1711,13 @@ static void genesis_stop(struct skge_port *skge) { struct skge_hw *hw = skge->hw; int port = skge->port; - u32 reg; + unsigned retries = 1000; + u16 cmd; + + /* Disable Tx and Rx */ + cmd = xm_read16(hw, port, XM_MMU_CMD); + cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX); + xm_write16(hw, port, XM_MMU_CMD, cmd); genesis_reset(hw, port); @@ -1721,20 +1725,17 @@ static void genesis_stop(struct skge_port *skge) skge_write16(hw, B3_PA_CTRL, port == 0 ? PA_CLR_TO_TX1 : PA_CLR_TO_TX2); - /* - * If the transfer sticks at the MAC the STOP command will not - * terminate if we don't flush the XMAC's transmit FIFO ! 
- */ - xm_write32(hw, port, XM_MODE, - xm_read32(hw, port, XM_MODE)|XM_MD_FTF); - - /* Reset the MAC */ - skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST); + skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST); + do { + skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST); + if (!(skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST)) + break; + } while (--retries > 0); /* For external PHYs there must be special handling */ if (hw->phy_type != SK_PHY_XMAC) { - reg = skge_read32(hw, B2_GP_IO); + u32 reg = skge_read32(hw, B2_GP_IO); if (port == 0) { reg |= GP_DIR_0; reg &= ~GP_IO_0; @@ -1801,11 +1802,6 @@ static void genesis_mac_intr(struct skge_hw *hw, int port) xm_write32(hw, port, XM_MODE, XM_MD_FTF); ++dev->stats.tx_fifo_errors; } - - if (status & XM_IS_RXF_OV) { - xm_write32(hw, port, XM_MODE, XM_MD_FRF); - ++dev->stats.rx_fifo_errors; - } } static void genesis_link_up(struct skge_port *skge) @@ -1862,9 +1858,9 @@ static void genesis_link_up(struct skge_port *skge) xm_write32(hw, port, XM_MODE, mode); - /* Turn on detection of Tx underrun, Rx overrun */ + /* Turn on detection of Tx underrun */ msk = xm_read16(hw, port, XM_IMSK); - msk &= ~(XM_IS_RXF_OV | XM_IS_TXF_UR); + msk &= ~XM_IS_TXF_UR; xm_write16(hw, port, XM_IMSK, msk); xm_read16(hw, port, XM_ISRC); @@ -2194,9 +2190,12 @@ static void yukon_mac_init(struct skge_hw *hw, int port) TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) | TX_IPG_JAM_DATA(TX_IPG_JAM_DEF)); - /* serial mode register */ - reg = GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF); - if (hw->dev[port]->mtu > 1500) + /* configure the Serial Mode Register */ + reg = DATA_BLIND_VAL(DATA_BLIND_DEF) + | GM_SMOD_VLAN_ENA + | IPG_DATA_VAL(IPG_DATA_DEF); + + if (hw->dev[port]->mtu > ETH_DATA_LEN) reg |= GM_SMOD_JUMBO_ENA; gma_write16(hw, port, GM_SERIAL_MODE, reg); @@ -2619,8 +2618,8 @@ static int skge_up(struct net_device *dev) yukon_mac_init(hw, port); spin_unlock_bh(&hw->phy_lock); - /* Configure RAMbuffers */ - chunk = hw->ram_size / ((hw->ports + 1)*2); + /* Configure RAMbuffers - equally between ports and tx/rx */ + chunk = (hw->ram_size - hw->ram_offset) / (hw->ports * 2); ram_addr = hw->ram_offset + 2 * chunk * port; skge_ramset(hw, rxqaddr[port], ram_addr, chunk); @@ -2897,11 +2896,7 @@ static void skge_tx_timeout(struct net_device *dev) static int skge_change_mtu(struct net_device *dev, int new_mtu) { - struct skge_port *skge = netdev_priv(dev); - struct skge_hw *hw = skge->hw; - int port = skge->port; int err; - u16 ctl, reg; if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) return -EINVAL; @@ -2911,40 +2906,13 @@ static int skge_change_mtu(struct net_device *dev, int new_mtu) return 0; } - skge_write32(hw, B0_IMSK, 0); - dev->trans_start = jiffies; /* prevent tx timeout */ - netif_stop_queue(dev); - napi_disable(&skge->napi); - - ctl = gma_read16(hw, port, GM_GP_CTRL); - gma_write16(hw, port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA); - - skge_rx_clean(skge); - skge_rx_stop(hw, port); + skge_down(dev); dev->mtu = new_mtu; - reg = GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF); - if (new_mtu > 1500) - reg |= GM_SMOD_JUMBO_ENA; - gma_write16(hw, port, GM_SERIAL_MODE, reg); - - skge_write8(hw, RB_ADDR(rxqaddr[port], RB_CTRL), RB_ENA_OP_MD); - - err = skge_rx_fill(dev); - wmb(); - if (!err) - skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F); - skge_write32(hw, B0_IMSK, hw->intr_mask); - + err = skge_up(dev); if (err) dev_close(dev); - else { - gma_write16(hw, port, GM_GP_CTRL, ctl); - - napi_enable(&skge->napi); - 
netif_wake_queue(dev); - } return err; } diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index a2070db725c9..6197afb3ed83 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -31,7 +31,6 @@ #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/pci.h> -#include <linux/aer.h> #include <linux/ip.h> #include <net/ip.h> #include <linux/tcp.h> @@ -240,22 +239,21 @@ static void sky2_power_on(struct sky2_hw *hw) sky2_write8(hw, B2_Y2_CLK_GATE, 0); if (hw->flags & SKY2_HW_ADV_POWER_CTL) { - struct pci_dev *pdev = hw->pdev; u32 reg; - pci_write_config_dword(pdev, PCI_DEV_REG3, 0); + sky2_pci_write32(hw, PCI_DEV_REG3, 0); - pci_read_config_dword(pdev, PCI_DEV_REG4, &reg); + reg = sky2_pci_read32(hw, PCI_DEV_REG4); /* set all bits to 0 except bits 15..12 and 8 */ reg &= P_ASPM_CONTROL_MSK; - pci_write_config_dword(pdev, PCI_DEV_REG4, reg); + sky2_pci_write32(hw, PCI_DEV_REG4, reg); - pci_read_config_dword(pdev, PCI_DEV_REG5, &reg); + reg = sky2_pci_read32(hw, PCI_DEV_REG5); /* set all bits to 0 except bits 28 & 27 */ reg &= P_CTL_TIM_VMAIN_AV_MSK; - pci_write_config_dword(pdev, PCI_DEV_REG5, reg); + sky2_pci_write32(hw, PCI_DEV_REG5, reg); - pci_write_config_dword(pdev, PCI_CFG_REG_1, 0); + sky2_pci_write32(hw, PCI_CFG_REG_1, 0); /* Enable workaround for dev 4.107 on Yukon-Ultra & Extreme */ reg = sky2_read32(hw, B2_GP_IO); @@ -619,12 +617,11 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) static void sky2_phy_power(struct sky2_hw *hw, unsigned port, int onoff) { - struct pci_dev *pdev = hw->pdev; u32 reg1; static const u32 phy_power[] = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD }; static const u32 coma_mode[] = { PCI_Y2_PHY1_COMA, PCI_Y2_PHY2_COMA }; - pci_read_config_dword(pdev, PCI_DEV_REG1, &reg1); + reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); /* Turn on/off phy power saving */ if (onoff) reg1 &= ~phy_power[port]; @@ -634,8 +631,8 @@ static void sky2_phy_power(struct sky2_hw *hw, unsigned port, int onoff) if (onoff && hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) reg1 |= coma_mode[port]; - pci_write_config_dword(pdev, PCI_DEV_REG1, reg1); - pci_read_config_dword(pdev, PCI_DEV_REG1, &reg1); + sky2_pci_write32(hw, PCI_DEV_REG1, reg1); + reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); udelay(100); } @@ -704,9 +701,9 @@ static void sky2_wol_init(struct sky2_port *sky2) sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl); /* Turn on legacy PCI-Express PME mode */ - pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg1); + reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); reg1 |= PCI_Y2_PME_LEGACY; - pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg1); + sky2_pci_write32(hw, PCI_DEV_REG1, reg1); /* block receiver */ sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); @@ -848,6 +845,13 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port) sky2_set_tx_stfwd(hw, port); } + if (hw->chip_id == CHIP_ID_YUKON_FE_P && + hw->chip_rev == CHIP_REV_YU_FE2_A0) { + /* disable dynamic watermark */ + reg = sky2_read16(hw, SK_REG(port, TX_GMF_EA)); + reg &= ~TX_DYN_WM_ENA; + sky2_write16(hw, SK_REG(port, TX_GMF_EA), reg); + } } /* Assign Ram Buffer allocation to queue */ @@ -1320,15 +1324,12 @@ static int sky2_up(struct net_device *dev) */ if (otherdev && netif_running(otherdev) && (cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX))) { - struct sky2_port *osky2 = netdev_priv(otherdev); u16 cmd; - pci_read_config_word(hw->pdev, cap + PCI_X_CMD, &cmd); + cmd = sky2_pci_read16(hw, cap + PCI_X_CMD); cmd &= ~PCI_X_CMD_MAX_SPLIT; - pci_write_config_word(hw->pdev, cap + PCI_X_CMD, cmd); 
+ sky2_pci_write16(hw, cap + PCI_X_CMD, cmd); - sky2->rx_csum = 0; - osky2->rx_csum = 0; } if (netif_msg_ifup(sky2)) @@ -2426,37 +2427,26 @@ static void sky2_hw_intr(struct sky2_hw *hw) if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) { u16 pci_err; - pci_read_config_word(pdev, PCI_STATUS, &pci_err); + pci_err = sky2_pci_read16(hw, PCI_STATUS); if (net_ratelimit()) dev_err(&pdev->dev, "PCI hardware error (0x%x)\n", pci_err); - pci_write_config_word(pdev, PCI_STATUS, + sky2_pci_write16(hw, PCI_STATUS, pci_err | PCI_STATUS_ERROR_BITS); } if (status & Y2_IS_PCI_EXP) { /* PCI-Express uncorrectable Error occurred */ - int aer = pci_find_aer_capability(hw->pdev); u32 err; - if (aer) { - pci_read_config_dword(pdev, aer + PCI_ERR_UNCOR_STATUS, - &err); - pci_cleanup_aer_uncorrect_error_status(pdev); - } else { - /* Either AER not configured, or not working - * because of bad MMCONFIG, so just do recover - * manually. - */ - err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); - sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS, - 0xfffffffful); - } - + err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); + sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS, + 0xfffffffful); if (net_ratelimit()) dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err); + sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); } if (status & Y2_HWE_L1_MASK) @@ -2703,13 +2693,10 @@ static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk) static int __devinit sky2_init(struct sky2_hw *hw) { - int rc; u8 t8; /* Enable all clocks and check for bad PCI access */ - rc = pci_write_config_dword(hw->pdev, PCI_DEV_REG3, 0); - if (rc) - return rc; + sky2_pci_write32(hw, PCI_DEV_REG3, 0); sky2_write8(hw, B0_CTST, CS_RST_CLR); @@ -2806,32 +2793,21 @@ static void sky2_reset(struct sky2_hw *hw) sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); /* clear PCI errors, if any */ - pci_read_config_word(pdev, PCI_STATUS, &status); + status = sky2_pci_read16(hw, PCI_STATUS); status |= PCI_STATUS_ERROR_BITS; - pci_write_config_word(pdev, PCI_STATUS, status); + sky2_pci_write16(hw, PCI_STATUS, status); sky2_write8(hw, B0_CTST, CS_MRST_CLR); cap = pci_find_capability(pdev, PCI_CAP_ID_EXP); if (cap) { - if (pci_find_aer_capability(pdev)) { - /* Check for advanced error reporting */ - pci_cleanup_aer_uncorrect_error_status(pdev); - pci_cleanup_aer_correct_error_status(pdev); - } else { - dev_warn(&pdev->dev, - "PCI Express Advanced Error Reporting" - " not configured or MMCONFIG problem?\n"); - - sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS, - 0xfffffffful); - } + sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS, + 0xfffffffful); /* If error bit is stuck on ignore it */ if (sky2_read32(hw, B0_HWE_ISRC) & Y2_IS_PCI_EXP) dev_info(&pdev->dev, "ignoring stuck error report bit\n"); - - else if (pci_enable_pcie_error_reporting(pdev)) + else hwe_mask |= Y2_IS_PCI_EXP; } @@ -2930,16 +2906,14 @@ static void sky2_restart(struct work_struct *work) int i, err; rtnl_lock(); - sky2_write32(hw, B0_IMSK, 0); - sky2_read32(hw, B0_IMSK); - napi_disable(&hw->napi); - for (i = 0; i < hw->ports; i++) { dev = hw->dev[i]; if (netif_running(dev)) sky2_down(dev); } + napi_disable(&hw->napi); + sky2_write32(hw, B0_IMSK, 0); sky2_reset(hw); sky2_write32(hw, B0_IMSK, Y2_IS_BASE); napi_enable(&hw->napi); @@ -3672,32 +3646,33 @@ static int sky2_set_tso(struct net_device *dev, u32 data) static int sky2_get_eeprom_len(struct net_device *dev) { struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; u16 reg2; - 
pci_read_config_word(sky2->hw->pdev, PCI_DEV_REG2, &reg2); + reg2 = sky2_pci_read16(hw, PCI_DEV_REG2); return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8); } -static u32 sky2_vpd_read(struct pci_dev *pdev, int cap, u16 offset) +static u32 sky2_vpd_read(struct sky2_hw *hw, int cap, u16 offset) { u32 val; - pci_write_config_word(pdev, cap + PCI_VPD_ADDR, offset); + sky2_pci_write16(hw, cap + PCI_VPD_ADDR, offset); do { - pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset); + offset = sky2_pci_read16(hw, cap + PCI_VPD_ADDR); } while (!(offset & PCI_VPD_ADDR_F)); - pci_read_config_dword(pdev, cap + PCI_VPD_DATA, &val); + val = sky2_pci_read32(hw, cap + PCI_VPD_DATA); return val; } -static void sky2_vpd_write(struct pci_dev *pdev, int cap, u16 offset, u32 val) +static void sky2_vpd_write(struct sky2_hw *hw, int cap, u16 offset, u32 val) { - pci_write_config_word(pdev, cap + PCI_VPD_DATA, val); - pci_write_config_dword(pdev, cap + PCI_VPD_ADDR, offset | PCI_VPD_ADDR_F); + sky2_pci_write16(hw, cap + PCI_VPD_DATA, val); + sky2_pci_write32(hw, cap + PCI_VPD_ADDR, offset | PCI_VPD_ADDR_F); do { - pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset); + offset = sky2_pci_read16(hw, cap + PCI_VPD_ADDR); } while (offset & PCI_VPD_ADDR_F); } @@ -3715,7 +3690,7 @@ static int sky2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom eeprom->magic = SKY2_EEPROM_MAGIC; while (length > 0) { - u32 val = sky2_vpd_read(sky2->hw->pdev, cap, offset); + u32 val = sky2_vpd_read(sky2->hw, cap, offset); int n = min_t(int, length, sizeof(val)); memcpy(data, &val, n); @@ -3745,10 +3720,10 @@ static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom int n = min_t(int, length, sizeof(val)); if (n < sizeof(val)) - val = sky2_vpd_read(sky2->hw->pdev, cap, offset); + val = sky2_vpd_read(sky2->hw, cap, offset); memcpy(&val, data, n); - sky2_vpd_write(sky2->hw->pdev, cap, offset, val); + sky2_vpd_write(sky2->hw, cap, offset, val); length -= n; data += n; @@ -4013,7 +3988,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw, sky2->duplex = -1; sky2->speed = -1; sky2->advertising = sky2_supported_modes(hw); - sky2->rx_csum = 1; + sky2->rx_csum = (hw->chip_id != CHIP_ID_YUKON_XL); sky2->wol = wol; spin_lock_init(&sky2->phy_lock); @@ -4184,9 +4159,9 @@ static int __devinit sky2_probe(struct pci_dev *pdev, */ { u32 reg; - pci_read_config_dword(pdev,PCI_DEV_REG2, &reg); + reg = sky2_pci_read32(hw, PCI_DEV_REG2); reg &= ~PCI_REV_DESC; - pci_write_config_dword(pdev, PCI_DEV_REG2, reg); + sky2_pci_write32(hw, PCI_DEV_REG2, reg); } #endif @@ -4377,7 +4352,7 @@ static int sky2_resume(struct pci_dev *pdev) if (hw->chip_id == CHIP_ID_YUKON_EX || hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_FE_P) - pci_write_config_dword(pdev, PCI_DEV_REG3, 0); + sky2_pci_write32(hw, PCI_DEV_REG3, 0); sky2_reset(hw); sky2_write32(hw, B0_IMSK, Y2_IS_BASE); diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h index 69525fd7908d..bc646a47edd2 100644 --- a/drivers/net/sky2.h +++ b/drivers/net/sky2.h @@ -2128,4 +2128,25 @@ static inline void gma_set_addr(struct sky2_hw *hw, unsigned port, unsigned reg, gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8)); gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8)); } + +/* PCI config space access */ +static inline u32 sky2_pci_read32(const struct sky2_hw *hw, unsigned reg) +{ + return sky2_read32(hw, Y2_CFG_SPC + reg); +} + +static inline u16 sky2_pci_read16(const struct sky2_hw *hw, unsigned reg) +{ + return 
sky2_read16(hw, Y2_CFG_SPC + reg); +} + +static inline void sky2_pci_write32(struct sky2_hw *hw, unsigned reg, u32 val) +{ + sky2_write32(hw, Y2_CFG_SPC + reg, val); +} + +static inline void sky2_pci_write16(struct sky2_hw *hw, unsigned reg, u16 val) +{ + sky2_write16(hw, Y2_CFG_SPC + reg, val); +} #endif diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c index dd18af0ce676..76cc1d3adf71 100644 --- a/drivers/net/smc911x.c +++ b/drivers/net/smc911x.c @@ -428,7 +428,6 @@ static inline void smc911x_drop_pkt(struct net_device *dev) */ static inline void smc911x_rcv(struct net_device *dev) { - struct smc911x_local *lp = netdev_priv(dev); unsigned long ioaddr = dev->base_addr; unsigned int pkt_len, status; struct sk_buff *skb; @@ -473,6 +472,7 @@ static inline void smc911x_rcv(struct net_device *dev) skb_put(skb,pkt_len-4); #ifdef SMC_USE_DMA { + struct smc911x_local *lp = netdev_priv(dev); unsigned int fifo; /* Lower the FIFO threshold if possible */ fifo = SMC_GET_FIFO_INT(); @@ -1299,9 +1299,9 @@ smc911x_rx_dma_irq(int dma, void *data) PRINT_PKT(skb->data, skb->len); dev->last_rx = jiffies; skb->protocol = eth_type_trans(skb, dev); - netif_rx(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += skb->len; + netif_rx(skb); spin_lock_irqsave(&lp->lock, flags); pkts = (SMC_GET_RX_FIFO_INF() & RX_FIFO_INF_RXSUSED_) >> 16; @@ -1379,13 +1379,6 @@ static void smc911x_set_multicast_list(struct net_device *dev) unsigned int multicast_table[2]; unsigned int mcr, update_multicast = 0; unsigned long flags; - /* table for flipping the order of 5 bits */ - static const unsigned char invert5[] = - {0x00, 0x10, 0x08, 0x18, 0x04, 0x14, 0x0C, 0x1C, - 0x02, 0x12, 0x0A, 0x1A, 0x06, 0x16, 0x0E, 0x1E, - 0x01, 0x11, 0x09, 0x19, 0x05, 0x15, 0x0D, 0x1D, - 0x03, 0x13, 0x0B, 0x1B, 0x07, 0x17, 0x0F, 0x1F}; - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); @@ -1432,7 +1425,7 @@ static void smc911x_set_multicast_list(struct net_device *dev) cur_addr = dev->mc_list; for (i = 0; i < dev->mc_count; i++, cur_addr = cur_addr->next) { - int position; + u32 position; /* do we have a pointer here? 
*/ if (!cur_addr) @@ -1442,12 +1435,10 @@ static void smc911x_set_multicast_list(struct net_device *dev) if (!(*cur_addr->dmi_addr & 1)) continue; - /* only use the low order bits */ - position = crc32_le(~0, cur_addr->dmi_addr, 6) & 0x3f; + /* upper 6 bits are used as hash index */ + position = ether_crc(ETH_ALEN, cur_addr->dmi_addr)>>26; - /* do some messy swapping to put the bit in the right spot */ - multicast_table[invert5[position&0x1F]&0x1] |= - (1<<invert5[(position>>1)&0x1F]); + multicast_table[position>>5] |= 1 << (position&0x1f); } /* be sure I get rid of flags I might have set */ diff --git a/drivers/net/smc911x.h b/drivers/net/smc911x.h index 16a0edc078fd..d04e4fa35206 100644 --- a/drivers/net/smc911x.h +++ b/drivers/net/smc911x.h @@ -37,7 +37,7 @@ #define SMC_USE_16BIT 0 #define SMC_USE_32BIT 1 #define SMC_IRQ_SENSE IRQF_TRIGGER_FALLING -#elif CONFIG_SH_MAGIC_PANEL_R2 +#elif defined(CONFIG_SH_MAGIC_PANEL_R2) #define SMC_USE_SH_DMA 0 #define SMC_USE_16BIT 0 #define SMC_USE_32BIT 1 diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h index db34e1eb67e9..07b7f7120e37 100644 --- a/drivers/net/smc91x.h +++ b/drivers/net/smc91x.h @@ -55,7 +55,7 @@ #define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) #define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) -#elif defined(CONFIG_BFIN) +#elif defined(CONFIG_BLACKFIN) #define SMC_IRQ_FLAGS IRQF_TRIGGER_HIGH #define RPC_LSA_DEFAULT RPC_LED_100_10 diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c index f6fedcc32de1..68872142530b 100644 --- a/drivers/net/sungem.c +++ b/drivers/net/sungem.c @@ -2281,14 +2281,12 @@ static void gem_reset_task(struct work_struct *work) mutex_lock(&gp->pm_mutex); - napi_disable(&gp->napi); + if (gp->opened) + napi_disable(&gp->napi); spin_lock_irq(&gp->lock); spin_lock(&gp->tx_lock); - if (gp->running == 0) - goto not_running; - if (gp->running) { netif_stop_queue(gp->dev); @@ -2298,13 +2296,14 @@ static void gem_reset_task(struct work_struct *work) gem_set_link_modes(gp); netif_wake_queue(gp->dev); } - not_running: + gp->reset_task_pending = 0; spin_unlock(&gp->tx_lock); spin_unlock_irq(&gp->lock); - napi_enable(&gp->napi); + if (gp->opened) + napi_enable(&gp->napi); mutex_unlock(&gp->pm_mutex); } diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c index ca90566d5bcd..b4891caeae5a 100644 --- a/drivers/net/tulip/dmfe.c +++ b/drivers/net/tulip/dmfe.c @@ -2118,8 +2118,8 @@ static int dmfe_suspend(struct pci_dev *pci_dev, pm_message_t state) pci_enable_wake(pci_dev, PCI_D3cold, 1); /* Power down device*/ - pci_set_power_state(pci_dev, pci_choose_state (pci_dev,state)); pci_save_state(pci_dev); + pci_set_power_state(pci_dev, pci_choose_state (pci_dev, state)); return 0; } @@ -2129,8 +2129,8 @@ static int dmfe_resume(struct pci_dev *pci_dev) struct net_device *dev = pci_get_drvdata(pci_dev); u32 tmp; - pci_restore_state(pci_dev); pci_set_power_state(pci_dev, PCI_D0); + pci_restore_state(pci_dev); /* Re-initilize DM910X board */ dmfe_init_dm910x(dev); diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index a3ff270593f1..7f689907ac28 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c @@ -1460,6 +1460,8 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth) if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) || (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) || (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) || + (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) || + (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) || 
(ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { upsmr |= UPSMR_RPM; switch (ugeth->max_speed) { @@ -1557,6 +1559,8 @@ static void adjust_link(struct net_device *dev) if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) || (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) || (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) || + (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) || + (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) || (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { if (phydev->speed == SPEED_10) upsmr |= UPSMR_R10M; @@ -3795,6 +3799,10 @@ static phy_interface_t to_phy_interface(const char *phy_connection_type) return PHY_INTERFACE_MODE_RGMII; if (strcasecmp(phy_connection_type, "rgmii-id") == 0) return PHY_INTERFACE_MODE_RGMII_ID; + if (strcasecmp(phy_connection_type, "rgmii-txid") == 0) + return PHY_INTERFACE_MODE_RGMII_TXID; + if (strcasecmp(phy_connection_type, "rgmii-rxid") == 0) + return PHY_INTERFACE_MODE_RGMII_RXID; if (strcasecmp(phy_connection_type, "rtbi") == 0) return PHY_INTERFACE_MODE_RTBI; @@ -3889,6 +3897,8 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma case PHY_INTERFACE_MODE_GMII: case PHY_INTERFACE_MODE_RGMII: case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: case PHY_INTERFACE_MODE_TBI: case PHY_INTERFACE_MODE_RTBI: max_speed = SPEED_1000; diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index 2c685734b7a4..1ffdd106f4c4 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c @@ -94,7 +94,7 @@ static void dm_write_async_callback(struct urb *urb) struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context; if (urb->status < 0) - printk(KERN_DEBUG "dm_write_async_callback() failed with %d", + printk(KERN_DEBUG "dm_write_async_callback() failed with %d\n", urb->status); kfree(req); diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index 450e29d7a9f3..35cd65d6b9ed 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c @@ -1242,6 +1242,9 @@ static int velocity_rx_refill(struct velocity_info *vptr) static int velocity_init_rd_ring(struct velocity_info *vptr) { int ret; + int mtu = vptr->dev->mtu; + + vptr->rx_buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32; vptr->rd_info = kcalloc(vptr->options.numrx, sizeof(struct velocity_rd_info), GFP_KERNEL); @@ -1898,8 +1901,6 @@ static int velocity_open(struct net_device *dev) struct velocity_info *vptr = netdev_priv(dev); int ret; - vptr->rx_buf_sz = (dev->mtu <= 1504 ? 
PKT_BUF_SZ : dev->mtu + 32); - ret = velocity_init_rings(vptr); if (ret < 0) goto out; @@ -1978,12 +1979,6 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu) velocity_free_rd_ring(vptr); dev->mtu = new_mtu; - if (new_mtu > 8192) - vptr->rx_buf_sz = 9 * 1024; - else if (new_mtu > 4096) - vptr->rx_buf_sz = 8192; - else - vptr->rx_buf_sz = 4 * 1024; ret = velocity_init_rd_ring(vptr); if (ret < 0) diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 2b17c1dc46f1..b45eecc53c4a 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -1566,7 +1566,7 @@ static void b43_release_firmware(struct b43_wldev *dev) static void b43_print_fw_helptext(struct b43_wl *wl) { b43err(wl, "You must go to " - "http://linuxwireless.org/en/users/Drivers/bcm43xx#devicefirmware " + "http://linuxwireless.org/en/users/Drivers/b43#devicefirmware " "and download the correct firmware (version 4).\n"); } diff --git a/drivers/net/wireless/b43/phy.c b/drivers/net/wireless/b43/phy.c index 3d4ed647c311..7ff091e69f05 100644 --- a/drivers/net/wireless/b43/phy.c +++ b/drivers/net/wireless/b43/phy.c @@ -2214,7 +2214,7 @@ int b43_phy_init_tssi2dbm_table(struct b43_wldev *dev) } dyn_tssi2dbm = kmalloc(64, GFP_KERNEL); if (dyn_tssi2dbm == NULL) { - b43err(dev->wl, "Could not allocate memory" + b43err(dev->wl, "Could not allocate memory " "for tssi2dbm table\n"); return -ENOMEM; } diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c index 8cb3dc4c4745..83161d9af813 100644 --- a/drivers/net/wireless/b43legacy/dma.c +++ b/drivers/net/wireless/b43legacy/dma.c @@ -996,7 +996,7 @@ int b43legacy_dma_init(struct b43legacy_wldev *dev) err = ssb_dma_set_mask(dev->dev, dmamask); if (err) { -#ifdef BCM43XX_PIO +#ifdef CONFIG_B43LEGACY_PIO b43legacywarn(dev->wl, "DMA for this device not supported. 
" "Falling back to PIO\n"); dev->__using_pio = 1; diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c index 3bde1e9ab428..32d5e1785bda 100644 --- a/drivers/net/wireless/b43legacy/main.c +++ b/drivers/net/wireless/b43legacy/main.c @@ -1419,7 +1419,7 @@ static void b43legacy_release_firmware(struct b43legacy_wldev *dev) static void b43legacy_print_fw_helptext(struct b43legacy_wl *wl) { b43legacyerr(wl, "You must go to http://linuxwireless.org/en/users/" - "Drivers/bcm43xx#devicefirmware " + "Drivers/b43#devicefirmware " "and download the correct firmware (version 3).\n"); } diff --git a/drivers/net/wireless/b43legacy/phy.c b/drivers/net/wireless/b43legacy/phy.c index 22a4b3d0186d..491e518e4aeb 100644 --- a/drivers/net/wireless/b43legacy/phy.c +++ b/drivers/net/wireless/b43legacy/phy.c @@ -2020,7 +2020,7 @@ int b43legacy_phy_init_tssi2dbm_table(struct b43legacy_wldev *dev) phy->idle_tssi = 62; dyn_tssi2dbm = kmalloc(64, GFP_KERNEL); if (dyn_tssi2dbm == NULL) { - b43legacyerr(dev->wl, "Could not allocate memory" + b43legacyerr(dev->wl, "Could not allocate memory " "for tssi2dbm table\n"); return -ENOMEM; } diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_phy.c b/drivers/net/wireless/bcm43xx/bcm43xx_phy.c index b37f1e348700..af3de3343650 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_phy.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_phy.c @@ -2149,7 +2149,7 @@ int bcm43xx_phy_init_tssi2dbm_table(struct bcm43xx_private *bcm) } dyn_tssi2dbm = kmalloc(64, GFP_KERNEL); if (dyn_tssi2dbm == NULL) { - printk(KERN_ERR PFX "Could not allocate memory" + printk(KERN_ERR PFX "Could not allocate memory " "for tssi2dbm table\n"); return -ENOMEM; } diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index 465da4f67ce7..4bdf237f6adc 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c @@ -2915,6 +2915,10 @@ static void iwl_set_rate(struct iwl_priv *priv) int i; hw = iwl_get_hw_mode(priv, priv->phymode); + if (!hw) { + IWL_ERROR("Failed to set rate: unable to get hw mode\n"); + return; + } priv->active_rate = 0; priv->active_rate_basic = 0; @@ -6936,13 +6940,10 @@ static int iwl_mac_add_interface(struct ieee80211_hw *hw, DECLARE_MAC_BUF(mac); IWL_DEBUG_MAC80211("enter: id %d, type %d\n", conf->if_id, conf->type); - if (conf->mac_addr) - IWL_DEBUG_MAC80211("enter: MAC %s\n", - print_mac(mac, conf->mac_addr)); if (priv->interface_id) { IWL_DEBUG_MAC80211("leave - interface_id != 0\n"); - return 0; + return -EOPNOTSUPP; } spin_lock_irqsave(&priv->lock, flags); @@ -6951,6 +6952,12 @@ static int iwl_mac_add_interface(struct ieee80211_hw *hw, spin_unlock_irqrestore(&priv->lock, flags); mutex_lock(&priv->mutex); + + if (conf->mac_addr) { + IWL_DEBUG_MAC80211("Set: %s\n", print_mac(mac, conf->mac_addr)); + memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN); + } + iwl_set_mode(priv, conf->type); IWL_DEBUG_MAC80211("leave\n"); @@ -8270,6 +8277,7 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv) { iwl_hw_cancel_deferred_work(priv); + cancel_delayed_work_sync(&priv->init_alive_start); cancel_delayed_work(&priv->scan_check); cancel_delayed_work(&priv->alive_start); cancel_delayed_work(&priv->post_associate); diff --git a/drivers/net/wireless/iwlwifi/iwl4965-base.c b/drivers/net/wireless/iwlwifi/iwl4965-base.c index 9918780f5e86..8f85564ec6fa 100644 --- a/drivers/net/wireless/iwlwifi/iwl4965-base.c +++ b/drivers/net/wireless/iwlwifi/iwl4965-base.c @@ -3003,6 +3003,10 @@ 
static void iwl_set_rate(struct iwl_priv *priv) int i; hw = iwl_get_hw_mode(priv, priv->phymode); + if (!hw) { + IWL_ERROR("Failed to set rate: unable to get hw mode\n"); + return; + } priv->active_rate = 0; priv->active_rate_basic = 0; @@ -7326,9 +7330,6 @@ static int iwl_mac_add_interface(struct ieee80211_hw *hw, DECLARE_MAC_BUF(mac); IWL_DEBUG_MAC80211("enter: id %d, type %d\n", conf->if_id, conf->type); - if (conf->mac_addr) - IWL_DEBUG_MAC80211("enter: MAC %s\n", - print_mac(mac, conf->mac_addr)); if (priv->interface_id) { IWL_DEBUG_MAC80211("leave - interface_id != 0\n"); @@ -7341,6 +7342,11 @@ static int iwl_mac_add_interface(struct ieee80211_hw *hw, spin_unlock_irqrestore(&priv->lock, flags); mutex_lock(&priv->mutex); + + if (conf->mac_addr) { + IWL_DEBUG_MAC80211("Set %s\n", print_mac(mac, conf->mac_addr)); + memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN); + } iwl_set_mode(priv, conf->type); IWL_DEBUG_MAC80211("leave\n"); @@ -8864,6 +8870,7 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv) { iwl_hw_cancel_deferred_work(priv); + cancel_delayed_work_sync(&priv->init_alive_start); cancel_delayed_work(&priv->scan_check); cancel_delayed_work(&priv->alive_start); cancel_delayed_work(&priv->post_associate); diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c index ec89dabc412c..ba4fc2b3bf0a 100644 --- a/drivers/net/wireless/libertas/if_cs.c +++ b/drivers/net/wireless/libertas/if_cs.c @@ -170,7 +170,8 @@ static int if_cs_poll_while_fw_download(struct if_cs_card *card, uint addr, u8 r #define IF_CS_H_IC_TX_OVER 0x0001 #define IF_CS_H_IC_RX_OVER 0x0002 #define IF_CS_H_IC_DNLD_OVER 0x0004 -#define IF_CS_H_IC_HOST_EVENT 0x0008 +#define IF_CS_H_IC_POWER_DOWN 0x0008 +#define IF_CS_H_IC_HOST_EVENT 0x0010 #define IF_CS_H_IC_MASK 0x001f #define IF_CS_H_INT_MASK 0x00000004 diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c index 5ead08312e1e..1823b48a8ba7 100644 --- a/drivers/net/wireless/libertas/main.c +++ b/drivers/net/wireless/libertas/main.c @@ -1165,8 +1165,6 @@ wlan_private *libertas_add_card(void *card, struct device *dmdev) #ifdef WIRELESS_EXT dev->wireless_handlers = (struct iw_handler_def *)&libertas_handler_def; #endif -#define NETIF_F_DYNALLOC 16 - dev->features |= NETIF_F_DYNALLOC; dev->flags |= IFF_BROADCAST | IFF_MULTICAST; dev->set_multicast_list = libertas_set_multicast_list; @@ -1348,8 +1346,6 @@ int libertas_add_mesh(wlan_private *priv, struct device *dev) #ifdef WIRELESS_EXT mesh_dev->wireless_handlers = (struct iw_handler_def *)&mesh_handler_def; #endif -#define NETIF_F_DYNALLOC 16 - /* Register virtual mesh interface */ ret = register_netdev(mesh_dev); if (ret) { diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c index c6f5aa3cb465..395b7882d4d6 100644 --- a/drivers/net/wireless/libertas/wext.c +++ b/drivers/net/wireless/libertas/wext.c @@ -1528,7 +1528,7 @@ static int wlan_set_encodeext(struct net_device *dev, && (ext->key_len != KEY_LEN_WPA_TKIP)) || ((alg == IW_ENCODE_ALG_CCMP) && (ext->key_len != KEY_LEN_WPA_AES))) { - lbs_deb_wext("invalid size %d for key of alg" + lbs_deb_wext("invalid size %d for key of alg " "type %d\n", ext->key_len, alg); diff --git a/drivers/net/wireless/netwave_cs.c b/drivers/net/wireless/netwave_cs.c index 2402cb8dd328..d2fa079fbc4c 100644 --- a/drivers/net/wireless/netwave_cs.c +++ b/drivers/net/wireless/netwave_cs.c @@ -806,7 +806,7 @@ static int netwave_pcmcia_config(struct pcmcia_device *link) { for (i = 0; i < 6; 
i++) dev->dev_addr[i] = readb(ramBase + NETWAVE_EREG_PA + i); - printk(KERN_INFO "%s: Netwave: port %#3lx, irq %d, mem %lx" + printk(KERN_INFO "%s: Netwave: port %#3lx, irq %d, mem %lx, " "id %c%c, hw_addr %s\n", dev->name, dev->base_addr, dev->irq, (u_long) ramBase, diff --git a/drivers/net/wireless/p54usb.c b/drivers/net/wireless/p54usb.c index 755482a5a938..60d286eb0b8b 100644 --- a/drivers/net/wireless/p54usb.c +++ b/drivers/net/wireless/p54usb.c @@ -308,7 +308,7 @@ static int p54u_read_eeprom(struct ieee80211_hw *dev) buf = kmalloc(0x2020, GFP_KERNEL); if (!buf) { - printk(KERN_ERR "prism54usb: cannot allocate memory for" + printk(KERN_ERR "prism54usb: cannot allocate memory for " "eeprom readback!\n"); return -ENOMEM; } diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c index 5eace9e66e14..66ce61048361 100644 --- a/drivers/parisc/lba_pci.c +++ b/drivers/parisc/lba_pci.c @@ -768,9 +768,13 @@ lba_fixup_bus(struct pci_bus *bus) DBG("lba_fixup_bus() WTF? 0x%lx [%lx/%lx] XXX", res->flags, res->start, res->end); } - if ((i != PCI_ROM_RESOURCE) || - (res->flags & IORESOURCE_ROM_ENABLE)) - pci_claim_resource(dev, i); + + /* + ** FIXME: this will result in whinging for devices + ** that share expansion ROMs (think quad tulip), but + ** isn't harmful. + */ + pci_claim_resource(dev, i); } #ifdef FBB_SUPPORT diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index a4f56e95cf96..f1e00ff54ce8 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -293,7 +293,7 @@ int rtc_irq_register(struct rtc_device *rtc, struct rtc_task *task) return -EINVAL; /* Cannot register while the char dev is in use */ - if (test_and_set_bit(RTC_DEV_BUSY, &rtc->flags)) + if (test_and_set_bit_lock(RTC_DEV_BUSY, &rtc->flags)) return -EBUSY; spin_lock_irq(&rtc->irq_task_lock); @@ -303,7 +303,7 @@ int rtc_irq_register(struct rtc_device *rtc, struct rtc_task *task) } spin_unlock_irq(&rtc->irq_task_lock); - clear_bit(RTC_DEV_BUSY, &rtc->flags); + clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags); return retval; } diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c index ae1bf177d625..025c60a17a4a 100644 --- a/drivers/rtc/rtc-dev.c +++ b/drivers/rtc/rtc-dev.c @@ -26,7 +26,7 @@ static int rtc_dev_open(struct inode *inode, struct file *file) struct rtc_device, char_dev); const struct rtc_class_ops *ops = rtc->ops; - if (test_and_set_bit(RTC_DEV_BUSY, &rtc->flags)) + if (test_and_set_bit_lock(RTC_DEV_BUSY, &rtc->flags)) return -EBUSY; file->private_data = rtc; @@ -41,7 +41,7 @@ static int rtc_dev_open(struct inode *inode, struct file *file) } /* something has gone wrong */ - clear_bit(RTC_DEV_BUSY, &rtc->flags); + clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags); return err; } @@ -402,7 +402,7 @@ static int rtc_dev_release(struct inode *inode, struct file *file) if (rtc->ops->release) rtc->ops->release(rtc->dev.parent); - clear_bit(RTC_DEV_BUSY, &rtc->flags); + clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags); return 0; } diff --git a/drivers/rtc/rtc-max6902.c b/drivers/rtc/rtc-max6902.c index 3e183cfee10f..1f956dc5d56e 100644 --- a/drivers/rtc/rtc-max6902.c +++ b/drivers/rtc/rtc-max6902.c @@ -89,13 +89,9 @@ static int max6902_get_reg(struct device *dev, unsigned char address, /* do the i/o */ status = spi_sync(spi, &message); - if (status == 0) - status = message.status; - else - return status; - - *data = chip->rx_buf[1]; + if (status == 0) + *data = chip->rx_buf[1]; return status; } @@ -125,9 +121,7 @@ static int max6902_get_datetime(struct device *dev, struct rtc_time *dt) /* do the i/o */ 
status = spi_sync(spi, &message); - if (status == 0) - status = message.status; - else + if (status) return status; /* The chip sends data in this order: diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 5e083d1f57e7..15a5789b7734 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -472,11 +472,11 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char if (rc) goto unregister_dev; - add_disk(dev_info->gd); - blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request); blk_queue_hardsect_size(dev_info->dcssblk_queue, 4096); + add_disk(dev_info->gd); + switch (dev_info->segment_type) { case SEG_TYPE_SR: case SEG_TYPE_ER: diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 6db31089d2d7..c3df2cd009a4 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -451,6 +451,7 @@ static int reprobe_subchannel(struct subchannel_id schid, void *data) break; case -ENXIO: case -ENOMEM: + case -EIO: /* These should abort looping */ break; default: diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c index 2f6bf462425e..156f3f9786b5 100644 --- a/drivers/s390/cio/device_id.c +++ b/drivers/s390/cio/device_id.c @@ -113,6 +113,7 @@ __ccw_device_sense_id_start(struct ccw_device *cdev) { struct subchannel *sch; struct ccw1 *ccw; + int ret; sch = to_subchannel(cdev->dev.parent); /* Setup sense channel program. */ @@ -124,9 +125,25 @@ __ccw_device_sense_id_start(struct ccw_device *cdev) /* Reset device status. */ memset(&cdev->private->irb, 0, sizeof(struct irb)); - cdev->private->flags.intretry = 0; - return cio_start(sch, ccw, LPM_ANYPATH); + /* Try on every path. */ + ret = -ENODEV; + while (cdev->private->imask != 0) { + if ((sch->opm & cdev->private->imask) != 0 && + cdev->private->iretry > 0) { + cdev->private->iretry--; + /* Reset internal retry indication. */ + cdev->private->flags.intretry = 0; + ret = cio_start (sch, cdev->private->iccws, + cdev->private->imask); + /* ret is 0, -EBUSY, -EACCES or -ENODEV */ + if (ret != -EACCES) + return ret; + } + cdev->private->imask >>= 1; + cdev->private->iretry = 5; + } + return ret; } void @@ -136,7 +153,8 @@ ccw_device_sense_id_start(struct ccw_device *cdev) memset (&cdev->private->senseid, 0, sizeof (struct senseid)); cdev->private->senseid.cu_type = 0xFFFF; - cdev->private->iretry = 3; + cdev->private->imask = 0x80; + cdev->private->iretry = 5; ret = __ccw_device_sense_id_start(cdev); if (ret && ret != -EBUSY) ccw_device_sense_id_done(cdev, ret); @@ -252,13 +270,14 @@ ccw_device_sense_id_irq(struct ccw_device *cdev, enum dev_event dev_event) ccw_device_sense_id_done(cdev, ret); break; case -EACCES: /* channel is not operational. */ + sch->lpm &= ~cdev->private->imask; + cdev->private->imask >>= 1; + cdev->private->iretry = 5; + /* fall through. */ case -EAGAIN: /* try again. */ - cdev->private->iretry--; - if (cdev->private->iretry > 0) { - ret = __ccw_device_sense_id_start(cdev); - if (ret == 0 || ret == -EBUSY) - break; - } + ret = __ccw_device_sense_id_start(cdev); + if (ret == 0 || ret == -EBUSY) + break; /* fall through. */ default: /* Sense ID failed. Try asking VM. 
*/ if (MACHINE_IS_VM) { diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c index 6bf3ebbe985a..97adc701a819 100644 --- a/drivers/s390/net/ctcmain.c +++ b/drivers/s390/net/ctcmain.c @@ -2782,35 +2782,14 @@ ctc_probe_device(struct ccwgroup_device *cgdev) } /** - * Initialize everything of the net device except the name and the - * channel structs. + * Device setup function called by alloc_netdev(). + * + * @param dev Device to be setup. */ -static struct net_device * -ctc_init_netdevice(struct net_device * dev, int alloc_device, - struct ctc_priv *privptr) +void ctc_init_netdevice(struct net_device * dev) { - if (!privptr) - return NULL; - DBF_TEXT(setup, 3, __FUNCTION__); - if (alloc_device) { - dev = kzalloc(sizeof(struct net_device), GFP_KERNEL); - if (!dev) - return NULL; - } - - dev->priv = privptr; - privptr->fsm = init_fsm("ctcdev", dev_state_names, - dev_event_names, CTC_NR_DEV_STATES, CTC_NR_DEV_EVENTS, - dev_fsm, DEV_FSM_LEN, GFP_KERNEL); - if (privptr->fsm == NULL) { - if (alloc_device) - kfree(dev); - return NULL; - } - fsm_newstate(privptr->fsm, DEV_STATE_STOPPED); - fsm_settimer(privptr->fsm, &privptr->restart_timer); if (dev->mtu == 0) dev->mtu = CTC_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2; dev->hard_start_xmit = ctc_tx; @@ -2823,7 +2802,6 @@ ctc_init_netdevice(struct net_device * dev, int alloc_device, dev->type = ARPHRD_SLIP; dev->tx_queue_len = 100; dev->flags = IFF_POINTOPOINT | IFF_NOARP; - return dev; } @@ -2879,14 +2857,22 @@ ctc_new_device(struct ccwgroup_device *cgdev) "ccw_device_set_online (cdev[1]) failed with ret = %d\n", ret); } - dev = ctc_init_netdevice(NULL, 1, privptr); - + dev = alloc_netdev(0, "ctc%d", ctc_init_netdevice); if (!dev) { ctc_pr_warn("ctc_init_netdevice failed\n"); goto out; } + dev->priv = privptr; - strlcpy(dev->name, "ctc%d", IFNAMSIZ); + privptr->fsm = init_fsm("ctcdev", dev_state_names, + dev_event_names, CTC_NR_DEV_STATES, CTC_NR_DEV_EVENTS, + dev_fsm, DEV_FSM_LEN, GFP_KERNEL); + if (privptr->fsm == NULL) { + free_netdev(dev); + goto out; + } + fsm_newstate(privptr->fsm, DEV_STATE_STOPPED); + fsm_settimer(privptr->fsm, &privptr->restart_timer); for (direction = READ; direction <= WRITE; direction++) { privptr->channel[direction] = diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 5552b755c08a..07fa824d179f 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -977,7 +977,9 @@ static void zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action) debug_text_event(adapter->erp_dbf, 2, "a_adis"); debug_event(adapter->erp_dbf, 2, &erp_action->action, sizeof (int)); - zfcp_erp_async_handler_nolock(erp_action, ZFCP_STATUS_ERP_DISMISSED); + erp_action->status |= ZFCP_STATUS_ERP_DISMISSED; + if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) + zfcp_erp_action_ready(erp_action); } int @@ -1063,7 +1065,7 @@ zfcp_erp_thread(void *data) &adapter->status)) { write_lock_irqsave(&adapter->erp_lock, flags); - next = adapter->erp_ready_head.prev; + next = adapter->erp_ready_head.next; write_unlock_irqrestore(&adapter->erp_lock, flags); if (next != &adapter->erp_ready_head) { @@ -1153,15 +1155,13 @@ zfcp_erp_strategy(struct zfcp_erp_action *erp_action) /* * check for dismissed status again to avoid follow-up actions, - * failing of targets and so on for dismissed actions + * failing of targets and so on for dismissed actions, + * we go through down() here because there has been an up() */ - retval = zfcp_erp_strategy_check_action(erp_action, retval); + if 
(erp_action->status & ZFCP_STATUS_ERP_DISMISSED) + retval = ZFCP_ERP_CONTINUES; switch (retval) { - case ZFCP_ERP_DISMISSED: - /* leave since this action has ridden to its ancestors */ - debug_text_event(adapter->erp_dbf, 6, "a_st_dis2"); - goto unlock; case ZFCP_ERP_NOMEM: /* no memory to continue immediately, let it sleep */ if (!(erp_action->status & ZFCP_STATUS_ERP_LOWMEM)) { @@ -3089,7 +3089,7 @@ zfcp_erp_action_enqueue(int action, ++adapter->erp_total_count; /* finally put it into 'ready' queue and kick erp thread */ - list_add(&erp_action->list, &adapter->erp_ready_head); + list_add_tail(&erp_action->list, &adapter->erp_ready_head); up(&adapter->erp_ready_sem); retval = 0; out: diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 240a0bb8986f..abce48ccc85b 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -1339,10 +1339,10 @@ int aac_check_health(struct aac_dev * aac) aif = (struct aac_aifcmd *)hw_fib->data; aif->command = cpu_to_le32(AifCmdEventNotify); aif->seqnum = cpu_to_le32(0xFFFFFFFF); - aif->data[0] = cpu_to_le32(AifEnExpEvent); - aif->data[1] = cpu_to_le32(AifExeFirmwarePanic); - aif->data[2] = cpu_to_le32(AifHighPriority); - aif->data[3] = cpu_to_le32(BlinkLED); + aif->data[0] = AifEnExpEvent; + aif->data[1] = AifExeFirmwarePanic; + aif->data[2] = AifHighPriority; + aif->data[3] = BlinkLED; /* * Put the FIB onto the diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 038980be763d..9dd331bc29b0 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -636,7 +636,7 @@ static int aac_cfg_open(struct inode *inode, struct file *file) static int aac_cfg_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) { - if (!capable(CAP_SYS_ADMIN)) + if (!capable(CAP_SYS_RAWIO)) return -EPERM; return aac_do_ioctl(file->private_data, cmd, (void __user *)arg); } @@ -691,7 +691,7 @@ static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) static long aac_compat_cfg_ioctl(struct file *file, unsigned cmd, unsigned long arg) { - if (!capable(CAP_SYS_ADMIN)) + if (!capable(CAP_SYS_RAWIO)) return -EPERM; return aac_compat_do_ioctl((struct aac_dev *)file->private_data, cmd, arg); } @@ -950,7 +950,8 @@ static struct scsi_host_template aac_driver_template = { static void __aac_shutdown(struct aac_dev * aac) { - kthread_stop(aac->thread); + if (aac->aif_thread) + kthread_stop(aac->thread); aac_send_shutdown(aac); aac_adapter_disable_int(aac); free_irq(aac->pdev->irq, aac); diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c index 6f8403b82ba1..f5732d8f67fe 100644 --- a/drivers/scsi/atari_scsi.c +++ b/drivers/scsi/atari_scsi.c @@ -393,7 +393,7 @@ static irqreturn_t scsi_tt_intr(int irq, void *dummy) #endif /* REAL_DMA */ - NCR5380_intr(0, 0); + NCR5380_intr(irq, dummy); #if 0 /* To be sure the int is not masked */ @@ -458,7 +458,7 @@ static irqreturn_t scsi_falcon_intr(int irq, void *dummy) #endif /* REAL_DMA */ - NCR5380_intr(0, 0); + NCR5380_intr(irq, dummy); return IRQ_HANDLED; } @@ -684,7 +684,7 @@ int atari_scsi_detect(struct scsi_host_template *host) * interrupt after having cleared the pending flag for the DMA * interrupt. 
*/ if (request_irq(IRQ_TT_MFP_SCSI, scsi_tt_intr, IRQ_TYPE_SLOW, - "SCSI NCR5380", scsi_tt_intr)) { + "SCSI NCR5380", instance)) { printk(KERN_ERR "atari_scsi_detect: cannot allocate irq %d, aborting",IRQ_TT_MFP_SCSI); scsi_unregister(atari_scsi_host); atari_stram_free(atari_dma_buffer); @@ -701,7 +701,7 @@ int atari_scsi_detect(struct scsi_host_template *host) IRQ_TYPE_PRIO, "Hades DMA emulator", hades_dma_emulator)) { printk(KERN_ERR "atari_scsi_detect: cannot allocate irq %d, aborting (MACH_IS_HADES)",IRQ_AUTO_2); - free_irq(IRQ_TT_MFP_SCSI, scsi_tt_intr); + free_irq(IRQ_TT_MFP_SCSI, instance); scsi_unregister(atari_scsi_host); atari_stram_free(atari_dma_buffer); atari_dma_buffer = 0; @@ -761,7 +761,7 @@ int atari_scsi_detect(struct scsi_host_template *host) int atari_scsi_release(struct Scsi_Host *sh) { if (IS_A_TT()) - free_irq(IRQ_TT_MFP_SCSI, scsi_tt_intr); + free_irq(IRQ_TT_MFP_SCSI, sh); if (atari_dma_buffer) atari_stram_free(atari_dma_buffer); return 1; diff --git a/drivers/scsi/dtc.c b/drivers/scsi/dtc.c index 2596165096d3..c2677ba29c74 100644 --- a/drivers/scsi/dtc.c +++ b/drivers/scsi/dtc.c @@ -277,7 +277,8 @@ found: /* With interrupts enabled, it will sometimes hang when doing heavy * reads. So better not enable them until I finger it out. */ if (instance->irq != SCSI_IRQ_NONE) - if (request_irq(instance->irq, dtc_intr, IRQF_DISABLED, "dtc", instance)) { + if (request_irq(instance->irq, dtc_intr, IRQF_DISABLED, + "dtc", instance)) { printk(KERN_ERR "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); instance->irq = SCSI_IRQ_NONE; } @@ -459,7 +460,7 @@ static int dtc_release(struct Scsi_Host *shost) NCR5380_local_declare(); NCR5380_setup(shost); if (shost->irq) - free_irq(shost->irq, NULL); + free_irq(shost->irq, shost); NCR5380_exit(shost); if (shost->io_port && shost->n_io_port) release_region(shost->io_port, shost->n_io_port); diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c index 607336f56d55..75585a52c88b 100644 --- a/drivers/scsi/g_NCR5380.c +++ b/drivers/scsi/g_NCR5380.c @@ -460,7 +460,8 @@ int __init generic_NCR5380_detect(struct scsi_host_template * tpnt) instance->irq = NCR5380_probe_irq(instance, 0xffff); if (instance->irq != SCSI_IRQ_NONE) - if (request_irq(instance->irq, generic_NCR5380_intr, IRQF_DISABLED, "NCR5380", instance)) { + if (request_irq(instance->irq, generic_NCR5380_intr, + IRQF_DISABLED, "NCR5380", instance)) { printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); instance->irq = SCSI_IRQ_NONE; } @@ -513,7 +514,7 @@ int generic_NCR5380_release_resources(struct Scsi_Host *instance) NCR5380_setup(instance); if (instance->irq != SCSI_IRQ_NONE) - free_irq(instance->irq, NULL); + free_irq(instance->irq, instance); NCR5380_exit(instance); #ifndef CONFIG_SCSI_G_NCR5380_MEM diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 4bcf916c21a7..57ce2251abc8 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -197,7 +197,7 @@ iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) if (unlikely(!sc)) return; - tcp_ctask->xmstate = XMSTATE_IDLE; + tcp_ctask->xmstate = XMSTATE_VALUE_IDLE; tcp_ctask->r2t = NULL; } @@ -409,7 +409,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) tcp_ctask->exp_datasn = r2tsn + 1; __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*)); - tcp_ctask->xmstate |= XMSTATE_SOL_HDR_INIT; + set_bit(XMSTATE_BIT_SOL_HDR_INIT, &tcp_ctask->xmstate); 
list_move_tail(&ctask->running, &conn->xmitqueue); scsi_queue_work(session->host, &conn->xmitwork); @@ -1254,7 +1254,7 @@ static void iscsi_set_padding(struct iscsi_tcp_cmd_task *tcp_ctask, tcp_ctask->pad_count = ISCSI_PAD_LEN - tcp_ctask->pad_count; debug_scsi("write padding %d bytes\n", tcp_ctask->pad_count); - tcp_ctask->xmstate |= XMSTATE_W_PAD; + set_bit(XMSTATE_BIT_W_PAD, &tcp_ctask->xmstate); } /** @@ -1269,7 +1269,7 @@ iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask) struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; BUG_ON(__kfifo_len(tcp_ctask->r2tqueue)); - tcp_ctask->xmstate = XMSTATE_CMD_HDR_INIT; + tcp_ctask->xmstate = 1 << XMSTATE_BIT_CMD_HDR_INIT; } /** @@ -1283,10 +1283,10 @@ iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask) * xmit. * * Management xmit state machine consists of these states: - * XMSTATE_IMM_HDR_INIT - calculate digest of PDU Header - * XMSTATE_IMM_HDR - PDU Header xmit in progress - * XMSTATE_IMM_DATA - PDU Data xmit in progress - * XMSTATE_IDLE - management PDU is done + * XMSTATE_BIT_IMM_HDR_INIT - calculate digest of PDU Header + * XMSTATE_BIT_IMM_HDR - PDU Header xmit in progress + * XMSTATE_BIT_IMM_DATA - PDU Data xmit in progress + * XMSTATE_VALUE_IDLE - management PDU is done **/ static int iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask) @@ -1297,12 +1297,12 @@ iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask) debug_scsi("mtask deq [cid %d state %x itt 0x%x]\n", conn->id, tcp_mtask->xmstate, mtask->itt); - if (tcp_mtask->xmstate & XMSTATE_IMM_HDR_INIT) { + if (test_bit(XMSTATE_BIT_IMM_HDR_INIT, &tcp_mtask->xmstate)) { iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr, sizeof(struct iscsi_hdr)); if (mtask->data_count) { - tcp_mtask->xmstate |= XMSTATE_IMM_DATA; + set_bit(XMSTATE_BIT_IMM_DATA, &tcp_mtask->xmstate); iscsi_buf_init_iov(&tcp_mtask->sendbuf, (char*)mtask->data, mtask->data_count); @@ -1315,21 +1315,20 @@ iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask) (u8*)tcp_mtask->hdrext); tcp_mtask->sent = 0; - tcp_mtask->xmstate &= ~XMSTATE_IMM_HDR_INIT; - tcp_mtask->xmstate |= XMSTATE_IMM_HDR; + clear_bit(XMSTATE_BIT_IMM_HDR_INIT, &tcp_mtask->xmstate); + set_bit(XMSTATE_BIT_IMM_HDR, &tcp_mtask->xmstate); } - if (tcp_mtask->xmstate & XMSTATE_IMM_HDR) { + if (test_bit(XMSTATE_BIT_IMM_HDR, &tcp_mtask->xmstate)) { rc = iscsi_sendhdr(conn, &tcp_mtask->headbuf, mtask->data_count); if (rc) return rc; - tcp_mtask->xmstate &= ~XMSTATE_IMM_HDR; + clear_bit(XMSTATE_BIT_IMM_HDR, &tcp_mtask->xmstate); } - if (tcp_mtask->xmstate & XMSTATE_IMM_DATA) { + if (test_and_clear_bit(XMSTATE_BIT_IMM_DATA, &tcp_mtask->xmstate)) { BUG_ON(!mtask->data_count); - tcp_mtask->xmstate &= ~XMSTATE_IMM_DATA; /* FIXME: implement. * Virtual buffer could be spreaded across multiple pages... 
*/ @@ -1339,13 +1338,13 @@ iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask) rc = iscsi_sendpage(conn, &tcp_mtask->sendbuf, &mtask->data_count, &tcp_mtask->sent); if (rc) { - tcp_mtask->xmstate |= XMSTATE_IMM_DATA; + set_bit(XMSTATE_BIT_IMM_DATA, &tcp_mtask->xmstate); return rc; } } while (mtask->data_count); } - BUG_ON(tcp_mtask->xmstate != XMSTATE_IDLE); + BUG_ON(tcp_mtask->xmstate != XMSTATE_VALUE_IDLE); if (mtask->hdr->itt == RESERVED_ITT) { struct iscsi_session *session = conn->session; @@ -1365,7 +1364,7 @@ iscsi_send_cmd_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; int rc = 0; - if (tcp_ctask->xmstate & XMSTATE_CMD_HDR_INIT) { + if (test_bit(XMSTATE_BIT_CMD_HDR_INIT, &tcp_ctask->xmstate)) { tcp_ctask->sent = 0; tcp_ctask->sg_count = 0; tcp_ctask->exp_datasn = 0; @@ -1390,21 +1389,21 @@ iscsi_send_cmd_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) if (conn->hdrdgst_en) iscsi_hdr_digest(conn, &tcp_ctask->headbuf, (u8*)tcp_ctask->hdrext); - tcp_ctask->xmstate &= ~XMSTATE_CMD_HDR_INIT; - tcp_ctask->xmstate |= XMSTATE_CMD_HDR_XMIT; + clear_bit(XMSTATE_BIT_CMD_HDR_INIT, &tcp_ctask->xmstate); + set_bit(XMSTATE_BIT_CMD_HDR_XMIT, &tcp_ctask->xmstate); } - if (tcp_ctask->xmstate & XMSTATE_CMD_HDR_XMIT) { + if (test_bit(XMSTATE_BIT_CMD_HDR_XMIT, &tcp_ctask->xmstate)) { rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->imm_count); if (rc) return rc; - tcp_ctask->xmstate &= ~XMSTATE_CMD_HDR_XMIT; + clear_bit(XMSTATE_BIT_CMD_HDR_XMIT, &tcp_ctask->xmstate); if (sc->sc_data_direction != DMA_TO_DEVICE) return 0; if (ctask->imm_count) { - tcp_ctask->xmstate |= XMSTATE_IMM_DATA; + set_bit(XMSTATE_BIT_IMM_DATA, &tcp_ctask->xmstate); iscsi_set_padding(tcp_ctask, ctask->imm_count); if (ctask->conn->datadgst_en) { @@ -1414,9 +1413,10 @@ iscsi_send_cmd_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) } } - if (ctask->unsol_count) - tcp_ctask->xmstate |= - XMSTATE_UNS_HDR | XMSTATE_UNS_INIT; + if (ctask->unsol_count) { + set_bit(XMSTATE_BIT_UNS_HDR, &tcp_ctask->xmstate); + set_bit(XMSTATE_BIT_UNS_INIT, &tcp_ctask->xmstate); + } } return rc; } @@ -1428,25 +1428,25 @@ iscsi_send_padding(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) struct iscsi_tcp_conn *tcp_conn = conn->dd_data; int sent = 0, rc; - if (tcp_ctask->xmstate & XMSTATE_W_PAD) { + if (test_bit(XMSTATE_BIT_W_PAD, &tcp_ctask->xmstate)) { iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad, tcp_ctask->pad_count); if (conn->datadgst_en) crypto_hash_update(&tcp_conn->tx_hash, &tcp_ctask->sendbuf.sg, tcp_ctask->sendbuf.sg.length); - } else if (!(tcp_ctask->xmstate & XMSTATE_W_RESEND_PAD)) + } else if (!test_bit(XMSTATE_BIT_W_RESEND_PAD, &tcp_ctask->xmstate)) return 0; - tcp_ctask->xmstate &= ~XMSTATE_W_PAD; - tcp_ctask->xmstate &= ~XMSTATE_W_RESEND_PAD; + clear_bit(XMSTATE_BIT_W_PAD, &tcp_ctask->xmstate); + clear_bit(XMSTATE_BIT_W_RESEND_PAD, &tcp_ctask->xmstate); debug_scsi("sending %d pad bytes for itt 0x%x\n", tcp_ctask->pad_count, ctask->itt); rc = iscsi_sendpage(conn, &tcp_ctask->sendbuf, &tcp_ctask->pad_count, &sent); if (rc) { debug_scsi("padding send failed %d\n", rc); - tcp_ctask->xmstate |= XMSTATE_W_RESEND_PAD; + set_bit(XMSTATE_BIT_W_RESEND_PAD, &tcp_ctask->xmstate); } return rc; } @@ -1465,11 +1465,11 @@ iscsi_send_digest(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, tcp_ctask = ctask->dd_data; tcp_conn = conn->dd_data; - if (!(tcp_ctask->xmstate & XMSTATE_W_RESEND_DATA_DIGEST)) { + if 
(!test_bit(XMSTATE_BIT_W_RESEND_DATA_DIGEST, &tcp_ctask->xmstate)) { crypto_hash_final(&tcp_conn->tx_hash, (u8*)digest); iscsi_buf_init_iov(buf, (char*)digest, 4); } - tcp_ctask->xmstate &= ~XMSTATE_W_RESEND_DATA_DIGEST; + clear_bit(XMSTATE_BIT_W_RESEND_DATA_DIGEST, &tcp_ctask->xmstate); rc = iscsi_sendpage(conn, buf, &tcp_ctask->digest_count, &sent); if (!rc) @@ -1478,7 +1478,7 @@ iscsi_send_digest(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, else { debug_scsi("sending digest 0x%x failed for itt 0x%x!\n", *digest, ctask->itt); - tcp_ctask->xmstate |= XMSTATE_W_RESEND_DATA_DIGEST; + set_bit(XMSTATE_BIT_W_RESEND_DATA_DIGEST, &tcp_ctask->xmstate); } return rc; } @@ -1526,8 +1526,8 @@ iscsi_send_unsol_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) struct iscsi_data_task *dtask; int rc; - tcp_ctask->xmstate |= XMSTATE_UNS_DATA; - if (tcp_ctask->xmstate & XMSTATE_UNS_INIT) { + set_bit(XMSTATE_BIT_UNS_DATA, &tcp_ctask->xmstate); + if (test_bit(XMSTATE_BIT_UNS_INIT, &tcp_ctask->xmstate)) { dtask = &tcp_ctask->unsol_dtask; iscsi_prep_unsolicit_data_pdu(ctask, &dtask->hdr); @@ -1537,14 +1537,14 @@ iscsi_send_unsol_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) iscsi_hdr_digest(conn, &tcp_ctask->headbuf, (u8*)dtask->hdrext); - tcp_ctask->xmstate &= ~XMSTATE_UNS_INIT; + clear_bit(XMSTATE_BIT_UNS_INIT, &tcp_ctask->xmstate); iscsi_set_padding(tcp_ctask, ctask->data_count); } rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->data_count); if (rc) { - tcp_ctask->xmstate &= ~XMSTATE_UNS_DATA; - tcp_ctask->xmstate |= XMSTATE_UNS_HDR; + clear_bit(XMSTATE_BIT_UNS_DATA, &tcp_ctask->xmstate); + set_bit(XMSTATE_BIT_UNS_HDR, &tcp_ctask->xmstate); return rc; } @@ -1565,16 +1565,15 @@ iscsi_send_unsol_pdu(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; int rc; - if (tcp_ctask->xmstate & XMSTATE_UNS_HDR) { + if (test_and_clear_bit(XMSTATE_BIT_UNS_HDR, &tcp_ctask->xmstate)) { BUG_ON(!ctask->unsol_count); - tcp_ctask->xmstate &= ~XMSTATE_UNS_HDR; send_hdr: rc = iscsi_send_unsol_hdr(conn, ctask); if (rc) return rc; } - if (tcp_ctask->xmstate & XMSTATE_UNS_DATA) { + if (test_bit(XMSTATE_BIT_UNS_DATA, &tcp_ctask->xmstate)) { struct iscsi_data_task *dtask = &tcp_ctask->unsol_dtask; int start = tcp_ctask->sent; @@ -1584,14 +1583,14 @@ send_hdr: ctask->unsol_count -= tcp_ctask->sent - start; if (rc) return rc; - tcp_ctask->xmstate &= ~XMSTATE_UNS_DATA; + clear_bit(XMSTATE_BIT_UNS_DATA, &tcp_ctask->xmstate); /* * Done with the Data-Out. Next, check if we need * to send another unsolicited Data-Out. 
*/ if (ctask->unsol_count) { debug_scsi("sending more uns\n"); - tcp_ctask->xmstate |= XMSTATE_UNS_INIT; + set_bit(XMSTATE_BIT_UNS_INIT, &tcp_ctask->xmstate); goto send_hdr; } } @@ -1607,7 +1606,7 @@ static int iscsi_send_sol_pdu(struct iscsi_conn *conn, struct iscsi_data_task *dtask; int left, rc; - if (tcp_ctask->xmstate & XMSTATE_SOL_HDR_INIT) { + if (test_bit(XMSTATE_BIT_SOL_HDR_INIT, &tcp_ctask->xmstate)) { if (!tcp_ctask->r2t) { spin_lock_bh(&session->lock); __kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t, @@ -1621,19 +1620,19 @@ send_hdr: if (conn->hdrdgst_en) iscsi_hdr_digest(conn, &r2t->headbuf, (u8*)dtask->hdrext); - tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR_INIT; - tcp_ctask->xmstate |= XMSTATE_SOL_HDR; + clear_bit(XMSTATE_BIT_SOL_HDR_INIT, &tcp_ctask->xmstate); + set_bit(XMSTATE_BIT_SOL_HDR, &tcp_ctask->xmstate); } - if (tcp_ctask->xmstate & XMSTATE_SOL_HDR) { + if (test_bit(XMSTATE_BIT_SOL_HDR, &tcp_ctask->xmstate)) { r2t = tcp_ctask->r2t; dtask = &r2t->dtask; rc = iscsi_sendhdr(conn, &r2t->headbuf, r2t->data_count); if (rc) return rc; - tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR; - tcp_ctask->xmstate |= XMSTATE_SOL_DATA; + clear_bit(XMSTATE_BIT_SOL_HDR, &tcp_ctask->xmstate); + set_bit(XMSTATE_BIT_SOL_DATA, &tcp_ctask->xmstate); if (conn->datadgst_en) { iscsi_data_digest_init(conn->dd_data, tcp_ctask); @@ -1646,7 +1645,7 @@ send_hdr: r2t->sent); } - if (tcp_ctask->xmstate & XMSTATE_SOL_DATA) { + if (test_bit(XMSTATE_BIT_SOL_DATA, &tcp_ctask->xmstate)) { r2t = tcp_ctask->r2t; dtask = &r2t->dtask; @@ -1655,7 +1654,7 @@ send_hdr: &dtask->digestbuf, &dtask->digest); if (rc) return rc; - tcp_ctask->xmstate &= ~XMSTATE_SOL_DATA; + clear_bit(XMSTATE_BIT_SOL_DATA, &tcp_ctask->xmstate); /* * Done with this Data-Out. Next, check if we have @@ -1700,32 +1699,32 @@ send_hdr: * xmit stages. * *iscsi_send_cmd_hdr() - * XMSTATE_CMD_HDR_INIT - prepare Header and Data buffers Calculate - * Header Digest - * XMSTATE_CMD_HDR_XMIT - Transmit header in progress + * XMSTATE_BIT_CMD_HDR_INIT - prepare Header and Data buffers Calculate + * Header Digest + * XMSTATE_BIT_CMD_HDR_XMIT - Transmit header in progress * *iscsi_send_padding - * XMSTATE_W_PAD - Prepare and send pading - * XMSTATE_W_RESEND_PAD - retry send pading + * XMSTATE_BIT_W_PAD - Prepare and send pading + * XMSTATE_BIT_W_RESEND_PAD - retry send pading * *iscsi_send_digest - * XMSTATE_W_RESEND_DATA_DIGEST - Finalize and send Data Digest - * XMSTATE_W_RESEND_DATA_DIGEST - retry sending digest + * XMSTATE_BIT_W_RESEND_DATA_DIGEST - Finalize and send Data Digest + * XMSTATE_BIT_W_RESEND_DATA_DIGEST - retry sending digest * *iscsi_send_unsol_hdr - * XMSTATE_UNS_INIT - prepare un-solicit data header and digest - * XMSTATE_UNS_HDR - send un-solicit header + * XMSTATE_BIT_UNS_INIT - prepare un-solicit data header and digest + * XMSTATE_BIT_UNS_HDR - send un-solicit header * *iscsi_send_unsol_pdu - * XMSTATE_UNS_DATA - send un-solicit data in progress + * XMSTATE_BIT_UNS_DATA - send un-solicit data in progress * *iscsi_send_sol_pdu - * XMSTATE_SOL_HDR_INIT - solicit data header and digest initialize - * XMSTATE_SOL_HDR - send solicit header - * XMSTATE_SOL_DATA - send solicit data + * XMSTATE_BIT_SOL_HDR_INIT - solicit data header and digest initialize + * XMSTATE_BIT_SOL_HDR - send solicit header + * XMSTATE_BIT_SOL_DATA - send solicit data * *iscsi_tcp_ctask_xmit - * XMSTATE_IMM_DATA - xmit managment data (??) + * XMSTATE_BIT_IMM_DATA - xmit managment data (??) 
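The iscsi_tcp conversion running through these hunks turns xmstate from an int of OR-ed mask values into an unsigned long addressed by bit number, manipulated with set_bit(), clear_bit(), test_bit() and test_and_clear_bit(), which makes each update of the transmit state an atomic operation instead of a plain |=/&= read-modify-write. A small before/after illustration with made-up flag names (not the XMSTATE_BIT_* constants themselves):

#include <linux/bitops.h>

#define MY_BIT_HDR_SENT      0
#define MY_BIT_DATA_PENDING  1

struct my_task {
    unsigned long state;    /* was: int state built from mask #defines */
};

static void queue_data(struct my_task *t)
{
    /* was: t->state |= MASK_DATA_PENDING;  (non-atomic read-modify-write) */
    set_bit(MY_BIT_DATA_PENDING, &t->state);
}

static void xmit_step(struct my_task *t)
{
    if (!test_bit(MY_BIT_HDR_SENT, &t->state)) {
        /* ... send the header ... */
        set_bit(MY_BIT_HDR_SENT, &t->state);
    }

    /* was: if (t->state & MASK) { t->state &= ~MASK; ... } */
    if (test_and_clear_bit(MY_BIT_DATA_PENDING, &t->state)) {
        /* ... transmit the pending data ... */
    }
}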
**/ static int iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) @@ -1742,13 +1741,13 @@ iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) if (ctask->sc->sc_data_direction != DMA_TO_DEVICE) return 0; - if (tcp_ctask->xmstate & XMSTATE_IMM_DATA) { + if (test_bit(XMSTATE_BIT_IMM_DATA, &tcp_ctask->xmstate)) { rc = iscsi_send_data(ctask, &tcp_ctask->sendbuf, &tcp_ctask->sg, &tcp_ctask->sent, &ctask->imm_count, &tcp_ctask->immbuf, &tcp_ctask->immdigest); if (rc) return rc; - tcp_ctask->xmstate &= ~XMSTATE_IMM_DATA; + clear_bit(XMSTATE_BIT_IMM_DATA, &tcp_ctask->xmstate); } rc = iscsi_send_unsol_pdu(conn, ctask); @@ -1981,7 +1980,7 @@ static void iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask) { struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data; - tcp_mtask->xmstate = XMSTATE_IMM_HDR_INIT; + tcp_mtask->xmstate = 1 << XMSTATE_BIT_IMM_HDR_INIT; } static int diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h index 7eba44df0a7f..68c36cc8997e 100644 --- a/drivers/scsi/iscsi_tcp.h +++ b/drivers/scsi/iscsi_tcp.h @@ -32,21 +32,21 @@ #define IN_PROGRESS_PAD_RECV 0x4 /* xmit state machine */ -#define XMSTATE_IDLE 0x0 -#define XMSTATE_CMD_HDR_INIT 0x1 -#define XMSTATE_CMD_HDR_XMIT 0x2 -#define XMSTATE_IMM_HDR 0x4 -#define XMSTATE_IMM_DATA 0x8 -#define XMSTATE_UNS_INIT 0x10 -#define XMSTATE_UNS_HDR 0x20 -#define XMSTATE_UNS_DATA 0x40 -#define XMSTATE_SOL_HDR 0x80 -#define XMSTATE_SOL_DATA 0x100 -#define XMSTATE_W_PAD 0x200 -#define XMSTATE_W_RESEND_PAD 0x400 -#define XMSTATE_W_RESEND_DATA_DIGEST 0x800 -#define XMSTATE_IMM_HDR_INIT 0x1000 -#define XMSTATE_SOL_HDR_INIT 0x2000 +#define XMSTATE_VALUE_IDLE 0 +#define XMSTATE_BIT_CMD_HDR_INIT 0 +#define XMSTATE_BIT_CMD_HDR_XMIT 1 +#define XMSTATE_BIT_IMM_HDR 2 +#define XMSTATE_BIT_IMM_DATA 3 +#define XMSTATE_BIT_UNS_INIT 4 +#define XMSTATE_BIT_UNS_HDR 5 +#define XMSTATE_BIT_UNS_DATA 6 +#define XMSTATE_BIT_SOL_HDR 7 +#define XMSTATE_BIT_SOL_DATA 8 +#define XMSTATE_BIT_W_PAD 9 +#define XMSTATE_BIT_W_RESEND_PAD 10 +#define XMSTATE_BIT_W_RESEND_DATA_DIGEST 11 +#define XMSTATE_BIT_IMM_HDR_INIT 12 +#define XMSTATE_BIT_SOL_HDR_INIT 13 #define ISCSI_PAD_LEN 4 #define ISCSI_SG_TABLESIZE SG_ALL @@ -122,7 +122,7 @@ struct iscsi_data_task { struct iscsi_tcp_mgmt_task { struct iscsi_hdr hdr; char hdrext[sizeof(__u32)]; /* Header-Digest */ - int xmstate; /* mgmt xmit progress */ + unsigned long xmstate; /* mgmt xmit progress */ struct iscsi_buf headbuf; /* header buffer */ struct iscsi_buf sendbuf; /* in progress buffer */ int sent; @@ -150,7 +150,7 @@ struct iscsi_tcp_cmd_task { int pad_count; /* padded bytes */ struct iscsi_buf headbuf; /* header buf (xmit) */ struct iscsi_buf sendbuf; /* in progress buffer*/ - int xmstate; /* xmit xtate machine */ + unsigned long xmstate; /* xmit xtate machine */ int sent; struct scatterlist *sg; /* per-cmd SG list */ struct scatterlist *bad_sg; /* assert statement */ diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index efceed451b46..8b57af5baaec 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -291,9 +291,6 @@ invalid_datalen: min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE)); } - if (sc->sc_data_direction == DMA_TO_DEVICE) - goto out; - if (rhdr->flags & ISCSI_FLAG_CMD_UNDERFLOW) { int res_count = be32_to_cpu(rhdr->residual_count); diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c index abe2bda6ac37..3b09ab21d701 100644 --- a/drivers/scsi/mac_scsi.c +++ b/drivers/scsi/mac_scsi.c @@ -303,7 
+303,7 @@ int macscsi_detect(struct scsi_host_template * tpnt) if (instance->irq != SCSI_IRQ_NONE) if (request_irq(instance->irq, NCR5380_intr, IRQ_FLG_SLOW, - "ncr5380", instance)) { + "ncr5380", instance)) { printk(KERN_WARNING "scsi%d: IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); instance->irq = SCSI_IRQ_NONE; @@ -326,7 +326,7 @@ int macscsi_detect(struct scsi_host_template * tpnt) int macscsi_release (struct Scsi_Host *shpnt) { if (shpnt->irq != SCSI_IRQ_NONE) - free_irq (shpnt->irq, NCR5380_intr); + free_irq(shpnt->irq, shpnt); NCR5380_exit(shpnt); return 0; diff --git a/drivers/scsi/pas16.c b/drivers/scsi/pas16.c index ee5965659971..f2018b46f494 100644 --- a/drivers/scsi/pas16.c +++ b/drivers/scsi/pas16.c @@ -453,7 +453,8 @@ int __init pas16_detect(struct scsi_host_template * tpnt) instance->irq = NCR5380_probe_irq(instance, PAS16_IRQS); if (instance->irq != SCSI_IRQ_NONE) - if (request_irq(instance->irq, pas16_intr, IRQF_DISABLED, "pas16", instance)) { + if (request_irq(instance->irq, pas16_intr, IRQF_DISABLED, + "pas16", instance)) { printk("scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); instance->irq = SCSI_IRQ_NONE; @@ -604,7 +605,7 @@ static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src static int pas16_release(struct Scsi_Host *shost) { if (shost->irq) - free_irq(shost->irq, NULL); + free_irq(shost->irq, shost); NCR5380_exit(shost); if (shost->dma_channel != 0xff) free_dma(shost->dma_channel); diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c index 3aeb68bcb7ac..146d540f6281 100644 --- a/drivers/scsi/qla1280.c +++ b/drivers/scsi/qla1280.c @@ -1310,14 +1310,7 @@ qla1280_done(struct scsi_qla_host *ha) } /* Release memory used for this I/O */ - if (cmd->use_sg) { - pci_unmap_sg(ha->pdev, cmd->request_buffer, - cmd->use_sg, cmd->sc_data_direction); - } else if (cmd->request_bufflen) { - pci_unmap_single(ha->pdev, sp->saved_dma_handle, - cmd->request_bufflen, - cmd->sc_data_direction); - } + scsi_dma_unmap(cmd); /* Call the mid-level driver interrupt handler */ CMD_HANDLE(sp->cmd) = (unsigned char *)INVALID_HANDLE; @@ -1406,14 +1399,14 @@ qla1280_return_status(struct response * sts, struct scsi_cmnd *cp) break; case CS_DATA_UNDERRUN: - if ((cp->request_bufflen - residual_length) < + if ((scsi_bufflen(cp) - residual_length) < cp->underflow) { printk(KERN_WARNING "scsi: Underflow detected - retrying " "command.\n"); host_status = DID_ERROR; } else { - cp->resid = residual_length; + scsi_set_resid(cp, residual_length); host_status = DID_OK; } break; @@ -2775,33 +2768,28 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) struct device_reg __iomem *reg = ha->iobase; struct scsi_cmnd *cmd = sp->cmd; cmd_a64_entry_t *pkt; - struct scatterlist *sg = NULL, *s; __le32 *dword_ptr; dma_addr_t dma_handle; int status = 0; int cnt; int req_cnt; - u16 seg_cnt; + int seg_cnt; u8 dir; ENTER("qla1280_64bit_start_scsi:"); /* Calculate number of entries and segments required. */ req_cnt = 1; - if (cmd->use_sg) { - sg = (struct scatterlist *) cmd->request_buffer; - seg_cnt = pci_map_sg(ha->pdev, sg, cmd->use_sg, - cmd->sc_data_direction); - + seg_cnt = scsi_dma_map(cmd); + if (seg_cnt > 0) { if (seg_cnt > 2) { req_cnt += (seg_cnt - 2) / 5; if ((seg_cnt - 2) % 5) req_cnt++; } - } else if (cmd->request_bufflen) { /* If data transfer. 
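The qla1280 changes around this point retire the driver's private use_sg/request_buffer/request_bufflen handling in favour of the SCSI mid-layer accessors: scsi_dma_map() maps the command's scatterlist and returns the segment count (0 for a command with no data, negative on failure), scsi_dma_unmap() reverses it at completion, and scsi_sglist(), scsi_sg_count(), scsi_bufflen() and scsi_set_resid() replace direct field access. A hedged outline of how a submit/complete pair would use these helpers; the descriptor-building step is only indicated by a debug print:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <scsi/scsi_cmnd.h>

static int my_build_io(struct scsi_cmnd *cmd)
{
    struct scatterlist *sg;
    int seg_cnt, i;

    seg_cnt = scsi_dma_map(cmd);
    if (seg_cnt < 0)
        return -EIO;        /* mapping failed */

    scsi_for_each_sg(cmd, sg, seg_cnt, i) {
        /* each bus address/length pair would become one
         * adapter data-segment descriptor */
        pr_debug("seg %d: 0x%llx len %u\n", i,
                 (unsigned long long)sg_dma_address(sg),
                 sg_dma_len(sg));
    }
    return 0;
}

static void my_complete_io(struct scsi_cmnd *cmd, unsigned int residual)
{
    scsi_dma_unmap(cmd);
    scsi_set_resid(cmd, residual);
}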
*/ - seg_cnt = 1; - } else { - seg_cnt = 0; + } else if (seg_cnt < 0) { + status = 1; + goto out; } if ((req_cnt + 2) >= ha->req_q_cnt) { @@ -2889,124 +2877,104 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) * Load data segments. */ if (seg_cnt) { /* If data transfer. */ + struct scatterlist *sg, *s; int remseg = seg_cnt; + + sg = scsi_sglist(cmd); + /* Setup packet address segment pointer. */ dword_ptr = (u32 *)&pkt->dseg_0_address; - if (cmd->use_sg) { /* If scatter gather */ - /* Load command entry data segments. */ - for_each_sg(sg, s, seg_cnt, cnt) { - if (cnt == 2) + /* Load command entry data segments. */ + for_each_sg(sg, s, seg_cnt, cnt) { + if (cnt == 2) + break; + + dma_handle = sg_dma_address(s); +#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) + if (ha->flags.use_pci_vchannel) + sn_pci_set_vchan(ha->pdev, + (unsigned long *)&dma_handle, + SCSI_BUS_32(cmd)); +#endif + *dword_ptr++ = + cpu_to_le32(pci_dma_lo32(dma_handle)); + *dword_ptr++ = + cpu_to_le32(pci_dma_hi32(dma_handle)); + *dword_ptr++ = cpu_to_le32(sg_dma_len(s)); + dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n", + cpu_to_le32(pci_dma_hi32(dma_handle)), + cpu_to_le32(pci_dma_lo32(dma_handle)), + cpu_to_le32(sg_dma_len(sg_next(s)))); + remseg--; + } + dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather " + "command packet data - b %i, t %i, l %i \n", + SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), + SCSI_LUN_32(cmd)); + qla1280_dump_buffer(5, (char *)pkt, + REQUEST_ENTRY_SIZE); + + /* + * Build continuation packets. + */ + dprintk(3, "S/G Building Continuation...seg_cnt=0x%x " + "remains\n", seg_cnt); + + while (remseg > 0) { + /* Update sg start */ + sg = s; + /* Adjust ring index. */ + ha->req_ring_index++; + if (ha->req_ring_index == REQUEST_ENTRY_CNT) { + ha->req_ring_index = 0; + ha->request_ring_ptr = + ha->request_ring; + } else + ha->request_ring_ptr++; + + pkt = (cmd_a64_entry_t *)ha->request_ring_ptr; + + /* Zero out packet. */ + memset(pkt, 0, REQUEST_ENTRY_SIZE); + + /* Load packet defaults. */ + ((struct cont_a64_entry *) pkt)->entry_type = + CONTINUE_A64_TYPE; + ((struct cont_a64_entry *) pkt)->entry_count = 1; + ((struct cont_a64_entry *) pkt)->sys_define = + (uint8_t)ha->req_ring_index; + /* Setup packet address segment pointer. */ + dword_ptr = + (u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address; + + /* Load continuation entry data segments. */ + for_each_sg(sg, s, remseg, cnt) { + if (cnt == 5) break; dma_handle = sg_dma_address(s); #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) if (ha->flags.use_pci_vchannel) sn_pci_set_vchan(ha->pdev, - (unsigned long *)&dma_handle, + (unsigned long *)&dma_handle, SCSI_BUS_32(cmd)); #endif *dword_ptr++ = cpu_to_le32(pci_dma_lo32(dma_handle)); *dword_ptr++ = cpu_to_le32(pci_dma_hi32(dma_handle)); - *dword_ptr++ = cpu_to_le32(sg_dma_len(s)); - dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n", + *dword_ptr++ = + cpu_to_le32(sg_dma_len(s)); + dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n", cpu_to_le32(pci_dma_hi32(dma_handle)), cpu_to_le32(pci_dma_lo32(dma_handle)), - cpu_to_le32(sg_dma_len(sg_next(s)))); - remseg--; + cpu_to_le32(sg_dma_len(s))); } - dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather " - "command packet data - b %i, t %i, l %i \n", - SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), - SCSI_LUN_32(cmd)); - qla1280_dump_buffer(5, (char *)pkt, - REQUEST_ENTRY_SIZE); - - /* - * Build continuation packets. 
- */ - dprintk(3, "S/G Building Continuation...seg_cnt=0x%x " - "remains\n", seg_cnt); - - while (remseg > 0) { - /* Update sg start */ - sg = s; - /* Adjust ring index. */ - ha->req_ring_index++; - if (ha->req_ring_index == REQUEST_ENTRY_CNT) { - ha->req_ring_index = 0; - ha->request_ring_ptr = - ha->request_ring; - } else - ha->request_ring_ptr++; - - pkt = (cmd_a64_entry_t *)ha->request_ring_ptr; - - /* Zero out packet. */ - memset(pkt, 0, REQUEST_ENTRY_SIZE); - - /* Load packet defaults. */ - ((struct cont_a64_entry *) pkt)->entry_type = - CONTINUE_A64_TYPE; - ((struct cont_a64_entry *) pkt)->entry_count = 1; - ((struct cont_a64_entry *) pkt)->sys_define = - (uint8_t)ha->req_ring_index; - /* Setup packet address segment pointer. */ - dword_ptr = - (u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address; - - /* Load continuation entry data segments. */ - for_each_sg(sg, s, remseg, cnt) { - if (cnt == 5) - break; - dma_handle = sg_dma_address(s); -#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) - if (ha->flags.use_pci_vchannel) - sn_pci_set_vchan(ha->pdev, - (unsigned long *)&dma_handle, - SCSI_BUS_32(cmd)); -#endif - *dword_ptr++ = - cpu_to_le32(pci_dma_lo32(dma_handle)); - *dword_ptr++ = - cpu_to_le32(pci_dma_hi32(dma_handle)); - *dword_ptr++ = - cpu_to_le32(sg_dma_len(s)); - dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n", - cpu_to_le32(pci_dma_hi32(dma_handle)), - cpu_to_le32(pci_dma_lo32(dma_handle)), - cpu_to_le32(sg_dma_len(s))); - } - remseg -= cnt; - dprintk(5, "qla1280_64bit_start_scsi: " - "continuation packet data - b %i, t " - "%i, l %i \n", SCSI_BUS_32(cmd), - SCSI_TCN_32(cmd), SCSI_LUN_32(cmd)); - qla1280_dump_buffer(5, (char *)pkt, - REQUEST_ENTRY_SIZE); - } - } else { /* No scatter gather data transfer */ - dma_handle = pci_map_single(ha->pdev, - cmd->request_buffer, - cmd->request_bufflen, - cmd->sc_data_direction); - - sp->saved_dma_handle = dma_handle; -#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) - if (ha->flags.use_pci_vchannel) - sn_pci_set_vchan(ha->pdev, - (unsigned long *)&dma_handle, - SCSI_BUS_32(cmd)); -#endif - *dword_ptr++ = cpu_to_le32(pci_dma_lo32(dma_handle)); - *dword_ptr++ = cpu_to_le32(pci_dma_hi32(dma_handle)); - *dword_ptr = cpu_to_le32(cmd->request_bufflen); - - dprintk(5, "qla1280_64bit_start_scsi: No scatter/" - "gather command packet data - b %i, t %i, " - "l %i \n", SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), - SCSI_LUN_32(cmd)); + remseg -= cnt; + dprintk(5, "qla1280_64bit_start_scsi: " + "continuation packet data - b %i, t " + "%i, l %i \n", SCSI_BUS_32(cmd), + SCSI_TCN_32(cmd), SCSI_LUN_32(cmd)); qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE); } @@ -3068,12 +3036,11 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) struct device_reg __iomem *reg = ha->iobase; struct scsi_cmnd *cmd = sp->cmd; struct cmd_entry *pkt; - struct scatterlist *sg = NULL, *s; __le32 *dword_ptr; int status = 0; int cnt; int req_cnt; - uint16_t seg_cnt; + int seg_cnt; dma_addr_t dma_handle; u8 dir; @@ -3083,18 +3050,8 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) cmd->cmnd[0]); /* Calculate number of entries and segments required. */ - req_cnt = 1; - if (cmd->use_sg) { - /* - * We must build an SG list in adapter format, as the kernel's - * SG list cannot be used directly because of data field size - * (__alpha__) differences and the kernel SG list uses virtual - * addresses where we need physical addresses. 
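Both qla1280 request builders now walk the mapped list with for_each_sg(), fill a fixed number of descriptors per IOCB, and then continue from the segment where the previous loop stopped; the 64-bit path takes 2 segments in the command entry and 5 per continuation entry (4 and 7 in the 32-bit path). A stripped-down sketch of that chunked walk, with a hypothetical emit_desc() callback standing in for writing one address/length pair:

#include <linux/scatterlist.h>
#include <linux/types.h>

static void emit_chunked(struct scatterlist *sgl, int seg_cnt,
                         void (*emit_desc)(dma_addr_t addr, unsigned int len))
{
    struct scatterlist *sg = sgl, *s;
    int remseg = seg_cnt, cnt;

    /* first chunk: at most 2 segments go into the command entry */
    for_each_sg(sg, s, seg_cnt, cnt) {
        if (cnt == 2)
            break;
        emit_desc(sg_dma_address(s), sg_dma_len(s));
        remseg--;
    }

    /* remaining segments: groups of up to 5 per continuation entry */
    while (remseg > 0) {
        sg = s;                 /* resume where the last loop stopped */
        /* ... start a new continuation entry here ... */
        for_each_sg(sg, s, remseg, cnt) {
            if (cnt == 5)
                break;
            emit_desc(sg_dma_address(s), sg_dma_len(s));
        }
        remseg -= cnt;
    }
}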
- */ - sg = (struct scatterlist *) cmd->request_buffer; - seg_cnt = pci_map_sg(ha->pdev, sg, cmd->use_sg, - cmd->sc_data_direction); - + seg_cnt = scsi_dma_map(cmd); + if (seg_cnt) { /* * if greater than four sg entries then we need to allocate * continuation entries @@ -3106,14 +3063,9 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) } dprintk(3, "S/G Transfer cmd=%p seg_cnt=0x%x, req_cnt=%x\n", cmd, seg_cnt, req_cnt); - } else if (cmd->request_bufflen) { /* If data transfer. */ - dprintk(3, "No S/G transfer t=%x cmd=%p len=%x CDB=%x\n", - SCSI_TCN_32(cmd), cmd, cmd->request_bufflen, - cmd->cmnd[0]); - seg_cnt = 1; - } else { - /* dprintk(1, "No data transfer \n"); */ - seg_cnt = 0; + } else if (seg_cnt < 0) { + status = 1; + goto out; } if ((req_cnt + 2) >= ha->req_q_cnt) { @@ -3194,91 +3146,84 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) * Load data segments. */ if (seg_cnt) { + struct scatterlist *sg, *s; int remseg = seg_cnt; + + sg = scsi_sglist(cmd); + /* Setup packet address segment pointer. */ dword_ptr = &pkt->dseg_0_address; - if (cmd->use_sg) { /* If scatter gather */ - dprintk(3, "Building S/G data segments..\n"); - qla1280_dump_buffer(1, (char *)sg, 4 * 16); + dprintk(3, "Building S/G data segments..\n"); + qla1280_dump_buffer(1, (char *)sg, 4 * 16); + + /* Load command entry data segments. */ + for_each_sg(sg, s, seg_cnt, cnt) { + if (cnt == 4) + break; + *dword_ptr++ = + cpu_to_le32(pci_dma_lo32(sg_dma_address(s))); + *dword_ptr++ = cpu_to_le32(sg_dma_len(s)); + dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n", + (pci_dma_lo32(sg_dma_address(s))), + (sg_dma_len(s))); + remseg--; + } + /* + * Build continuation packets. + */ + dprintk(3, "S/G Building Continuation" + "...seg_cnt=0x%x remains\n", seg_cnt); + while (remseg > 0) { + /* Continue from end point */ + sg = s; + /* Adjust ring index. */ + ha->req_ring_index++; + if (ha->req_ring_index == REQUEST_ENTRY_CNT) { + ha->req_ring_index = 0; + ha->request_ring_ptr = + ha->request_ring; + } else + ha->request_ring_ptr++; + + pkt = (struct cmd_entry *)ha->request_ring_ptr; + + /* Zero out packet. */ + memset(pkt, 0, REQUEST_ENTRY_SIZE); + + /* Load packet defaults. */ + ((struct cont_entry *) pkt)-> + entry_type = CONTINUE_TYPE; + ((struct cont_entry *) pkt)->entry_count = 1; - /* Load command entry data segments. */ - for_each_sg(sg, s, seg_cnt, cnt) { - if (cnt == 4) + ((struct cont_entry *) pkt)->sys_define = + (uint8_t) ha->req_ring_index; + + /* Setup packet address segment pointer. */ + dword_ptr = + &((struct cont_entry *) pkt)->dseg_0_address; + + /* Load continuation entry data segments. */ + for_each_sg(sg, s, remseg, cnt) { + if (cnt == 7) break; *dword_ptr++ = cpu_to_le32(pci_dma_lo32(sg_dma_address(s))); - *dword_ptr++ = cpu_to_le32(sg_dma_len(s)); - dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n", - (pci_dma_lo32(sg_dma_address(s))), - (sg_dma_len(s))); - remseg--; - } - /* - * Build continuation packets. - */ - dprintk(3, "S/G Building Continuation" - "...seg_cnt=0x%x remains\n", seg_cnt); - while (remseg > 0) { - /* Continue from end point */ - sg = s; - /* Adjust ring index. */ - ha->req_ring_index++; - if (ha->req_ring_index == REQUEST_ENTRY_CNT) { - ha->req_ring_index = 0; - ha->request_ring_ptr = - ha->request_ring; - } else - ha->request_ring_ptr++; - - pkt = (struct cmd_entry *)ha->request_ring_ptr; - - /* Zero out packet. */ - memset(pkt, 0, REQUEST_ENTRY_SIZE); - - /* Load packet defaults. 
*/ - ((struct cont_entry *) pkt)-> - entry_type = CONTINUE_TYPE; - ((struct cont_entry *) pkt)->entry_count = 1; - - ((struct cont_entry *) pkt)->sys_define = - (uint8_t) ha->req_ring_index; - - /* Setup packet address segment pointer. */ - dword_ptr = - &((struct cont_entry *) pkt)->dseg_0_address; - - /* Load continuation entry data segments. */ - for_each_sg(sg, s, remseg, cnt) { - if (cnt == 7) - break; - *dword_ptr++ = - cpu_to_le32(pci_dma_lo32(sg_dma_address(s))); - *dword_ptr++ = - cpu_to_le32(sg_dma_len(s)); - dprintk(1, - "S/G Segment Cont. phys_addr=0x%x, " - "len=0x%x\n", - cpu_to_le32(pci_dma_lo32(sg_dma_address(s))), - cpu_to_le32(sg_dma_len(s))); - } - remseg -= cnt; - dprintk(5, "qla1280_32bit_start_scsi: " - "continuation packet data - " - "scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd), - SCSI_TCN_32(cmd), SCSI_LUN_32(cmd)); - qla1280_dump_buffer(5, (char *)pkt, - REQUEST_ENTRY_SIZE); + *dword_ptr++ = + cpu_to_le32(sg_dma_len(s)); + dprintk(1, + "S/G Segment Cont. phys_addr=0x%x, " + "len=0x%x\n", + cpu_to_le32(pci_dma_lo32(sg_dma_address(s))), + cpu_to_le32(sg_dma_len(s))); } - } else { /* No S/G data transfer */ - dma_handle = pci_map_single(ha->pdev, - cmd->request_buffer, - cmd->request_bufflen, - cmd->sc_data_direction); - sp->saved_dma_handle = dma_handle; - - *dword_ptr++ = cpu_to_le32(pci_dma_lo32(dma_handle)); - *dword_ptr = cpu_to_le32(cmd->request_bufflen); + remseg -= cnt; + dprintk(5, "qla1280_32bit_start_scsi: " + "continuation packet data - " + "scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd), + SCSI_TCN_32(cmd), SCSI_LUN_32(cmd)); + qla1280_dump_buffer(5, (char *)pkt, + REQUEST_ENTRY_SIZE); } } else { /* No data transfer at all */ dprintk(5, "qla1280_32bit_start_scsi: No data, command " @@ -4086,9 +4031,9 @@ __qla1280_print_scsi_cmd(struct scsi_cmnd *cmd) for (i = 0; i < cmd->cmd_len; i++) { printk("0x%02x ", cmd->cmnd[i]); } - printk(" seg_cnt =%d\n", cmd->use_sg); + printk(" seg_cnt =%d\n", scsi_sg_count(cmd)); printk(" request buffer=0x%p, request buffer len=0x%x\n", - cmd->request_buffer, cmd->request_bufflen); + scsi_sglist(cmd), scsi_bufflen(cmd)); /* if (cmd->use_sg) { sg = (struct scatterlist *) cmd->request_buffer; diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c index 5e46d842c6f9..e606cf0a2eb7 100644 --- a/drivers/scsi/sun3_scsi.c +++ b/drivers/scsi/sun3_scsi.c @@ -268,7 +268,7 @@ int sun3scsi_detect(struct scsi_host_template * tpnt) ((struct NCR5380_hostdata *)instance->hostdata)->ctrl = 0; if (request_irq(instance->irq, scsi_sun3_intr, - 0, "Sun3SCSI-5380", NULL)) { + 0, "Sun3SCSI-5380", instance)) { #ifndef REAL_DMA printk("scsi%d: IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); @@ -310,7 +310,7 @@ int sun3scsi_detect(struct scsi_host_template * tpnt) int sun3scsi_release (struct Scsi_Host *shpnt) { if (shpnt->irq != SCSI_IRQ_NONE) - free_irq (shpnt->irq, NULL); + free_irq(shpnt->irq, shpnt); iounmap((void *)sun3_scsi_regp); diff --git a/drivers/scsi/sun3_scsi_vme.c b/drivers/scsi/sun3_scsi_vme.c index 7cb4a31453e6..02d9727f017a 100644 --- a/drivers/scsi/sun3_scsi_vme.c +++ b/drivers/scsi/sun3_scsi_vme.c @@ -230,7 +230,7 @@ static int sun3scsi_detect(struct scsi_host_template * tpnt) ((struct NCR5380_hostdata *)instance->hostdata)->ctrl = 0; if (request_irq(instance->irq, scsi_sun3_intr, - 0, "Sun3SCSI-5380VME", NULL)) { + 0, "Sun3SCSI-5380VME", instance)) { #ifndef REAL_DMA printk("scsi%d: IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); @@ -279,7 +279,7 @@ static int sun3scsi_detect(struct 
scsi_host_template * tpnt) int sun3scsi_release (struct Scsi_Host *shpnt) { if (shpnt->irq != SCSI_IRQ_NONE) - free_irq (shpnt->irq, NULL); + free_irq(shpnt->irq, shpnt); iounmap((void *)sun3_scsi_regp); diff --git a/drivers/scsi/t128.c b/drivers/scsi/t128.c index 248d60b8d899..041eaaace2c3 100644 --- a/drivers/scsi/t128.c +++ b/drivers/scsi/t128.c @@ -259,7 +259,8 @@ found: instance->irq = NCR5380_probe_irq(instance, T128_IRQS); if (instance->irq != SCSI_IRQ_NONE) - if (request_irq(instance->irq, t128_intr, IRQF_DISABLED, "t128", instance)) { + if (request_irq(instance->irq, t128_intr, IRQF_DISABLED, "t128", + instance)) { printk("scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); instance->irq = SCSI_IRQ_NONE; @@ -295,7 +296,7 @@ static int t128_release(struct Scsi_Host *shost) NCR5380_local_declare(); NCR5380_setup(shost); if (shost->irq) - free_irq(shost->irq, NULL); + free_irq(shost->irq, shost); NCR5380_exit(shost); if (shost->io_port && shost->n_io_port) release_region(shost->io_port, shost->n_io_port); diff --git a/drivers/spi/at25.c b/drivers/spi/at25.c index e007833cca59..290dbe99647a 100644 --- a/drivers/spi/at25.c +++ b/drivers/spi/at25.c @@ -21,6 +21,13 @@ #include <linux/spi/eeprom.h> +/* + * NOTE: this is an *EEPROM* driver. The vagaries of product naming + * mean that some AT25 products are EEPROMs, and others are FLASH. + * Handle FLASH chips with the drivers/mtd/devices/m25p80.c driver, + * not this one! + */ + struct at25_data { struct spi_device *spi; struct mutex lock; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index b31f4431849b..93e9de46977a 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -541,10 +541,7 @@ static void spi_complete(void *arg) * Also, the caller is guaranteeing that the memory associated with the * message will not be freed before this call returns. * - * The return value is a negative error code if the message could not be - * submitted, else zero. When the value is zero, then message->status is - * also defined; it's the completion code for the transfer, either zero - * or a negative error code from the controller driver. + * It returns zero on success, else a negative error code. */ int spi_sync(struct spi_device *spi, struct spi_message *message) { @@ -554,8 +551,10 @@ int spi_sync(struct spi_device *spi, struct spi_message *message) message->complete = spi_complete; message->context = &done; status = spi_async(spi, message); - if (status == 0) + if (status == 0) { wait_for_completion(&done); + status = message->status; + } message->context = NULL; return status; } @@ -589,7 +588,7 @@ int spi_write_then_read(struct spi_device *spi, const u8 *txbuf, unsigned n_tx, u8 *rxbuf, unsigned n_rx) { - static DECLARE_MUTEX(lock); + static DEFINE_MUTEX(lock); int status; struct spi_message message; @@ -615,7 +614,7 @@ int spi_write_then_read(struct spi_device *spi, } /* ... 
unless someone else is using the pre-allocated buffer */ - if (down_trylock(&lock)) { + if (!mutex_trylock(&lock)) { local_buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL); if (!local_buf) return -ENOMEM; @@ -628,13 +627,11 @@ int spi_write_then_read(struct spi_device *spi, /* do the i/o */ status = spi_sync(spi, &message); - if (status == 0) { + if (status == 0) memcpy(rxbuf, x[1].rx_buf, n_rx); - status = message.status; - } if (x[0].tx_buf == buf) - up(&lock); + mutex_unlock(&lock); else kfree(local_buf); diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi_bfin5xx.c index 2ef11bb70b2e..22697b812205 100644 --- a/drivers/spi/spi_bfin5xx.c +++ b/drivers/spi/spi_bfin5xx.c @@ -1,17 +1,22 @@ /* - * File: drivers/spi/bfin5xx_spi.c - * Based on: N/A - * Author: Luke Yang (Analog Devices Inc.) + * File: drivers/spi/bfin5xx_spi.c + * Maintainer: + * Bryan Wu <bryan.wu@analog.com> + * Original Author: + * Luke Yang (Analog Devices Inc.) * - * Created: March. 10th 2006 - * Description: SPI controller driver for Blackfin 5xx - * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * Created: March. 10th 2006 + * Description: SPI controller driver for Blackfin BF5xx + * Bugs: Enter bugs at http://blackfin.uclinux.org/ * * Modified: * March 10, 2006 bfin5xx_spi.c Created. (Luke Yang) * August 7, 2006 added full duplex mode (Axel Weiss & Luke Yang) + * July 17, 2007 add support for BF54x SPI0 controller (Bryan Wu) + * July 30, 2007 add platfrom_resource interface to support multi-port + * SPI controller (Bryan Wu) * - * Copyright 2004-2006 Analog Devices Inc. + * Copyright 2004-2007 Analog Devices Inc. * * This program is free software ; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -31,50 +36,39 @@ #include <linux/init.h> #include <linux/module.h> +#include <linux/delay.h> #include <linux/device.h> +#include <linux/io.h> #include <linux/ioport.h> +#include <linux/irq.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/spi/spi.h> #include <linux/workqueue.h> -#include <linux/delay.h> -#include <asm/io.h> -#include <asm/irq.h> -#include <asm/delay.h> #include <asm/dma.h> - +#include <asm/portmux.h> #include <asm/bfin5xx_spi.h> -MODULE_AUTHOR("Luke Yang"); -MODULE_DESCRIPTION("Blackfin 5xx SPI Contoller"); -MODULE_LICENSE("GPL"); +#define DRV_NAME "bfin-spi" +#define DRV_AUTHOR "Bryan Wu, Luke Yang" +#define DRV_DESC "Blackfin BF5xx on-chip SPI Contoller Driver" +#define DRV_VERSION "1.0" -#define IS_DMA_ALIGNED(x) (((u32)(x)&0x07)==0) +MODULE_AUTHOR(DRV_AUTHOR); +MODULE_DESCRIPTION(DRV_DESC); +MODULE_LICENSE("GPL"); -#define DEFINE_SPI_REG(reg, off) \ -static inline u16 read_##reg(void) \ - { return *(volatile unsigned short*)(SPI0_REGBASE + off); } \ -static inline void write_##reg(u16 v) \ - {*(volatile unsigned short*)(SPI0_REGBASE + off) = v;\ - SSYNC();} +#define IS_DMA_ALIGNED(x) (((u32)(x)&0x07) == 0) -DEFINE_SPI_REG(CTRL, 0x00) -DEFINE_SPI_REG(FLAG, 0x04) -DEFINE_SPI_REG(STAT, 0x08) -DEFINE_SPI_REG(TDBR, 0x0C) -DEFINE_SPI_REG(RDBR, 0x10) -DEFINE_SPI_REG(BAUD, 0x14) -DEFINE_SPI_REG(SHAW, 0x18) -#define START_STATE ((void*)0) -#define RUNNING_STATE ((void*)1) -#define DONE_STATE ((void*)2) -#define ERROR_STATE ((void*)-1) -#define QUEUE_RUNNING 0 -#define QUEUE_STOPPED 1 -int dma_requested; +#define START_STATE ((void *)0) +#define RUNNING_STATE ((void *)1) +#define DONE_STATE ((void *)2) +#define ERROR_STATE ((void *)-1) +#define QUEUE_RUNNING 0 +#define 
QUEUE_STOPPED 1 struct driver_data { /* Driver model hookup */ @@ -83,6 +77,12 @@ struct driver_data { /* SPI framework hookup */ struct spi_master *master; + /* Regs base of SPI controller */ + void __iomem *regs_base; + + /* Pin request list */ + u16 *pin_req; + /* BFIN hookup */ struct bfin5xx_spi_master *master_info; @@ -107,12 +107,18 @@ struct driver_data { void *tx_end; void *rx; void *rx_end; + + /* DMA stuffs */ + int dma_channel; int dma_mapped; + int dma_requested; dma_addr_t rx_dma; dma_addr_t tx_dma; + size_t rx_map_len; size_t tx_map_len; u8 n_bytes; + int cs_change; void (*write) (struct driver_data *); void (*read) (struct driver_data *); void (*duplex) (struct driver_data *); @@ -129,28 +135,40 @@ struct chip_data { u8 enable_dma; u8 bits_per_word; /* 8 or 16 */ u8 cs_change_per_word; - u8 cs_chg_udelay; + u16 cs_chg_udelay; /* Some devices require > 255usec delay */ void (*write) (struct driver_data *); void (*read) (struct driver_data *); void (*duplex) (struct driver_data *); }; +#define DEFINE_SPI_REG(reg, off) \ +static inline u16 read_##reg(struct driver_data *drv_data) \ + { return bfin_read16(drv_data->regs_base + off); } \ +static inline void write_##reg(struct driver_data *drv_data, u16 v) \ + { bfin_write16(drv_data->regs_base + off, v); } + +DEFINE_SPI_REG(CTRL, 0x00) +DEFINE_SPI_REG(FLAG, 0x04) +DEFINE_SPI_REG(STAT, 0x08) +DEFINE_SPI_REG(TDBR, 0x0C) +DEFINE_SPI_REG(RDBR, 0x10) +DEFINE_SPI_REG(BAUD, 0x14) +DEFINE_SPI_REG(SHAW, 0x18) + static void bfin_spi_enable(struct driver_data *drv_data) { u16 cr; - cr = read_CTRL(); - write_CTRL(cr | BIT_CTL_ENABLE); - SSYNC(); + cr = read_CTRL(drv_data); + write_CTRL(drv_data, (cr | BIT_CTL_ENABLE)); } static void bfin_spi_disable(struct driver_data *drv_data) { u16 cr; - cr = read_CTRL(); - write_CTRL(cr & (~BIT_CTL_ENABLE)); - SSYNC(); + cr = read_CTRL(drv_data); + write_CTRL(drv_data, (cr & (~BIT_CTL_ENABLE))); } /* Caculate the SPI_BAUD register value based on input HZ */ @@ -170,83 +188,71 @@ static int flush(struct driver_data *drv_data) unsigned long limit = loops_per_jiffy << 1; /* wait for stop and clear stat */ - while (!(read_STAT() & BIT_STAT_SPIF) && limit--) - continue; + while (!(read_STAT(drv_data) & BIT_STAT_SPIF) && limit--) + cpu_relax(); - write_STAT(BIT_STAT_CLR); + write_STAT(drv_data, BIT_STAT_CLR); return limit; } +/* Chip select operation functions for cs_change flag */ +static void cs_active(struct driver_data *drv_data, struct chip_data *chip) +{ + u16 flag = read_FLAG(drv_data); + + flag |= chip->flag; + flag &= ~(chip->flag << 8); + + write_FLAG(drv_data, flag); +} + +static void cs_deactive(struct driver_data *drv_data, struct chip_data *chip) +{ + u16 flag = read_FLAG(drv_data); + + flag |= (chip->flag << 8); + + write_FLAG(drv_data, flag); + + /* Move delay here for consistency */ + if (chip->cs_chg_udelay) + udelay(chip->cs_chg_udelay); +} + +#define MAX_SPI_SSEL 7 + /* stop controller and re-config current chip*/ -static void restore_state(struct driver_data *drv_data) +static int restore_state(struct driver_data *drv_data) { struct chip_data *chip = drv_data->cur_chip; + int ret = 0; /* Clear status and disable clock */ - write_STAT(BIT_STAT_CLR); + write_STAT(drv_data, BIT_STAT_CLR); bfin_spi_disable(drv_data); dev_dbg(&drv_data->pdev->dev, "restoring spi ctl state\n"); -#if defined(CONFIG_BF534) || defined(CONFIG_BF536) || defined(CONFIG_BF537) - dev_dbg(&drv_data->pdev->dev, - "chip select number is %d\n", chip->chip_select_num); - - switch (chip->chip_select_num) { - case 1: - 
bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3c00); - SSYNC(); - break; - - case 2: - case 3: - bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PJSE_SPI); - SSYNC(); - bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3800); - SSYNC(); - break; - - case 4: - bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PFS4E_SPI); - SSYNC(); - bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3840); - SSYNC(); - break; + /* Load the registers */ + write_CTRL(drv_data, chip->ctl_reg); + write_BAUD(drv_data, chip->baud); - case 5: - bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PFS5E_SPI); - SSYNC(); - bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3820); - SSYNC(); - break; + bfin_spi_enable(drv_data); + cs_active(drv_data, chip); - case 6: - bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PFS6E_SPI); - SSYNC(); - bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3810); - SSYNC(); - break; + if (ret) + dev_dbg(&drv_data->pdev->dev, + ": request chip select number %d failed\n", + chip->chip_select_num); - case 7: - bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PJCE_SPI); - SSYNC(); - bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3800); - SSYNC(); - break; - } -#endif - - /* Load the registers */ - write_CTRL(chip->ctl_reg); - write_BAUD(chip->baud); - write_FLAG(chip->flag); + return ret; } /* used to kick off transfer in rx mode */ -static unsigned short dummy_read(void) +static unsigned short dummy_read(struct driver_data *drv_data) { unsigned short tmp; - tmp = read_RDBR(); + tmp = read_RDBR(drv_data); return tmp; } @@ -255,9 +261,9 @@ static void null_writer(struct driver_data *drv_data) u8 n_bytes = drv_data->n_bytes; while (drv_data->tx < drv_data->tx_end) { - write_TDBR(0); - while ((read_STAT() & BIT_STAT_TXS)) - continue; + write_TDBR(drv_data, 0); + while ((read_STAT(drv_data) & BIT_STAT_TXS)) + cpu_relax(); drv_data->tx += n_bytes; } } @@ -265,75 +271,78 @@ static void null_writer(struct driver_data *drv_data) static void null_reader(struct driver_data *drv_data) { u8 n_bytes = drv_data->n_bytes; - dummy_read(); + dummy_read(drv_data); while (drv_data->rx < drv_data->rx_end) { - while (!(read_STAT() & BIT_STAT_RXS)) - continue; - dummy_read(); + while (!(read_STAT(drv_data) & BIT_STAT_RXS)) + cpu_relax(); + dummy_read(drv_data); drv_data->rx += n_bytes; } } static void u8_writer(struct driver_data *drv_data) { - dev_dbg(&drv_data->pdev->dev, - "cr8-s is 0x%x\n", read_STAT()); + dev_dbg(&drv_data->pdev->dev, + "cr8-s is 0x%x\n", read_STAT(drv_data)); + + /* poll for SPI completion before start */ + while (!(read_STAT(drv_data) & BIT_STAT_SPIF)) + cpu_relax(); + while (drv_data->tx < drv_data->tx_end) { - write_TDBR(*(u8 *) (drv_data->tx)); - while (read_STAT() & BIT_STAT_TXS) - continue; + write_TDBR(drv_data, (*(u8 *) (drv_data->tx))); + while (read_STAT(drv_data) & BIT_STAT_TXS) + cpu_relax(); ++drv_data->tx; } - - /* poll for SPI completion before returning */ - while (!(read_STAT() & BIT_STAT_SPIF)) - continue; } static void u8_cs_chg_writer(struct driver_data *drv_data) { struct chip_data *chip = drv_data->cur_chip; + /* poll for SPI completion before start */ + while (!(read_STAT(drv_data) & BIT_STAT_SPIF)) + cpu_relax(); + while (drv_data->tx < drv_data->tx_end) { - write_FLAG(chip->flag); - SSYNC(); - - write_TDBR(*(u8 *) (drv_data->tx)); - while (read_STAT() & BIT_STAT_TXS) - continue; - while (!(read_STAT() & BIT_STAT_SPIF)) - continue; - write_FLAG(0xFF00 | chip->flag); - SSYNC(); - if (chip->cs_chg_udelay) - udelay(chip->cs_chg_udelay); + cs_active(drv_data, chip); + + write_TDBR(drv_data, (*(u8 *) 
(drv_data->tx))); + while (read_STAT(drv_data) & BIT_STAT_TXS) + cpu_relax(); + + cs_deactive(drv_data, chip); + ++drv_data->tx; } - write_FLAG(0xFF00); - SSYNC(); } static void u8_reader(struct driver_data *drv_data) { - dev_dbg(&drv_data->pdev->dev, - "cr-8 is 0x%x\n", read_STAT()); + dev_dbg(&drv_data->pdev->dev, + "cr-8 is 0x%x\n", read_STAT(drv_data)); + + /* poll for SPI completion before start */ + while (!(read_STAT(drv_data) & BIT_STAT_SPIF)) + cpu_relax(); /* clear TDBR buffer before read(else it will be shifted out) */ - write_TDBR(0xFFFF); + write_TDBR(drv_data, 0xFFFF); - dummy_read(); + dummy_read(drv_data); while (drv_data->rx < drv_data->rx_end - 1) { - while (!(read_STAT() & BIT_STAT_RXS)) - continue; - *(u8 *) (drv_data->rx) = read_RDBR(); + while (!(read_STAT(drv_data) & BIT_STAT_RXS)) + cpu_relax(); + *(u8 *) (drv_data->rx) = read_RDBR(drv_data); ++drv_data->rx; } - while (!(read_STAT() & BIT_STAT_RXS)) - continue; - *(u8 *) (drv_data->rx) = read_SHAW(); + while (!(read_STAT(drv_data) & BIT_STAT_RXS)) + cpu_relax(); + *(u8 *) (drv_data->rx) = read_SHAW(drv_data); ++drv_data->rx; } @@ -341,36 +350,47 @@ static void u8_cs_chg_reader(struct driver_data *drv_data) { struct chip_data *chip = drv_data->cur_chip; - while (drv_data->rx < drv_data->rx_end) { - write_FLAG(chip->flag); - SSYNC(); - - read_RDBR(); /* kick off */ - while (!(read_STAT() & BIT_STAT_RXS)) - continue; - while (!(read_STAT() & BIT_STAT_SPIF)) - continue; - *(u8 *) (drv_data->rx) = read_SHAW(); - write_FLAG(0xFF00 | chip->flag); - SSYNC(); - if (chip->cs_chg_udelay) - udelay(chip->cs_chg_udelay); + /* poll for SPI completion before start */ + while (!(read_STAT(drv_data) & BIT_STAT_SPIF)) + cpu_relax(); + + /* clear TDBR buffer before read(else it will be shifted out) */ + write_TDBR(drv_data, 0xFFFF); + + cs_active(drv_data, chip); + dummy_read(drv_data); + + while (drv_data->rx < drv_data->rx_end - 1) { + cs_deactive(drv_data, chip); + + while (!(read_STAT(drv_data) & BIT_STAT_RXS)) + cpu_relax(); + cs_active(drv_data, chip); + *(u8 *) (drv_data->rx) = read_RDBR(drv_data); ++drv_data->rx; } - write_FLAG(0xFF00); - SSYNC(); + cs_deactive(drv_data, chip); + + while (!(read_STAT(drv_data) & BIT_STAT_RXS)) + cpu_relax(); + *(u8 *) (drv_data->rx) = read_SHAW(drv_data); + ++drv_data->rx; } static void u8_duplex(struct driver_data *drv_data) { + /* poll for SPI completion before start */ + while (!(read_STAT(drv_data) & BIT_STAT_SPIF)) + cpu_relax(); + /* in duplex mode, clk is triggered by writing of TDBR */ while (drv_data->rx < drv_data->rx_end) { - write_TDBR(*(u8 *) (drv_data->tx)); - while (!(read_STAT() & BIT_STAT_SPIF)) - continue; - while (!(read_STAT() & BIT_STAT_RXS)) - continue; - *(u8 *) (drv_data->rx) = read_RDBR(); + write_TDBR(drv_data, (*(u8 *) (drv_data->tx))); + while (read_STAT(drv_data) & BIT_STAT_TXS) + cpu_relax(); + while (!(read_STAT(drv_data) & BIT_STAT_RXS)) + cpu_relax(); + *(u8 *) (drv_data->rx) = read_RDBR(drv_data); ++drv_data->rx; ++drv_data->tx; } @@ -380,83 +400,89 @@ static void u8_cs_chg_duplex(struct driver_data *drv_data) { struct chip_data *chip = drv_data->cur_chip; + /* poll for SPI completion before start */ + while (!(read_STAT(drv_data) & BIT_STAT_SPIF)) + cpu_relax(); + while (drv_data->rx < drv_data->rx_end) { - write_FLAG(chip->flag); - SSYNC(); - - write_TDBR(*(u8 *) (drv_data->tx)); - while (!(read_STAT() & BIT_STAT_SPIF)) - continue; - while (!(read_STAT() & BIT_STAT_RXS)) - continue; - *(u8 *) (drv_data->rx) = read_RDBR(); - write_FLAG(0xFF00 | chip->flag); - 
SSYNC(); - if (chip->cs_chg_udelay) - udelay(chip->cs_chg_udelay); + cs_active(drv_data, chip); + + write_TDBR(drv_data, (*(u8 *) (drv_data->tx))); + while (read_STAT(drv_data) & BIT_STAT_TXS) + cpu_relax(); + while (!(read_STAT(drv_data) & BIT_STAT_RXS)) + cpu_relax(); + *(u8 *) (drv_data->rx) = read_RDBR(drv_data); + + cs_deactive(drv_data, chip); + ++drv_data->rx; ++drv_data->tx; } - write_FLAG(0xFF00); - SSYNC(); } static void u16_writer(struct driver_data *drv_data) { - dev_dbg(&drv_data->pdev->dev, - "cr16 is 0x%x\n", read_STAT()); + dev_dbg(&drv_data->pdev->dev, + "cr16 is 0x%x\n", read_STAT(drv_data)); + + /* poll for SPI completion before start */ + while (!(read_STAT(drv_data) & BIT_STAT_SPIF)) + cpu_relax(); while (drv_data->tx < drv_data->tx_end) { - write_TDBR(*(u16 *) (drv_data->tx)); - while ((read_STAT() & BIT_STAT_TXS)) - continue; + write_TDBR(drv_data, (*(u16 *) (drv_data->tx))); + while ((read_STAT(drv_data) & BIT_STAT_TXS)) + cpu_relax(); drv_data->tx += 2; } - - /* poll for SPI completion before returning */ - while (!(read_STAT() & BIT_STAT_SPIF)) - continue; } static void u16_cs_chg_writer(struct driver_data *drv_data) { struct chip_data *chip = drv_data->cur_chip; + /* poll for SPI completion before start */ + while (!(read_STAT(drv_data) & BIT_STAT_SPIF)) + cpu_relax(); + while (drv_data->tx < drv_data->tx_end) { - write_FLAG(chip->flag); - SSYNC(); - - write_TDBR(*(u16 *) (drv_data->tx)); - while ((read_STAT() & BIT_STAT_TXS)) - continue; - while (!(read_STAT() & BIT_STAT_SPIF)) - continue; - write_FLAG(0xFF00 | chip->flag); - SSYNC(); - if (chip->cs_chg_udelay) - udelay(chip->cs_chg_udelay); + cs_active(drv_data, chip); + + write_TDBR(drv_data, (*(u16 *) (drv_data->tx))); + while ((read_STAT(drv_data) & BIT_STAT_TXS)) + cpu_relax(); + + cs_deactive(drv_data, chip); + drv_data->tx += 2; } - write_FLAG(0xFF00); - SSYNC(); } static void u16_reader(struct driver_data *drv_data) { dev_dbg(&drv_data->pdev->dev, - "cr-16 is 0x%x\n", read_STAT()); - dummy_read(); + "cr-16 is 0x%x\n", read_STAT(drv_data)); + + /* poll for SPI completion before start */ + while (!(read_STAT(drv_data) & BIT_STAT_SPIF)) + cpu_relax(); + + /* clear TDBR buffer before read(else it will be shifted out) */ + write_TDBR(drv_data, 0xFFFF); + + dummy_read(drv_data); while (drv_data->rx < (drv_data->rx_end - 2)) { - while (!(read_STAT() & BIT_STAT_RXS)) - continue; - *(u16 *) (drv_data->rx) = read_RDBR(); + while (!(read_STAT(drv_data) & BIT_STAT_RXS)) + cpu_relax(); + *(u16 *) (drv_data->rx) = read_RDBR(drv_data); drv_data->rx += 2; } - while (!(read_STAT() & BIT_STAT_RXS)) - continue; - *(u16 *) (drv_data->rx) = read_SHAW(); + while (!(read_STAT(drv_data) & BIT_STAT_RXS)) + cpu_relax(); + *(u16 *) (drv_data->rx) = read_SHAW(drv_data); drv_data->rx += 2; } @@ -464,36 +490,47 @@ static void u16_cs_chg_reader(struct driver_data *drv_data) { struct chip_data *chip = drv_data->cur_chip; - while (drv_data->rx < drv_data->rx_end) { - write_FLAG(chip->flag); - SSYNC(); - - read_RDBR(); /* kick off */ - while (!(read_STAT() & BIT_STAT_RXS)) - continue; - while (!(read_STAT() & BIT_STAT_SPIF)) - continue; - *(u16 *) (drv_data->rx) = read_SHAW(); - write_FLAG(0xFF00 | chip->flag); - SSYNC(); - if (chip->cs_chg_udelay) - udelay(chip->cs_chg_udelay); + /* poll for SPI completion before start */ + while (!(read_STAT(drv_data) & BIT_STAT_SPIF)) + cpu_relax(); + + /* clear TDBR buffer before read(else it will be shifted out) */ + write_TDBR(drv_data, 0xFFFF); + + cs_active(drv_data, chip); + 
dummy_read(drv_data); + + while (drv_data->rx < drv_data->rx_end - 2) { + cs_deactive(drv_data, chip); + + while (!(read_STAT(drv_data) & BIT_STAT_RXS)) + cpu_relax(); + cs_active(drv_data, chip); + *(u16 *) (drv_data->rx) = read_RDBR(drv_data); drv_data->rx += 2; } - write_FLAG(0xFF00); - SSYNC(); + cs_deactive(drv_data, chip); + + while (!(read_STAT(drv_data) & BIT_STAT_RXS)) + cpu_relax(); + *(u16 *) (drv_data->rx) = read_SHAW(drv_data); + drv_data->rx += 2; } static void u16_duplex(struct driver_data *drv_data) { + /* poll for SPI completion before start */ + while (!(read_STAT(drv_data) & BIT_STAT_SPIF)) + cpu_relax(); + /* in duplex mode, clk is triggered by writing of TDBR */ while (drv_data->tx < drv_data->tx_end) { - write_TDBR(*(u16 *) (drv_data->tx)); - while (!(read_STAT() & BIT_STAT_SPIF)) - continue; - while (!(read_STAT() & BIT_STAT_RXS)) - continue; - *(u16 *) (drv_data->rx) = read_RDBR(); + write_TDBR(drv_data, (*(u16 *) (drv_data->tx))); + while (read_STAT(drv_data) & BIT_STAT_TXS) + cpu_relax(); + while (!(read_STAT(drv_data) & BIT_STAT_RXS)) + cpu_relax(); + *(u16 *) (drv_data->rx) = read_RDBR(drv_data); drv_data->rx += 2; drv_data->tx += 2; } @@ -503,25 +540,25 @@ static void u16_cs_chg_duplex(struct driver_data *drv_data) { struct chip_data *chip = drv_data->cur_chip; + /* poll for SPI completion before start */ + while (!(read_STAT(drv_data) & BIT_STAT_SPIF)) + cpu_relax(); + while (drv_data->tx < drv_data->tx_end) { - write_FLAG(chip->flag); - SSYNC(); - - write_TDBR(*(u16 *) (drv_data->tx)); - while (!(read_STAT() & BIT_STAT_SPIF)) - continue; - while (!(read_STAT() & BIT_STAT_RXS)) - continue; - *(u16 *) (drv_data->rx) = read_RDBR(); - write_FLAG(0xFF00 | chip->flag); - SSYNC(); - if (chip->cs_chg_udelay) - udelay(chip->cs_chg_udelay); + cs_active(drv_data, chip); + + write_TDBR(drv_data, (*(u16 *) (drv_data->tx))); + while (read_STAT(drv_data) & BIT_STAT_TXS) + cpu_relax(); + while (!(read_STAT(drv_data) & BIT_STAT_RXS)) + cpu_relax(); + *(u16 *) (drv_data->rx) = read_RDBR(drv_data); + + cs_deactive(drv_data, chip); + drv_data->rx += 2; drv_data->tx += 2; } - write_FLAG(0xFF00); - SSYNC(); } /* test if ther is more transfer to be done */ @@ -546,6 +583,7 @@ static void *next_transfer(struct driver_data *drv_data) */ static void giveback(struct driver_data *drv_data) { + struct chip_data *chip = drv_data->cur_chip; struct spi_transfer *last_transfer; unsigned long flags; struct spi_message *msg; @@ -565,10 +603,13 @@ static void giveback(struct driver_data *drv_data) /* disable chip select signal. And not stop spi in autobuffer mode */ if (drv_data->tx_dma != 0xFFFF) { - write_FLAG(0xFF00); + cs_deactive(drv_data, chip); bfin_spi_disable(drv_data); } + if (!drv_data->cs_change) + cs_deactive(drv_data, chip); + if (msg->complete) msg->complete(msg->context); } @@ -576,14 +617,15 @@ static void giveback(struct driver_data *drv_data) static irqreturn_t dma_irq_handler(int irq, void *dev_id) { struct driver_data *drv_data = (struct driver_data *)dev_id; + struct chip_data *chip = drv_data->cur_chip; struct spi_message *msg = drv_data->cur_msg; dev_dbg(&drv_data->pdev->dev, "in dma_irq_handler\n"); - clear_dma_irqstat(CH_SPI); + clear_dma_irqstat(drv_data->dma_channel); /* Wait for DMA to complete */ - while (get_dma_curr_irqstat(CH_SPI) & DMA_RUN) - continue; + while (get_dma_curr_irqstat(drv_data->dma_channel) & DMA_RUN) + cpu_relax(); /* * wait for the last transaction shifted out. 
HRM states: @@ -592,18 +634,19 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id) * register until it goes low for 2 successive reads */ if (drv_data->tx != NULL) { - while ((bfin_read_SPI_STAT() & TXS) || - (bfin_read_SPI_STAT() & TXS)) - continue; + while ((read_STAT(drv_data) & TXS) || + (read_STAT(drv_data) & TXS)) + cpu_relax(); } - while (!(bfin_read_SPI_STAT() & SPIF)) - continue; - - bfin_spi_disable(drv_data); + while (!(read_STAT(drv_data) & SPIF)) + cpu_relax(); msg->actual_length += drv_data->len_in_bytes; + if (drv_data->cs_change) + cs_deactive(drv_data, chip); + /* Move to next transfer */ msg->state = next_transfer(drv_data); @@ -613,8 +656,8 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id) /* free the irq handler before next transfer */ dev_dbg(&drv_data->pdev->dev, "disable dma channel irq%d\n", - CH_SPI); - dma_disable_irq(CH_SPI); + drv_data->dma_channel); + dma_disable_irq(drv_data->dma_channel); return IRQ_HANDLED; } @@ -690,31 +733,67 @@ static void pump_transfers(unsigned long data) drv_data->rx_dma = transfer->rx_dma; drv_data->tx_dma = transfer->tx_dma; drv_data->len_in_bytes = transfer->len; + drv_data->cs_change = transfer->cs_change; + + /* Bits per word setup */ + switch (transfer->bits_per_word) { + case 8: + drv_data->n_bytes = 1; + width = CFG_SPI_WORDSIZE8; + drv_data->read = chip->cs_change_per_word ? + u8_cs_chg_reader : u8_reader; + drv_data->write = chip->cs_change_per_word ? + u8_cs_chg_writer : u8_writer; + drv_data->duplex = chip->cs_change_per_word ? + u8_cs_chg_duplex : u8_duplex; + break; + + case 16: + drv_data->n_bytes = 2; + width = CFG_SPI_WORDSIZE16; + drv_data->read = chip->cs_change_per_word ? + u16_cs_chg_reader : u16_reader; + drv_data->write = chip->cs_change_per_word ? + u16_cs_chg_writer : u16_writer; + drv_data->duplex = chip->cs_change_per_word ? + u16_cs_chg_duplex : u16_duplex; + break; + + default: + /* No change, the same as default setting */ + drv_data->n_bytes = chip->n_bytes; + width = chip->width; + drv_data->write = drv_data->tx ? chip->write : null_writer; + drv_data->read = drv_data->rx ? chip->read : null_reader; + drv_data->duplex = chip->duplex ? chip->duplex : null_writer; + break; + } + cr = (read_CTRL(drv_data) & (~BIT_CTL_TIMOD)); + cr |= (width << 8); + write_CTRL(drv_data, cr); - width = chip->width; if (width == CFG_SPI_WORDSIZE16) { drv_data->len = (transfer->len) >> 1; } else { drv_data->len = transfer->len; } - drv_data->write = drv_data->tx ? chip->write : null_writer; - drv_data->read = drv_data->rx ? chip->read : null_reader; - drv_data->duplex = chip->duplex ? 
chip->duplex : null_writer; - dev_dbg(&drv_data->pdev->dev, - "transfer: drv_data->write is %p, chip->write is %p, null_wr is %p\n", - drv_data->write, chip->write, null_writer); + dev_dbg(&drv_data->pdev->dev, "transfer: ", + "drv_data->write is %p, chip->write is %p, null_wr is %p\n", + drv_data->write, chip->write, null_writer); /* speed and width has been set on per message */ message->state = RUNNING_STATE; dma_config = 0; - /* restore spi status for each spi transfer */ - if (transfer->speed_hz) { - write_BAUD(hz_to_spi_baud(transfer->speed_hz)); - } else { - write_BAUD(chip->baud); - } - write_FLAG(chip->flag); + /* Speed setup (surely valid because already checked) */ + if (transfer->speed_hz) + write_BAUD(drv_data, hz_to_spi_baud(transfer->speed_hz)); + else + write_BAUD(drv_data, chip->baud); + + write_STAT(drv_data, BIT_STAT_CLR); + cr = (read_CTRL(drv_data) & (~BIT_CTL_TIMOD)); + cs_active(drv_data, chip); dev_dbg(&drv_data->pdev->dev, "now pumping a transfer: width is %d, len is %d\n", @@ -727,25 +806,25 @@ static void pump_transfers(unsigned long data) */ if (drv_data->cur_chip->enable_dma && drv_data->len > 6) { - write_STAT(BIT_STAT_CLR); - disable_dma(CH_SPI); - clear_dma_irqstat(CH_SPI); + disable_dma(drv_data->dma_channel); + clear_dma_irqstat(drv_data->dma_channel); bfin_spi_disable(drv_data); /* config dma channel */ dev_dbg(&drv_data->pdev->dev, "doing dma transfer\n"); if (width == CFG_SPI_WORDSIZE16) { - set_dma_x_count(CH_SPI, drv_data->len); - set_dma_x_modify(CH_SPI, 2); + set_dma_x_count(drv_data->dma_channel, drv_data->len); + set_dma_x_modify(drv_data->dma_channel, 2); dma_width = WDSIZE_16; } else { - set_dma_x_count(CH_SPI, drv_data->len); - set_dma_x_modify(CH_SPI, 1); + set_dma_x_count(drv_data->dma_channel, drv_data->len); + set_dma_x_modify(drv_data->dma_channel, 1); dma_width = WDSIZE_8; } - /* set transfer width,direction. 
And enable spi */ - cr = (read_CTRL() & (~BIT_CTL_TIMOD)); + /* poll for SPI completion before start */ + while (!(read_STAT(drv_data) & BIT_STAT_SPIF)) + cpu_relax(); /* dirty hack for autobuffer DMA mode */ if (drv_data->tx_dma == 0xFFFF) { @@ -755,13 +834,18 @@ static void pump_transfers(unsigned long data) /* no irq in autobuffer mode */ dma_config = (DMAFLOW_AUTO | RESTART | dma_width | DI_EN); - set_dma_config(CH_SPI, dma_config); - set_dma_start_addr(CH_SPI, (unsigned long)drv_data->tx); - enable_dma(CH_SPI); - write_CTRL(cr | CFG_SPI_DMAWRITE | (width << 8) | - (CFG_SPI_ENABLE << 14)); - - /* just return here, there can only be one transfer in this mode */ + set_dma_config(drv_data->dma_channel, dma_config); + set_dma_start_addr(drv_data->dma_channel, + (unsigned long)drv_data->tx); + enable_dma(drv_data->dma_channel); + + /* start SPI transfer */ + write_CTRL(drv_data, + (cr | CFG_SPI_DMAWRITE | BIT_CTL_ENABLE)); + + /* just return here, there can only be one transfer + * in this mode + */ message->status = 0; giveback(drv_data); return; @@ -772,58 +856,51 @@ static void pump_transfers(unsigned long data) /* set transfer mode, and enable SPI */ dev_dbg(&drv_data->pdev->dev, "doing DMA in.\n"); - /* disable SPI before write to TDBR */ - write_CTRL(cr & ~BIT_CTL_ENABLE); - /* clear tx reg soformer data is not shifted out */ - write_TDBR(0xFF); + write_TDBR(drv_data, 0xFFFF); - set_dma_x_count(CH_SPI, drv_data->len); + set_dma_x_count(drv_data->dma_channel, drv_data->len); /* start dma */ - dma_enable_irq(CH_SPI); + dma_enable_irq(drv_data->dma_channel); dma_config = (WNR | RESTART | dma_width | DI_EN); - set_dma_config(CH_SPI, dma_config); - set_dma_start_addr(CH_SPI, (unsigned long)drv_data->rx); - enable_dma(CH_SPI); + set_dma_config(drv_data->dma_channel, dma_config); + set_dma_start_addr(drv_data->dma_channel, + (unsigned long)drv_data->rx); + enable_dma(drv_data->dma_channel); + + /* start SPI transfer */ + write_CTRL(drv_data, + (cr | CFG_SPI_DMAREAD | BIT_CTL_ENABLE)); - cr |= - CFG_SPI_DMAREAD | (width << 8) | (CFG_SPI_ENABLE << - 14); - /* set transfer mode, and enable SPI */ - write_CTRL(cr); } else if (drv_data->tx != NULL) { dev_dbg(&drv_data->pdev->dev, "doing DMA out.\n"); /* start dma */ - dma_enable_irq(CH_SPI); + dma_enable_irq(drv_data->dma_channel); dma_config = (RESTART | dma_width | DI_EN); - set_dma_config(CH_SPI, dma_config); - set_dma_start_addr(CH_SPI, (unsigned long)drv_data->tx); - enable_dma(CH_SPI); - - write_CTRL(cr | CFG_SPI_DMAWRITE | (width << 8) | - (CFG_SPI_ENABLE << 14)); - + set_dma_config(drv_data->dma_channel, dma_config); + set_dma_start_addr(drv_data->dma_channel, + (unsigned long)drv_data->tx); + enable_dma(drv_data->dma_channel); + + /* start SPI transfer */ + write_CTRL(drv_data, + (cr | CFG_SPI_DMAWRITE | BIT_CTL_ENABLE)); } } else { /* IO mode write then read */ dev_dbg(&drv_data->pdev->dev, "doing IO transfer\n"); - write_STAT(BIT_STAT_CLR); - if (drv_data->tx != NULL && drv_data->rx != NULL) { /* full duplex mode */ BUG_ON((drv_data->tx_end - drv_data->tx) != (drv_data->rx_end - drv_data->rx)); - cr = (read_CTRL() & (~BIT_CTL_TIMOD)); - cr |= CFG_SPI_WRITE | (width << 8) | - (CFG_SPI_ENABLE << 14); dev_dbg(&drv_data->pdev->dev, "IO duplex: cr is 0x%x\n", cr); - write_CTRL(cr); - SSYNC(); + /* set SPI transfer mode */ + write_CTRL(drv_data, (cr | CFG_SPI_WRITE)); drv_data->duplex(drv_data); @@ -831,14 +908,11 @@ static void pump_transfers(unsigned long data) tranf_success = 0; } else if (drv_data->tx != NULL) { /* write only half duplex 
*/ - cr = (read_CTRL() & (~BIT_CTL_TIMOD)); - cr |= CFG_SPI_WRITE | (width << 8) | - (CFG_SPI_ENABLE << 14); - dev_dbg(&drv_data->pdev->dev, + dev_dbg(&drv_data->pdev->dev, "IO write: cr is 0x%x\n", cr); - write_CTRL(cr); - SSYNC(); + /* set SPI transfer mode */ + write_CTRL(drv_data, (cr | CFG_SPI_WRITE)); drv_data->write(drv_data); @@ -846,14 +920,11 @@ static void pump_transfers(unsigned long data) tranf_success = 0; } else if (drv_data->rx != NULL) { /* read only half duplex */ - cr = (read_CTRL() & (~BIT_CTL_TIMOD)); - cr |= CFG_SPI_READ | (width << 8) | - (CFG_SPI_ENABLE << 14); - dev_dbg(&drv_data->pdev->dev, + dev_dbg(&drv_data->pdev->dev, "IO read: cr is 0x%x\n", cr); - write_CTRL(cr); - SSYNC(); + /* set SPI transfer mode */ + write_CTRL(drv_data, (cr | CFG_SPI_READ)); drv_data->read(drv_data); if (drv_data->rx != drv_data->rx_end) @@ -861,7 +932,7 @@ static void pump_transfers(unsigned long data) } if (!tranf_success) { - dev_dbg(&drv_data->pdev->dev, + dev_dbg(&drv_data->pdev->dev, "IO write error!\n"); message->state = ERROR_STATE; } else { @@ -881,9 +952,11 @@ static void pump_transfers(unsigned long data) /* pop a msg from queue and kick off real transfer */ static void pump_messages(struct work_struct *work) { - struct driver_data *drv_data = container_of(work, struct driver_data, pump_messages); + struct driver_data *drv_data; unsigned long flags; + drv_data = container_of(work, struct driver_data, pump_messages); + /* Lock queue and check for queue work */ spin_lock_irqsave(&drv_data->lock, flags); if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) { @@ -902,6 +975,14 @@ static void pump_messages(struct work_struct *work) /* Extract head of queue */ drv_data->cur_msg = list_entry(drv_data->queue.next, struct spi_message, queue); + + /* Setup the SSP using the per chip configuration */ + drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi); + if (restore_state(drv_data)) { + spin_unlock_irqrestore(&drv_data->lock, flags); + return; + }; + list_del_init(&drv_data->cur_msg->queue); /* Initial message state */ @@ -909,15 +990,12 @@ static void pump_messages(struct work_struct *work) drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next, struct spi_transfer, transfer_list); - /* Setup the SSP using the per chip configuration */ - drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi); - restore_state(drv_data); + dev_dbg(&drv_data->pdev->dev, "got a message to pump, " + "state is set to: baud %d, flag 0x%x, ctl 0x%x\n", + drv_data->cur_chip->baud, drv_data->cur_chip->flag, + drv_data->cur_chip->ctl_reg); + dev_dbg(&drv_data->pdev->dev, - "got a message to pump, state is set to: baud %d, flag 0x%x, ctl 0x%x\n", - drv_data->cur_chip->baud, drv_data->cur_chip->flag, - drv_data->cur_chip->ctl_reg); - - dev_dbg(&drv_data->pdev->dev, "the first transfer len is %d\n", drv_data->cur_transfer->len); @@ -959,6 +1037,22 @@ static int transfer(struct spi_device *spi, struct spi_message *msg) return 0; } +#define MAX_SPI_SSEL 7 + +static u16 ssel[3][MAX_SPI_SSEL] = { + {P_SPI0_SSEL1, P_SPI0_SSEL2, P_SPI0_SSEL3, + P_SPI0_SSEL4, P_SPI0_SSEL5, + P_SPI0_SSEL6, P_SPI0_SSEL7}, + + {P_SPI1_SSEL1, P_SPI1_SSEL2, P_SPI1_SSEL3, + P_SPI1_SSEL4, P_SPI1_SSEL5, + P_SPI1_SSEL6, P_SPI1_SSEL7}, + + {P_SPI2_SSEL1, P_SPI2_SSEL2, P_SPI2_SSEL3, + P_SPI2_SSEL4, P_SPI2_SSEL5, + P_SPI2_SSEL6, P_SPI2_SSEL7}, +}; + /* first setup for new devices */ static int setup(struct spi_device *spi) { @@ -993,6 +1087,18 @@ static int setup(struct spi_device *spi) /* chip_info 
isn't always needed */ if (chip_info) { + /* Make sure people stop trying to set fields via ctl_reg + * when they should actually be using common SPI framework. + * Currently we let through: WOM EMISO PSSE GM SZ TIMOD. + * Not sure if a user actually needs/uses any of these, + * but let's assume (for now) they do. + */ + if (chip_info->ctl_reg & (SPE|MSTR|CPOL|CPHA|LSBF|SIZE)) { + dev_err(&spi->dev, "do not set bits in ctl_reg " + "that the SPI framework manages\n"); + return -EINVAL; + } + chip->enable_dma = chip_info->enable_dma != 0 && drv_data->master_info->enable_dma; chip->ctl_reg = chip_info->ctl_reg; @@ -1015,20 +1121,20 @@ static int setup(struct spi_device *spi) * if any one SPI chip is registered and wants DMA, request the * DMA channel for it */ - if (chip->enable_dma && !dma_requested) { + if (chip->enable_dma && !drv_data->dma_requested) { /* register dma irq handler */ - if (request_dma(CH_SPI, "BF53x_SPI_DMA") < 0) { + if (request_dma(drv_data->dma_channel, "BF53x_SPI_DMA") < 0) { dev_dbg(&spi->dev, "Unable to request BlackFin SPI DMA channel\n"); return -ENODEV; } - if (set_dma_callback(CH_SPI, (void *)dma_irq_handler, drv_data) - < 0) { + if (set_dma_callback(drv_data->dma_channel, + (void *)dma_irq_handler, drv_data) < 0) { dev_dbg(&spi->dev, "Unable to set dma callback\n"); return -EPERM; } - dma_disable_irq(CH_SPI); - dma_requested = 1; + dma_disable_irq(drv_data->dma_channel); + drv_data->dma_requested = 1; } /* @@ -1077,6 +1183,14 @@ static int setup(struct spi_device *spi) spi_set_ctldata(spi, chip); + dev_dbg(&spi->dev, "chip select number is %d\n", chip->chip_select_num); + if ((chip->chip_select_num > 0) + && (chip->chip_select_num <= spi->master->num_chipselect)) + peripheral_request(ssel[spi->master->bus_num] + [chip->chip_select_num-1], DRV_NAME); + + cs_deactive(drv_data, chip); + return 0; } @@ -1088,6 +1202,11 @@ static void cleanup(struct spi_device *spi) { struct chip_data *chip = spi_get_ctldata(spi); + if ((chip->chip_select_num > 0) + && (chip->chip_select_num <= spi->master->num_chipselect)) + peripheral_free(ssel[spi->master->bus_num] + [chip->chip_select_num-1]); + kfree(chip); } @@ -1183,6 +1302,7 @@ static int __init bfin5xx_spi_probe(struct platform_device *pdev) struct bfin5xx_spi_master *platform_info; struct spi_master *master; struct driver_data *drv_data = 0; + struct resource *res; int status = 0; platform_info = dev->platform_data; @@ -1193,10 +1313,12 @@ static int __init bfin5xx_spi_probe(struct platform_device *pdev) dev_err(&pdev->dev, "can not alloc spi_master\n"); return -ENOMEM; } + drv_data = spi_master_get_devdata(master); drv_data->master = master; drv_data->master_info = platform_info; drv_data->pdev = pdev; + drv_data->pin_req = platform_info->pin_req; master->bus_num = pdev->id; master->num_chipselect = platform_info->num_chipselect; @@ -1204,15 +1326,38 @@ static int __init bfin5xx_spi_probe(struct platform_device *pdev) master->setup = setup; master->transfer = transfer; + /* Find and map our resources */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res == NULL) { + dev_err(dev, "Cannot get IORESOURCE_MEM\n"); + status = -ENOENT; + goto out_error_get_res; + } + + drv_data->regs_base = ioremap(res->start, (res->end - res->start + 1)); + if (drv_data->regs_base == NULL) { + dev_err(dev, "Cannot map IO\n"); + status = -ENXIO; + goto out_error_ioremap; + } + + drv_data->dma_channel = platform_get_irq(pdev, 0); + if (drv_data->dma_channel < 0) { + dev_err(dev, "No DMA channel specified\n"); + status = -ENOENT; + 
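/*
 * Illustrative sketch, assuming a hypothetical board file (names and
 * numbers below are placeholders, not taken from this patch): the probe
 * code above expects the SPI MMR block as an IORESOURCE_MEM resource and,
 * somewhat unusually, the peripheral DMA channel number delivered through
 * platform_get_irq(pdev, 0), i.e. as the first IRQ resource.  The
 * pin_req list feeds peripheral_request_list() later in probe.
 */
static struct resource bfin_spi0_resource[] = {
	[0] = {
		.start = 0xFFC00500,	/* hypothetical SPI0 MMR base */
		.end   = 0xFFC005FF,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = 5,		/* hypothetical SPI DMA channel */
		.end   = 5,
		.flags = IORESOURCE_IRQ,
	},
};

static struct bfin5xx_spi_master bfin_spi0_info = {
	.num_chipselect = 8,
	.enable_dma     = 1,
	/* assumed layout: zero-terminated list of peripheral pin IDs */
	.pin_req        = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0},
};

static struct platform_device bfin_spi0_device = {
	.name          = "bfin-spi",	/* assumed to match the driver's DRV_NAME */
	.id            = 0,
	.num_resources = ARRAY_SIZE(bfin_spi0_resource),
	.resource      = bfin_spi0_resource,
	.dev           = {
		.platform_data = &bfin_spi0_info,
	},
};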
goto out_error_no_dma_ch; + } + /* Initial and start queue */ status = init_queue(drv_data); if (status != 0) { - dev_err(&pdev->dev, "problem initializing queue\n"); + dev_err(dev, "problem initializing queue\n"); goto out_error_queue_alloc; } + status = start_queue(drv_data); if (status != 0) { - dev_err(&pdev->dev, "problem starting queue\n"); + dev_err(dev, "problem starting queue\n"); goto out_error_queue_alloc; } @@ -1220,15 +1365,30 @@ static int __init bfin5xx_spi_probe(struct platform_device *pdev) platform_set_drvdata(pdev, drv_data); status = spi_register_master(master); if (status != 0) { - dev_err(&pdev->dev, "problem registering spi master\n"); + dev_err(dev, "problem registering spi master\n"); goto out_error_queue_alloc; } - dev_dbg(&pdev->dev, "controller probe successfully\n"); + + status = peripheral_request_list(drv_data->pin_req, DRV_NAME); + if (status != 0) { + dev_err(&pdev->dev, ": Requesting Peripherals failed\n"); + goto out_error; + } + + dev_info(dev, "%s, Version %s, regs_base@%p, dma channel@%d\n", + DRV_DESC, DRV_VERSION, drv_data->regs_base, + drv_data->dma_channel); return status; - out_error_queue_alloc: +out_error_queue_alloc: destroy_queue(drv_data); +out_error_no_dma_ch: + iounmap((void *) drv_data->regs_base); +out_error_ioremap: +out_error_get_res: +out_error: spi_master_put(master); + return status; } @@ -1251,13 +1411,15 @@ static int __devexit bfin5xx_spi_remove(struct platform_device *pdev) /* Release DMA */ if (drv_data->master_info->enable_dma) { - if (dma_channel_active(CH_SPI)) - free_dma(CH_SPI); + if (dma_channel_active(drv_data->dma_channel)) + free_dma(drv_data->dma_channel); } /* Disconnect from the SPI framework */ spi_unregister_master(drv_data->master); + peripheral_free_list(drv_data->pin_req); + /* Prevent double remove */ platform_set_drvdata(pdev, NULL); @@ -1305,7 +1467,7 @@ static int bfin5xx_spi_resume(struct platform_device *pdev) MODULE_ALIAS("bfin-spi-master"); /* for platform bus hotplug */ static struct platform_driver bfin5xx_spi_driver = { .driver = { - .name = "bfin-spi-master", + .name = DRV_NAME, .owner = THIS_MODULE, }, .suspend = bfin5xx_spi_suspend, |