Diffstat (limited to 'drivers')
478 files changed, 6868 insertions, 2944 deletions
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c index e88fe3632dd6..0972ec0e2eb8 100644 --- a/drivers/acpi/acpi_video.c +++ b/drivers/acpi/acpi_video.c @@ -418,7 +418,7 @@ static int video_set_report_key_events(const struct dmi_system_id *id) return 0; } -static struct dmi_system_id video_dmi_table[] = { +static const struct dmi_system_id video_dmi_table[] = { /* * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121 */ diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c index bf22c29d2517..11b113f8e367 100644 --- a/drivers/acpi/acpi_watchdog.c +++ b/drivers/acpi/acpi_watchdog.c @@ -66,7 +66,7 @@ void __init acpi_watchdog_init(void) for (i = 0; i < wdat->entries; i++) { const struct acpi_generic_address *gas; struct resource_entry *rentry; - struct resource res; + struct resource res = {}; bool found; gas = &entries[i].register_region; diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 077f9bad6f44..3c3a37b8503b 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -743,17 +743,19 @@ static int ghes_proc(struct ghes *ghes) } ghes_do_proc(ghes, ghes->estatus); +out: + ghes_clear_estatus(ghes); + + if (rc == -ENOENT) + return rc; + /* * GHESv2 type HEST entries introduce support for error acknowledgment, * so only acknowledge the error if this support is present. */ - if (is_hest_type_generic_v2(ghes)) { - rc = ghes_ack_error(ghes->generic_v2); - if (rc) - return rc; - } -out: - ghes_clear_estatus(ghes); + if (is_hest_type_generic_v2(ghes)) + return ghes_ack_error(ghes->generic_v2); + return rc; } diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c index 037fd537bbf6..995c4d8922b1 100644 --- a/drivers/acpi/blacklist.c +++ b/drivers/acpi/blacklist.c @@ -30,7 +30,7 @@ #include "internal.h" -static struct dmi_system_id acpi_rev_dmi_table[] __initdata; +static const struct dmi_system_id acpi_rev_dmi_table[] __initconst; /* * POLICY: If *anything* doesn't work, put it on the blacklist. @@ -89,7 +89,7 @@ static int __init dmi_enable_rev_override(const struct dmi_system_id *d) } #endif -static struct dmi_system_id acpi_rev_dmi_table[] __initdata = { +static const struct dmi_system_id acpi_rev_dmi_table[] __initconst = { #ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE /* * DELL XPS 13 (2015) switches sound between HDA and I2S diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 59f2f96fdb7e..4d0979e02a28 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -67,7 +67,7 @@ static int set_copy_dsdt(const struct dmi_system_id *id) } #endif -static struct dmi_system_id dsdt_dmi_table[] __initdata = { +static const struct dmi_system_id dsdt_dmi_table[] __initconst = { /* * Invoke DSDT corruption work-around on all Toshiba Satellite. 
* https://bugzilla.kernel.org/show_bug.cgi?id=14679 @@ -83,7 +83,7 @@ static struct dmi_system_id dsdt_dmi_table[] __initdata = { {} }; #else -static struct dmi_system_id dsdt_dmi_table[] __initdata = { +static const struct dmi_system_id dsdt_dmi_table[] __initconst = { {} }; #endif diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index fdfae6f3c0b1..236b14324780 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -1809,7 +1809,7 @@ static int ec_honor_ecdt_gpe(const struct dmi_system_id *id) return 0; } -static struct dmi_system_id ec_dmi_table[] __initdata = { +static const struct dmi_system_id ec_dmi_table[] __initconst = { { ec_correct_ecdt, "MSI MS-171F", { DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"), diff --git a/drivers/acpi/osi.c b/drivers/acpi/osi.c index 19cdd8a783a9..76998a51bf99 100644 --- a/drivers/acpi/osi.c +++ b/drivers/acpi/osi.c @@ -312,7 +312,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d) * Note that _OSI("Linux")/_OSI("Darwin") determined here can be overridden * by acpi_osi=!Linux/acpi_osi=!Darwin command line options. */ -static struct dmi_system_id acpi_osi_dmi_table[] __initdata = { +static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = { { .callback = dmi_disable_osi_vista, .ident = "Fujitsu Siemens", diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c index f62c68e24317..e90b61f7d2db 100644 --- a/drivers/acpi/pci_slot.c +++ b/drivers/acpi/pci_slot.c @@ -174,7 +174,7 @@ static int do_sta_before_sun(const struct dmi_system_id *d) return 0; } -static struct dmi_system_id acpi_pci_slot_dmi_table[] __initdata = { +static const struct dmi_system_id acpi_pci_slot_dmi_table[] __initconst = { /* * Fujitsu Primequest machines will return 1023 to indicate an * error if the _SUN method is evaluated on SxFy objects that diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c index 7cfbda4d7c51..74f738cb6073 100644 --- a/drivers/acpi/processor_pdc.c +++ b/drivers/acpi/processor_pdc.c @@ -173,7 +173,7 @@ static int __init set_no_mwait(const struct dmi_system_id *id) return 0; } -static struct dmi_system_id processor_idle_dmi_table[] __initdata = { +static const struct dmi_system_id processor_idle_dmi_table[] __initconst = { { set_no_mwait, "Extensa 5220", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index c1c216163de3..3fb8ff513461 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -908,11 +908,12 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode, struct fwnode_handle *child) { const struct acpi_device *adev = to_acpi_device_node(fwnode); - struct acpi_device *child_adev = NULL; const struct list_head *head; struct list_head *next; if (!child || is_acpi_device_node(child)) { + struct acpi_device *child_adev; + if (adev) head = &adev->children; else @@ -922,8 +923,8 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode, goto nondev; if (child) { - child_adev = to_acpi_device_node(child); - next = child_adev->node.next; + adev = to_acpi_device_node(child); + next = adev->node.next; if (next == head) { child = NULL; goto nondev; @@ -941,8 +942,8 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode, const struct acpi_data_node *data = to_acpi_data_node(fwnode); struct acpi_data_node *dn; - if (child_adev) - head = &child_adev->data.subnodes; + if (adev) + head = &adev->data.subnodes; else if (data) head = &data->data.subnodes; 
else @@ -1293,3 +1294,16 @@ static int acpi_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode, DECLARE_ACPI_FWNODE_OPS(acpi_device_fwnode_ops); DECLARE_ACPI_FWNODE_OPS(acpi_data_fwnode_ops); const struct fwnode_operations acpi_static_fwnode_ops; + +bool is_acpi_device_node(const struct fwnode_handle *fwnode) +{ + return !IS_ERR_OR_NULL(fwnode) && + fwnode->ops == &acpi_device_fwnode_ops; +} +EXPORT_SYMBOL(is_acpi_device_node); + +bool is_acpi_data_node(const struct fwnode_handle *fwnode) +{ + return !IS_ERR_OR_NULL(fwnode) && fwnode->ops == &acpi_data_fwnode_ops; +} +EXPORT_SYMBOL(is_acpi_data_node); diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 9fdd014759f8..6804ddab3052 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -160,7 +160,7 @@ static int __init init_nvs_nosave(const struct dmi_system_id *d) return 0; } -static struct dmi_system_id acpisleep_dmi_table[] __initdata = { +static const struct dmi_system_id acpisleep_dmi_table[] __initconst = { { .callback = init_old_suspend_ordering, .ident = "Abit KN9 (nForce4 variant)", diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index 1d0417b87cb7..551b71a24b85 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c @@ -1209,7 +1209,7 @@ static int thermal_psv(const struct dmi_system_id *d) { return 0; } -static struct dmi_system_id thermal_dmi_table[] __initdata = { +static const struct dmi_system_id thermal_dmi_table[] __initconst = { /* * Award BIOS on this AOpen makes thermal control almost worthless. * http://bugzilla.kernel.org/show_bug.cgi?id=8842 diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index f046d21de57d..1a5f6a157a57 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -140,13 +140,10 @@ config EXTRA_FIRMWARE config EXTRA_FIRMWARE_DIR string "Firmware blobs root directory" depends on EXTRA_FIRMWARE != "" - default "firmware" + default "/lib/firmware" help This option controls the directory in which the kernel build system looks for the firmware files listed in the EXTRA_FIRMWARE option. - The default is firmware/ in the kernel source tree, but by changing - this option you can point it elsewhere, such as /lib/firmware/ or - some other directory containing the firmware files. 
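Several hunks above make the same mechanical change: DMI quirk tables are only read by dmi_check_system() at init time, so they can be declared const and placed in __initconst rather than __initdata. A minimal sketch of that pattern follows; the names quirk_apply, example_dmi_table and example_init are illustrative, not taken from the patch.

#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/kernel.h>

static int __init quirk_apply(const struct dmi_system_id *d)
{
        pr_info("applying quirk for %s\n", d->ident);
        return 1;       /* non-zero stops the table walk after this match */
}

static const struct dmi_system_id example_dmi_table[] __initconst = {
        {
                .callback = quirk_apply,
                .ident = "Example Vendor Board",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Example Board"),
                },
        },
        {}      /* terminating entry */
};

static int __init example_init(void)
{
        /* dmi_check_system() runs the callback for each matching entry */
        dmi_check_system(example_dmi_table);
        return 0;
}

Because nothing ever writes to such a table, the const/__initconst form lets the data live in a read-only init section and be discarded after boot, which is exactly what the conversions above rely on.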
config FW_LOADER_USER_HELPER bool diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c index a39b2166b145..744f64f43454 100644 --- a/drivers/base/dma-coherent.c +++ b/drivers/base/dma-coherent.c @@ -348,16 +348,15 @@ static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev) struct dma_coherent_mem *mem = rmem->priv; int ret; - if (!mem) - return -ENODEV; - - ret = dma_init_coherent_memory(rmem->base, rmem->base, rmem->size, - DMA_MEMORY_EXCLUSIVE, &mem); - - if (ret) { - pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n", - &rmem->base, (unsigned long)rmem->size / SZ_1M); - return ret; + if (!mem) { + ret = dma_init_coherent_memory(rmem->base, rmem->base, + rmem->size, + DMA_MEMORY_EXCLUSIVE, &mem); + if (ret) { + pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n", + &rmem->base, (unsigned long)rmem->size / SZ_1M); + return ret; + } } mem->use_dev_dma_pfn_offset = true; rmem->priv = mem; diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index ea1732ed7a9d..770b1539a083 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -1860,10 +1860,13 @@ void device_pm_check_callbacks(struct device *dev) { spin_lock_irq(&dev->power.lock); dev->power.no_pm_callbacks = - (!dev->bus || pm_ops_is_empty(dev->bus->pm)) && - (!dev->class || pm_ops_is_empty(dev->class->pm)) && + (!dev->bus || (pm_ops_is_empty(dev->bus->pm) && + !dev->bus->suspend && !dev->bus->resume)) && + (!dev->class || (pm_ops_is_empty(dev->class->pm) && + !dev->class->suspend && !dev->class->resume)) && (!dev->type || pm_ops_is_empty(dev->type->pm)) && (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) && - (!dev->driver || pm_ops_is_empty(dev->driver->pm)); + (!dev->driver || (pm_ops_is_empty(dev->driver->pm) && + !dev->driver->suspend && !dev->driver->resume)); spin_unlock_irq(&dev->power.lock); } diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c index a8cc14fd8ae4..a6de32530693 100644 --- a/drivers/base/power/opp/core.c +++ b/drivers/base/power/opp/core.c @@ -1581,6 +1581,9 @@ static int _opp_set_availability(struct device *dev, unsigned long freq, opp->available = availability_req; + dev_pm_opp_get(opp); + mutex_unlock(&opp_table->lock); + /* Notify the change of the OPP availability */ if (availability_req) blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE, @@ -1589,8 +1592,12 @@ static int _opp_set_availability(struct device *dev, unsigned long freq, blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_DISABLE, opp); + dev_pm_opp_put(opp); + goto put_table; + unlock: mutex_unlock(&opp_table->lock); +put_table: dev_pm_opp_put_opp_table(opp_table); return r; } diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index f850daeffba4..277d43a83f53 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -277,11 +277,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev) mutex_unlock(&dev_pm_qos_sysfs_mtx); } -static bool dev_pm_qos_invalid_request(struct device *dev, - struct dev_pm_qos_request *req) +static bool dev_pm_qos_invalid_req_type(struct device *dev, + enum dev_pm_qos_req_type type) { - return !req || (req->type == DEV_PM_QOS_LATENCY_TOLERANCE - && !dev->power.set_latency_tolerance); + return type == DEV_PM_QOS_LATENCY_TOLERANCE && + !dev->power.set_latency_tolerance; } static int __dev_pm_qos_add_request(struct device *dev, @@ -290,7 +290,7 @@ static int __dev_pm_qos_add_request(struct device *dev, { int ret = 
0; - if (!dev || dev_pm_qos_invalid_request(dev, req)) + if (!dev || !req || dev_pm_qos_invalid_req_type(dev, type)) return -EINVAL; if (WARN(dev_pm_qos_request_active(req), diff --git a/drivers/block/brd.c b/drivers/block/brd.c index bbd0d186cfc0..2d7178f7754e 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -342,7 +342,7 @@ static long __brd_direct_access(struct brd_device *brd, pgoff_t pgoff, if (!brd) return -ENODEV; - page = brd_insert_page(brd, PFN_PHYS(pgoff) / 512); + page = brd_insert_page(brd, (sector_t)pgoff << PAGE_SECTORS_SHIFT); if (!page) return -ENOSPC; *kaddr = page_address(page); diff --git a/drivers/block/loop.h b/drivers/block/loop.h index f68c1d50802f..1f3956702993 100644 --- a/drivers/block/loop.h +++ b/drivers/block/loop.h @@ -67,10 +67,8 @@ struct loop_device { struct loop_cmd { struct kthread_work work; struct request *rq; - union { - bool use_aio; /* use AIO interface to handle I/O */ - atomic_t ref; /* only for aio */ - }; + bool use_aio; /* use AIO interface to handle I/O */ + atomic_t ref; /* only for aio */ long ret; struct kiocb iocb; struct bio_vec *bvec; diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 2aa87cbdede0..3684e21d543f 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -1194,6 +1194,12 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode, if (!capable(CAP_SYS_ADMIN)) return -EPERM; + /* The block layer will pass back some non-nbd ioctls in case we have + * special handling for them, but we don't so just return an error. + */ + if (_IOC_TYPE(cmd) != 0xab) + return -EINVAL; + mutex_lock(&nbd->config_lock); /* Don't allow ioctl operations on a nbd device that was created with diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c index f4f866ee54bc..d3a979e25724 100644 --- a/drivers/char/sonypi.c +++ b/drivers/char/sonypi.c @@ -1491,7 +1491,7 @@ static struct platform_driver sonypi_driver = { static struct platform_device *sonypi_platform_device; -static struct dmi_system_id __initdata sonypi_dmi_table[] = { +static const struct dmi_system_id sonypi_dmi_table[] __initconst = { { .ident = "Sony Vaio", .matches = { diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index fe597e6c55c4..1d6729be4cd6 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c @@ -455,7 +455,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space, goto out; } - msleep(TPM_TIMEOUT); /* CHECK */ + tpm_msleep(TPM_TIMEOUT); rmb(); } while (time_before(jiffies, stop)); @@ -970,7 +970,7 @@ int tpm_do_selftest(struct tpm_chip *chip) dev_info( &chip->dev, HW_ERR "TPM command timed out during continue self test"); - msleep(delay_msec); + tpm_msleep(delay_msec); continue; } @@ -985,7 +985,7 @@ int tpm_do_selftest(struct tpm_chip *chip) } if (rc != TPM_WARN_DOING_SELFTEST) return rc; - msleep(delay_msec); + tpm_msleep(delay_msec); } while (--loops > 0); return rc; @@ -1085,7 +1085,7 @@ again: } } else { do { - msleep(TPM_TIMEOUT); + tpm_msleep(TPM_TIMEOUT); status = chip->ops->status(chip); if ((status & mask) == mask) return 0; @@ -1150,7 +1150,7 @@ int tpm_pm_suspend(struct device *dev) */ if (rc != TPM_WARN_RETRY) break; - msleep(TPM_TIMEOUT_RETRY); + tpm_msleep(TPM_TIMEOUT_RETRY); } if (rc) diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index 04fbff2edbf3..2d5466a72e40 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -50,7 +50,8 @@ enum tpm_const { enum tpm_timeout { TPM_TIMEOUT = 5, /* msecs */ - TPM_TIMEOUT_RETRY = 100 /* 
msecs */ + TPM_TIMEOUT_RETRY = 100, /* msecs */ + TPM_TIMEOUT_RANGE_US = 300 /* usecs */ }; /* TPM addresses */ @@ -527,6 +528,12 @@ int tpm_pm_resume(struct device *dev); int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout, wait_queue_head_t *queue, bool check_cancel); +static inline void tpm_msleep(unsigned int delay_msec) +{ + usleep_range(delay_msec * 1000, + (delay_msec * 1000) + TPM_TIMEOUT_RANGE_US); +}; + struct tpm_chip *tpm_chip_find_get(int chip_num); __must_check int tpm_try_get_ops(struct tpm_chip *chip); void tpm_put_ops(struct tpm_chip *chip); diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index f7f34b2aa981..e1a41b788f08 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ b/drivers/char/tpm/tpm2-cmd.c @@ -899,7 +899,7 @@ static int tpm2_do_selftest(struct tpm_chip *chip) if (rc != TPM2_RC_TESTING) break; - msleep(delay_msec); + tpm_msleep(delay_msec); } return rc; diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c index a4ac63a21d8a..8f0a98dea327 100644 --- a/drivers/char/tpm/tpm_crb.c +++ b/drivers/char/tpm/tpm_crb.c @@ -665,7 +665,7 @@ static const struct dev_pm_ops crb_pm = { SET_RUNTIME_PM_OPS(crb_pm_runtime_suspend, crb_pm_runtime_resume, NULL) }; -static struct acpi_device_id crb_device_ids[] = { +static const struct acpi_device_id crb_device_ids[] = { {"MSFT0101", 0}, {"", 0}, }; diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c index f01d083eced2..25f6e2665385 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.c +++ b/drivers/char/tpm/tpm_ibmvtpm.c @@ -32,26 +32,70 @@ static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm"; -static struct vio_device_id tpm_ibmvtpm_device_table[] = { +static const struct vio_device_id tpm_ibmvtpm_device_table[] = { { "IBM,vtpm", "IBM,vtpm"}, { "", "" } }; MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table); /** + * + * ibmvtpm_send_crq_word - Send a CRQ request + * @vdev: vio device struct + * @w1: pre-constructed first word of tpm crq (second word is reserved) + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static int ibmvtpm_send_crq_word(struct vio_dev *vdev, u64 w1) +{ + return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, 0); +} + +/** + * * ibmvtpm_send_crq - Send a CRQ request * * @vdev: vio device struct - * @w1: first word - * @w2: second word + * @valid: Valid field + * @msg: Type field + * @len: Length field + * @data: Data field + * + * The ibmvtpm crq is defined as follows: + * + * Byte | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 + * ----------------------------------------------------------------------- + * Word0 | Valid | Type | Length | Data + * ----------------------------------------------------------------------- + * Word1 | Reserved + * ----------------------------------------------------------------------- + * + * Which matches the following structure (on bigendian host): + * + * struct ibmvtpm_crq { + * u8 valid; + * u8 msg; + * __be16 len; + * __be32 data; + * __be64 reserved; + * } __attribute__((packed, aligned(8))); + * + * However, the value is passed in a register so just compute the numeric value + * to load into the register avoiding byteswap altogether. Endian only affects + * memory loads and stores - registers are internally represented the same. 
* * Return: - * 0 -Sucess + * 0 (H_SUCCESS) - Success * Non-zero - Failure */ -static int ibmvtpm_send_crq(struct vio_dev *vdev, u64 w1, u64 w2) +static int ibmvtpm_send_crq(struct vio_dev *vdev, + u8 valid, u8 msg, u16 len, u32 data) { - return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, w2); + u64 w1 = ((u64)valid << 56) | ((u64)msg << 48) | ((u64)len << 32) | + (u64)data; + return ibmvtpm_send_crq_word(vdev, w1); } /** @@ -109,8 +153,6 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count) static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) { struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); - struct ibmvtpm_crq crq; - __be64 *word = (__be64 *)&crq; int rc, sig; if (!ibmvtpm->rtce_buf) { @@ -137,10 +179,6 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) spin_lock(&ibmvtpm->rtce_lock); ibmvtpm->res_len = 0; memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count); - crq.valid = (u8)IBMVTPM_VALID_CMD; - crq.msg = (u8)VTPM_TPM_COMMAND; - crq.len = cpu_to_be16(count); - crq.data = cpu_to_be32(ibmvtpm->rtce_dma_handle); /* * set the processing flag before the Hcall, since we may get the @@ -148,8 +186,9 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) */ ibmvtpm->tpm_processing_cmd = true; - rc = ibmvtpm_send_crq(ibmvtpm->vdev, be64_to_cpu(word[0]), - be64_to_cpu(word[1])); + rc = ibmvtpm_send_crq(ibmvtpm->vdev, + IBMVTPM_VALID_CMD, VTPM_TPM_COMMAND, + count, ibmvtpm->rtce_dma_handle); if (rc != H_SUCCESS) { dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc); rc = 0; @@ -182,15 +221,10 @@ static u8 tpm_ibmvtpm_status(struct tpm_chip *chip) */ static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm) { - struct ibmvtpm_crq crq; - u64 *buf = (u64 *) &crq; int rc; - crq.valid = (u8)IBMVTPM_VALID_CMD; - crq.msg = (u8)VTPM_GET_RTCE_BUFFER_SIZE; - - rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]), - cpu_to_be64(buf[1])); + rc = ibmvtpm_send_crq(ibmvtpm->vdev, + IBMVTPM_VALID_CMD, VTPM_GET_RTCE_BUFFER_SIZE, 0, 0); if (rc != H_SUCCESS) dev_err(ibmvtpm->dev, "ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc); @@ -210,15 +244,10 @@ static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm) */ static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm) { - struct ibmvtpm_crq crq; - u64 *buf = (u64 *) &crq; int rc; - crq.valid = (u8)IBMVTPM_VALID_CMD; - crq.msg = (u8)VTPM_GET_VERSION; - - rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]), - cpu_to_be64(buf[1])); + rc = ibmvtpm_send_crq(ibmvtpm->vdev, + IBMVTPM_VALID_CMD, VTPM_GET_VERSION, 0, 0); if (rc != H_SUCCESS) dev_err(ibmvtpm->dev, "ibmvtpm_crq_get_version failed rc=%d\n", rc); @@ -238,7 +267,7 @@ static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm) { int rc; - rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_COMP_CMD, 0); + rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_COMP_CMD); if (rc != H_SUCCESS) dev_err(ibmvtpm->dev, "ibmvtpm_crq_send_init_complete failed rc=%d\n", rc); @@ -258,7 +287,7 @@ static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm) { int rc; - rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_CMD, 0); + rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD); if (rc != H_SUCCESS) dev_err(ibmvtpm->dev, "ibmvtpm_crq_send_init failed rc=%d\n", rc); @@ -340,15 +369,10 @@ static int tpm_ibmvtpm_suspend(struct device *dev) { struct tpm_chip *chip = dev_get_drvdata(dev); struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); - struct ibmvtpm_crq crq; - 
u64 *buf = (u64 *) &crq; int rc = 0; - crq.valid = (u8)IBMVTPM_VALID_CMD; - crq.msg = (u8)VTPM_PREPARE_TO_SUSPEND; - - rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]), - cpu_to_be64(buf[1])); + rc = ibmvtpm_send_crq(ibmvtpm->vdev, + IBMVTPM_VALID_CMD, VTPM_PREPARE_TO_SUSPEND, 0, 0); if (rc != H_SUCCESS) dev_err(ibmvtpm->dev, "tpm_ibmvtpm_suspend failed rc=%d\n", rc); diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c index 3b1b9f9322d5..d8f10047fbba 100644 --- a/drivers/char/tpm/tpm_infineon.c +++ b/drivers/char/tpm/tpm_infineon.c @@ -191,7 +191,7 @@ static int wait(struct tpm_chip *chip, int wait_for_bit) /* check the status-register if wait_for_bit is set */ if (status & 1 << wait_for_bit) break; - msleep(TPM_MSLEEP_TIME); + tpm_msleep(TPM_MSLEEP_TIME); } if (i == TPM_MAX_TRIES) { /* timeout occurs */ if (wait_for_bit == STAT_XFE) @@ -226,7 +226,7 @@ static void tpm_wtx(struct tpm_chip *chip) wait_and_send(chip, TPM_CTRL_WTX); wait_and_send(chip, 0x00); wait_and_send(chip, 0x00); - msleep(TPM_WTX_MSLEEP_TIME); + tpm_msleep(TPM_WTX_MSLEEP_TIME); } static void tpm_wtx_abort(struct tpm_chip *chip) @@ -237,7 +237,7 @@ static void tpm_wtx_abort(struct tpm_chip *chip) wait_and_send(chip, 0x00); wait_and_send(chip, 0x00); number_of_wtx = 0; - msleep(TPM_WTX_MSLEEP_TIME); + tpm_msleep(TPM_WTX_MSLEEP_TIME); } static int tpm_inf_recv(struct tpm_chip *chip, u8 * buf, size_t count) diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index b617b2eeb080..63bc6c3b949e 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -51,7 +51,7 @@ static int wait_startup(struct tpm_chip *chip, int l) if (access & TPM_ACCESS_VALID) return 0; - msleep(TPM_TIMEOUT); + tpm_msleep(TPM_TIMEOUT); } while (time_before(jiffies, stop)); return -1; } @@ -117,7 +117,7 @@ again: do { if (check_locality(chip, l)) return l; - msleep(TPM_TIMEOUT); + tpm_msleep(TPM_TIMEOUT); } while (time_before(jiffies, stop)); } return -1; @@ -164,7 +164,7 @@ static int get_burstcount(struct tpm_chip *chip) burstcnt = (value >> 8) & 0xFFFF; if (burstcnt) return burstcnt; - msleep(TPM_TIMEOUT); + tpm_msleep(TPM_TIMEOUT); } while (time_before(jiffies, stop)); return -EBUSY; } @@ -396,7 +396,7 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) priv->irq = irq; chip->flags |= TPM_CHIP_FLAG_IRQ; if (!priv->irq_tested) - msleep(1); + tpm_msleep(1); if (!priv->irq_tested) disable_interrupts(chip); priv->irq_tested = true; diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c index 17b861ea2626..ae3167c28b12 100644 --- a/drivers/clocksource/mips-gic-timer.c +++ b/drivers/clocksource/mips-gic-timer.c @@ -10,25 +10,45 @@ #include <linux/cpu.h> #include <linux/init.h> #include <linux/interrupt.h> -#include <linux/irqchip/mips-gic.h> #include <linux/notifier.h> #include <linux/of_irq.h> #include <linux/percpu.h> #include <linux/smp.h> #include <linux/time.h> +#include <asm/mips-cps.h> static DEFINE_PER_CPU(struct clock_event_device, gic_clockevent_device); static int gic_timer_irq; static unsigned int gic_frequency; +static u64 notrace gic_read_count(void) +{ + unsigned int hi, hi2, lo; + + if (mips_cm_is64) + return read_gic_counter(); + + do { + hi = read_gic_counter_32h(); + lo = read_gic_counter_32l(); + hi2 = read_gic_counter_32h(); + } while (hi2 != hi); + + return (((u64) hi) << 32) + lo; +} + static int gic_next_event(unsigned long delta, struct clock_event_device *evt) { + unsigned long flags; u64 
cnt; int res; cnt = gic_read_count(); cnt += (u64)delta; - gic_write_cpu_compare(cnt, cpumask_first(evt->cpumask)); + local_irq_save(flags); + write_gic_vl_other(mips_cm_vp_id(cpumask_first(evt->cpumask))); + write_gic_vo_compare(cnt); + local_irq_restore(flags); res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0; return res; } @@ -37,7 +57,7 @@ static irqreturn_t gic_compare_interrupt(int irq, void *dev_id) { struct clock_event_device *cd = dev_id; - gic_write_compare(gic_read_compare()); + write_gic_vl_compare(read_gic_vl_compare()); cd->event_handler(cd); return IRQ_HANDLED; } @@ -139,10 +159,15 @@ static struct clocksource gic_clocksource = { static int __init __gic_clocksource_init(void) { + unsigned int count_width; int ret; /* Set clocksource mask. */ - gic_clocksource.mask = CLOCKSOURCE_MASK(gic_get_count_width()); + count_width = read_gic_config() & GIC_CONFIG_COUNTBITS; + count_width >>= __fls(GIC_CONFIG_COUNTBITS); + count_width *= 4; + count_width += 32; + gic_clocksource.mask = CLOCKSOURCE_MASK(count_width); /* Calculate a somewhat reasonable rating value. */ gic_clocksource.rating = 200 + gic_frequency / 10000000; @@ -159,7 +184,7 @@ static int __init gic_clocksource_of_init(struct device_node *node) struct clk *clk; int ret; - if (!gic_present || !node->parent || + if (!mips_gic_present() || !node->parent || !of_device_is_compatible(node->parent, "mti,gic")) { pr_warn("No DT definition for the mips gic driver\n"); return -ENXIO; @@ -197,7 +222,7 @@ static int __init gic_clocksource_of_init(struct device_node *node) } /* And finally start the counter */ - gic_start_count(); + clear_gic_config(GIC_CONFIG_COUNTSTOP); return 0; } diff --git a/drivers/clocksource/numachip.c b/drivers/clocksource/numachip.c index 6a20dc8b253f..9a7d7f0f23fe 100644 --- a/drivers/clocksource/numachip.c +++ b/drivers/clocksource/numachip.c @@ -43,7 +43,7 @@ static int numachip2_set_next_event(unsigned long delta, struct clock_event_devi return 0; } -static struct clock_event_device numachip2_clockevent = { +static const struct clock_event_device numachip2_clockevent __initconst = { .name = "numachip2", .rating = 400, .set_next_event = numachip2_set_next_event, diff --git a/drivers/clocksource/timer-integrator-ap.c b/drivers/clocksource/timer-integrator-ap.c index 2ff64d9d4fb3..62d24690ba02 100644 --- a/drivers/clocksource/timer-integrator-ap.c +++ b/drivers/clocksource/timer-integrator-ap.c @@ -36,8 +36,8 @@ static u64 notrace integrator_read_sched_clock(void) return -readl(sched_clk_base + TIMER_VALUE); } -static int integrator_clocksource_init(unsigned long inrate, - void __iomem *base) +static int __init integrator_clocksource_init(unsigned long inrate, + void __iomem *base) { u32 ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC; unsigned long rate = inrate; diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index a020da7940d6..a753c50e9e41 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -106,6 +106,22 @@ static const struct of_device_id whitelist[] __initconst = { * platforms using "operating-points-v2" property. 
*/ static const struct of_device_id blacklist[] __initconst = { + { .compatible = "calxeda,highbank", }, + { .compatible = "calxeda,ecx-2000", }, + + { .compatible = "marvell,armadaxp", }, + + { .compatible = "nvidia,tegra124", }, + + { .compatible = "st,stih407", }, + { .compatible = "st,stih410", }, + + { .compatible = "sigma,tango4", }, + + { .compatible = "ti,am33xx", }, + { .compatible = "ti,am43", }, + { .compatible = "ti,dra7", }, + { } }; diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c index 9f013ed42977..80ac313e6c59 100644 --- a/drivers/cpufreq/powernow-k7.c +++ b/drivers/cpufreq/powernow-k7.c @@ -578,7 +578,7 @@ static int acer_cpufreq_pst(const struct dmi_system_id *d) * A BIOS update is all that can save them. * Mention this, and disable cpufreq. */ -static struct dmi_system_id powernow_dmi_table[] = { +static const struct dmi_system_id powernow_dmi_table[] = { { .callback = acer_cpufreq_pst, .ident = "Acer Aspire", diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c index b29cd3398463..4bf47de6101f 100644 --- a/drivers/cpufreq/ti-cpufreq.c +++ b/drivers/cpufreq/ti-cpufreq.c @@ -190,7 +190,7 @@ static int ti_cpufreq_setup_syscon_register(struct ti_cpufreq_data *opp_data) static const struct of_device_id ti_cpufreq_of_match[] = { { .compatible = "ti,am33xx", .data = &am3x_soc_data, }, - { .compatible = "ti,am4372", .data = &am4x_soc_data, }, + { .compatible = "ti,am43", .data = &am4x_soc_data, }, { .compatible = "ti,dra7", .data = &dra7_soc_data }, {}, }; diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c index 7080c384ad5d..52a75053ee03 100644 --- a/drivers/cpuidle/cpuidle-arm.c +++ b/drivers/cpuidle/cpuidle-arm.c @@ -104,13 +104,13 @@ static int __init arm_idle_init(void) ret = dt_init_idle_driver(drv, arm_idle_state_match, 1); if (ret <= 0) { ret = ret ? : -ENODEV; - goto out_fail; + goto init_fail; } ret = cpuidle_register_driver(drv); if (ret) { pr_err("Failed to register cpuidle driver\n"); - goto out_fail; + goto init_fail; } /* @@ -149,6 +149,8 @@ static int __init arm_idle_init(void) } return 0; +init_fail: + kfree(drv); out_fail: while (--cpu >= 0) { dev = per_cpu(cpuidle_devices, cpu); diff --git a/drivers/cpuidle/cpuidle-cps.c b/drivers/cpuidle/cpuidle-cps.c index 12b9145913de..72b5e47286b4 100644 --- a/drivers/cpuidle/cpuidle-cps.c +++ b/drivers/cpuidle/cpuidle-cps.c @@ -37,7 +37,7 @@ static int cps_nc_enter(struct cpuidle_device *dev, * TODO: don't treat core 0 specially, just prevent the final core * TODO: remap interrupt affinity temporarily */ - if (!cpu_data[dev->cpu].core && (index > STATE_NC_WAIT)) + if (cpus_are_siblings(0, dev->cpu) && (index > STATE_NC_WAIT)) index = STATE_NC_WAIT; /* Select the appropriate cps_pm_state */ diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig index e36aeacd7635..1eb852765469 100644 --- a/drivers/crypto/caam/Kconfig +++ b/drivers/crypto/caam/Kconfig @@ -1,6 +1,7 @@ config CRYPTO_DEV_FSL_CAAM tristate "Freescale CAAM-Multicore driver backend" depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE + select SOC_BUS help Enables the driver module for Freescale's Cryptographic Accelerator and Assurance Module (CAAM), also known as the SEC version 4 (SEC4). @@ -141,10 +142,6 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API To compile this as a module, choose M here: the module will be called caamrng. 
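The blacklist added to cpufreq-dt-platdev above lists SoCs that describe their OPPs with "operating-points-v2" but ship a dedicated cpufreq driver, so the generic "cpufreq-dt" platform device must not be created for them. A rough, hypothetical sketch of that gating, with simplified names and control flow rather than the driver's exact code:

#include <linux/err.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static const struct of_device_id example_blacklist[] = {
        { .compatible = "ti,am33xx", },
        { .compatible = "ti,dra7", },
        { }
};

static int __init example_cpufreq_dt_platdev_init(void)
{
        struct device_node *np = of_find_node_by_path("/");

        if (!np)
                return -ENODEV;

        if (of_match_node(example_blacklist, np)) {
                of_node_put(np);
                return -ENODEV; /* SoC has its own cpufreq driver */
        }
        of_node_put(np);

        /* otherwise hand CPU frequency scaling to the generic driver */
        return PTR_ERR_OR_ZERO(platform_device_register_simple("cpufreq-dt",
                                                                -1, NULL, 0));
}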
-config CRYPTO_DEV_FSL_CAAM_IMX - def_bool SOC_IMX6 || SOC_IMX7D - depends on CRYPTO_DEV_FSL_CAAM - config CRYPTO_DEV_FSL_CAAM_DEBUG bool "Enable debug output in CAAM driver" depends on CRYPTO_DEV_FSL_CAAM diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index dacb53fb690e..027e121c6f70 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -7,6 +7,7 @@ #include <linux/device.h> #include <linux/of_address.h> #include <linux/of_irq.h> +#include <linux/sys_soc.h> #include "compat.h" #include "regs.h" @@ -19,6 +20,8 @@ bool caam_little_end; EXPORT_SYMBOL(caam_little_end); bool caam_dpaa2; EXPORT_SYMBOL(caam_dpaa2); +bool caam_imx; +EXPORT_SYMBOL(caam_imx); #ifdef CONFIG_CAAM_QI #include "qi.h" @@ -28,19 +31,11 @@ EXPORT_SYMBOL(caam_dpaa2); * i.MX targets tend to have clock control subsystems that can * enable/disable clocking to our device. */ -#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX static inline struct clk *caam_drv_identify_clk(struct device *dev, char *clk_name) { - return devm_clk_get(dev, clk_name); + return caam_imx ? devm_clk_get(dev, clk_name) : NULL; } -#else -static inline struct clk *caam_drv_identify_clk(struct device *dev, - char *clk_name) -{ - return NULL; -} -#endif /* * Descriptor to instantiate RNG State Handle 0 in normal mode and @@ -430,6 +425,10 @@ static int caam_probe(struct platform_device *pdev) { int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN; u64 caam_id; + static const struct soc_device_attribute imx_soc[] = { + {.family = "Freescale i.MX"}, + {}, + }; struct device *dev; struct device_node *nprop, *np; struct caam_ctrl __iomem *ctrl; @@ -451,6 +450,8 @@ static int caam_probe(struct platform_device *pdev) dev_set_drvdata(dev, ctrlpriv); nprop = pdev->dev.of_node; + caam_imx = (bool)soc_device_match(imx_soc); + /* Enable clocking */ clk = caam_drv_identify_clk(&pdev->dev, "ipg"); if (IS_ERR(clk)) { diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index 2b5efff9ec3c..17cfd23a38fa 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h @@ -67,6 +67,7 @@ */ extern bool caam_little_end; +extern bool caam_imx; #define caam_to_cpu(len) \ static inline u##len caam##len ## _to_cpu(u##len val) \ @@ -154,13 +155,10 @@ static inline u64 rd_reg64(void __iomem *reg) #else /* CONFIG_64BIT */ static inline void wr_reg64(void __iomem *reg, u64 data) { -#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX - if (caam_little_end) { + if (!caam_imx && caam_little_end) { wr_reg32((u32 __iomem *)(reg) + 1, data >> 32); wr_reg32((u32 __iomem *)(reg), data); - } else -#endif - { + } else { wr_reg32((u32 __iomem *)(reg), data >> 32); wr_reg32((u32 __iomem *)(reg) + 1, data); } @@ -168,41 +166,40 @@ static inline void wr_reg64(void __iomem *reg, u64 data) static inline u64 rd_reg64(void __iomem *reg) { -#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX - if (caam_little_end) + if (!caam_imx && caam_little_end) return ((u64)rd_reg32((u32 __iomem *)(reg) + 1) << 32 | (u64)rd_reg32((u32 __iomem *)(reg))); - else -#endif - return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 | - (u64)rd_reg32((u32 __iomem *)(reg) + 1)); + + return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 | + (u64)rd_reg32((u32 __iomem *)(reg) + 1)); } #endif /* CONFIG_64BIT */ +static inline u64 cpu_to_caam_dma64(dma_addr_t value) +{ + if (caam_imx) + return (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | + (u64)cpu_to_caam32(upper_32_bits(value))); + + return cpu_to_caam64(value); +} + +static inline u64 caam_dma64_to_cpu(u64 value) +{ + if (caam_imx) + return 
(((u64)caam32_to_cpu(lower_32_bits(value)) << 32) | + (u64)caam32_to_cpu(upper_32_bits(value))); + + return caam64_to_cpu(value); +} + #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT -#ifdef CONFIG_SOC_IMX7D -#define cpu_to_caam_dma(value) \ - (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \ - (u64)cpu_to_caam32(upper_32_bits(value))) -#define caam_dma_to_cpu(value) \ - (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) | \ - (u64)caam32_to_cpu(upper_32_bits(value))) -#else -#define cpu_to_caam_dma(value) cpu_to_caam64(value) -#define caam_dma_to_cpu(value) caam64_to_cpu(value) -#endif /* CONFIG_SOC_IMX7D */ +#define cpu_to_caam_dma(value) cpu_to_caam_dma64(value) +#define caam_dma_to_cpu(value) caam_dma64_to_cpu(value) #else #define cpu_to_caam_dma(value) cpu_to_caam32(value) #define caam_dma_to_cpu(value) caam32_to_cpu(value) -#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */ - -#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX -#define cpu_to_caam_dma64(value) \ - (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \ - (u64)cpu_to_caam32(upper_32_bits(value))) -#else -#define cpu_to_caam_dma64(value) cpu_to_caam64(value) -#endif +#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */ /* * jr_outentry diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index d2207ac5ba19..5438552bc6d7 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -386,7 +386,7 @@ static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm) struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); struct safexcel_crypto_priv *priv = ctx->priv; struct skcipher_request req; - struct safexcel_inv_result result = { 0 }; + struct safexcel_inv_result result = {}; int ring = ctx->base.ring; memset(&req, 0, sizeof(struct skcipher_request)); diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index 3f819399cd95..3980f946874f 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -419,7 +419,7 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm) struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm); struct safexcel_crypto_priv *priv = ctx->priv; struct ahash_request req; - struct safexcel_inv_result result = { 0 }; + struct safexcel_inv_result result = {}; int ring = ctx->base.ring; memset(&req, 0, sizeof(struct ahash_request)); diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 79791c690858..dff88838dce7 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -1756,9 +1756,9 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, req_ctx->swinit = 0; } else { desc->ptr[1] = zero_entry; - /* Indicate next op is not the first. */ - req_ctx->first = 0; } + /* Indicate next op is not the first. 
*/ + req_ctx->first = 0; /* HMAC key */ if (ctx->keylen) @@ -1769,7 +1769,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, sg_count = edesc->src_nents ?: 1; if (is_sec1 && sg_count > 1) - sg_copy_to_buffer(areq->src, sg_count, edesc->buf, length); + sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length); else sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count, DMA_TO_DEVICE); @@ -3057,7 +3057,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev, t_alg->algt.alg.hash.final = ahash_final; t_alg->algt.alg.hash.finup = ahash_finup; t_alg->algt.alg.hash.digest = ahash_digest; - t_alg->algt.alg.hash.setkey = ahash_setkey; + if (!strncmp(alg->cra_name, "hmac", 4)) + t_alg->algt.alg.hash.setkey = ahash_setkey; t_alg->algt.alg.hash.import = ahash_import; t_alg->algt.alg.hash.export = ahash_export; diff --git a/drivers/dax/super.c b/drivers/dax/super.c index 3600ff786646..557b93703532 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c @@ -201,8 +201,10 @@ static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n) if (!dax_dev) return 0; - if (a == &dev_attr_write_cache.attr && !dax_dev->ops->flush) +#ifndef CONFIG_ARCH_HAS_PMEM_API + if (a == &dev_attr_write_cache.attr) return 0; +#endif return a->mode; } @@ -267,18 +269,23 @@ size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, } EXPORT_SYMBOL_GPL(dax_copy_from_iter); -void dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, - size_t size) +#ifdef CONFIG_ARCH_HAS_PMEM_API +void arch_wb_cache_pmem(void *addr, size_t size); +void dax_flush(struct dax_device *dax_dev, void *addr, size_t size) { - if (!dax_alive(dax_dev)) + if (unlikely(!dax_alive(dax_dev))) return; - if (!test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags)) + if (unlikely(!test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags))) return; - if (dax_dev->ops->flush) - dax_dev->ops->flush(dax_dev, pgoff, addr, size); + arch_wb_cache_pmem(addr, size); } +#else +void dax_flush(struct dax_device *dax_dev, void *addr, size_t size) +{ +} +#endif EXPORT_SYMBOL_GPL(dax_flush); void dax_write_cache(struct dax_device *dax_dev, bool wc) diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c index c46387160976..c8f169bf2e27 100644 --- a/drivers/firmware/google/gsmi.c +++ b/drivers/firmware/google/gsmi.c @@ -709,7 +709,7 @@ static u32 __init hash_oem_table_id(char s[8]) return local_hash_64(input, 32); } -static struct dmi_system_id gsmi_dmi_table[] __initdata = { +static const struct dmi_system_id gsmi_dmi_table[] __initconst = { { .ident = "Google Board", .matches = { diff --git a/drivers/firmware/google/memconsole-x86-legacy.c b/drivers/firmware/google/memconsole-x86-legacy.c index 8c1bf6dbdaa6..19bcbd10855b 100644 --- a/drivers/firmware/google/memconsole-x86-legacy.c +++ b/drivers/firmware/google/memconsole-x86-legacy.c @@ -126,7 +126,7 @@ static bool memconsole_ebda_init(void) return false; } -static struct dmi_system_id memconsole_dmi_table[] __initdata = { +static const struct dmi_system_id memconsole_dmi_table[] __initconst = { { .ident = "Google Board", .matches = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 12e71bbfd222..103635ab784c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -76,7 +76,7 @@ extern int amdgpu_modeset; extern int amdgpu_vram_limit; extern int amdgpu_vis_vram_limit; -extern unsigned amdgpu_gart_size; +extern int amdgpu_gart_size; extern int amdgpu_gtt_size; 
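The caam ctrl.c hunk above replaces the compile-time CRYPTO_DEV_FSL_CAAM_IMX switch with a runtime check: soc_device_match() against a soc_device_attribute table tells the driver whether it is running on an i.MX part, and the clock handling and 64-bit register accessors then key off the resulting boolean. A minimal sketch of that detection, assuming SOC_BUS is enabled as the Kconfig change selects; the example_* names are made up:

#include <linux/sys_soc.h>

static bool example_is_imx;

static const struct soc_device_attribute example_imx_soc[] = {
        { .family = "Freescale i.MX" },
        { /* sentinel */ }
};

static void example_detect_soc(void)
{
        /*
         * soc_device_match() returns the matching entry or NULL, so the
         * result collapses to a bool, as caam_probe() does for caam_imx.
         */
        example_is_imx = !!soc_device_match(example_imx_soc);
}

Runtime detection lets one kernel image cover i.MX alongside the other platforms the driver supports (FSL_SOC, ARCH_LAYERSCAPE), instead of deciding the quirks at build time.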
extern int amdgpu_moverate; extern int amdgpu_benchmarking; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c index fb6e5dbd5a03..309f2419c6d8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c @@ -155,7 +155,6 @@ static const struct kfd2kgd_calls kfd2kgd = { struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void) { return (struct kfd2kgd_calls *)&kfd2kgd; - return (struct kfd2kgd_calls *)&kfd2kgd; } static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 269b835571eb..60d8bedb694d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1079,6 +1079,9 @@ static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p, GFP_KERNEL); p->num_post_dep_syncobjs = 0; + if (!p->post_dep_syncobjs) + return -ENOMEM; + for (i = 0; i < num_deps; ++i) { p->post_dep_syncobjs[i] = drm_syncobj_find(p->filp, deps[i].handle); if (!p->post_dep_syncobjs[i]) @@ -1150,7 +1153,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence); job->uf_sequence = cs->out.handle; amdgpu_job_free_resources(job); - amdgpu_cs_parser_fini(p, 0, true); trace_amdgpu_cs_ioctl(job); amd_sched_entity_push_job(&job->base); @@ -1208,10 +1210,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) goto out; r = amdgpu_cs_submit(&parser, cs); - if (r) - goto out; - return 0; out: amdgpu_cs_parser_fini(&parser, r, reserved_buffers); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 1a459ac63df4..e630d918fefc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1062,11 +1062,11 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev) amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs); } - if (amdgpu_gart_size < 32) { + if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) { /* gart size must be greater or equal to 32M */ dev_warn(adev->dev, "gart size (%d) too small\n", amdgpu_gart_size); - amdgpu_gart_size = 32; + amdgpu_gart_size = -1; } if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) { @@ -2622,12 +2622,6 @@ static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev, goto err; } - r = amdgpu_ttm_bind(&bo->shadow->tbo, &bo->shadow->tbo.mem); - if (r) { - DRM_ERROR("%p bind failed\n", bo->shadow); - goto err; - } - r = amdgpu_bo_restore_from_shadow(adev, ring, bo, NULL, fence, true); if (r) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index e39ec981b11c..0f16986ec5bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -76,7 +76,7 @@ int amdgpu_vram_limit = 0; int amdgpu_vis_vram_limit = 0; -unsigned amdgpu_gart_size = 256; +int amdgpu_gart_size = -1; /* auto */ int amdgpu_gtt_size = -1; /* auto */ int amdgpu_moverate = -1; /* auto */ int amdgpu_benchmarking = 0; @@ -128,7 +128,7 @@ module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); MODULE_PARM_DESC(vis_vramlimit, "Restrict visible VRAM for testing, in megabytes"); module_param_named(vis_vramlimit, amdgpu_vis_vram_limit, int, 0444); -MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc.)"); 
+MODULE_PARM_DESC(gartsize, "Size of GART to setup in megabytes (32, 64, etc., -1=auto)"); module_param_named(gartsize, amdgpu_gart_size, uint, 0600); MODULE_PARM_DESC(gttsize, "Size of the GTT domain in megabytes (-1 = auto)"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index 94c1e2e8e34c..f4370081f6e6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -57,18 +57,6 @@ */ /** - * amdgpu_gart_set_defaults - set the default gart_size - * - * @adev: amdgpu_device pointer - * - * Set the default gart_size based on parameters and available VRAM. - */ -void amdgpu_gart_set_defaults(struct amdgpu_device *adev) -{ - adev->mc.gart_size = (uint64_t)amdgpu_gart_size << 20; -} - -/** * amdgpu_gart_table_ram_alloc - allocate system ram for gart page table * * @adev: amdgpu_device pointer diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h index d4cce6936200..afbe803b1a13 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h @@ -56,7 +56,6 @@ struct amdgpu_gart { const struct amdgpu_gart_funcs *gart_funcs; }; -void amdgpu_gart_set_defaults(struct amdgpu_device *adev); int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev); void amdgpu_gart_table_ram_free(struct amdgpu_device *adev); int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index 9e05e257729f..0d15eb7d31d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -108,10 +108,10 @@ bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem) * * Allocate the address space for a node. 
*/ -int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, - struct ttm_buffer_object *tbo, - const struct ttm_place *place, - struct ttm_mem_reg *mem) +static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, + struct ttm_buffer_object *tbo, + const struct ttm_place *place, + struct ttm_mem_reg *mem) { struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev); struct amdgpu_gtt_mgr *mgr = man->priv; @@ -143,12 +143,8 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, fpfn, lpfn, mode); spin_unlock(&mgr->lock); - if (!r) { + if (!r) mem->start = node->start; - if (&tbo->mem == mem) - tbo->offset = (tbo->mem.start << PAGE_SHIFT) + - tbo->bdev->man[tbo->mem.mem_type].gpu_offset; - } return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 4bdd851f56d0..538e5f27d120 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -221,8 +221,9 @@ int amdgpu_irq_init(struct amdgpu_device *adev) spin_lock_init(&adev->irq.lock); - /* Disable vblank irqs aggressively for power-saving */ - adev->ddev->vblank_disable_immediate = true; + if (!adev->enable_virtual_display) + /* Disable vblank irqs aggressively for power-saving */ + adev->ddev->vblank_disable_immediate = true; r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc); if (r) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index e7e899190bef..9e495da0bb03 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -91,7 +91,10 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev, if (domain & AMDGPU_GEM_DOMAIN_GTT) { places[c].fpfn = 0; - places[c].lpfn = 0; + if (flags & AMDGPU_GEM_CREATE_SHADOW) + places[c].lpfn = adev->mc.gart_size >> PAGE_SHIFT; + else + places[c].lpfn = 0; places[c].flags = TTM_PL_FLAG_TT; if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) places[c].flags |= TTM_PL_FLAG_WC | @@ -446,17 +449,16 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev, if (bo->shadow) return 0; - bo->flags |= AMDGPU_GEM_CREATE_SHADOW; - memset(&placements, 0, - (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place)); - - amdgpu_ttm_placement_init(adev, &placement, - placements, AMDGPU_GEM_DOMAIN_GTT, - AMDGPU_GEM_CREATE_CPU_GTT_USWC); + memset(&placements, 0, sizeof(placements)); + amdgpu_ttm_placement_init(adev, &placement, placements, + AMDGPU_GEM_DOMAIN_GTT, + AMDGPU_GEM_CREATE_CPU_GTT_USWC | + AMDGPU_GEM_CREATE_SHADOW); r = amdgpu_bo_create_restricted(adev, size, byte_align, true, AMDGPU_GEM_DOMAIN_GTT, - AMDGPU_GEM_CREATE_CPU_GTT_USWC, + AMDGPU_GEM_CREATE_CPU_GTT_USWC | + AMDGPU_GEM_CREATE_SHADOW, NULL, &placement, bo->tbo.resv, 0, @@ -484,30 +486,28 @@ int amdgpu_bo_create(struct amdgpu_device *adev, { struct ttm_placement placement = {0}; struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1]; + uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW; int r; - memset(&placements, 0, - (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place)); + memset(&placements, 0, sizeof(placements)); + amdgpu_ttm_placement_init(adev, &placement, placements, + domain, parent_flags); - amdgpu_ttm_placement_init(adev, &placement, - placements, domain, flags); - - r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel, - domain, flags, sg, &placement, - resv, init_value, bo_ptr); + r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel, domain, + parent_flags, sg, &placement, resv, + init_value, 
bo_ptr); if (r) return r; - if (amdgpu_need_backup(adev) && (flags & AMDGPU_GEM_CREATE_SHADOW)) { - if (!resv) { - r = ww_mutex_lock(&(*bo_ptr)->tbo.resv->lock, NULL); - WARN_ON(r != 0); - } + if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_need_backup(adev)) { + if (!resv) + WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv, + NULL)); r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr)); if (!resv) - ww_mutex_unlock(&(*bo_ptr)->tbo.resv->lock); + reservation_object_unlock((*bo_ptr)->tbo.resv); if (r) amdgpu_bo_unref(bo_ptr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 6c5646b48d1a..5ce65280b396 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -170,6 +170,16 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned irq_type) { int r; + int sched_hw_submission = amdgpu_sched_hw_submission; + + /* Set the hw submission limit higher for KIQ because + * it's used for a number of gfx/compute tasks by both + * KFD and KGD which may have outstanding fences and + * it doesn't really use the gpu scheduler anyway; + * KIQ tasks get submitted directly to the ring. + */ + if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) + sched_hw_submission = max(sched_hw_submission, 256); if (ring->adev == NULL) { if (adev->num_rings >= AMDGPU_MAX_RINGS) @@ -178,8 +188,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, ring->adev = adev; ring->idx = adev->num_rings++; adev->rings[ring->idx] = ring; - r = amdgpu_fence_driver_init_ring(ring, - amdgpu_sched_hw_submission); + r = amdgpu_fence_driver_init_ring(ring, sched_hw_submission); if (r) return r; } @@ -218,8 +227,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, return r; } - ring->ring_size = roundup_pow_of_two(max_dw * 4 * - amdgpu_sched_hw_submission); + ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission); ring->buf_mask = (ring->ring_size / 4) - 1; ring->ptr_mask = ring->funcs->support_64bit_ptrs ? 
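The amdgpu_ring_init() hunk above raises the scheduler hardware submission count to at least 256 for the KIQ ring and derives the ring buffer size and wrap masks from it. A small standalone sketch of that sizing arithmetic, using illustrative values rather than driver state:

#include <linux/kernel.h>
#include <linux/log2.h>

static void example_ring_sizing(void)
{
        unsigned int max_dw = 1024;             /* dwords per submission */
        unsigned int sched_hw_submission = 256; /* raised for KIQ */
        unsigned int ring_size, buf_mask;

        /*
         * The ring must hold max_dw dwords for each in-flight submission,
         * rounded up to a power of two so wrapping can use cheap AND masks.
         */
        ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission);
        buf_mask = (ring_size / 4) - 1;         /* index mask in dwords */

        pr_info("ring_size=%u bytes, buf_mask=0x%x\n", ring_size, buf_mask);
}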
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 8b2c294f6f79..7ef6c28a34d9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -761,35 +761,11 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) sg_free_table(ttm->sg); } -static int amdgpu_ttm_do_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem) -{ - struct amdgpu_ttm_tt *gtt = (void *)ttm; - uint64_t flags; - int r; - - spin_lock(&gtt->adev->gtt_list_lock); - flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, mem); - gtt->offset = (u64)mem->start << PAGE_SHIFT; - r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages, - ttm->pages, gtt->ttm.dma_address, flags); - - if (r) { - DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", - ttm->num_pages, gtt->offset); - goto error_gart_bind; - } - - list_add_tail(&gtt->list, &gtt->adev->gtt_list); -error_gart_bind: - spin_unlock(&gtt->adev->gtt_list_lock); - return r; - -} - static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) { struct amdgpu_ttm_tt *gtt = (void*)ttm; + uint64_t flags; int r = 0; if (gtt->userptr) { @@ -809,9 +785,24 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, bo_mem->mem_type == AMDGPU_PL_OA) return -EINVAL; - if (amdgpu_gtt_mgr_is_allocated(bo_mem)) - r = amdgpu_ttm_do_bind(ttm, bo_mem); + if (!amdgpu_gtt_mgr_is_allocated(bo_mem)) + return 0; + spin_lock(&gtt->adev->gtt_list_lock); + flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem); + gtt->offset = (u64)bo_mem->start << PAGE_SHIFT; + r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages, + ttm->pages, gtt->ttm.dma_address, flags); + + if (r) { + DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", + ttm->num_pages, gtt->offset); + goto error_gart_bind; + } + + list_add_tail(&gtt->list, &gtt->adev->gtt_list); +error_gart_bind: + spin_unlock(&gtt->adev->gtt_list_lock); return r; } @@ -824,20 +815,39 @@ bool amdgpu_ttm_is_bound(struct ttm_tt *ttm) int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem) { + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); struct ttm_tt *ttm = bo->ttm; + struct ttm_mem_reg tmp; + + struct ttm_placement placement; + struct ttm_place placements; int r; if (!ttm || amdgpu_ttm_is_bound(ttm)) return 0; - r = amdgpu_gtt_mgr_alloc(&bo->bdev->man[TTM_PL_TT], bo, - NULL, bo_mem); - if (r) { - DRM_ERROR("Failed to allocate GTT address space (%d)\n", r); + tmp = bo->mem; + tmp.mm_node = NULL; + placement.num_placement = 1; + placement.placement = &placements; + placement.num_busy_placement = 1; + placement.busy_placement = &placements; + placements.fpfn = 0; + placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT; + placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; + + r = ttm_bo_mem_space(bo, &placement, &tmp, true, false); + if (unlikely(r)) return r; - } - return amdgpu_ttm_do_bind(ttm, bo_mem); + r = ttm_bo_move_ttm(bo, true, false, &tmp); + if (unlikely(r)) + ttm_bo_mem_put(bo, &tmp); + else + bo->offset = (bo->mem.start << PAGE_SHIFT) + + bo->bdev->man[bo->mem.mem_type].gpu_offset; + + return r; } int amdgpu_ttm_recover_gart(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index f22a4758719d..43093bffa2cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -62,10 +62,6 @@ extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func; extern const struct ttm_mem_type_manager_func
amdgpu_vram_mgr_func; bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem); -int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, - struct ttm_buffer_object *tbo, - const struct ttm_place *place, - struct ttm_mem_reg *mem); uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man); uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index b9a5a77eedaf..bd20ff018512 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -165,14 +165,6 @@ static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent, unsigned i; int r; - if (parent->bo->shadow) { - struct amdgpu_bo *shadow = parent->bo->shadow; - - r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem); - if (r) - return r; - } - if (use_cpu_for_update) { r = amdgpu_bo_kmap(parent->bo, NULL); if (r) @@ -1277,7 +1269,7 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p, /* In the case of a mixed PT the PDE must point to it*/ if (p->adev->asic_type < CHIP_VEGA10 || nptes != AMDGPU_VM_PTE_COUNT(p->adev) || - p->func == amdgpu_vm_do_copy_ptes || + p->src || !(flags & AMDGPU_PTE_VALID)) { dst = amdgpu_bo_gpu_offset(entry->bo); @@ -1294,9 +1286,23 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p, entry->addr = (dst | flags); if (use_cpu_update) { + /* In case a huge page is replaced with a system + * memory mapping, p->pages_addr != NULL and + * amdgpu_vm_cpu_set_ptes would try to translate dst + * through amdgpu_vm_map_gart. But dst is already a + * GPU address (of the page table). Disable + * amdgpu_vm_map_gart temporarily. + */ + dma_addr_t *tmp; + + tmp = p->pages_addr; + p->pages_addr = NULL; + pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo); pde = pd_addr + (entry - parent->entries) * 8; amdgpu_vm_cpu_set_ptes(p, pde, dst, 1, 0, flags); + + p->pages_addr = tmp; } else { if (parent->bo->shadow) { pd_addr = amdgpu_bo_gpu_offset(parent->bo->shadow); @@ -1610,7 +1616,6 @@ error_free: * * @adev: amdgpu_device pointer * @exclusive: fence we need to sync to - * @gtt_flags: flags as they are used for GTT * @pages_addr: DMA addresses to use for mapping * @vm: requested vm * @mapping: mapped range and flags to use for the update @@ -1624,7 +1629,6 @@ error_free: */ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, struct dma_fence *exclusive, - uint64_t gtt_flags, dma_addr_t *pages_addr, struct amdgpu_vm *vm, struct amdgpu_bo_va_mapping *mapping, @@ -1679,11 +1683,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, } if (pages_addr) { - if (flags == gtt_flags) - src = adev->gart.table_addr + - (addr >> AMDGPU_GPU_PAGE_SHIFT) * 8; - else - max_entries = min(max_entries, 16ull * 1024ull); + max_entries = min(max_entries, 16ull * 1024ull); addr = 0; } else if (flags & AMDGPU_PTE_VALID) { addr += adev->vm_manager.vram_base_offset; @@ -1728,10 +1728,10 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_vm *vm = bo_va->base.vm; struct amdgpu_bo_va_mapping *mapping; dma_addr_t *pages_addr = NULL; - uint64_t gtt_flags, flags; struct ttm_mem_reg *mem; struct drm_mm_node *nodes; struct dma_fence *exclusive; + uint64_t flags; int r; if (clear || !bo_va->base.bo) { @@ -1751,15 +1751,10 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, exclusive = reservation_object_get_excl(bo->tbo.resv); } - if (bo) { + if (bo) flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem); - gtt_flags = 
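/*
 * Illustrative sketch, not the kernel code. The huge-page hunk above
 * temporarily clears p->pages_addr so that the CPU PTE writer does not push
 * an already-final GPU address through the GART translation step. The model
 * below uses invented names (struct params, set_pte) purely to show the
 * save / clear / call / restore pattern.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct params {
	const uint64_t *pages_addr;	/* translation table; NULL = no translation */
};

/* Writes one "PTE": translates the value only when a table is installed. */
static uint64_t set_pte(const struct params *p, uint64_t value)
{
	if (p->pages_addr)
		value = p->pages_addr[value & 0xff];
	return value;
}

int main(void)
{
	uint64_t table[256] = { 0 };
	struct params p = { .pages_addr = table };
	uint64_t gpu_addr = 0x100000;	/* already a final GPU address */

	/* Save, clear, call, restore -- same shape as the hunk above. */
	const uint64_t *saved = p.pages_addr;
	p.pages_addr = NULL;
	uint64_t pte = set_pte(&p, gpu_addr);
	p.pages_addr = saved;

	printf("pte=0x%llx\n", (unsigned long long)pte);
	return 0;
}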
(amdgpu_ttm_is_bound(bo->tbo.ttm) && - adev == amdgpu_ttm_adev(bo->tbo.bdev)) ? - flags : 0; - } else { + else flags = 0x0; - gtt_flags = ~0x0; - } spin_lock(&vm->status_lock); if (!list_empty(&bo_va->base.vm_status)) @@ -1767,8 +1762,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, spin_unlock(&vm->status_lock); list_for_each_entry(mapping, &bo_va->invalids, list) { - r = amdgpu_vm_bo_split_mapping(adev, exclusive, - gtt_flags, pages_addr, vm, + r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm, mapping, flags, nodes, &bo_va->last_pt_update); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index d228f5a99044..dbbe986f90f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -636,7 +636,194 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev) NUM_BANKS(ADDR_SURF_2_BANK); for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) WREG32(mmGB_TILE_MODE0 + reg_offset, tilemode[reg_offset]); - } else if (adev->asic_type == CHIP_OLAND || adev->asic_type == CHIP_HAINAN) { + } else if (adev->asic_type == CHIP_OLAND) { + tilemode[0] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4); + tilemode[1] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4); + tilemode[2] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4); + tilemode[3] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4); + tilemode[4] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[5] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(split_equal_to_row_size) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[6] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(split_equal_to_row_size) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[7] = 
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(split_equal_to_row_size) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4); + tilemode[8] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[9] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[10] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4); + tilemode[11] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[12] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[13] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[14] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[15] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[16] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[17] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(split_equal_to_row_size) | + 
NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[21] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[22] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4); + tilemode[23] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[24] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[25] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) | + NUM_BANKS(ADDR_SURF_8_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1); + for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) + WREG32(mmGB_TILE_MODE0 + reg_offset, tilemode[reg_offset]); + } else if (adev->asic_type == CHIP_HAINAN) { tilemode[0] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P2) | diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 832e592fcd07..fc260c13b1da 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -4579,9 +4579,9 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring) mqd->compute_misc_reserved = 0x00000003; if (!(adev->flags & AMD_IS_APU)) { mqd->dynamic_cu_mask_addr_lo = lower_32_bits(ring->mqd_gpu_addr - + offsetof(struct vi_mqd_allocation, dyamic_cu_mask)); + + offsetof(struct vi_mqd_allocation, dynamic_cu_mask)); mqd->dynamic_cu_mask_addr_hi = upper_32_bits(ring->mqd_gpu_addr - + offsetof(struct vi_mqd_allocation, dyamic_cu_mask)); + + offsetof(struct vi_mqd_allocation, dynamic_cu_mask)); } eop_base_addr = ring->eop_gpu_addr >> 8; mqd->cp_hqd_eop_base_addr_lo = eop_base_addr; @@ -4768,8 +4768,8 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring) mutex_unlock(&adev->srbm_mutex); } else { memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation)); - ((struct vi_mqd_allocation *)mqd)->dyamic_cu_mask = 0xFFFFFFFF; - ((struct vi_mqd_allocation *)mqd)->dyamic_rb_mask = 0xFFFFFFFF; + ((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; + ((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; mutex_lock(&adev->srbm_mutex); vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 
gfx_v8_0_mqd_init(ring); @@ -4792,8 +4792,8 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring) if (!adev->gfx.in_reset && !adev->gfx.in_suspend) { memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation)); - ((struct vi_mqd_allocation *)mqd)->dyamic_cu_mask = 0xFFFFFFFF; - ((struct vi_mqd_allocation *)mqd)->dyamic_rb_mask = 0xFFFFFFFF; + ((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; + ((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; mutex_lock(&adev->srbm_mutex); vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0); gfx_v8_0_mqd_init(ring); diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index 4f2788b61a08..6c8040e616c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -124,7 +124,7 @@ static void gfxhub_v1_0_init_tlb_regs(struct amdgpu_device *adev) static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev) { - uint32_t tmp, field; + uint32_t tmp; /* Setup L2 cache */ tmp = RREG32_SOC15(GC, 0, mmVM_L2_CNTL); @@ -143,9 +143,8 @@ static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); WREG32_SOC15(GC, 0, mmVM_L2_CNTL2, tmp); - field = adev->vm_manager.fragment_size; tmp = mmVM_L2_CNTL3_DEFAULT; - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 6); WREG32_SOC15(GC, 0, mmVM_L2_CNTL3, tmp); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 12b0c4cd7a5a..5be9c83dfcf7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -332,7 +332,24 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev) adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; adev->mc.visible_vram_size = adev->mc.aper_size; - amdgpu_gart_set_defaults(adev); + /* set the gart size */ + if (amdgpu_gart_size == -1) { + switch (adev->asic_type) { + case CHIP_HAINAN: /* no MM engines */ + default: + adev->mc.gart_size = 256ULL << 20; + break; + case CHIP_VERDE: /* UVD, VCE do not support GPUVM */ + case CHIP_TAHITI: /* UVD, VCE do not support GPUVM */ + case CHIP_PITCAIRN: /* UVD, VCE do not support GPUVM */ + case CHIP_OLAND: /* UVD, VCE do not support GPUVM */ + adev->mc.gart_size = 1024ULL << 20; + break; + } + } else { + adev->mc.gart_size = (u64)amdgpu_gart_size << 20; + } + gmc_v6_0_vram_gtt_location(adev, &adev->mc); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index e42c1ad3af5e..eace9e7182c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -386,7 +386,27 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev) if (adev->mc.visible_vram_size > adev->mc.real_vram_size) adev->mc.visible_vram_size = adev->mc.real_vram_size; - amdgpu_gart_set_defaults(adev); + /* set the gart size */ + if (amdgpu_gart_size == -1) { + switch (adev->asic_type) { + case CHIP_TOPAZ: /* no MM engines */ + default: + adev->mc.gart_size = 256ULL << 20; + break; +#ifdef CONFIG_DRM_AMDGPU_CIK + case CHIP_BONAIRE: /* UVD, VCE do not support GPUVM */ + case CHIP_HAWAII: /* UVD, VCE do not support GPUVM */ + case CHIP_KAVERI: /* UVD, VCE do not support GPUVM */ + case CHIP_KABINI: /* UVD, VCE do not support GPUVM */ + case CHIP_MULLINS: /* UVD, VCE do not support GPUVM */ + 
adev->mc.gart_size = 1024ULL << 20; + break; +#endif + } + } else { + adev->mc.gart_size = (u64)amdgpu_gart_size << 20; + } + gmc_v7_0_vram_gtt_location(adev, &adev->mc); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 7ca2dae8237a..3b3326daf32b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -562,7 +562,26 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev) if (adev->mc.visible_vram_size > adev->mc.real_vram_size) adev->mc.visible_vram_size = adev->mc.real_vram_size; - amdgpu_gart_set_defaults(adev); + /* set the gart size */ + if (amdgpu_gart_size == -1) { + switch (adev->asic_type) { + case CHIP_POLARIS11: /* all engines support GPUVM */ + case CHIP_POLARIS10: /* all engines support GPUVM */ + case CHIP_POLARIS12: /* all engines support GPUVM */ + default: + adev->mc.gart_size = 256ULL << 20; + break; + case CHIP_TONGA: /* UVD, VCE do not support GPUVM */ + case CHIP_FIJI: /* UVD, VCE do not support GPUVM */ + case CHIP_CARRIZO: /* UVD, VCE do not support GPUVM, DCE SG support */ + case CHIP_STONEY: /* UVD does not support GPUVM, DCE SG support */ + adev->mc.gart_size = 1024ULL << 20; + break; + } + } else { + adev->mc.gart_size = (u64)amdgpu_gart_size << 20; + } + gmc_v8_0_vram_gtt_location(adev, &adev->mc); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 2769c2b3b56e..d04d0b123212 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -499,7 +499,21 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) if (adev->mc.visible_vram_size > adev->mc.real_vram_size) adev->mc.visible_vram_size = adev->mc.real_vram_size; - amdgpu_gart_set_defaults(adev); + /* set the gart size */ + if (amdgpu_gart_size == -1) { + switch (adev->asic_type) { + case CHIP_VEGA10: /* all engines support GPUVM */ + default: + adev->mc.gart_size = 256ULL << 20; + break; + case CHIP_RAVEN: /* DCE SG support */ + adev->mc.gart_size = 1024ULL << 20; + break; + } + } else { + adev->mc.gart_size = (u64)amdgpu_gart_size << 20; + } + gmc_v9_0_vram_gtt_location(adev, &adev->mc); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 4395a4f12149..74cb647da30e 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -138,7 +138,7 @@ static void mmhub_v1_0_init_tlb_regs(struct amdgpu_device *adev) static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev) { - uint32_t tmp, field; + uint32_t tmp; /* Setup L2 cache */ tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL); @@ -157,9 +157,8 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp); - field = adev->vm_manager.fragment_size; tmp = mmVM_L2_CNTL3_DEFAULT; - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 6); WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index e4a8c2e52cb2..660b3fbade41 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -892,6 +892,8 @@ static int kfd_ioctl_get_tile_config(struct file *filep, int err = 0; dev = kfd_device_by_id(args->gpu_id); + 
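/*
 * Illustrative sketch, not the kernel code. The gmc_v6/v7/v8/v9 hunks above
 * all follow the same pattern: a module parameter given in MiB (-1 meaning
 * "auto") is either converted to bytes with "<< 20" or replaced by a per-ASIC
 * default (256 MiB when all engines can use GPUVM, 1 GiB otherwise). The
 * names below (gart_size_mib, asic_needs_big_gart) are invented for the model.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t pick_gart_size(long gart_size_mib, int asic_needs_big_gart)
{
	if (gart_size_mib == -1)	/* auto: per-ASIC default */
		return (asic_needs_big_gart ? 1024ULL : 256ULL) << 20;

	return (uint64_t)gart_size_mib << 20;	/* MiB -> bytes */
}

int main(void)
{
	printf("auto, small: %llu bytes\n",
	       (unsigned long long)pick_gart_size(-1, 0));	/* 268435456  */
	printf("auto, big:   %llu bytes\n",
	       (unsigned long long)pick_gart_size(-1, 1));	/* 1073741824 */
	printf("forced 512M: %llu bytes\n",
	       (unsigned long long)pick_gart_size(512, 0));	/* 536870912  */
	return 0;
}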
if (!dev) + return -EINVAL; dev->kfd2kgd->get_tile_config(dev->kgd, &config); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index 5979158c3f7b..944abfad39c1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -292,7 +292,10 @@ static int create_signal_event(struct file *devkfd, struct kfd_event *ev) { if (p->signal_event_count == KFD_SIGNAL_EVENT_LIMIT) { - pr_warn("Signal event wasn't created because limit was reached\n"); + if (!p->signal_event_limit_reached) { + pr_warn("Signal event wasn't created because limit was reached\n"); + p->signal_event_limit_reached = true; + } return -ENOMEM; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c index 681b639f5133..ed71ad40e8f7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c @@ -183,8 +183,8 @@ static void uninitialize(struct kernel_queue *kq) { if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ) kq->mqd->destroy_mqd(kq->mqd, - NULL, - false, + kq->queue->mqd, + KFD_PREEMPT_TYPE_WAVEFRONT_RESET, QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS, kq->queue->pipe, kq->queue->queue); @@ -210,6 +210,11 @@ static int acquire_packet_buffer(struct kernel_queue *kq, uint32_t wptr, rptr; unsigned int *queue_address; + /* When rptr == wptr, the buffer is empty. + * When rptr == wptr + 1, the buffer is full. + * It is always rptr that advances to the position of wptr, rather than + * the opposite. So we can only use up to queue_size_dwords - 1 dwords. + */ rptr = *kq->rptr_kernel; wptr = *kq->wptr_kernel; queue_address = (unsigned int *)kq->pq_kernel_addr; @@ -219,11 +224,10 @@ static int acquire_packet_buffer(struct kernel_queue *kq, pr_debug("wptr: %d\n", wptr); pr_debug("queue_address 0x%p\n", queue_address); - available_size = (rptr - 1 - wptr + queue_size_dwords) % + available_size = (rptr + queue_size_dwords - 1 - wptr) % queue_size_dwords; - if (packet_size_in_dwords >= queue_size_dwords || - packet_size_in_dwords >= available_size) { + if (packet_size_in_dwords > available_size) { /* * make sure calling functions know * acquire_packet_buffer() failed @@ -233,6 +237,14 @@ static int acquire_packet_buffer(struct kernel_queue *kq, } if (wptr + packet_size_in_dwords >= queue_size_dwords) { + /* make sure after rolling back to position 0, there is + * still enough space. 
+ */ + if (packet_size_in_dwords >= rptr) { + *buffer_ptr = NULL; + return -ENOMEM; + } + /* fill nops, roll back and start at position 0 */ while (wptr > 0) { queue_address[wptr] = kq->nop_packet; wptr = (wptr + 1) % queue_size_dwords; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index b397ec726400..b87e96cee5fa 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -521,6 +521,7 @@ struct kfd_process { struct list_head signal_event_pages; u32 next_nonsignal_event_id; size_t signal_event_count; + bool signal_event_limit_reached; }; /** diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 1cae95e2b13a..03bec765b03d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -143,7 +143,6 @@ int pqm_create_queue(struct process_queue_manager *pqm, int num_queues = 0; struct queue *cur; - memset(&q_properties, 0, sizeof(struct queue_properties)); memcpy(&q_properties, properties, sizeof(struct queue_properties)); q = NULL; kq = NULL; diff --git a/drivers/gpu/drm/amd/include/vi_structs.h b/drivers/gpu/drm/amd/include/vi_structs.h index ca93b5160ba6..3e606a761d0e 100644 --- a/drivers/gpu/drm/amd/include/vi_structs.h +++ b/drivers/gpu/drm/amd/include/vi_structs.h @@ -419,8 +419,8 @@ struct vi_mqd_allocation { struct vi_mqd mqd; uint32_t wptr_poll_mem; uint32_t rptr_report_mem; - uint32_t dyamic_cu_mask; - uint32_t dyamic_rb_mask; + uint32_t dynamic_cu_mask; + uint32_t dynamic_rb_mask; }; struct cz_mqd { diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index 9d71a259d97d..f8f02e70b8bc 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -1558,7 +1558,8 @@ static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr) */ static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr, - uint32_t gfx_clock, PllSetting_t *current_gfxclk_level) + uint32_t gfx_clock, PllSetting_t *current_gfxclk_level, + uint32_t *acg_freq) { struct phm_ppt_v2_information *table_info = (struct phm_ppt_v2_information *)(hwmgr->pptable); @@ -1609,6 +1610,8 @@ static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr, cpu_to_le16(dividers.usPll_ss_slew_frac); current_gfxclk_level->Did = (uint8_t)(dividers.ulDid); + *acg_freq = gfx_clock / 100; /* 100 Khz to Mhz conversion */ + return 0; } @@ -1689,7 +1692,8 @@ static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) for (i = 0; i < dpm_table->count; i++) { result = vega10_populate_single_gfx_level(hwmgr, dpm_table->dpm_levels[i].value, - &(pp_table->GfxclkLevel[i])); + &(pp_table->GfxclkLevel[i]), + &(pp_table->AcgFreqTable[i])); if (result) return result; } @@ -1698,7 +1702,8 @@ static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) while (i < NUM_GFXCLK_DPM_LEVELS) { result = vega10_populate_single_gfx_level(hwmgr, dpm_table->dpm_levels[j].value, - &(pp_table->GfxclkLevel[i])); + &(pp_table->GfxclkLevel[i]), + &(pp_table->AcgFreqTable[i])); if (result) return result; i++; diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h index f6d6c61f796a..2818c98ff5ca 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h @@ -315,10 
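/*
 * Illustrative sketch, not the kernel code. The acquire_packet_buffer() hunk
 * above treats the packet queue as a ring of dwords where rptr == wptr means
 * empty, so at most queue_size - 1 dwords are usable; a packet that would
 * cross the end is preceded by NOP fill and restarted at offset 0 only if it
 * still fits below rptr. The standalone model below reproduces just that
 * arithmetic; the queue size is an invented example value.
 */
#include <stdio.h>

#define QUEUE_SIZE 64	/* dwords, example value */

/* Returns where wptr would land on success, -1 if the packet does not fit. */
static int reserve(unsigned int rptr, unsigned int wptr, unsigned int pkt)
{
	unsigned int avail = (rptr + QUEUE_SIZE - 1 - wptr) % QUEUE_SIZE;

	if (pkt > avail)
		return -1;		/* not enough free dwords */

	if (wptr + pkt >= QUEUE_SIZE) {
		if (pkt >= rptr)
			return -1;	/* no room below rptr after wrapping */
		wptr = 0;		/* NOP-fill to the end, restart at 0 */
	}
	return (int)(wptr + pkt);
}

int main(void)
{
	printf("%d\n", reserve(0, 0, 16));	/* 16: plenty of room           */
	printf("%d\n", reserve(10, 60, 8));	/* 8: wraps and fits below rptr */
	printf("%d\n", reserve(4, 60, 8));	/* -1: only 7 dwords are free   */
	printf("%d\n", reserve(0, 0, 64));	/* -1: at most 63 dwords usable */
	return 0;
}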
+315,12 @@ typedef struct { uint8_t AcgEnable[NUM_GFXCLK_DPM_LEVELS]; GbVdroopTable_t AcgBtcGbVdroopTable; QuadraticInt_t AcgAvfsGb; - uint32_t Reserved[4]; + + /* ACG Frequency Table, in Mhz */ + uint32_t AcgFreqTable[NUM_GFXCLK_DPM_LEVELS]; /* Padding - ignore */ - uint32_t MmHubPadding[7]; /* SMU internal use */ + uint32_t MmHubPadding[3]; /* SMU internal use */ } PPTable_t; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c index 76347ff6d655..c49a6f22002f 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c @@ -380,7 +380,8 @@ static int smu7_populate_single_firmware_entry(struct pp_smumgr *smumgr, entry->num_register_entries = 0; } - if (fw_type == UCODE_ID_RLC_G) + if ((fw_type == UCODE_ID_RLC_G) + || (fw_type == UCODE_ID_CP_MEC)) entry->flags = 1; else entry->flags = 0; diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 38cea6fb25a8..97c94f9683fa 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -205,17 +205,32 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, struct amd_sched_entity *entity) { struct amd_sched_rq *rq = entity->rq; + int r; if (!amd_sched_entity_is_initialized(sched, entity)) return; - /** * The client will not queue more IBs during this fini, consume existing - * queued IBs + * queued IBs or discard them on SIGKILL */ - wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity)); - + if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL) + r = -ERESTARTSYS; + else + r = wait_event_killable(sched->job_scheduled, + amd_sched_entity_is_idle(entity)); amd_sched_rq_remove_entity(rq, entity); + if (r) { + struct amd_sched_job *job; + + /* Park the kernel for a moment to make sure it isn't processing + * our enity. 
+ */ + kthread_park(sched->thread); + kthread_unpark(sched->thread); + while (kfifo_out(&entity->job_queue, &job, sizeof(job))) + sched->ops->free_job(job); + + } kfifo_free(&entity->job_queue); } diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c index db6aeec50b82..2e5e089dd912 100644 --- a/drivers/gpu/drm/drm_blend.c +++ b/drivers/gpu/drm/drm_blend.c @@ -319,7 +319,7 @@ static int drm_atomic_helper_crtc_normalize_zpos(struct drm_crtc *crtc, DRM_DEBUG_ATOMIC("[CRTC:%d:%s] calculating normalized zpos values\n", crtc->base.id, crtc->name); - states = kmalloc_array(total_planes, sizeof(*states), GFP_TEMPORARY); + states = kmalloc_array(total_planes, sizeof(*states), GFP_KERNEL); if (!states) return -ENOMEM; diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c index 80e62f669321..0ef9011a1856 100644 --- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c +++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c @@ -111,7 +111,7 @@ ssize_t drm_dp_dual_mode_write(struct i2c_adapter *adapter, void *data; int ret; - data = kmalloc(msg.len, GFP_TEMPORARY); + data = kmalloc(msg.len, GFP_KERNEL); if (!data) return -ENOMEM; diff --git a/drivers/gpu/drm/drm_scdc_helper.c b/drivers/gpu/drm/drm_scdc_helper.c index 7d1b0f011d33..935653eb3616 100644 --- a/drivers/gpu/drm/drm_scdc_helper.c +++ b/drivers/gpu/drm/drm_scdc_helper.c @@ -102,7 +102,7 @@ ssize_t drm_scdc_write(struct i2c_adapter *adapter, u8 offset, void *data; int err; - data = kmalloc(1 + size, GFP_TEMPORARY); + data = kmalloc(1 + size, GFP_KERNEL); if (!data) return -ENOMEM; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 5a634594a6ce..57881167ccd2 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -551,12 +551,15 @@ static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = { void etnaviv_gem_free_object(struct drm_gem_object *obj) { struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); + struct etnaviv_drm_private *priv = obj->dev->dev_private; struct etnaviv_vram_mapping *mapping, *tmp; /* object should not be active */ WARN_ON(is_active(etnaviv_obj)); + mutex_lock(&priv->gem_lock); list_del(&etnaviv_obj->gem_node); + mutex_unlock(&priv->gem_lock); list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list, obj_node) { diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index a7ff2e4c00d2..46dfe0737f43 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -37,7 +37,7 @@ static struct etnaviv_gem_submit *submit_create(struct drm_device *dev, struct etnaviv_gem_submit *submit; size_t sz = size_vstruct(nr, sizeof(submit->bos[0]), sizeof(*submit)); - submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); + submit = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); if (submit) { submit->dev = dev; submit->gpu = gpu; @@ -445,8 +445,10 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, cmdbuf->user_size = ALIGN(args->stream_size, 8); ret = etnaviv_gpu_submit(gpu, submit, cmdbuf); - if (ret == 0) - cmdbuf = NULL; + if (ret) + goto out; + + cmdbuf = NULL; if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) { /* diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index 730b8d9db187..6be5b53c3b27 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c 
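/*
 * Illustrative sketch, not the kernel code. The amd_sched_entity_fini() hunk
 * above stops waiting for queued jobs when the process is being killed and
 * instead drops whatever is still in the entity's job FIFO, after parking and
 * unparking the scheduler thread so it cannot be touching the entity. The
 * model below only shows the "wait failed -> drain and free the leftovers"
 * shape, with an invented fifo type standing in for the kfifo.
 */
#include <stdio.h>
#include <stdlib.h>

struct fifo {
	void *items[8];
	int head, tail;
};

static int fifo_out(struct fifo *f, void **item)
{
	if (f->head == f->tail)
		return 0;		/* empty */
	*item = f->items[f->head++ & 7];
	return 1;
}

static void free_job(void *job)
{
	printf("dropping unsubmitted job %p\n", job);
	free(job);
}

int main(void)
{
	struct fifo jobs = { .head = 0, .tail = 0 };
	for (int i = 0; i < 3; i++)
		jobs.items[jobs.tail++ & 7] = malloc(16);

	int wait_failed = 1;	/* pretend wait_event_killable() was interrupted */

	if (wait_failed) {
		void *job;
		/* (the kernel parks/unparks the scheduler thread here) */
		while (fifo_out(&jobs, &job))
			free_job(job);
	}
	return 0;
}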
@@ -14,6 +14,7 @@ #include <linux/clk.h> #include <linux/component.h> #include <linux/iopoll.h> +#include <linux/irq.h> #include <linux/mfd/syscon.h> #include <linux/of_device.h> #include <linux/of_gpio.h> diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index b1f7299600f0..e651a58c18cf 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -168,23 +168,19 @@ static struct drm_driver exynos_drm_driver = { static int exynos_drm_suspend(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); - struct drm_connector *connector; - struct drm_connector_list_iter conn_iter; + struct exynos_drm_private *private = drm_dev->dev_private; if (pm_runtime_suspended(dev) || !drm_dev) return 0; - drm_connector_list_iter_begin(drm_dev, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - int old_dpms = connector->dpms; - - if (connector->funcs->dpms) - connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF); - - /* Set the old mode back to the connector for resume */ - connector->dpms = old_dpms; + drm_kms_helper_poll_disable(drm_dev); + exynos_drm_fbdev_suspend(drm_dev); + private->suspend_state = drm_atomic_helper_suspend(drm_dev); + if (IS_ERR(private->suspend_state)) { + exynos_drm_fbdev_resume(drm_dev); + drm_kms_helper_poll_enable(drm_dev); + return PTR_ERR(private->suspend_state); } - drm_connector_list_iter_end(&conn_iter); return 0; } @@ -192,22 +188,14 @@ static int exynos_drm_suspend(struct device *dev) static int exynos_drm_resume(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); - struct drm_connector *connector; - struct drm_connector_list_iter conn_iter; + struct exynos_drm_private *private = drm_dev->dev_private; if (pm_runtime_suspended(dev) || !drm_dev) return 0; - drm_connector_list_iter_begin(drm_dev, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - if (connector->funcs->dpms) { - int dpms = connector->dpms; - - connector->dpms = DRM_MODE_DPMS_OFF; - connector->funcs->dpms(connector, dpms); - } - } - drm_connector_list_iter_end(&conn_iter); + drm_atomic_helper_resume(drm_dev, private->suspend_state); + exynos_drm_fbdev_resume(drm_dev); + drm_kms_helper_poll_enable(drm_dev); return 0; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index cf131c2aa23e..f8bae4cb4823 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -202,6 +202,7 @@ struct drm_exynos_file_private { */ struct exynos_drm_private { struct drm_fb_helper *fb_helper; + struct drm_atomic_state *suspend_state; struct device *dma_dev; void *mapping; diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index c3a068409b48..dfb66ecf417b 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c @@ -18,6 +18,8 @@ #include <drm/drm_crtc_helper.h> #include <drm/exynos_drm.h> +#include <linux/console.h> + #include "exynos_drm_drv.h" #include "exynos_drm_fb.h" #include "exynos_drm_fbdev.h" @@ -285,3 +287,21 @@ void exynos_drm_output_poll_changed(struct drm_device *dev) drm_fb_helper_hotplug_event(fb_helper); } + +void exynos_drm_fbdev_suspend(struct drm_device *dev) +{ + struct exynos_drm_private *private = dev->dev_private; + + console_lock(); + drm_fb_helper_set_suspend(private->fb_helper, 1); + console_unlock(); +} + +void exynos_drm_fbdev_resume(struct drm_device *dev) +{ + struct 
exynos_drm_private *private = dev->dev_private; + + console_lock(); + drm_fb_helper_set_suspend(private->fb_helper, 0); + console_unlock(); +} diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h index 330eef87f718..645d1bb7f665 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h @@ -21,6 +21,8 @@ int exynos_drm_fbdev_init(struct drm_device *dev); void exynos_drm_fbdev_fini(struct drm_device *dev); void exynos_drm_fbdev_restore_mode(struct drm_device *dev); void exynos_drm_output_poll_changed(struct drm_device *dev); +void exynos_drm_fbdev_suspend(struct drm_device *drm); +void exynos_drm_fbdev_resume(struct drm_device *drm); #else @@ -39,6 +41,14 @@ static inline void exynos_drm_fbdev_restore_mode(struct drm_device *dev) #define exynos_drm_output_poll_changed (NULL) +static inline void exynos_drm_fbdev_suspend(struct drm_device *drm) +{ +} + +static inline void exynos_drm_fbdev_resume(struct drm_device *drm) +{ +} + #endif #endif diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 214fa5e51963..0109ff40b1db 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -944,22 +944,27 @@ static bool hdmi_mode_fixup(struct drm_encoder *encoder, struct drm_device *dev = encoder->dev; struct drm_connector *connector; struct drm_display_mode *m; + struct drm_connector_list_iter conn_iter; int mode_ok; drm_mode_set_crtcinfo(adjusted_mode, 0); - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { if (connector->encoder == encoder) break; } + if (connector) + drm_connector_get(connector); + drm_connector_list_iter_end(&conn_iter); - if (connector->encoder != encoder) + if (!connector) return true; mode_ok = hdmi_mode_valid(connector, adjusted_mode); if (mode_ok == MODE_OK) - return true; + goto cleanup; /* * Find the most suitable mode and copy it to adjusted_mode. @@ -979,6 +984,9 @@ static bool hdmi_mode_fixup(struct drm_encoder *encoder, } } +cleanup: + drm_connector_put(connector); + return true; } diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c index 40af17ec6312..ff3154fe6588 100644 --- a/drivers/gpu/drm/i915/gvt/cfg_space.c +++ b/drivers/gpu/drm/i915/gvt/cfg_space.c @@ -197,78 +197,65 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu, static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { - unsigned int bar_index = - (rounddown(offset, 8) % PCI_BASE_ADDRESS_0) / 8; u32 new = *(u32 *)(p_data); bool lo = IS_ALIGNED(offset, 8); u64 size; int ret = 0; bool mmio_enabled = vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY; + struct intel_vgpu_pci_bar *bars = vgpu->cfg_space.bar; - if (WARN_ON(bar_index >= INTEL_GVT_PCI_BAR_MAX)) - return -EINVAL; - + /* + * Power-up software can determine how much address + * space the device requires by writing a value of + * all 1's to the register and then reading the value + * back. The device will return 0's in all don't-care + * address bits. + */ if (new == 0xffffffff) { - /* - * Power-up software can determine how much address - * space the device requires by writing a value of - * all 1's to the register and then reading the value - * back. The device will return 0's in all don't-care - * address bits. 
- */ - size = vgpu->cfg_space.bar[bar_index].size; - if (lo) { - new = rounddown(new, size); - } else { - u32 val = vgpu_cfg_space(vgpu)[rounddown(offset, 8)]; - /* for 32bit mode bar it returns all-0 in upper 32 - * bit, for 64bit mode bar it will calculate the - * size with lower 32bit and return the corresponding - * value + switch (offset) { + case PCI_BASE_ADDRESS_0: + case PCI_BASE_ADDRESS_1: + size = ~(bars[INTEL_GVT_PCI_BAR_GTTMMIO].size -1); + intel_vgpu_write_pci_bar(vgpu, offset, + size >> (lo ? 0 : 32), lo); + /* + * Untrap the BAR, since guest hasn't configured a + * valid GPA */ - if (val & PCI_BASE_ADDRESS_MEM_TYPE_64) - new &= (~(size-1)) >> 32; - else - new = 0; - } - /* - * Unmapp & untrap the BAR, since guest hasn't configured a - * valid GPA - */ - switch (bar_index) { - case INTEL_GVT_PCI_BAR_GTTMMIO: ret = trap_gttmmio(vgpu, false); break; - case INTEL_GVT_PCI_BAR_APERTURE: + case PCI_BASE_ADDRESS_2: + case PCI_BASE_ADDRESS_3: + size = ~(bars[INTEL_GVT_PCI_BAR_APERTURE].size -1); + intel_vgpu_write_pci_bar(vgpu, offset, + size >> (lo ? 0 : 32), lo); ret = map_aperture(vgpu, false); break; + default: + /* Unimplemented BARs */ + intel_vgpu_write_pci_bar(vgpu, offset, 0x0, false); } - intel_vgpu_write_pci_bar(vgpu, offset, new, lo); } else { - /* - * Unmapp & untrap the old BAR first, since guest has - * re-configured the BAR - */ - switch (bar_index) { - case INTEL_GVT_PCI_BAR_GTTMMIO: - ret = trap_gttmmio(vgpu, false); + switch (offset) { + case PCI_BASE_ADDRESS_0: + case PCI_BASE_ADDRESS_1: + /* + * Untrap the old BAR first, since guest has + * re-configured the BAR + */ + trap_gttmmio(vgpu, false); + intel_vgpu_write_pci_bar(vgpu, offset, new, lo); + ret = trap_gttmmio(vgpu, mmio_enabled); break; - case INTEL_GVT_PCI_BAR_APERTURE: - ret = map_aperture(vgpu, false); + case PCI_BASE_ADDRESS_2: + case PCI_BASE_ADDRESS_3: + map_aperture(vgpu, false); + intel_vgpu_write_pci_bar(vgpu, offset, new, lo); + ret = map_aperture(vgpu, mmio_enabled); break; - } - intel_vgpu_write_pci_bar(vgpu, offset, new, lo); - /* Track the new BAR */ - if (mmio_enabled) { - switch (bar_index) { - case INTEL_GVT_PCI_BAR_GTTMMIO: - ret = trap_gttmmio(vgpu, true); - break; - case INTEL_GVT_PCI_BAR_APERTURE: - ret = map_aperture(vgpu, true); - break; - } + default: + intel_vgpu_write_pci_bar(vgpu, offset, new, lo); } } return ret; @@ -299,10 +286,7 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset, } switch (rounddown(offset, 4)) { - case PCI_BASE_ADDRESS_0: - case PCI_BASE_ADDRESS_1: - case PCI_BASE_ADDRESS_2: - case PCI_BASE_ADDRESS_3: + case PCI_BASE_ADDRESS_0 ... 
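/*
 * Illustrative sketch, not the kernel code. The emulate_pci_bar_write() hunk
 * above implements the standard PCI BAR sizing probe: the guest writes
 * 0xffffffff, and the device answers with the size mask ~(size - 1), split
 * into low and high dwords for a 64-bit BAR. The model below shows only that
 * mask arithmetic for an invented 16 MiB BAR, ignoring the low flag bits a
 * real BAR would also carry.
 */
#include <stdint.h>
#include <stdio.h>

/* What a read-back of the BAR dword returns after writing all 1's. */
static uint32_t bar_sizing_readback(uint64_t bar_size, int high_dword)
{
	uint64_t mask = ~(bar_size - 1);	/* don't-care address bits read as 0 */

	return (uint32_t)(mask >> (high_dword ? 32 : 0));
}

int main(void)
{
	uint64_t size = 16ULL << 20;		/* example: 16 MiB MMIO BAR */

	uint32_t lo = bar_sizing_readback(size, 0);	/* 0xff000000 */
	uint32_t hi = bar_sizing_readback(size, 1);	/* 0xffffffff */
	printf("lo=0x%08x hi=0x%08x\n", lo, hi);

	/* Size as the guest recovers it: invert the mask and add one. */
	printf("guest-computed size: %u MiB\n", (~lo + 1) >> 20);
	return 0;
}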
PCI_BASE_ADDRESS_5: if (WARN_ON(!IS_ALIGNED(offset, 4))) return -EINVAL; return emulate_pci_bar_write(vgpu, offset, p_data, bytes); @@ -344,7 +328,6 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, struct intel_gvt *gvt = vgpu->gvt; const struct intel_gvt_device_info *info = &gvt->device_info; u16 *gmch_ctl; - int i; memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space, info->cfg_space_size); @@ -371,13 +354,13 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, */ memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4); memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4); + memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_4, 0, 8); memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4); - for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) { - vgpu->cfg_space.bar[i].size = pci_resource_len( - gvt->dev_priv->drm.pdev, i * 2); - vgpu->cfg_space.bar[i].tracked = false; - } + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size = + pci_resource_len(gvt->dev_priv->drm.pdev, 0); + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size = + pci_resource_len(gvt->dev_priv->drm.pdev, 2); } /** diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 57317715977f..19404c96eeb1 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2540,7 +2540,7 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj, if (n_pages > ARRAY_SIZE(stack_pages)) { /* Too big for stack -- allocate temporary array instead */ - pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_TEMPORARY); + pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL); if (!pages) return NULL; } diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 50d5e24f91a9..92437f455b43 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -293,7 +293,7 @@ static int eb_create(struct i915_execbuffer *eb) * as possible to perform the allocation and warn * if it fails. 
*/ - flags = GFP_TEMPORARY; + flags = GFP_KERNEL; if (size > 1) flags |= __GFP_NORETRY | __GFP_NOWARN; @@ -1515,7 +1515,7 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb) urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr); size = nreloc * sizeof(*relocs); - relocs = kvmalloc_array(size, 1, GFP_TEMPORARY); + relocs = kvmalloc_array(size, 1, GFP_KERNEL); if (!relocs) { kvfree(relocs); err = -ENOMEM; @@ -2077,7 +2077,7 @@ get_fence_array(struct drm_i915_gem_execbuffer2 *args, return ERR_PTR(-EFAULT); fences = kvmalloc_array(args->num_cliprects, sizeof(*fences), - __GFP_NOWARN | GFP_TEMPORARY); + __GFP_NOWARN | GFP_KERNEL); if (!fences) return ERR_PTR(-ENOMEM); @@ -2463,9 +2463,9 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, /* Copy in the exec list from userland */ exec_list = kvmalloc_array(args->buffer_count, sizeof(*exec_list), - __GFP_NOWARN | GFP_TEMPORARY); + __GFP_NOWARN | GFP_KERNEL); exec2_list = kvmalloc_array(args->buffer_count + 1, sz, - __GFP_NOWARN | GFP_TEMPORARY); + __GFP_NOWARN | GFP_KERNEL); if (exec_list == NULL || exec2_list == NULL) { DRM_DEBUG("Failed to allocate exec list for %d buffers\n", args->buffer_count); @@ -2543,7 +2543,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, /* Allocate an extra slot for use by the command parser */ exec2_list = kvmalloc_array(args->buffer_count + 1, sz, - __GFP_NOWARN | GFP_TEMPORARY); + __GFP_NOWARN | GFP_KERNEL); if (exec2_list == NULL) { DRM_DEBUG("Failed to allocate exec list for %d buffers\n", args->buffer_count); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 0d5a988b3867..e2410eb5d96e 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -3231,7 +3231,7 @@ intel_rotate_pages(struct intel_rotation_info *rot_info, /* Allocate a temporary list of source pages for random access. */ page_addr_list = kvmalloc_array(n_pages, sizeof(dma_addr_t), - GFP_TEMPORARY); + GFP_KERNEL); if (!page_addr_list) return ERR_PTR(ret); diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index 23fd18bd1b56..709efe2357ea 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -507,7 +507,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) ret = -ENOMEM; pinned = 0; - pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_TEMPORARY); + pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); if (pvec != NULL) { struct mm_struct *mm = obj->userptr.mm->mm; unsigned int flags = 0; @@ -643,7 +643,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) if (mm == current->mm) { pvec = kvmalloc_array(num_pages, sizeof(struct page *), - GFP_TEMPORARY | + GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); if (pvec) /* defer to worker if malloc fails */ diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index ed5a1eb839ad..0c779671fe2d 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -787,16 +787,16 @@ int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf, */ ebuf->size = count + 1 > PAGE_SIZE ? 
count + 1 : PAGE_SIZE; ebuf->buf = kmalloc(ebuf->size, - GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN); + GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); if (ebuf->buf == NULL) { ebuf->size = PAGE_SIZE; - ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY); + ebuf->buf = kmalloc(ebuf->size, GFP_KERNEL); } if (ebuf->buf == NULL) { ebuf->size = 128; - ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY); + ebuf->buf = kmalloc(ebuf->size, GFP_KERNEL); } if (ebuf->buf == NULL) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index e21ce9c18b6e..b63893eeca73 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -839,7 +839,6 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, pipe); int position; int vbl_start, vbl_end, hsync_start, htotal, vtotal; - bool in_vbl = true; unsigned long irqflags; if (WARN_ON(!mode->crtc_clock)) { @@ -922,8 +921,6 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); - in_vbl = position >= vbl_start && position < vbl_end; - /* * While in vblank, position will be negative * counting up towards 0 at vbl_end. And outside diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index f17275519484..00cd17c76fdc 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -14030,7 +14030,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, if (mode_cmd->handles[i] != mode_cmd->handles[0]) { DRM_DEBUG_KMS("bad plane %d handle\n", i); - return -EINVAL; + goto err; } stride_alignment = intel_fb_stride_alignment(fb, i); diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index f0c11aec5ea5..7442891762be 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c @@ -892,8 +892,6 @@ static void intel_dsi_disable(struct intel_encoder *encoder, struct intel_crtc_state *old_crtc_state, struct drm_connector_state *old_conn_state) { - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); enum port port; @@ -903,15 +901,6 @@ static void intel_dsi_disable(struct intel_encoder *encoder, intel_panel_disable_backlight(old_conn_state); /* - * Disable Device ready before the port shutdown in order - * to avoid split screen - */ - if (IS_BROXTON(dev_priv)) { - for_each_dsi_port(port, intel_dsi->ports) - I915_WRITE(MIPI_DEVICE_READY(port), 0); - } - - /* * According to the spec we should send SHUTDOWN before * MIPI_SEQ_DISPLAY_OFF only for v3+ VBTs, but field testing * has shown that the v3 sequence works for v2 VBTs too diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index a17b1de7d7e0..3b1c5d783ee7 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -1699,6 +1699,8 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused) if (!panel->backlight.max) return -ENODEV; + panel->backlight.min = get_backlight_min_vbt(connector); + val = bxt_get_backlight(connector); val = intel_panel_compute_brightness(connector, val); panel->backlight.level = clamp(val, panel->backlight.min, @@ -1735,6 +1737,8 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused) if (!panel->backlight.max) return -ENODEV; + panel->backlight.min = get_backlight_min_vbt(connector); + val = 
bxt_get_backlight(connector); val = intel_panel_compute_brightness(connector, val); panel->backlight.level = clamp(val, panel->backlight.min, diff --git a/drivers/gpu/drm/i915/selftests/i915_random.c b/drivers/gpu/drm/i915/selftests/i915_random.c index d044bf9a6feb..222c511bea49 100644 --- a/drivers/gpu/drm/i915/selftests/i915_random.c +++ b/drivers/gpu/drm/i915/selftests/i915_random.c @@ -62,7 +62,7 @@ unsigned int *i915_random_order(unsigned int count, struct rnd_state *state) { unsigned int *order, i; - order = kmalloc_array(count, sizeof(*order), GFP_TEMPORARY); + order = kmalloc_array(count, sizeof(*order), GFP_KERNEL); if (!order) return order; diff --git a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c index 7276194c04f7..828904b7d468 100644 --- a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c @@ -117,12 +117,12 @@ static int igt_random_insert_remove(void *arg) mock_engine_reset(engine); - waiters = kvmalloc_array(count, sizeof(*waiters), GFP_TEMPORARY); + waiters = kvmalloc_array(count, sizeof(*waiters), GFP_KERNEL); if (!waiters) goto out_engines; bitmap = kcalloc(DIV_ROUND_UP(count, BITS_PER_LONG), sizeof(*bitmap), - GFP_TEMPORARY); + GFP_KERNEL); if (!bitmap) goto out_waiters; @@ -187,12 +187,12 @@ static int igt_insert_complete(void *arg) mock_engine_reset(engine); - waiters = kvmalloc_array(count, sizeof(*waiters), GFP_TEMPORARY); + waiters = kvmalloc_array(count, sizeof(*waiters), GFP_KERNEL); if (!waiters) goto out_engines; bitmap = kcalloc(DIV_ROUND_UP(count, BITS_PER_LONG), sizeof(*bitmap), - GFP_TEMPORARY); + GFP_KERNEL); if (!bitmap) goto out_waiters; @@ -368,7 +368,7 @@ static int igt_wakeup(void *arg) mock_engine_reset(engine); - waiters = kvmalloc_array(count, sizeof(*waiters), GFP_TEMPORARY); + waiters = kvmalloc_array(count, sizeof(*waiters), GFP_KERNEL); if (!waiters) goto out_engines; diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c index 2d0fef2cfca6..3cac22eb47ce 100644 --- a/drivers/gpu/drm/i915/selftests/intel_uncore.c +++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c @@ -127,7 +127,7 @@ static int intel_uncore_check_forcewake_domains(struct drm_i915_private *dev_pri return 0; valid = kzalloc(BITS_TO_LONGS(FW_RANGE) * sizeof(*valid), - GFP_TEMPORARY); + GFP_KERNEL); if (!valid) return -ENOMEM; diff --git a/drivers/gpu/drm/lib/drm_random.c b/drivers/gpu/drm/lib/drm_random.c index 7b12a68c3b54..a78c4b483e8d 100644 --- a/drivers/gpu/drm/lib/drm_random.c +++ b/drivers/gpu/drm/lib/drm_random.c @@ -28,7 +28,7 @@ unsigned int *drm_random_order(unsigned int count, struct rnd_state *state) { unsigned int *order, i; - order = kmalloc_array(count, sizeof(*order), GFP_TEMPORARY); + order = kmalloc_array(count, sizeof(*order), GFP_KERNEL); if (!order) return order; diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 8a75c0bd8a78..5d0a75d4b249 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -40,7 +40,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, if (sz > SIZE_MAX) return NULL; - submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); + submit = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); if (!submit) return NULL; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c index 4a57defc99b3..1399d923d446 
100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c @@ -171,7 +171,7 @@ nvkm_gpio_fini(struct nvkm_subdev *subdev, bool suspend) return 0; } -static struct dmi_system_id gpio_reset_ids[] = { +static const struct dmi_system_id gpio_reset_ids[] = { { .ident = "Apple Macbook 10,1", .matches = { diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 14c5613b4388..afbf50d0c08f 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -509,23 +509,25 @@ static void qxl_primary_atomic_update(struct drm_plane *plane, .y2 = qfb->base.height }; - if (!old_state->fb) { - qxl_io_log(qdev, - "create primary fb: %dx%d,%d,%d\n", - bo->surf.width, bo->surf.height, - bo->surf.stride, bo->surf.format); + if (old_state->fb) { + qfb_old = to_qxl_framebuffer(old_state->fb); + bo_old = gem_to_qxl_bo(qfb_old->obj); + } else { + bo_old = NULL; + } - qxl_io_create_primary(qdev, 0, bo); - bo->is_primary = true; + if (bo == bo_old) return; - } else { - qfb_old = to_qxl_framebuffer(old_state->fb); - bo_old = gem_to_qxl_bo(qfb_old->obj); + if (bo_old && bo_old->is_primary) { + qxl_io_destroy_primary(qdev); bo_old->is_primary = false; } - bo->is_primary = true; + if (!bo->is_primary) { + qxl_io_create_primary(qdev, 0, bo); + bo->is_primary = true; + } qxl_draw_dirty_fb(qdev, qfb, bo, 0, 0, &norect, 1, 1); } @@ -534,13 +536,15 @@ static void qxl_primary_atomic_disable(struct drm_plane *plane, { struct qxl_device *qdev = plane->dev->dev_private; - if (old_state->fb) - { struct qxl_framebuffer *qfb = + if (old_state->fb) { + struct qxl_framebuffer *qfb = to_qxl_framebuffer(old_state->fb); struct qxl_bo *bo = gem_to_qxl_bo(qfb->obj); - qxl_io_destroy_primary(qdev); - bo->is_primary = false; + if (bo->is_primary) { + qxl_io_destroy_primary(qdev); + bo->is_primary = false; + } } } @@ -698,14 +702,15 @@ static void qxl_plane_cleanup_fb(struct drm_plane *plane, struct drm_gem_object *obj; struct qxl_bo *user_bo; - if (!plane->state->fb) { - /* we never executed prepare_fb, so there's nothing to + if (!old_state->fb) { + /* + * we never executed prepare_fb, so there's nothing to * unpin. 
*/ return; } - obj = to_qxl_framebuffer(plane->state->fb)->obj; + obj = to_qxl_framebuffer(old_state->fb)->obj; user_bo = gem_to_qxl_bo(obj); qxl_bo_unpin(user_bo); } diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 997131d58c7f..ffc10cadcf34 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1663,7 +1663,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, radeon_agp_suspend(rdev); pci_save_state(dev->pdev); - if (freeze && rdev->family >= CHIP_CEDAR) { + if (freeze && rdev->family >= CHIP_CEDAR && !(rdev->flags & RADEON_IS_IGP)) { rdev->asic->asic_reset(rdev, true); pci_restore_state(dev->pdev); } else if (suspend) { diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/selftests/test-drm_mm.c index dfdd858eda0a..86eb4c185a28 100644 --- a/drivers/gpu/drm/selftests/test-drm_mm.c +++ b/drivers/gpu/drm/selftests/test-drm_mm.c @@ -1627,7 +1627,7 @@ static int igt_topdown(void *ignored) goto err; bitmap = kzalloc(count / BITS_PER_LONG * sizeof(unsigned long), - GFP_TEMPORARY); + GFP_KERNEL); if (!bitmap) goto err_nodes; @@ -1741,7 +1741,7 @@ static int igt_bottomup(void *ignored) goto err; bitmap = kzalloc(count / BITS_PER_LONG * sizeof(unsigned long), - GFP_TEMPORARY); + GFP_KERNEL); if (!bitmap) goto err_nodes; diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig index 06f05302ee75..882d85db9053 100644 --- a/drivers/gpu/drm/sun4i/Kconfig +++ b/drivers/gpu/drm/sun4i/Kconfig @@ -26,7 +26,7 @@ config DRM_SUN4I_HDMI_CEC bool "Allwinner A10 HDMI CEC Support" depends on DRM_SUN4I_HDMI select CEC_CORE - depends on CEC_PIN + select CEC_PIN help Choose this option if you have an Allwinner SoC with an HDMI controller and want to use CEC. diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi.h b/drivers/gpu/drm/sun4i/sun4i_hdmi.h index 1457750988da..a1f8cba251a2 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi.h +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi.h @@ -15,7 +15,7 @@ #include <drm/drm_connector.h> #include <drm/drm_encoder.h> -#include <media/cec.h> +#include <media/cec-pin.h> #define SUN4I_HDMI_CTRL_REG 0x004 #define SUN4I_HDMI_CTRL_ENABLE BIT(31) diff --git a/drivers/gpu/drm/tegra/trace.h b/drivers/gpu/drm/tegra/trace.h index e9b7cdad5c4c..5a1ab4046e92 100644 --- a/drivers/gpu/drm/tegra/trace.h +++ b/drivers/gpu/drm/tegra/trace.h @@ -63,6 +63,6 @@ DEFINE_EVENT(register_access, sor_readl, /* This part must be outside protection */ #undef TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH . 
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/tegra #define TRACE_INCLUDE_FILE trace #include <trace/define_trace.h> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index cba11f13d994..180ce6296416 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -109,8 +109,8 @@ static ssize_t ttm_bo_global_show(struct kobject *kobj, struct ttm_bo_global *glob = container_of(kobj, struct ttm_bo_global, kobj); - return snprintf(buffer, PAGE_SIZE, "%lu\n", - (unsigned long) atomic_read(&glob->bo_count)); + return snprintf(buffer, PAGE_SIZE, "%d\n", + atomic_read(&glob->bo_count)); } static struct attribute *ttm_bo_global_attrs[] = { diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index d0459b392e5e..c934ad5b3903 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -469,6 +469,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, * TODO: Explicit member copy would probably be better here. */ + atomic_inc(&bo->glob->bo_count); INIT_LIST_HEAD(&fbo->ddestroy); INIT_LIST_HEAD(&fbo->lru); INIT_LIST_HEAD(&fbo->swap); diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c index 579bdf93be43..14a94d90c028 100644 --- a/drivers/hwmon/acpi_power_meter.c +++ b/drivers/hwmon/acpi_power_meter.c @@ -973,7 +973,7 @@ static int __init enable_cap_knobs(const struct dmi_system_id *d) return 0; } -static struct dmi_system_id __initdata pm_dmi_table[] = { +static const struct dmi_system_id pm_dmi_table[] __initconst = { { enable_cap_knobs, "IBM Active Energy Manager", { diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c index 76c34f4fde13..5c677ba44014 100644 --- a/drivers/hwmon/applesmc.c +++ b/drivers/hwmon/applesmc.c @@ -1247,7 +1247,7 @@ static int applesmc_dmi_match(const struct dmi_system_id *id) * Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1". * So we need to put "Apple MacBook Pro" before "Apple MacBook". */ -static __initdata struct dmi_system_id applesmc_whitelist[] = { +static const struct dmi_system_id applesmc_whitelist[] __initconst = { { applesmc_dmi_match, "Apple MacBook Air", { DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir") }, diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c index 3189246302a6..c7c9e95e58a8 100644 --- a/drivers/hwmon/dell-smm-hwmon.c +++ b/drivers/hwmon/dell-smm-hwmon.c @@ -890,7 +890,7 @@ static const struct i8k_config_data i8k_config_data[] = { }, }; -static struct dmi_system_id i8k_dmi_table[] __initdata = { +static const struct dmi_system_id i8k_dmi_table[] __initconst = { { .ident = "Dell Inspiron", .matches = { @@ -1013,7 +1013,7 @@ MODULE_DEVICE_TABLE(dmi, i8k_dmi_table); * of affected Dell machines for which we disallow I8K_SMM_GET_FAN_TYPE call. 
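
The DMI quirk tables in the hunks above all receive the same treatment applied throughout this series: the tables become const and move from __initdata to __initconst. As a minimal sketch of the idiom, with a hypothetical callback and table that do not come from any of the drivers touched here:

#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/types.h>

static bool example_quirk_enabled;

/* Called by dmi_check_system() for each matching entry. */
static int __init example_enable_quirk(const struct dmi_system_id *d)
{
	example_quirk_enabled = true;
	return 1;	/* non-zero stops the scan after this match */
}

/* Read-only after init: const + __initconst, as in the conversions above. */
static const struct dmi_system_id example_dmi_table[] __initconst = {
	{
		.callback = example_enable_quirk,
		.ident = "Example Vendor Example Board",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example Board"),
		},
	},
	{ }	/* terminator */
};

static int __init example_init(void)
{
	dmi_check_system(example_dmi_table);
	return 0;
}
/* example_init() would be wired up via the driver's existing init path. */
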
* See bug: https://bugzilla.kernel.org/show_bug.cgi?id=100121 */ -static struct dmi_system_id i8k_blacklist_fan_type_dmi_table[] __initdata = { +static const struct dmi_system_id i8k_blacklist_fan_type_dmi_table[] __initconst = { { .ident = "Dell Studio XPS 8000", .matches = { diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index edc0cfb6fc1a..c06dce2c1da7 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -336,6 +336,16 @@ config I2C_POWERMAC comment "I2C system bus drivers (mostly embedded / system-on-chip)" +config I2C_ALTERA + tristate "Altera Soft IP I2C" + depends on (ARCH_SOCFPGA || NIOS2) && OF + help + If you say yes to this option, support will be included for the + Altera Soft IP I2C interfaces on SoCFPGA and Nios2 architectures. + + This driver can also be built as a module. If so, the module + will be called i2c-altera. + config I2C_ASPEED tristate "Aspeed I2C Controller" depends on ARCH_ASPEED || COMPILE_TEST @@ -935,6 +945,16 @@ config I2C_STM32F4 This driver can also be built as module. If so, the module will be called i2c-stm32f4. +config I2C_STM32F7 + tristate "STMicroelectronics STM32F7 I2C support" + depends on ARCH_STM32 || COMPILE_TEST + help + Enable this option to add support for STM32 I2C controller embedded + in STM32F7 SoCs. + + This driver can also be built as module. If so, the module + will be called i2c-stm32f7. + config I2C_STU300 tristate "ST Microelectronics DDC I2C interface" depends on MACH_U300 diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index 562daf738048..47f3ac9a695a 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -30,6 +30,7 @@ obj-$(CONFIG_I2C_HYDRA) += i2c-hydra.o obj-$(CONFIG_I2C_POWERMAC) += i2c-powermac.o # Embedded system I2C/SMBus host controller drivers +obj-$(CONFIG_I2C_ALTERA) += i2c-altera.o obj-$(CONFIG_I2C_ASPEED) += i2c-aspeed.o obj-$(CONFIG_I2C_AT91) += i2c-at91.o obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o @@ -93,6 +94,7 @@ obj-$(CONFIG_I2C_SIRF) += i2c-sirf.o obj-$(CONFIG_I2C_SPRD) += i2c-sprd.o obj-$(CONFIG_I2C_ST) += i2c-st.o obj-$(CONFIG_I2C_STM32F4) += i2c-stm32f4.o +obj-$(CONFIG_I2C_STM32F7) += i2c-stm32f7.o obj-$(CONFIG_I2C_STU300) += i2c-stu300.o obj-$(CONFIG_I2C_SUN6I_P2WI) += i2c-sun6i-p2wi.o obj-$(CONFIG_I2C_TEGRA) += i2c-tegra.o diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c new file mode 100644 index 000000000000..f5e1941e65b5 --- /dev/null +++ b/drivers/i2c/busses/i2c-altera.c @@ -0,0 +1,511 @@ +/* + * Copyright Intel Corporation (C) 2017. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + * + * Based on the i2c-axxia.c driver. 
+ */ +#include <linux/clk.h> +#include <linux/clkdev.h> +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/iopoll.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> + +#define ALTR_I2C_TFR_CMD 0x00 /* Transfer Command register */ +#define ALTR_I2C_TFR_CMD_STA BIT(9) /* send START before byte */ +#define ALTR_I2C_TFR_CMD_STO BIT(8) /* send STOP after byte */ +#define ALTR_I2C_TFR_CMD_RW_D BIT(0) /* Direction of transfer */ +#define ALTR_I2C_RX_DATA 0x04 /* RX data FIFO register */ +#define ALTR_I2C_CTRL 0x08 /* Control register */ +#define ALTR_I2C_CTRL_RXT_SHFT 4 /* RX FIFO Threshold */ +#define ALTR_I2C_CTRL_TCT_SHFT 2 /* TFER CMD FIFO Threshold */ +#define ALTR_I2C_CTRL_BSPEED BIT(1) /* Bus Speed (1=Fast) */ +#define ALTR_I2C_CTRL_EN BIT(0) /* Enable Core (1=Enable) */ +#define ALTR_I2C_ISER 0x0C /* Interrupt Status Enable register */ +#define ALTR_I2C_ISER_RXOF_EN BIT(4) /* Enable RX OVERFLOW IRQ */ +#define ALTR_I2C_ISER_ARB_EN BIT(3) /* Enable ARB LOST IRQ */ +#define ALTR_I2C_ISER_NACK_EN BIT(2) /* Enable NACK DET IRQ */ +#define ALTR_I2C_ISER_RXRDY_EN BIT(1) /* Enable RX Ready IRQ */ +#define ALTR_I2C_ISER_TXRDY_EN BIT(0) /* Enable TX Ready IRQ */ +#define ALTR_I2C_ISR 0x10 /* Interrupt Status register */ +#define ALTR_I2C_ISR_RXOF BIT(4) /* RX OVERFLOW IRQ */ +#define ALTR_I2C_ISR_ARB BIT(3) /* ARB LOST IRQ */ +#define ALTR_I2C_ISR_NACK BIT(2) /* NACK DET IRQ */ +#define ALTR_I2C_ISR_RXRDY BIT(1) /* RX Ready IRQ */ +#define ALTR_I2C_ISR_TXRDY BIT(0) /* TX Ready IRQ */ +#define ALTR_I2C_STATUS 0x14 /* Status register */ +#define ALTR_I2C_STAT_CORE BIT(0) /* Core Status (0=idle) */ +#define ALTR_I2C_TC_FIFO_LVL 0x18 /* Transfer FIFO LVL register */ +#define ALTR_I2C_RX_FIFO_LVL 0x1C /* Receive FIFO LVL register */ +#define ALTR_I2C_SCL_LOW 0x20 /* SCL low count register */ +#define ALTR_I2C_SCL_HIGH 0x24 /* SCL high count register */ +#define ALTR_I2C_SDA_HOLD 0x28 /* SDA hold count register */ + +#define ALTR_I2C_ALL_IRQ (ALTR_I2C_ISR_RXOF | ALTR_I2C_ISR_ARB | \ + ALTR_I2C_ISR_NACK | ALTR_I2C_ISR_RXRDY | \ + ALTR_I2C_ISR_TXRDY) + +#define ALTR_I2C_THRESHOLD 0 /* IRQ Threshold at 1 element */ +#define ALTR_I2C_DFLT_FIFO_SZ 4 +#define ALTR_I2C_TIMEOUT 100000 /* 100ms */ +#define ALTR_I2C_XFER_TIMEOUT (msecs_to_jiffies(250)) + +/** + * altr_i2c_dev - I2C device context + * @base: pointer to register struct + * @msg: pointer to current message + * @msg_len: number of bytes transferred in msg + * @msg_err: error code for completed message + * @msg_complete: xfer completion object + * @dev: device reference + * @adapter: core i2c abstraction + * @i2c_clk: clock reference for i2c input clock + * @bus_clk_rate: current i2c bus clock rate + * @buf: ptr to msg buffer for easier use. + * @fifo_size: size of the FIFO passed in. + * @isr_mask: cached copy of local ISR enables. + * @isr_status: cached copy of local ISR status. + * @lock: spinlock for IRQ synchronization. 
+ */ +struct altr_i2c_dev { + void __iomem *base; + struct i2c_msg *msg; + size_t msg_len; + int msg_err; + struct completion msg_complete; + struct device *dev; + struct i2c_adapter adapter; + struct clk *i2c_clk; + u32 bus_clk_rate; + u8 *buf; + u32 fifo_size; + u32 isr_mask; + u32 isr_status; + spinlock_t lock; /* IRQ synchronization */ +}; + +static void +altr_i2c_int_enable(struct altr_i2c_dev *idev, u32 mask, bool enable) +{ + unsigned long flags; + u32 int_en; + + spin_lock_irqsave(&idev->lock, flags); + + int_en = readl(idev->base + ALTR_I2C_ISER); + if (enable) + idev->isr_mask = int_en | mask; + else + idev->isr_mask = int_en & ~mask; + + writel(idev->isr_mask, idev->base + ALTR_I2C_ISER); + + spin_unlock_irqrestore(&idev->lock, flags); +} + +static void altr_i2c_int_clear(struct altr_i2c_dev *idev, u32 mask) +{ + u32 int_en = readl(idev->base + ALTR_I2C_ISR); + + writel(int_en | mask, idev->base + ALTR_I2C_ISR); +} + +static void altr_i2c_core_disable(struct altr_i2c_dev *idev) +{ + u32 tmp = readl(idev->base + ALTR_I2C_CTRL); + + writel(tmp & ~ALTR_I2C_CTRL_EN, idev->base + ALTR_I2C_CTRL); +} + +static void altr_i2c_core_enable(struct altr_i2c_dev *idev) +{ + u32 tmp = readl(idev->base + ALTR_I2C_CTRL); + + writel(tmp | ALTR_I2C_CTRL_EN, idev->base + ALTR_I2C_CTRL); +} + +static void altr_i2c_reset(struct altr_i2c_dev *idev) +{ + altr_i2c_core_disable(idev); + altr_i2c_core_enable(idev); +} + +static inline void altr_i2c_stop(struct altr_i2c_dev *idev) +{ + writel(ALTR_I2C_TFR_CMD_STO, idev->base + ALTR_I2C_TFR_CMD); +} + +static void altr_i2c_init(struct altr_i2c_dev *idev) +{ + u32 divisor = clk_get_rate(idev->i2c_clk) / idev->bus_clk_rate; + u32 clk_mhz = clk_get_rate(idev->i2c_clk) / 1000000; + u32 tmp = (ALTR_I2C_THRESHOLD << ALTR_I2C_CTRL_RXT_SHFT) | + (ALTR_I2C_THRESHOLD << ALTR_I2C_CTRL_TCT_SHFT); + u32 t_high, t_low; + + if (idev->bus_clk_rate <= 100000) { + tmp &= ~ALTR_I2C_CTRL_BSPEED; + /* Standard mode SCL 50/50 */ + t_high = divisor * 1 / 2; + t_low = divisor * 1 / 2; + } else { + tmp |= ALTR_I2C_CTRL_BSPEED; + /* Fast mode SCL 33/66 */ + t_high = divisor * 1 / 3; + t_low = divisor * 2 / 3; + } + writel(tmp, idev->base + ALTR_I2C_CTRL); + + dev_dbg(idev->dev, "rate=%uHz per_clk=%uMHz -> ratio=1:%u\n", + idev->bus_clk_rate, clk_mhz, divisor); + + /* Reset controller */ + altr_i2c_reset(idev); + + /* SCL High Time */ + writel(t_high, idev->base + ALTR_I2C_SCL_HIGH); + /* SCL Low Time */ + writel(t_low, idev->base + ALTR_I2C_SCL_LOW); + /* SDA Hold Time, 300ns */ + writel(div_u64(300 * clk_mhz, 1000), idev->base + ALTR_I2C_SDA_HOLD); + + /* Mask all master interrupt bits */ + altr_i2c_int_enable(idev, ALTR_I2C_ALL_IRQ, false); +} + +/** + * altr_i2c_transfer - On the last byte to be transmitted, send + * a Stop bit on the last byte. + */ +static void altr_i2c_transfer(struct altr_i2c_dev *idev, u32 data) +{ + /* On the last byte to be transmitted, send STOP */ + if (idev->msg_len == 1) + data |= ALTR_I2C_TFR_CMD_STO; + if (idev->msg_len > 0) + writel(data, idev->base + ALTR_I2C_TFR_CMD); +} + +/** + * altr_i2c_empty_rx_fifo - Fetch data from RX FIFO until end of + * transfer. Send a Stop bit on the last byte. 
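
For reference, the SCL divider math in altr_i2c_init() above works out as follows. This is a standalone sketch of the same arithmetic; the 100 MHz input clock and 400 kHz bus rate are assumed values chosen only to illustrate the fast-mode 33/66 duty split and the 300 ns SDA hold conversion, not figures taken from the driver or any board:

#include <stdio.h>

int main(void)
{
	unsigned int clk_rate = 100000000;	/* assumed I2C input clock: 100 MHz */
	unsigned int bus_rate = 400000;		/* assumed bus rate: 400 kHz (fast mode) */

	unsigned int divisor = clk_rate / bus_rate;	/* 250 input cycles per SCL period */
	unsigned int clk_mhz = clk_rate / 1000000;	/* 100 */

	/* Fast mode uses a 33/66 SCL duty cycle; standard mode would use 50/50. */
	unsigned int t_high = divisor * 1 / 3;	/* 83 cycles */
	unsigned int t_low  = divisor * 2 / 3;	/* 166 cycles */

	/* 300 ns SDA hold time expressed in input-clock cycles. */
	unsigned int sda_hold = 300 * clk_mhz / 1000;	/* 30 cycles */

	printf("divisor=%u t_high=%u t_low=%u sda_hold=%u\n",
	       divisor, t_high, t_low, sda_hold);
	return 0;
}
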
+ */ +static void altr_i2c_empty_rx_fifo(struct altr_i2c_dev *idev) +{ + size_t rx_fifo_avail = readl(idev->base + ALTR_I2C_RX_FIFO_LVL); + int bytes_to_transfer = min(rx_fifo_avail, idev->msg_len); + + while (bytes_to_transfer-- > 0) { + *idev->buf++ = readl(idev->base + ALTR_I2C_RX_DATA); + idev->msg_len--; + altr_i2c_transfer(idev, 0); + } +} + +/** + * altr_i2c_fill_tx_fifo - Fill TX FIFO from current message buffer. + * @return: Number of bytes left to transfer. + */ +static int altr_i2c_fill_tx_fifo(struct altr_i2c_dev *idev) +{ + size_t tx_fifo_avail = idev->fifo_size - readl(idev->base + + ALTR_I2C_TC_FIFO_LVL); + int bytes_to_transfer = min(tx_fifo_avail, idev->msg_len); + int ret = idev->msg_len - bytes_to_transfer; + + while (bytes_to_transfer-- > 0) { + altr_i2c_transfer(idev, *idev->buf++); + idev->msg_len--; + } + + return ret; +} + +static irqreturn_t altr_i2c_isr_quick(int irq, void *_dev) +{ + struct altr_i2c_dev *idev = _dev; + irqreturn_t ret = IRQ_HANDLED; + + /* Read IRQ status but only interested in Enabled IRQs. */ + idev->isr_status = readl(idev->base + ALTR_I2C_ISR) & idev->isr_mask; + if (idev->isr_status) + ret = IRQ_WAKE_THREAD; + + return ret; +} + +static irqreturn_t altr_i2c_isr(int irq, void *_dev) +{ + int ret; + bool read, finish = false; + struct altr_i2c_dev *idev = _dev; + u32 status = idev->isr_status; + + if (!idev->msg) { + dev_warn(idev->dev, "unexpected interrupt\n"); + altr_i2c_int_clear(idev, ALTR_I2C_ALL_IRQ); + return IRQ_HANDLED; + } + read = (idev->msg->flags & I2C_M_RD) != 0; + + /* handle Lost Arbitration */ + if (unlikely(status & ALTR_I2C_ISR_ARB)) { + altr_i2c_int_clear(idev, ALTR_I2C_ISR_ARB); + idev->msg_err = -EAGAIN; + finish = true; + } else if (unlikely(status & ALTR_I2C_ISR_NACK)) { + dev_dbg(idev->dev, "Could not get ACK\n"); + idev->msg_err = -ENXIO; + altr_i2c_int_clear(idev, ALTR_I2C_ISR_NACK); + altr_i2c_stop(idev); + finish = true; + } else if (read && unlikely(status & ALTR_I2C_ISR_RXOF)) { + /* handle RX FIFO Overflow */ + altr_i2c_empty_rx_fifo(idev); + altr_i2c_int_clear(idev, ALTR_I2C_ISR_RXRDY); + altr_i2c_stop(idev); + dev_err(idev->dev, "RX FIFO Overflow\n"); + finish = true; + } else if (read && (status & ALTR_I2C_ISR_RXRDY)) { + /* RX FIFO needs service? */ + altr_i2c_empty_rx_fifo(idev); + altr_i2c_int_clear(idev, ALTR_I2C_ISR_RXRDY); + if (!idev->msg_len) + finish = true; + } else if (!read && (status & ALTR_I2C_ISR_TXRDY)) { + /* TX FIFO needs service? 
*/ + altr_i2c_int_clear(idev, ALTR_I2C_ISR_TXRDY); + if (idev->msg_len > 0) + altr_i2c_fill_tx_fifo(idev); + else + finish = true; + } else { + dev_warn(idev->dev, "Unexpected interrupt: 0x%x\n", status); + altr_i2c_int_clear(idev, ALTR_I2C_ALL_IRQ); + } + + if (finish) { + /* Wait for the Core to finish */ + ret = readl_poll_timeout_atomic(idev->base + ALTR_I2C_STATUS, + status, + !(status & ALTR_I2C_STAT_CORE), + 1, ALTR_I2C_TIMEOUT); + if (ret) + dev_err(idev->dev, "message timeout\n"); + altr_i2c_int_enable(idev, ALTR_I2C_ALL_IRQ, false); + altr_i2c_int_clear(idev, ALTR_I2C_ALL_IRQ); + complete(&idev->msg_complete); + dev_dbg(idev->dev, "Message Complete\n"); + } + + return IRQ_HANDLED; +} + +static int altr_i2c_xfer_msg(struct altr_i2c_dev *idev, struct i2c_msg *msg) +{ + u32 imask = ALTR_I2C_ISR_RXOF | ALTR_I2C_ISR_ARB | ALTR_I2C_ISR_NACK; + unsigned long time_left; + u32 value; + u8 addr = i2c_8bit_addr_from_msg(msg); + + idev->msg = msg; + idev->msg_len = msg->len; + idev->buf = msg->buf; + idev->msg_err = 0; + reinit_completion(&idev->msg_complete); + altr_i2c_core_enable(idev); + + /* Make sure RX FIFO is empty */ + do { + readl(idev->base + ALTR_I2C_RX_DATA); + } while (readl(idev->base + ALTR_I2C_RX_FIFO_LVL)); + + writel(ALTR_I2C_TFR_CMD_STA | addr, idev->base + ALTR_I2C_TFR_CMD); + + if ((msg->flags & I2C_M_RD) != 0) { + imask |= ALTR_I2C_ISER_RXOF_EN | ALTR_I2C_ISER_RXRDY_EN; + altr_i2c_int_enable(idev, imask, true); + /* write the first byte to start the RX */ + altr_i2c_transfer(idev, 0); + } else { + imask |= ALTR_I2C_ISR_TXRDY; + altr_i2c_int_enable(idev, imask, true); + altr_i2c_fill_tx_fifo(idev); + } + + time_left = wait_for_completion_timeout(&idev->msg_complete, + ALTR_I2C_XFER_TIMEOUT); + altr_i2c_int_enable(idev, imask, false); + + value = readl(idev->base + ALTR_I2C_STATUS) & ALTR_I2C_STAT_CORE; + if (value) + dev_err(idev->dev, "Core Status not IDLE...\n"); + + if (time_left == 0) { + idev->msg_err = -ETIMEDOUT; + dev_dbg(idev->dev, "Transaction timed out.\n"); + } + + altr_i2c_core_disable(idev); + + return idev->msg_err; +} + +static int +altr_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) +{ + struct altr_i2c_dev *idev = i2c_get_adapdata(adap); + int i, ret; + + for (i = 0; i < num; i++) { + ret = altr_i2c_xfer_msg(idev, msgs++); + if (ret) + return ret; + } + return num; +} + +static u32 altr_i2c_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm altr_i2c_algo = { + .master_xfer = altr_i2c_xfer, + .functionality = altr_i2c_func, +}; + +static int altr_i2c_probe(struct platform_device *pdev) +{ + struct altr_i2c_dev *idev = NULL; + struct resource *res; + int irq, ret; + u32 val; + + idev = devm_kzalloc(&pdev->dev, sizeof(*idev), GFP_KERNEL); + if (!idev) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + idev->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(idev->base)) + return PTR_ERR(idev->base); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "missing interrupt resource\n"); + return irq; + } + + idev->i2c_clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(idev->i2c_clk)) { + dev_err(&pdev->dev, "missing clock\n"); + return PTR_ERR(idev->i2c_clk); + } + + idev->dev = &pdev->dev; + init_completion(&idev->msg_complete); + spin_lock_init(&idev->lock); + + val = device_property_read_u32(idev->dev, "fifo-size", + &idev->fifo_size); + if (val) { + dev_err(&pdev->dev, "FIFO size set to default of %d\n", + 
ALTR_I2C_DFLT_FIFO_SZ); + idev->fifo_size = ALTR_I2C_DFLT_FIFO_SZ; + } + + val = device_property_read_u32(idev->dev, "clock-frequency", + &idev->bus_clk_rate); + if (val) { + dev_err(&pdev->dev, "Default to 100kHz\n"); + idev->bus_clk_rate = 100000; /* default clock rate */ + } + + if (idev->bus_clk_rate > 400000) { + dev_err(&pdev->dev, "invalid clock-frequency %d\n", + idev->bus_clk_rate); + return -EINVAL; + } + + ret = devm_request_threaded_irq(&pdev->dev, irq, altr_i2c_isr_quick, + altr_i2c_isr, IRQF_ONESHOT, + pdev->name, idev); + if (ret) { + dev_err(&pdev->dev, "failed to claim IRQ %d\n", irq); + return ret; + } + + ret = clk_prepare_enable(idev->i2c_clk); + if (ret) { + dev_err(&pdev->dev, "failed to enable clock\n"); + return ret; + } + + altr_i2c_init(idev); + + i2c_set_adapdata(&idev->adapter, idev); + strlcpy(idev->adapter.name, pdev->name, sizeof(idev->adapter.name)); + idev->adapter.owner = THIS_MODULE; + idev->adapter.algo = &altr_i2c_algo; + idev->adapter.dev.parent = &pdev->dev; + idev->adapter.dev.of_node = pdev->dev.of_node; + + platform_set_drvdata(pdev, idev); + + ret = i2c_add_adapter(&idev->adapter); + if (ret) { + clk_disable_unprepare(idev->i2c_clk); + return ret; + } + dev_info(&pdev->dev, "Altera SoftIP I2C Probe Complete\n"); + + return 0; +} + +static int altr_i2c_remove(struct platform_device *pdev) +{ + struct altr_i2c_dev *idev = platform_get_drvdata(pdev); + + clk_disable_unprepare(idev->i2c_clk); + i2c_del_adapter(&idev->adapter); + + return 0; +} + +/* Match table for of_platform binding */ +static const struct of_device_id altr_i2c_of_match[] = { + { .compatible = "altr,softip-i2c-v1.0" }, + {}, +}; +MODULE_DEVICE_TABLE(of, altr_i2c_of_match); + +static struct platform_driver altr_i2c_driver = { + .probe = altr_i2c_probe, + .remove = altr_i2c_remove, + .driver = { + .name = "altera-i2c", + .of_match_table = altr_i2c_of_match, + }, +}; + +module_platform_driver(altr_i2c_driver); + +MODULE_DESCRIPTION("Altera Soft IP I2C bus driver"); +MODULE_AUTHOR("Thor Thayer <thor.thayer@linux.intel.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/i2c/busses/i2c-stm32.h b/drivers/i2c/busses/i2c-stm32.h new file mode 100644 index 000000000000..dab51761f8c5 --- /dev/null +++ b/drivers/i2c/busses/i2c-stm32.h @@ -0,0 +1,20 @@ +/* + * i2c-stm32.h + * + * Copyright (C) M'boumba Cedric Madianga 2017 + * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com> + * + * License terms: GNU General Public License (GPL), version 2 + */ + +#ifndef _I2C_STM32_H +#define _I2C_STM32_H + +enum stm32_i2c_speed { + STM32_I2C_SPEED_STANDARD, /* 100 kHz */ + STM32_I2C_SPEED_FAST, /* 400 kHz */ + STM32_I2C_SPEED_FAST_PLUS, /* 1 MHz */ + STM32_I2C_SPEED_END, +}; + +#endif /* _I2C_STM32_H */ diff --git a/drivers/i2c/busses/i2c-stm32f4.c b/drivers/i2c/busses/i2c-stm32f4.c index aceb6f788564..4ec108496f15 100644 --- a/drivers/i2c/busses/i2c-stm32f4.c +++ b/drivers/i2c/busses/i2c-stm32f4.c @@ -27,6 +27,8 @@ #include <linux/platform_device.h> #include <linux/reset.h> +#include "i2c-stm32.h" + /* STM32F4 I2C offset registers */ #define STM32F4_I2C_CR1 0x00 #define STM32F4_I2C_CR2 0x04 @@ -90,12 +92,6 @@ #define STM32F4_I2C_MAX_FREQ 46U #define HZ_TO_MHZ 1000000 -enum stm32f4_i2c_speed { - STM32F4_I2C_SPEED_STANDARD, /* 100 kHz */ - STM32F4_I2C_SPEED_FAST, /* 400 kHz */ - STM32F4_I2C_SPEED_END, -}; - /** * struct stm32f4_i2c_msg - client specific data * @addr: 8-bit slave addr, including r/w bit @@ -159,7 +155,7 @@ static int stm32f4_i2c_set_periph_clk_freq(struct stm32f4_i2c_dev 
*i2c_dev) i2c_dev->parent_rate = clk_get_rate(i2c_dev->clk); freq = DIV_ROUND_UP(i2c_dev->parent_rate, HZ_TO_MHZ); - if (i2c_dev->speed == STM32F4_I2C_SPEED_STANDARD) { + if (i2c_dev->speed == STM32_I2C_SPEED_STANDARD) { /* * To reach 100 kHz, the parent clk frequency should be between * a minimum value of 2 MHz and a maximum value of 46 MHz due @@ -216,7 +212,7 @@ static void stm32f4_i2c_set_rise_time(struct stm32f4_i2c_dev *i2c_dev) * is not higher than 46 MHz . As a result trise is at most 4 bits wide * and so fits into the TRISE bits [5:0]. */ - if (i2c_dev->speed == STM32F4_I2C_SPEED_STANDARD) + if (i2c_dev->speed == STM32_I2C_SPEED_STANDARD) trise = freq + 1; else trise = freq * 3 / 10 + 1; @@ -230,7 +226,7 @@ static void stm32f4_i2c_set_speed_mode(struct stm32f4_i2c_dev *i2c_dev) u32 val; u32 ccr = 0; - if (i2c_dev->speed == STM32F4_I2C_SPEED_STANDARD) { + if (i2c_dev->speed == STM32_I2C_SPEED_STANDARD) { /* * In standard mode: * t_scl_high = t_scl_low = CCR * I2C parent clk period @@ -808,10 +804,10 @@ static int stm32f4_i2c_probe(struct platform_device *pdev) udelay(2); reset_control_deassert(rst); - i2c_dev->speed = STM32F4_I2C_SPEED_STANDARD; + i2c_dev->speed = STM32_I2C_SPEED_STANDARD; ret = of_property_read_u32(np, "clock-frequency", &clk_rate); if (!ret && clk_rate >= 400000) - i2c_dev->speed = STM32F4_I2C_SPEED_FAST; + i2c_dev->speed = STM32_I2C_SPEED_FAST; i2c_dev->dev = &pdev->dev; diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c new file mode 100644 index 000000000000..47c67b0ca896 --- /dev/null +++ b/drivers/i2c/busses/i2c-stm32f7.c @@ -0,0 +1,972 @@ +/* + * Driver for STMicroelectronics STM32F7 I2C controller + * + * This I2C controller is described in the STM32F75xxx and STM32F74xxx Soc + * reference manual. 
+ * Please see below a link to the documentation: + * http://www.st.com/resource/en/reference_manual/dm00124865.pdf + * + * Copyright (C) M'boumba Cedric Madianga 2017 + * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com> + * + * This driver is based on i2c-stm32f4.c + * + * License terms: GNU General Public License (GPL), version 2 + */ +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/reset.h> +#include <linux/slab.h> + +#include "i2c-stm32.h" + +/* STM32F7 I2C registers */ +#define STM32F7_I2C_CR1 0x00 +#define STM32F7_I2C_CR2 0x04 +#define STM32F7_I2C_TIMINGR 0x10 +#define STM32F7_I2C_ISR 0x18 +#define STM32F7_I2C_ICR 0x1C +#define STM32F7_I2C_RXDR 0x24 +#define STM32F7_I2C_TXDR 0x28 + +/* STM32F7 I2C control 1 */ +#define STM32F7_I2C_CR1_ANFOFF BIT(12) +#define STM32F7_I2C_CR1_ERRIE BIT(7) +#define STM32F7_I2C_CR1_TCIE BIT(6) +#define STM32F7_I2C_CR1_STOPIE BIT(5) +#define STM32F7_I2C_CR1_NACKIE BIT(4) +#define STM32F7_I2C_CR1_ADDRIE BIT(3) +#define STM32F7_I2C_CR1_RXIE BIT(2) +#define STM32F7_I2C_CR1_TXIE BIT(1) +#define STM32F7_I2C_CR1_PE BIT(0) +#define STM32F7_I2C_ALL_IRQ_MASK (STM32F7_I2C_CR1_ERRIE \ + | STM32F7_I2C_CR1_TCIE \ + | STM32F7_I2C_CR1_STOPIE \ + | STM32F7_I2C_CR1_NACKIE \ + | STM32F7_I2C_CR1_RXIE \ + | STM32F7_I2C_CR1_TXIE) + +/* STM32F7 I2C control 2 */ +#define STM32F7_I2C_CR2_RELOAD BIT(24) +#define STM32F7_I2C_CR2_NBYTES_MASK GENMASK(23, 16) +#define STM32F7_I2C_CR2_NBYTES(n) (((n) & 0xff) << 16) +#define STM32F7_I2C_CR2_NACK BIT(15) +#define STM32F7_I2C_CR2_STOP BIT(14) +#define STM32F7_I2C_CR2_START BIT(13) +#define STM32F7_I2C_CR2_RD_WRN BIT(10) +#define STM32F7_I2C_CR2_SADD7_MASK GENMASK(7, 1) +#define STM32F7_I2C_CR2_SADD7(n) (((n) & 0x7f) << 1) + +/* STM32F7 I2C Interrupt Status */ +#define STM32F7_I2C_ISR_BUSY BIT(15) +#define STM32F7_I2C_ISR_ARLO BIT(9) +#define STM32F7_I2C_ISR_BERR BIT(8) +#define STM32F7_I2C_ISR_TCR BIT(7) +#define STM32F7_I2C_ISR_TC BIT(6) +#define STM32F7_I2C_ISR_STOPF BIT(5) +#define STM32F7_I2C_ISR_NACKF BIT(4) +#define STM32F7_I2C_ISR_RXNE BIT(2) +#define STM32F7_I2C_ISR_TXIS BIT(1) + +/* STM32F7 I2C Interrupt Clear */ +#define STM32F7_I2C_ICR_ARLOCF BIT(9) +#define STM32F7_I2C_ICR_BERRCF BIT(8) +#define STM32F7_I2C_ICR_STOPCF BIT(5) +#define STM32F7_I2C_ICR_NACKCF BIT(4) + +/* STM32F7 I2C Timing */ +#define STM32F7_I2C_TIMINGR_PRESC(n) (((n) & 0xf) << 28) +#define STM32F7_I2C_TIMINGR_SCLDEL(n) (((n) & 0xf) << 20) +#define STM32F7_I2C_TIMINGR_SDADEL(n) (((n) & 0xf) << 16) +#define STM32F7_I2C_TIMINGR_SCLH(n) (((n) & 0xff) << 8) +#define STM32F7_I2C_TIMINGR_SCLL(n) ((n) & 0xff) + +#define STM32F7_I2C_MAX_LEN 0xff + +#define STM32F7_I2C_DNF_DEFAULT 0 +#define STM32F7_I2C_DNF_MAX 16 + +#define STM32F7_I2C_ANALOG_FILTER_ENABLE 1 +#define STM32F7_I2C_ANALOG_FILTER_DELAY_MIN 50 /* ns */ +#define STM32F7_I2C_ANALOG_FILTER_DELAY_MAX 260 /* ns */ + +#define STM32F7_I2C_RISE_TIME_DEFAULT 25 /* ns */ +#define STM32F7_I2C_FALL_TIME_DEFAULT 10 /* ns */ + +#define STM32F7_PRESC_MAX BIT(4) +#define STM32F7_SCLDEL_MAX BIT(4) +#define STM32F7_SDADEL_MAX BIT(4) +#define STM32F7_SCLH_MAX BIT(8) +#define STM32F7_SCLL_MAX BIT(8) + +/** + * struct stm32f7_i2c_spec - private i2c specification timing + * @rate: I2C bus speed (Hz) 
+ * @rate_min: 80% of I2C bus speed (Hz) + * @rate_max: 100% of I2C bus speed (Hz) + * @fall_max: Max fall time of both SDA and SCL signals (ns) + * @rise_max: Max rise time of both SDA and SCL signals (ns) + * @hddat_min: Min data hold time (ns) + * @vddat_max: Max data valid time (ns) + * @sudat_min: Min data setup time (ns) + * @l_min: Min low period of the SCL clock (ns) + * @h_min: Min high period of the SCL clock (ns) + */ +struct stm32f7_i2c_spec { + u32 rate; + u32 rate_min; + u32 rate_max; + u32 fall_max; + u32 rise_max; + u32 hddat_min; + u32 vddat_max; + u32 sudat_min; + u32 l_min; + u32 h_min; +}; + +/** + * struct stm32f7_i2c_setup - private I2C timing setup parameters + * @speed: I2C speed mode (standard, Fast Plus) + * @speed_freq: I2C speed frequency (Hz) + * @clock_src: I2C clock source frequency (Hz) + * @rise_time: Rise time (ns) + * @fall_time: Fall time (ns) + * @dnf: Digital filter coefficient (0-16) + * @analog_filter: Analog filter delay (On/Off) + */ +struct stm32f7_i2c_setup { + enum stm32_i2c_speed speed; + u32 speed_freq; + u32 clock_src; + u32 rise_time; + u32 fall_time; + u8 dnf; + bool analog_filter; +}; + +/** + * struct stm32f7_i2c_timings - private I2C output parameters + * @prec: Prescaler value + * @scldel: Data setup time + * @sdadel: Data hold time + * @sclh: SCL high period (master mode) + * @sclh: SCL low period (master mode) + */ +struct stm32f7_i2c_timings { + struct list_head node; + u8 presc; + u8 scldel; + u8 sdadel; + u8 sclh; + u8 scll; +}; + +/** + * struct stm32f7_i2c_msg - client specific data + * @addr: 8-bit slave addr, including r/w bit + * @count: number of bytes to be transferred + * @buf: data buffer + * @result: result of the transfer + * @stop: last I2C msg to be sent, i.e. STOP to be generated + */ +struct stm32f7_i2c_msg { + u8 addr; + u32 count; + u8 *buf; + int result; + bool stop; +}; + +/** + * struct stm32f7_i2c_dev - private data of the controller + * @adap: I2C adapter for this controller + * @dev: device for this controller + * @base: virtual memory area + * @complete: completion of I2C message + * @clk: hw i2c clock + * @speed: I2C clock frequency of the controller. Standard, Fast or Fast+ + * @msg: Pointer to data to be written + * @msg_num: number of I2C messages to be executed + * @msg_id: message identifiant + * @f7_msg: customized i2c msg for driver usage + * @setup: I2C timing input setup + * @timing: I2C computed timings + */ +struct stm32f7_i2c_dev { + struct i2c_adapter adap; + struct device *dev; + void __iomem *base; + struct completion complete; + struct clk *clk; + int speed; + struct i2c_msg *msg; + unsigned int msg_num; + unsigned int msg_id; + struct stm32f7_i2c_msg f7_msg; + struct stm32f7_i2c_setup *setup; + struct stm32f7_i2c_timings timing; +}; + +/** + * All these values are coming from I2C Specification, Version 6.0, 4th of + * April 2014. + * + * Table10. 
Characteristics of the SDA and SCL bus lines for Standard, Fast, + * and Fast-mode Plus I2C-bus devices + */ +static struct stm32f7_i2c_spec i2c_specs[] = { + [STM32_I2C_SPEED_STANDARD] = { + .rate = 100000, + .rate_min = 80000, + .rate_max = 100000, + .fall_max = 300, + .rise_max = 1000, + .hddat_min = 0, + .vddat_max = 3450, + .sudat_min = 250, + .l_min = 4700, + .h_min = 4000, + }, + [STM32_I2C_SPEED_FAST] = { + .rate = 400000, + .rate_min = 320000, + .rate_max = 400000, + .fall_max = 300, + .rise_max = 300, + .hddat_min = 0, + .vddat_max = 900, + .sudat_min = 100, + .l_min = 1300, + .h_min = 600, + }, + [STM32_I2C_SPEED_FAST_PLUS] = { + .rate = 1000000, + .rate_min = 800000, + .rate_max = 1000000, + .fall_max = 100, + .rise_max = 120, + .hddat_min = 0, + .vddat_max = 450, + .sudat_min = 50, + .l_min = 500, + .h_min = 260, + }, +}; + +struct stm32f7_i2c_setup stm32f7_setup = { + .rise_time = STM32F7_I2C_RISE_TIME_DEFAULT, + .fall_time = STM32F7_I2C_FALL_TIME_DEFAULT, + .dnf = STM32F7_I2C_DNF_DEFAULT, + .analog_filter = STM32F7_I2C_ANALOG_FILTER_ENABLE, +}; + +static inline void stm32f7_i2c_set_bits(void __iomem *reg, u32 mask) +{ + writel_relaxed(readl_relaxed(reg) | mask, reg); +} + +static inline void stm32f7_i2c_clr_bits(void __iomem *reg, u32 mask) +{ + writel_relaxed(readl_relaxed(reg) & ~mask, reg); +} + +static int stm32f7_i2c_compute_timing(struct stm32f7_i2c_dev *i2c_dev, + struct stm32f7_i2c_setup *setup, + struct stm32f7_i2c_timings *output) +{ + u32 p_prev = STM32F7_PRESC_MAX; + u32 i2cclk = DIV_ROUND_CLOSEST(NSEC_PER_SEC, + setup->clock_src); + u32 i2cbus = DIV_ROUND_CLOSEST(NSEC_PER_SEC, + setup->speed_freq); + u32 clk_error_prev = i2cbus; + u32 tsync; + u32 af_delay_min, af_delay_max; + u32 dnf_delay; + u32 clk_min, clk_max; + int sdadel_min, sdadel_max; + int scldel_min; + struct stm32f7_i2c_timings *v, *_v, *s; + struct list_head solutions; + u16 p, l, a, h; + int ret = 0; + + if (setup->speed >= STM32_I2C_SPEED_END) { + dev_err(i2c_dev->dev, "speed out of bound {%d/%d}\n", + setup->speed, STM32_I2C_SPEED_END - 1); + return -EINVAL; + } + + if ((setup->rise_time > i2c_specs[setup->speed].rise_max) || + (setup->fall_time > i2c_specs[setup->speed].fall_max)) { + dev_err(i2c_dev->dev, + "timings out of bound Rise{%d>%d}/Fall{%d>%d}\n", + setup->rise_time, i2c_specs[setup->speed].rise_max, + setup->fall_time, i2c_specs[setup->speed].fall_max); + return -EINVAL; + } + + if (setup->dnf > STM32F7_I2C_DNF_MAX) { + dev_err(i2c_dev->dev, + "DNF out of bound %d/%d\n", + setup->dnf, STM32F7_I2C_DNF_MAX); + return -EINVAL; + } + + if (setup->speed_freq > i2c_specs[setup->speed].rate) { + dev_err(i2c_dev->dev, "ERROR: Freq {%d/%d}\n", + setup->speed_freq, i2c_specs[setup->speed].rate); + return -EINVAL; + } + + /* Analog and Digital Filters */ + af_delay_min = + (setup->analog_filter ? + STM32F7_I2C_ANALOG_FILTER_DELAY_MIN : 0); + af_delay_max = + (setup->analog_filter ? 
+ STM32F7_I2C_ANALOG_FILTER_DELAY_MAX : 0); + dnf_delay = setup->dnf * i2cclk; + + sdadel_min = setup->fall_time - i2c_specs[setup->speed].hddat_min - + af_delay_min - (setup->dnf + 3) * i2cclk; + + sdadel_max = i2c_specs[setup->speed].vddat_max - setup->rise_time - + af_delay_max - (setup->dnf + 4) * i2cclk; + + scldel_min = setup->rise_time + i2c_specs[setup->speed].sudat_min; + + if (sdadel_min < 0) + sdadel_min = 0; + if (sdadel_max < 0) + sdadel_max = 0; + + dev_dbg(i2c_dev->dev, "SDADEL(min/max): %i/%i, SCLDEL(Min): %i\n", + sdadel_min, sdadel_max, scldel_min); + + INIT_LIST_HEAD(&solutions); + /* Compute possible values for PRESC, SCLDEL and SDADEL */ + for (p = 0; p < STM32F7_PRESC_MAX; p++) { + for (l = 0; l < STM32F7_SCLDEL_MAX; l++) { + u32 scldel = (l + 1) * (p + 1) * i2cclk; + + if (scldel < scldel_min) + continue; + + for (a = 0; a < STM32F7_SDADEL_MAX; a++) { + u32 sdadel = (a * (p + 1) + 1) * i2cclk; + + if (((sdadel >= sdadel_min) && + (sdadel <= sdadel_max)) && + (p != p_prev)) { + v = kmalloc(sizeof(*v), GFP_KERNEL); + if (!v) { + ret = -ENOMEM; + goto exit; + } + + v->presc = p; + v->scldel = l; + v->sdadel = a; + p_prev = p; + + list_add_tail(&v->node, + &solutions); + } + } + } + } + + if (list_empty(&solutions)) { + dev_err(i2c_dev->dev, "no Prescaler solution\n"); + ret = -EPERM; + goto exit; + } + + tsync = af_delay_min + dnf_delay + (2 * i2cclk); + s = NULL; + clk_max = NSEC_PER_SEC / i2c_specs[setup->speed].rate_min; + clk_min = NSEC_PER_SEC / i2c_specs[setup->speed].rate_max; + + /* + * Among Prescaler possibilities discovered above figures out SCL Low + * and High Period. Provided: + * - SCL Low Period has to be higher than SCL Clock Low Period + * defined by I2C Specification. I2C Clock has to be lower than + * (SCL Low Period - Analog/Digital filters) / 4. 
+ * - SCL High Period has to be lower than SCL Clock High Period + * defined by I2C Specification + * - I2C Clock has to be lower than SCL High Period + */ + list_for_each_entry(v, &solutions, node) { + u32 prescaler = (v->presc + 1) * i2cclk; + + for (l = 0; l < STM32F7_SCLL_MAX; l++) { + u32 tscl_l = (l + 1) * prescaler + tsync; + + if ((tscl_l < i2c_specs[setup->speed].l_min) || + (i2cclk >= + ((tscl_l - af_delay_min - dnf_delay) / 4))) { + continue; + } + + for (h = 0; h < STM32F7_SCLH_MAX; h++) { + u32 tscl_h = (h + 1) * prescaler + tsync; + u32 tscl = tscl_l + tscl_h + + setup->rise_time + setup->fall_time; + + if ((tscl >= clk_min) && (tscl <= clk_max) && + (tscl_h >= i2c_specs[setup->speed].h_min) && + (i2cclk < tscl_h)) { + int clk_error = tscl - i2cbus; + + if (clk_error < 0) + clk_error = -clk_error; + + if (clk_error < clk_error_prev) { + clk_error_prev = clk_error; + v->scll = l; + v->sclh = h; + s = v; + } + } + } + } + } + + if (!s) { + dev_err(i2c_dev->dev, "no solution at all\n"); + ret = -EPERM; + goto exit; + } + + output->presc = s->presc; + output->scldel = s->scldel; + output->sdadel = s->sdadel; + output->scll = s->scll; + output->sclh = s->sclh; + + dev_dbg(i2c_dev->dev, + "Presc: %i, scldel: %i, sdadel: %i, scll: %i, sclh: %i\n", + output->presc, + output->scldel, output->sdadel, + output->scll, output->sclh); + +exit: + /* Release list and memory */ + list_for_each_entry_safe(v, _v, &solutions, node) { + list_del(&v->node); + kfree(v); + } + + return ret; +} + +static int stm32f7_i2c_setup_timing(struct stm32f7_i2c_dev *i2c_dev, + struct stm32f7_i2c_setup *setup) +{ + int ret = 0; + + setup->speed = i2c_dev->speed; + setup->speed_freq = i2c_specs[setup->speed].rate; + setup->clock_src = clk_get_rate(i2c_dev->clk); + + if (!setup->clock_src) { + dev_err(i2c_dev->dev, "clock rate is 0\n"); + return -EINVAL; + } + + do { + ret = stm32f7_i2c_compute_timing(i2c_dev, setup, + &i2c_dev->timing); + if (ret) { + dev_err(i2c_dev->dev, + "failed to compute I2C timings.\n"); + if (i2c_dev->speed > STM32_I2C_SPEED_STANDARD) { + i2c_dev->speed--; + setup->speed = i2c_dev->speed; + setup->speed_freq = + i2c_specs[setup->speed].rate; + dev_warn(i2c_dev->dev, + "downgrade I2C Speed Freq to (%i)\n", + i2c_specs[setup->speed].rate); + } else { + break; + } + } + } while (ret); + + if (ret) { + dev_err(i2c_dev->dev, "Impossible to compute I2C timings.\n"); + return ret; + } + + dev_dbg(i2c_dev->dev, "I2C Speed(%i), Freq(%i), Clk Source(%i)\n", + setup->speed, setup->speed_freq, setup->clock_src); + dev_dbg(i2c_dev->dev, "I2C Rise(%i) and Fall(%i) Time\n", + setup->rise_time, setup->fall_time); + dev_dbg(i2c_dev->dev, "I2C Analog Filter(%s), DNF(%i)\n", + (setup->analog_filter ? 
"On" : "Off"), setup->dnf); + + return 0; +} + +static void stm32f7_i2c_hw_config(struct stm32f7_i2c_dev *i2c_dev) +{ + struct stm32f7_i2c_timings *t = &i2c_dev->timing; + u32 timing = 0; + + /* Timing settings */ + timing |= STM32F7_I2C_TIMINGR_PRESC(t->presc); + timing |= STM32F7_I2C_TIMINGR_SCLDEL(t->scldel); + timing |= STM32F7_I2C_TIMINGR_SDADEL(t->sdadel); + timing |= STM32F7_I2C_TIMINGR_SCLH(t->sclh); + timing |= STM32F7_I2C_TIMINGR_SCLL(t->scll); + writel_relaxed(timing, i2c_dev->base + STM32F7_I2C_TIMINGR); + + /* Enable I2C */ + if (i2c_dev->setup->analog_filter) + stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1, + STM32F7_I2C_CR1_ANFOFF); + else + stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1, + STM32F7_I2C_CR1_ANFOFF); + stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1, + STM32F7_I2C_CR1_PE); +} + +static void stm32f7_i2c_write_tx_data(struct stm32f7_i2c_dev *i2c_dev) +{ + struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg; + void __iomem *base = i2c_dev->base; + + if (f7_msg->count) { + writeb_relaxed(*f7_msg->buf++, base + STM32F7_I2C_TXDR); + f7_msg->count--; + } +} + +static void stm32f7_i2c_read_rx_data(struct stm32f7_i2c_dev *i2c_dev) +{ + struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg; + void __iomem *base = i2c_dev->base; + + if (f7_msg->count) { + *f7_msg->buf++ = readb_relaxed(base + STM32F7_I2C_RXDR); + f7_msg->count--; + } +} + +static void stm32f7_i2c_reload(struct stm32f7_i2c_dev *i2c_dev) +{ + struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg; + u32 cr2; + + cr2 = readl_relaxed(i2c_dev->base + STM32F7_I2C_CR2); + + cr2 &= ~STM32F7_I2C_CR2_NBYTES_MASK; + if (f7_msg->count > STM32F7_I2C_MAX_LEN) { + cr2 |= STM32F7_I2C_CR2_NBYTES(STM32F7_I2C_MAX_LEN); + } else { + cr2 &= ~STM32F7_I2C_CR2_RELOAD; + cr2 |= STM32F7_I2C_CR2_NBYTES(f7_msg->count); + } + + writel_relaxed(cr2, i2c_dev->base + STM32F7_I2C_CR2); +} + +static int stm32f7_i2c_wait_free_bus(struct stm32f7_i2c_dev *i2c_dev) +{ + u32 status; + int ret; + + ret = readl_relaxed_poll_timeout(i2c_dev->base + STM32F7_I2C_ISR, + status, + !(status & STM32F7_I2C_ISR_BUSY), + 10, 1000); + if (ret) { + dev_dbg(i2c_dev->dev, "bus busy\n"); + ret = -EBUSY; + } + + return ret; +} + +static void stm32f7_i2c_xfer_msg(struct stm32f7_i2c_dev *i2c_dev, + struct i2c_msg *msg) +{ + struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg; + void __iomem *base = i2c_dev->base; + u32 cr1, cr2; + + f7_msg->addr = msg->addr; + f7_msg->buf = msg->buf; + f7_msg->count = msg->len; + f7_msg->result = 0; + f7_msg->stop = (i2c_dev->msg_id >= i2c_dev->msg_num - 1); + + reinit_completion(&i2c_dev->complete); + + cr1 = readl_relaxed(base + STM32F7_I2C_CR1); + cr2 = readl_relaxed(base + STM32F7_I2C_CR2); + + /* Set transfer direction */ + cr2 &= ~STM32F7_I2C_CR2_RD_WRN; + if (msg->flags & I2C_M_RD) + cr2 |= STM32F7_I2C_CR2_RD_WRN; + + /* Set slave address */ + cr2 &= ~STM32F7_I2C_CR2_SADD7_MASK; + cr2 |= STM32F7_I2C_CR2_SADD7(f7_msg->addr); + + /* Set nb bytes to transfer and reload if needed */ + cr2 &= ~(STM32F7_I2C_CR2_NBYTES_MASK | STM32F7_I2C_CR2_RELOAD); + if (f7_msg->count > STM32F7_I2C_MAX_LEN) { + cr2 |= STM32F7_I2C_CR2_NBYTES(STM32F7_I2C_MAX_LEN); + cr2 |= STM32F7_I2C_CR2_RELOAD; + } else { + cr2 |= STM32F7_I2C_CR2_NBYTES(f7_msg->count); + } + + /* Enable NACK, STOP, error and transfer complete interrupts */ + cr1 |= STM32F7_I2C_CR1_ERRIE | STM32F7_I2C_CR1_TCIE | + STM32F7_I2C_CR1_STOPIE | STM32F7_I2C_CR1_NACKIE; + + /* Clear TX/RX interrupt */ + cr1 &= ~(STM32F7_I2C_CR1_RXIE | STM32F7_I2C_CR1_TXIE); + + /* Enable RX/TX 
interrupt according to msg direction */ + if (msg->flags & I2C_M_RD) + cr1 |= STM32F7_I2C_CR1_RXIE; + else + cr1 |= STM32F7_I2C_CR1_TXIE; + + /* Configure Start/Repeated Start */ + cr2 |= STM32F7_I2C_CR2_START; + + /* Write configurations registers */ + writel_relaxed(cr1, base + STM32F7_I2C_CR1); + writel_relaxed(cr2, base + STM32F7_I2C_CR2); +} + +static void stm32f7_i2c_disable_irq(struct stm32f7_i2c_dev *i2c_dev, u32 mask) +{ + stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1, mask); +} + +static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data) +{ + struct stm32f7_i2c_dev *i2c_dev = data; + struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg; + void __iomem *base = i2c_dev->base; + u32 status, mask; + + status = readl_relaxed(i2c_dev->base + STM32F7_I2C_ISR); + + /* Tx empty */ + if (status & STM32F7_I2C_ISR_TXIS) + stm32f7_i2c_write_tx_data(i2c_dev); + + /* RX not empty */ + if (status & STM32F7_I2C_ISR_RXNE) + stm32f7_i2c_read_rx_data(i2c_dev); + + /* NACK received */ + if (status & STM32F7_I2C_ISR_NACKF) { + dev_dbg(i2c_dev->dev, "<%s>: Receive NACK\n", __func__); + writel_relaxed(STM32F7_I2C_ICR_NACKCF, base + STM32F7_I2C_ICR); + f7_msg->result = -ENXIO; + } + + /* STOP detection flag */ + if (status & STM32F7_I2C_ISR_STOPF) { + /* Disable interrupts */ + stm32f7_i2c_disable_irq(i2c_dev, STM32F7_I2C_ALL_IRQ_MASK); + + /* Clear STOP flag */ + writel_relaxed(STM32F7_I2C_ICR_STOPCF, base + STM32F7_I2C_ICR); + + complete(&i2c_dev->complete); + } + + /* Transfer complete */ + if (status & STM32F7_I2C_ISR_TC) { + if (f7_msg->stop) { + mask = STM32F7_I2C_CR2_STOP; + stm32f7_i2c_set_bits(base + STM32F7_I2C_CR2, mask); + } else { + i2c_dev->msg_id++; + i2c_dev->msg++; + stm32f7_i2c_xfer_msg(i2c_dev, i2c_dev->msg); + } + } + + /* + * Transfer Complete Reload: 255 data bytes have been transferred + * We have to prepare the I2C controller to transfer the remaining + * data. 
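
The TCR/RELOAD handling described in the comment above is what allows messages longer than 255 bytes: NBYTES is programmed in chunks of at most STM32F7_I2C_MAX_LEN, with RELOAD set until the final chunk. A standalone sketch of that chunking, using an assumed 600-byte message purely as an example:

#include <stdio.h>
#include <stdbool.h>

#define STM32F7_I2C_MAX_LEN	0xff

int main(void)
{
	unsigned int remaining = 600;	/* assumed message length */

	while (remaining) {
		unsigned int nbytes = remaining > STM32F7_I2C_MAX_LEN ?
				      STM32F7_I2C_MAX_LEN : remaining;
		bool reload = remaining > STM32F7_I2C_MAX_LEN;

		/*
		 * The driver writes NBYTES and, when reload is true, the RELOAD
		 * bit into CR2; the TCR interrupt then reprograms the next chunk.
		 */
		printf("chunk: %u bytes, RELOAD=%d\n", nbytes, reload);
		remaining -= nbytes;
	}
	return 0;	/* prints 255/1, 255/1, 90/0 for the assumed length */
}
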
+ */ + if (status & STM32F7_I2C_ISR_TCR) + stm32f7_i2c_reload(i2c_dev); + + return IRQ_HANDLED; +} + +static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data) +{ + struct stm32f7_i2c_dev *i2c_dev = data; + struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg; + void __iomem *base = i2c_dev->base; + struct device *dev = i2c_dev->dev; + u32 status; + + status = readl_relaxed(i2c_dev->base + STM32F7_I2C_ISR); + + /* Bus error */ + if (status & STM32F7_I2C_ISR_BERR) { + dev_err(dev, "<%s>: Bus error\n", __func__); + writel_relaxed(STM32F7_I2C_ICR_BERRCF, base + STM32F7_I2C_ICR); + f7_msg->result = -EIO; + } + + /* Arbitration loss */ + if (status & STM32F7_I2C_ISR_ARLO) { + dev_dbg(dev, "<%s>: Arbitration loss\n", __func__); + writel_relaxed(STM32F7_I2C_ICR_ARLOCF, base + STM32F7_I2C_ICR); + f7_msg->result = -EAGAIN; + } + + stm32f7_i2c_disable_irq(i2c_dev, STM32F7_I2C_ALL_IRQ_MASK); + + complete(&i2c_dev->complete); + + return IRQ_HANDLED; +} + +static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap, + struct i2c_msg msgs[], int num) +{ + struct stm32f7_i2c_dev *i2c_dev = i2c_get_adapdata(i2c_adap); + struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg; + unsigned long time_left; + int ret; + + i2c_dev->msg = msgs; + i2c_dev->msg_num = num; + i2c_dev->msg_id = 0; + + ret = clk_enable(i2c_dev->clk); + if (ret) { + dev_err(i2c_dev->dev, "Failed to enable clock\n"); + return ret; + } + + ret = stm32f7_i2c_wait_free_bus(i2c_dev); + if (ret) + goto clk_free; + + stm32f7_i2c_xfer_msg(i2c_dev, msgs); + + time_left = wait_for_completion_timeout(&i2c_dev->complete, + i2c_dev->adap.timeout); + ret = f7_msg->result; + + if (!time_left) { + dev_dbg(i2c_dev->dev, "Access to slave 0x%x timed out\n", + i2c_dev->msg->addr); + ret = -ETIMEDOUT; + } + +clk_free: + clk_disable(i2c_dev->clk); + + return (ret < 0) ? 
ret : num; +} + +static u32 stm32f7_i2c_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static struct i2c_algorithm stm32f7_i2c_algo = { + .master_xfer = stm32f7_i2c_xfer, + .functionality = stm32f7_i2c_func, +}; + +static int stm32f7_i2c_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct stm32f7_i2c_dev *i2c_dev; + const struct stm32f7_i2c_setup *setup; + struct resource *res; + u32 irq_error, irq_event, clk_rate, rise_time, fall_time; + struct i2c_adapter *adap; + struct reset_control *rst; + int ret; + + i2c_dev = devm_kzalloc(&pdev->dev, sizeof(*i2c_dev), GFP_KERNEL); + if (!i2c_dev) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + i2c_dev->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(i2c_dev->base)) + return PTR_ERR(i2c_dev->base); + + irq_event = irq_of_parse_and_map(np, 0); + if (!irq_event) { + dev_err(&pdev->dev, "IRQ event missing or invalid\n"); + return -EINVAL; + } + + irq_error = irq_of_parse_and_map(np, 1); + if (!irq_error) { + dev_err(&pdev->dev, "IRQ error missing or invalid\n"); + return -EINVAL; + } + + i2c_dev->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(i2c_dev->clk)) { + dev_err(&pdev->dev, "Error: Missing controller clock\n"); + return PTR_ERR(i2c_dev->clk); + } + ret = clk_prepare_enable(i2c_dev->clk); + if (ret) { + dev_err(&pdev->dev, "Failed to prepare_enable clock\n"); + return ret; + } + + i2c_dev->speed = STM32_I2C_SPEED_STANDARD; + ret = device_property_read_u32(&pdev->dev, "clock-frequency", + &clk_rate); + if (!ret && clk_rate >= 1000000) + i2c_dev->speed = STM32_I2C_SPEED_FAST_PLUS; + else if (!ret && clk_rate >= 400000) + i2c_dev->speed = STM32_I2C_SPEED_FAST; + else if (!ret && clk_rate >= 100000) + i2c_dev->speed = STM32_I2C_SPEED_STANDARD; + + rst = devm_reset_control_get(&pdev->dev, NULL); + if (IS_ERR(rst)) { + dev_err(&pdev->dev, "Error: Missing controller reset\n"); + ret = PTR_ERR(rst); + goto clk_free; + } + reset_control_assert(rst); + udelay(2); + reset_control_deassert(rst); + + i2c_dev->dev = &pdev->dev; + + ret = devm_request_irq(&pdev->dev, irq_event, stm32f7_i2c_isr_event, 0, + pdev->name, i2c_dev); + if (ret) { + dev_err(&pdev->dev, "Failed to request irq event %i\n", + irq_event); + goto clk_free; + } + + ret = devm_request_irq(&pdev->dev, irq_error, stm32f7_i2c_isr_error, 0, + pdev->name, i2c_dev); + if (ret) { + dev_err(&pdev->dev, "Failed to request irq error %i\n", + irq_error); + goto clk_free; + } + + setup = of_device_get_match_data(&pdev->dev); + i2c_dev->setup->rise_time = setup->rise_time; + i2c_dev->setup->fall_time = setup->fall_time; + i2c_dev->setup->dnf = setup->dnf; + i2c_dev->setup->analog_filter = setup->analog_filter; + + ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-rising-time-ns", + &rise_time); + if (!ret) + i2c_dev->setup->rise_time = rise_time; + + ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-falling-time-ns", + &fall_time); + if (!ret) + i2c_dev->setup->fall_time = fall_time; + + ret = stm32f7_i2c_setup_timing(i2c_dev, i2c_dev->setup); + if (ret) + goto clk_free; + + stm32f7_i2c_hw_config(i2c_dev); + + adap = &i2c_dev->adap; + i2c_set_adapdata(adap, i2c_dev); + snprintf(adap->name, sizeof(adap->name), "STM32F7 I2C(%pa)", + &res->start); + adap->owner = THIS_MODULE; + adap->timeout = 2 * HZ; + adap->retries = 3; + adap->algo = &stm32f7_i2c_algo; + adap->dev.parent = &pdev->dev; + adap->dev.of_node = pdev->dev.of_node; + + init_completion(&i2c_dev->complete); + + ret 
= i2c_add_adapter(adap); + if (ret) + goto clk_free; + + platform_set_drvdata(pdev, i2c_dev); + + clk_disable(i2c_dev->clk); + + dev_info(i2c_dev->dev, "STM32F7 I2C-%d bus adapter\n", adap->nr); + + return 0; + +clk_free: + clk_disable_unprepare(i2c_dev->clk); + + return ret; +} + +static int stm32f7_i2c_remove(struct platform_device *pdev) +{ + struct stm32f7_i2c_dev *i2c_dev = platform_get_drvdata(pdev); + + i2c_del_adapter(&i2c_dev->adap); + + clk_unprepare(i2c_dev->clk); + + return 0; +} + +static const struct of_device_id stm32f7_i2c_match[] = { + { .compatible = "st,stm32f7-i2c", .data = &stm32f7_setup}, + {}, +}; +MODULE_DEVICE_TABLE(of, stm32f7_i2c_match); + +static struct platform_driver stm32f7_i2c_driver = { + .driver = { + .name = "stm32f7-i2c", + .of_match_table = stm32f7_i2c_match, + }, + .probe = stm32f7_i2c_probe, + .remove = stm32f7_i2c_remove, +}; + +module_platform_driver(stm32f7_i2c_driver); + +MODULE_AUTHOR("M'boumba Cedric Madianga <cedric.madianga@gmail.com>"); +MODULE_DESCRIPTION("STMicroelectronics STM32F7 I2C driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c index 70ad19c4c73e..88bdafb297f5 100644 --- a/drivers/infiniband/core/security.c +++ b/drivers/infiniband/core/security.c @@ -432,8 +432,10 @@ int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev) atomic_set(&qp->qp_sec->error_list_count, 0); init_completion(&qp->qp_sec->error_complete); ret = security_ib_alloc_security(&qp->qp_sec->security); - if (ret) + if (ret) { kfree(qp->qp_sec); + qp->qp_sec = NULL; + } return ret; } diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 4ab30d832ac5..52a2cf2d83aa 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -3869,15 +3869,15 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file, resp.raw_packet_caps = attr.raw_packet_caps; resp.response_length += sizeof(resp.raw_packet_caps); - if (ucore->outlen < resp.response_length + sizeof(resp.xrq_caps)) + if (ucore->outlen < resp.response_length + sizeof(resp.tm_caps)) goto end; - resp.xrq_caps.max_rndv_hdr_size = attr.xrq_caps.max_rndv_hdr_size; - resp.xrq_caps.max_num_tags = attr.xrq_caps.max_num_tags; - resp.xrq_caps.max_ops = attr.xrq_caps.max_ops; - resp.xrq_caps.max_sge = attr.xrq_caps.max_sge; - resp.xrq_caps.flags = attr.xrq_caps.flags; - resp.response_length += sizeof(resp.xrq_caps); + resp.tm_caps.max_rndv_hdr_size = attr.tm_caps.max_rndv_hdr_size; + resp.tm_caps.max_num_tags = attr.tm_caps.max_num_tags; + resp.tm_caps.max_ops = attr.tm_caps.max_ops; + resp.tm_caps.max_sge = attr.tm_caps.max_sge; + resp.tm_caps.flags = attr.tm_caps.flags; + resp.response_length += sizeof(resp.tm_caps); end: err = ib_copy_to_udata(ucore, &resp, resp.response_length); return err; diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index ee9e27dc799b..de57d6c11a25 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -1646,7 +1646,7 @@ static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid) */ if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) { if (attr.qp_state >= IB_QPS_INIT) { - if (qp->device->get_link_layer(qp->device, attr.port_num) != + if (rdma_port_get_link_layer(qp->device, attr.port_num) != IB_LINK_LAYER_INFINIBAND) return true; goto lid_check; @@ -1655,7 +1655,7 @@ static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid) /* Can't get a quick answer, iterate over all 
ports */ for (port = 0; port < qp->device->phys_port_cnt; port++) - if (qp->device->get_link_layer(qp->device, port) != + if (rdma_port_get_link_layer(qp->device, port) != IB_LINK_LAYER_INFINIBAND) num_eth_ports++; diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h index b3ad37fec578..ecbac91b2e14 100644 --- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h +++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h @@ -93,11 +93,13 @@ struct bnxt_re_dev { struct ib_device ibdev; struct list_head list; unsigned long flags; -#define BNXT_RE_FLAG_NETDEV_REGISTERED 0 -#define BNXT_RE_FLAG_IBDEV_REGISTERED 1 -#define BNXT_RE_FLAG_GOT_MSIX 2 -#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 8 -#define BNXT_RE_FLAG_QOS_WORK_REG 16 +#define BNXT_RE_FLAG_NETDEV_REGISTERED 0 +#define BNXT_RE_FLAG_IBDEV_REGISTERED 1 +#define BNXT_RE_FLAG_GOT_MSIX 2 +#define BNXT_RE_FLAG_HAVE_L2_REF 3 +#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 4 +#define BNXT_RE_FLAG_QOS_WORK_REG 5 +#define BNXT_RE_FLAG_TASK_IN_PROG 6 struct net_device *netdev; unsigned int version, major, minor; struct bnxt_en_dev *en_dev; @@ -108,6 +110,8 @@ struct bnxt_re_dev { struct delayed_work worker; u8 cur_prio_map; + u8 active_speed; + u8 active_width; /* FP Notification Queue (CQ & SRQ) */ struct tasklet_struct nq_task; diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 01eee15bbd65..0d89621d9fe8 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -259,14 +259,9 @@ int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num, port_attr->sm_sl = 0; port_attr->subnet_timeout = 0; port_attr->init_type_reply = 0; - /* call the underlying netdev's ethtool hooks to query speed settings - * for which we acquire rtnl_lock _only_ if it's registered with - * IB stack to avoid race in the NETDEV_UNREG path - */ - if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) - if (ib_get_eth_speed(ibdev, port_num, &port_attr->active_speed, - &port_attr->active_width)) - return -EINVAL; + port_attr->active_speed = rdev->active_speed; + port_attr->active_width = rdev->active_width; + return 0; } @@ -319,6 +314,7 @@ int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num, struct bnxt_re_gid_ctx *ctx, **ctx_tbl; struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl; + struct bnxt_qplib_gid *gid_to_del; /* Delete the entry from the hardware */ ctx = *context; @@ -328,11 +324,25 @@ int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num, if (sgid_tbl && sgid_tbl->active) { if (ctx->idx >= sgid_tbl->max) return -EINVAL; + gid_to_del = &sgid_tbl->tbl[ctx->idx]; + /* DEL_GID is called in WQ context(netdevice_event_work_handler) + * or via the ib_unregister_device path. In the former case QP1 + * may not be destroyed yet, in which case just return as FW + * needs that entry to be present and will fail it's deletion. 
+ * We could get invoked again after QP1 is destroyed OR get an + * ADD_GID call with a different GID value for the same index + * where we issue MODIFY_GID cmd to update the GID entry -- TBD + */ + if (ctx->idx == 0 && + rdma_link_local_addr((struct in6_addr *)gid_to_del) && + ctx->refcnt == 1 && rdev->qp1_sqp) { + dev_dbg(rdev_to_dev(rdev), + "Trying to delete GID0 while QP1 is alive\n"); + return -EFAULT; + } ctx->refcnt--; if (!ctx->refcnt) { - rc = bnxt_qplib_del_sgid(sgid_tbl, - &sgid_tbl->tbl[ctx->idx], - true); + rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true); if (rc) { dev_err(rdev_to_dev(rdev), "Failed to remove GID: %#x", rc); @@ -816,6 +826,8 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp) kfree(rdev->sqp_ah); kfree(rdev->qp1_sqp); + rdev->qp1_sqp = NULL; + rdev->sqp_ah = NULL; } if (!IS_ERR_OR_NULL(qp->rumem)) @@ -1436,11 +1448,14 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU; qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu); + qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu); } else if (qp_attr->qp_state == IB_QPS_RTR) { qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU; qp->qplib_qp.path_mtu = __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu)); + qp->qplib_qp.mtu = + ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu)); } if (qp_attr_mask & IB_QP_TIMEOUT) { @@ -1551,43 +1566,46 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, { struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); struct bnxt_re_dev *rdev = qp->rdev; - struct bnxt_qplib_qp qplib_qp; + struct bnxt_qplib_qp *qplib_qp; int rc; - memset(&qplib_qp, 0, sizeof(struct bnxt_qplib_qp)); - qplib_qp.id = qp->qplib_qp.id; - qplib_qp.ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index; + qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL); + if (!qplib_qp) + return -ENOMEM; + + qplib_qp->id = qp->qplib_qp.id; + qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index; - rc = bnxt_qplib_query_qp(&rdev->qplib_res, &qplib_qp); + rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp); if (rc) { dev_err(rdev_to_dev(rdev), "Failed to query HW QP"); - return rc; + goto out; } - qp_attr->qp_state = __to_ib_qp_state(qplib_qp.state); - qp_attr->en_sqd_async_notify = qplib_qp.en_sqd_async_notify ? 1 : 0; - qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp.access); - qp_attr->pkey_index = qplib_qp.pkey_index; - qp_attr->qkey = qplib_qp.qkey; + qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state); + qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 
1 : 0; + qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access); + qp_attr->pkey_index = qplib_qp->pkey_index; + qp_attr->qkey = qplib_qp->qkey; qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; - rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp.ah.flow_label, - qplib_qp.ah.host_sgid_index, - qplib_qp.ah.hop_limit, - qplib_qp.ah.traffic_class); - rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp.ah.dgid.data); - rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp.ah.sl); - ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp.ah.dmac); - qp_attr->path_mtu = __to_ib_mtu(qplib_qp.path_mtu); - qp_attr->timeout = qplib_qp.timeout; - qp_attr->retry_cnt = qplib_qp.retry_cnt; - qp_attr->rnr_retry = qplib_qp.rnr_retry; - qp_attr->min_rnr_timer = qplib_qp.min_rnr_timer; - qp_attr->rq_psn = qplib_qp.rq.psn; - qp_attr->max_rd_atomic = qplib_qp.max_rd_atomic; - qp_attr->sq_psn = qplib_qp.sq.psn; - qp_attr->max_dest_rd_atomic = qplib_qp.max_dest_rd_atomic; - qp_init_attr->sq_sig_type = qplib_qp.sig_type ? IB_SIGNAL_ALL_WR : - IB_SIGNAL_REQ_WR; - qp_attr->dest_qp_num = qplib_qp.dest_qpn; + rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label, + qplib_qp->ah.host_sgid_index, + qplib_qp->ah.hop_limit, + qplib_qp->ah.traffic_class); + rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data); + rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl); + ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac); + qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu); + qp_attr->timeout = qplib_qp->timeout; + qp_attr->retry_cnt = qplib_qp->retry_cnt; + qp_attr->rnr_retry = qplib_qp->rnr_retry; + qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer; + qp_attr->rq_psn = qplib_qp->rq.psn; + qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic; + qp_attr->sq_psn = qplib_qp->sq.psn; + qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic; + qp_init_attr->sq_sig_type = qplib_qp->sig_type ? 
IB_SIGNAL_ALL_WR : + IB_SIGNAL_REQ_WR; + qp_attr->dest_qp_num = qplib_qp->dest_qpn; qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe; qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge; @@ -1596,7 +1614,9 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data; qp_init_attr->cap = qp_attr->cap; - return 0; +out: + kfree(qplib_qp); + return rc; } /* Routine for sending QP1 packets for RoCE V1 an V2 @@ -1908,6 +1928,7 @@ static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr, switch (wr->opcode) { case IB_WR_ATOMIC_CMP_AND_SWP: wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP; + wqe->atomic.cmp_data = atomic_wr(wr)->compare_add; wqe->atomic.swap_data = atomic_wr(wr)->swap; break; case IB_WR_ATOMIC_FETCH_AND_ADD: @@ -3062,7 +3083,7 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr) return rc; } - if (mr->npages && mr->pages) { + if (mr->pages) { rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res, &mr->qplib_frpl); kfree(mr->pages); diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 82d1cbc27aee..e7450ea92aa9 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -1161,6 +1161,8 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) } } set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags); + ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed, + &rdev->active_width); bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE); bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_GID_CHANGE); @@ -1255,10 +1257,14 @@ static void bnxt_re_task(struct work_struct *work) else if (netif_carrier_ok(rdev->netdev)) bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE); + ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed, + &rdev->active_width); break; default: break; } + smp_mb__before_atomic(); + clear_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags); kfree(re_work); } @@ -1317,6 +1323,11 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier, break; case NETDEV_UNREGISTER: + /* netdev notifier will call NETDEV_UNREGISTER again later since + * we are still holding the reference to the netdev + */ + if (test_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags)) + goto exit; bnxt_re_ib_unreg(rdev, false); bnxt_re_remove_one(rdev); bnxt_re_dev_unreg(rdev); @@ -1335,6 +1346,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier, re_work->vlan_dev = (real_dev == netdev ? 
NULL : netdev); INIT_WORK(&re_work->work, bnxt_re_task); + set_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags); queue_work(bnxt_re_wq, &re_work->work); } } @@ -1375,6 +1387,22 @@ err_netdev: static void __exit bnxt_re_mod_exit(void) { + struct bnxt_re_dev *rdev; + LIST_HEAD(to_be_deleted); + + mutex_lock(&bnxt_re_dev_lock); + /* Free all adapter allocated resources */ + if (!list_empty(&bnxt_re_dev_list)) + list_splice_init(&bnxt_re_dev_list, &to_be_deleted); + mutex_unlock(&bnxt_re_dev_lock); + + list_for_each_entry(rdev, &to_be_deleted, list) { + dev_info(rdev_to_dev(rdev), "Unregistering Device"); + bnxt_re_dev_stop(rdev); + bnxt_re_ib_unreg(rdev, true); + bnxt_re_remove_one(rdev); + bnxt_re_dev_unreg(rdev); + } unregister_netdevice_notifier(&bnxt_re_netdev_notifier); if (bnxt_re_wq) destroy_workqueue(bnxt_re_wq); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 391bb7006e8f..2bdb1562bd21 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c @@ -107,6 +107,9 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req, return -EINVAL; } + if (test_bit(FIRMWARE_TIMED_OUT, &rcfw->flags)) + return -ETIMEDOUT; + /* Cmdq are in 16-byte units, each request can consume 1 or more * cmdqe */ @@ -226,6 +229,7 @@ int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, /* timed out */ dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timedout (%d)msec", cookie, opcode, RCFW_CMD_WAIT_TIME_MS); + set_bit(FIRMWARE_TIMED_OUT, &rcfw->flags); return rc; } diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h index 0ed312f17c8d..85b16da287f9 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h @@ -162,8 +162,9 @@ struct bnxt_qplib_rcfw { unsigned long *cmdq_bitmap; u32 bmap_size; unsigned long flags; -#define FIRMWARE_INITIALIZED_FLAG 1 +#define FIRMWARE_INITIALIZED_FLAG BIT(0) #define FIRMWARE_FIRST_FLAG BIT(31) +#define FIRMWARE_TIMED_OUT BIT(3) wait_queue_head_t waitq; int (*aeq_handler)(struct bnxt_qplib_rcfw *, struct creq_func_event *); diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index ceaa2fa54d32..daf7a56e5d7e 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -2333,9 +2333,14 @@ static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb) unsigned int stid = GET_TID(rpl); struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid); + if (!ep) { + pr_debug("%s stid %d lookup failure!\n", __func__, stid); + goto out; + } pr_debug("%s ep %p\n", __func__, ep); c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); c4iw_put_ep(&ep->com); +out: return 0; } @@ -2594,9 +2599,9 @@ fail: c4iw_put_ep(&child_ep->com); reject: reject_cr(dev, hwtid, skb); +out: if (parent_ep) c4iw_put_ep(&parent_ep->com); -out: return 0; } @@ -3457,7 +3462,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) cm_id->provider_data = ep; goto out; } - + remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid); cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, ep->com.local_addr.ss_family); fail2: diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index b2ed4b9cda6e..0be42787759f 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -1066,6 +1066,8 @@ static int read_idle_sma(struct hfi1_devdata *dd, u64 *data); static int 
thermal_init(struct hfi1_devdata *dd); static void update_statusp(struct hfi1_pportdata *ppd, u32 state); +static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd, + int msecs); static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state, int msecs); static void log_state_transition(struct hfi1_pportdata *ppd, u32 state); @@ -8238,6 +8240,7 @@ static irqreturn_t general_interrupt(int irq, void *data) u64 regs[CCE_NUM_INT_CSRS]; u32 bit; int i; + irqreturn_t handled = IRQ_NONE; this_cpu_inc(*dd->int_counter); @@ -8258,9 +8261,10 @@ static irqreturn_t general_interrupt(int irq, void *data) for_each_set_bit(bit, (unsigned long *)®s[0], CCE_NUM_INT_CSRS * 64) { is_interrupt(dd, bit); + handled = IRQ_HANDLED; } - return IRQ_HANDLED; + return handled; } static irqreturn_t sdma_interrupt(int irq, void *data) @@ -9413,7 +9417,7 @@ static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable) write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask); } -void reset_qsfp(struct hfi1_pportdata *ppd) +int reset_qsfp(struct hfi1_pportdata *ppd) { struct hfi1_devdata *dd = ppd->dd; u64 mask, qsfp_mask; @@ -9443,6 +9447,13 @@ void reset_qsfp(struct hfi1_pportdata *ppd) * for alarms and warnings */ set_qsfp_int_n(ppd, 1); + + /* + * After the reset, AOC transmitters are enabled by default. They need + * to be turned off to complete the QSFP setup before they can be + * enabled again. + */ + return set_qsfp_tx(ppd, 0); } static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd, @@ -10305,6 +10316,7 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason) { struct hfi1_devdata *dd = ppd->dd; u32 previous_state; + int offline_state_ret; int ret; update_lcb_cache(dd); @@ -10326,28 +10338,11 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason) ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT); - /* - * Wait for offline transition. It can take a while for - * the link to go down. - */ - ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 10000); - if (ret < 0) - return ret; - - /* - * Now in charge of LCB - must be after the physical state is - * offline.quiet and before host_link_state is changed. - */ - set_host_lcb_access(dd); - write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */ - - /* make sure the logical state is also down */ - ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000); - if (ret) - force_logical_link_state_down(ppd); - - ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */ + offline_state_ret = wait_phys_link_offline_substates(ppd, 10000); + if (offline_state_ret < 0) + return offline_state_ret; + /* Disabling AOC transmitters */ if (ppd->port_type == PORT_TYPE_QSFP && ppd->qsfp_info.limiting_active && qsfp_mod_present(ppd)) { @@ -10365,6 +10360,30 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason) } /* + * Wait for the offline.Quiet transition if it hasn't happened yet. It + * can take a while for the link to go down. + */ + if (offline_state_ret != PLS_OFFLINE_QUIET) { + ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 30000); + if (ret < 0) + return ret; + } + + /* + * Now in charge of LCB - must be after the physical state is + * offline.quiet and before host_link_state is changed. 
+ */ + set_host_lcb_access(dd); + write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */ + + /* make sure the logical state is also down */ + ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000); + if (ret) + force_logical_link_state_down(ppd); + + ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */ + + /* * The LNI has a mandatory wait time after the physical state * moves to Offline.Quiet. The wait time may be different * depending on how the link went down. The 8051 firmware @@ -10396,6 +10415,9 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason) & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) { /* went down while attempting link up */ check_lni_states(ppd); + + /* The QSFP doesn't need to be reset on LNI failure */ + ppd->qsfp_info.reset_needed = 0; } /* the active link width (downgrade) is 0 on link down */ @@ -12804,6 +12826,39 @@ static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state, return 0; } +/* + * wait_phys_link_offline_substates - wait for any offline substate + * @ppd: port device + * @msecs: the number of milliseconds to wait + * + * Wait up to msecs milliseconds for any offline physical link + * state change to occur. + * Returns the last read physical state if an offline substate is reached, + * otherwise -ETIMEDOUT. + */ +static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd, + int msecs) +{ + u32 read_state; + unsigned long timeout; + + timeout = jiffies + msecs_to_jiffies(msecs); + while (1) { + read_state = read_physical_state(ppd->dd); + if ((read_state & 0xF0) == PLS_OFFLINE) + break; + if (time_after(jiffies, timeout)) { + dd_dev_err(ppd->dd, + "timeout waiting for phy link offline.quiet substates. Read state 0x%x, %dms\n", + read_state, msecs); + return -ETIMEDOUT; + } + usleep_range(1950, 2050); /* sleep 2ms-ish */ + } + + log_state_transition(ppd, read_state); + return read_state; +} + #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \ (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK) diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h index b8345a60a0fb..50b8645d0b87 100644 --- a/drivers/infiniband/hw/hfi1/chip.h +++ b/drivers/infiniband/hw/hfi1/chip.h @@ -204,6 +204,7 @@ #define PLS_OFFLINE_READY_TO_QUIET_LT 0x92 #define PLS_OFFLINE_REPORT_FAILURE 0x93 #define PLS_OFFLINE_READY_TO_QUIET_BCC 0x94 +#define PLS_OFFLINE_QUIET_DURATION 0x95 #define PLS_POLLING 0x20 #define PLS_POLLING_QUIET 0x20 #define PLS_POLLING_ACTIVE 0x21 @@ -722,7 +723,7 @@ void handle_link_downgrade(struct work_struct *work); void handle_link_bounce(struct work_struct *work); void handle_start_link(struct work_struct *work); void handle_sma_message(struct work_struct *work); -void reset_qsfp(struct hfi1_pportdata *ppd); +int reset_qsfp(struct hfi1_pportdata *ppd); void qsfp_event(struct work_struct *work); void start_freeze_handling(struct hfi1_pportdata *ppd, int flags); int send_idle_sma(struct hfi1_devdata *dd, u64 message); diff --git a/drivers/infiniband/hw/hfi1/eprom.c b/drivers/infiniband/hw/hfi1/eprom.c index d46b17107901..1613af1c58d9 100644 --- a/drivers/infiniband/hw/hfi1/eprom.c +++ b/drivers/infiniband/hw/hfi1/eprom.c @@ -204,7 +204,10 @@ done_asic: return ret; } -/* magic character sequence that trails an image */ +/* magic character sequence that begins an image */ +#define IMAGE_START_MAGIC "APO=" + +/* magic character sequence that might trail an image */ #define IMAGE_TRAIL_MAGIC "egamiAPO" /* EPROM file types */ @@ -250,6 +253,7 @@ static int read_partition_platform_config(struct 
hfi1_devdata *dd, void **data, { void *buffer; void *p; + u32 length; int ret; buffer = kmalloc(P1_SIZE, GFP_KERNEL); @@ -262,15 +266,21 @@ static int read_partition_platform_config(struct hfi1_devdata *dd, void **data, return ret; } - /* scan for image magic that may trail the actual data */ - p = strnstr(buffer, IMAGE_TRAIL_MAGIC, P1_SIZE); - if (!p) { + /* config partition is valid only if it starts with IMAGE_START_MAGIC */ + if (memcmp(buffer, IMAGE_START_MAGIC, strlen(IMAGE_START_MAGIC))) { kfree(buffer); return -ENOENT; } + /* scan for image magic that may trail the actual data */ + p = strnstr(buffer, IMAGE_TRAIL_MAGIC, P1_SIZE); + if (p) + length = p - buffer; + else + length = P1_SIZE; + *data = buffer; - *size = p - buffer; + *size = length; return 0; } diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index 2bc89260235a..d9a1e9893136 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c @@ -930,15 +930,8 @@ static int assign_ctxt(struct hfi1_filedata *fd, struct hfi1_user_info *uinfo) switch (ret) { case 0: ret = setup_base_ctxt(fd, uctxt); - if (uctxt->subctxt_cnt) { - /* - * Base context is done (successfully or not), notify - * anybody using a sub-context that is waiting for - * this completion. - */ - clear_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags); - wake_up(&uctxt->wait); - } + if (ret) + deallocate_ctxt(uctxt); break; case 1: ret = complete_subctxt(fd); @@ -1305,25 +1298,25 @@ static int setup_base_ctxt(struct hfi1_filedata *fd, /* Now allocate the RcvHdr queue and eager buffers. */ ret = hfi1_create_rcvhdrq(dd, uctxt); if (ret) - return ret; + goto done; ret = hfi1_setup_eagerbufs(uctxt); if (ret) - goto setup_failed; + goto done; /* If sub-contexts are enabled, do the appropriate setup */ if (uctxt->subctxt_cnt) ret = setup_subctxt(uctxt); if (ret) - goto setup_failed; + goto done; ret = hfi1_alloc_ctxt_rcv_groups(uctxt); if (ret) - goto setup_failed; + goto done; ret = init_user_ctxt(fd, uctxt); if (ret) - goto setup_failed; + goto done; user_init(uctxt); @@ -1331,12 +1324,22 @@ static int setup_base_ctxt(struct hfi1_filedata *fd, fd->uctxt = uctxt; hfi1_rcd_get(uctxt); - return 0; +done: + if (uctxt->subctxt_cnt) { + /* + * On error, set the failed bit so sub-contexts will clean up + * correctly. + */ + if (ret) + set_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags); -setup_failed: - /* Set the failed bit so sub-context init can do the right thing */ - set_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags); - deallocate_ctxt(uctxt); + /* + * Base context is done (successfully or not), notify anybody + * using a sub-context that is waiting for this completion. + */ + clear_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags); + wake_up(&uctxt->wait); + } return ret; } diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c index 82447b7cdda1..09e50fd2a08f 100644 --- a/drivers/infiniband/hw/hfi1/pcie.c +++ b/drivers/infiniband/hw/hfi1/pcie.c @@ -68,7 +68,7 @@ /* * Code to adjust PCIe capabilities. */ -static int tune_pcie_caps(struct hfi1_devdata *); +static void tune_pcie_caps(struct hfi1_devdata *); /* * Do all the common PCIe setup and initialization. 
@@ -351,7 +351,7 @@ int pcie_speeds(struct hfi1_devdata *dd) */ int request_msix(struct hfi1_devdata *dd, u32 msireq) { - int nvec, ret; + int nvec; nvec = pci_alloc_irq_vectors(dd->pcidev, 1, msireq, PCI_IRQ_MSIX | PCI_IRQ_LEGACY); @@ -360,12 +360,7 @@ int request_msix(struct hfi1_devdata *dd, u32 msireq) return nvec; } - ret = tune_pcie_caps(dd); - if (ret) { - dd_dev_err(dd, "tune_pcie_caps() failed: %d\n", ret); - pci_free_irq_vectors(dd->pcidev); - return ret; - } + tune_pcie_caps(dd); /* check for legacy IRQ */ if (nvec == 1 && !dd->pcidev->msix_enabled) @@ -502,7 +497,7 @@ uint aspm_mode = ASPM_MODE_DISABLED; module_param_named(aspm, aspm_mode, uint, S_IRUGO); MODULE_PARM_DESC(aspm, "PCIe ASPM: 0: disable, 1: enable, 2: dynamic"); -static int tune_pcie_caps(struct hfi1_devdata *dd) +static void tune_pcie_caps(struct hfi1_devdata *dd) { struct pci_dev *parent; u16 rc_mpss, rc_mps, ep_mpss, ep_mps; @@ -513,22 +508,14 @@ static int tune_pcie_caps(struct hfi1_devdata *dd) * Turn on extended tags in DevCtl in case the BIOS has turned it off * to improve WFR SDMA bandwidth */ - ret = pcie_capability_read_word(dd->pcidev, - PCI_EXP_DEVCTL, &ectl); - if (ret) { - dd_dev_err(dd, "Unable to read from PCI config\n"); - return ret; - } - - if (!(ectl & PCI_EXP_DEVCTL_EXT_TAG)) { + ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &ectl); + if ((!ret) && !(ectl & PCI_EXP_DEVCTL_EXT_TAG)) { dd_dev_info(dd, "Enabling PCIe extended tags\n"); ectl |= PCI_EXP_DEVCTL_EXT_TAG; ret = pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL, ectl); - if (ret) { - dd_dev_err(dd, "Unable to write to PCI config\n"); - return ret; - } + if (ret) + dd_dev_info(dd, "Unable to write to PCI config\n"); } /* Find out supported and configured values for parent (root) */ parent = dd->pcidev->bus->self; @@ -536,15 +523,22 @@ static int tune_pcie_caps(struct hfi1_devdata *dd) * The driver cannot perform the tuning if it does not have * access to the upstream component. */ - if (!parent) - return -EINVAL; + if (!parent) { + dd_dev_info(dd, "Parent not found\n"); + return; + } if (!pci_is_root_bus(parent->bus)) { dd_dev_info(dd, "Parent not root\n"); - return -EINVAL; + return; + } + if (!pci_is_pcie(parent)) { + dd_dev_info(dd, "Parent is not PCI Express capable\n"); + return; + } + if (!pci_is_pcie(dd->pcidev)) { + dd_dev_info(dd, "PCI device is not PCI Express capable\n"); + return; } - - if (!pci_is_pcie(parent) || !pci_is_pcie(dd->pcidev)) - return -EINVAL; rc_mpss = parent->pcie_mpss; rc_mps = ffs(pcie_get_mps(parent)) - 8; /* Find out supported and configured values for endpoint (us) */ @@ -590,8 +584,6 @@ static int tune_pcie_caps(struct hfi1_devdata *dd) ep_mrrs = max_mrrs; pcie_set_readrq(dd->pcidev, ep_mrrs); } - - return 0; } /* End of PCIe capability tuning */ diff --git a/drivers/infiniband/hw/hfi1/platform.c b/drivers/infiniband/hw/hfi1/platform.c index a8af96d2b1b0..d486355880cb 100644 --- a/drivers/infiniband/hw/hfi1/platform.c +++ b/drivers/infiniband/hw/hfi1/platform.c @@ -790,7 +790,9 @@ static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset, * reuse of stale settings established in our previous pass through. 
*/ if (ppd->qsfp_info.reset_needed) { - reset_qsfp(ppd); + ret = reset_qsfp(ppd); + if (ret) + return ret; refresh_qsfp_cache(ppd, &ppd->qsfp_info); } else { ppd->qsfp_info.reset_needed = 1; diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h index 9b1566468744..a65e4cbdce2f 100644 --- a/drivers/infiniband/hw/i40iw/i40iw.h +++ b/drivers/infiniband/hw/i40iw/i40iw.h @@ -201,7 +201,6 @@ enum init_completion_state { CEQ_CREATED, ILQ_CREATED, IEQ_CREATED, - INET_NOTIFIER, IP_ADDR_REGISTERED, RDMA_DEV_REGISTERED }; diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c index 14f36ba4e5be..5230dd3c938c 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_cm.c +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c @@ -1504,23 +1504,40 @@ static void i40iw_add_hte_node(struct i40iw_cm_core *cm_core, } /** - * listen_port_in_use - determine if port is in use - * @port: Listen port number + * i40iw_port_in_use - determine if port is in use + * @port: port number + * @active_side: flag for listener side vs active side */ -static bool i40iw_listen_port_in_use(struct i40iw_cm_core *cm_core, u16 port) +static bool i40iw_port_in_use(struct i40iw_cm_core *cm_core, u16 port, bool active_side) { struct i40iw_cm_listener *listen_node; + struct i40iw_cm_node *cm_node; unsigned long flags; bool ret = false; - spin_lock_irqsave(&cm_core->listen_list_lock, flags); - list_for_each_entry(listen_node, &cm_core->listen_nodes, list) { - if (listen_node->loc_port == port) { - ret = true; - break; + if (active_side) { + /* search connected node list */ + spin_lock_irqsave(&cm_core->ht_lock, flags); + list_for_each_entry(cm_node, &cm_core->connected_nodes, list) { + if (cm_node->loc_port == port) { + ret = true; + break; + } + } + if (!ret) + clear_bit(port, cm_core->active_side_ports); + spin_unlock_irqrestore(&cm_core->ht_lock, flags); + } else { + spin_lock_irqsave(&cm_core->listen_list_lock, flags); + list_for_each_entry(listen_node, &cm_core->listen_nodes, list) { + if (listen_node->loc_port == port) { + ret = true; + break; + } } + spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); } - spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); + return ret; } @@ -1868,7 +1885,7 @@ static int i40iw_dec_refcnt_listen(struct i40iw_cm_core *cm_core, spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); if (listener->iwdev) { - if (apbvt_del && !i40iw_listen_port_in_use(cm_core, listener->loc_port)) + if (apbvt_del && !i40iw_port_in_use(cm_core, listener->loc_port, false)) i40iw_manage_apbvt(listener->iwdev, listener->loc_port, I40IW_MANAGE_APBVT_DEL); @@ -2247,21 +2264,21 @@ static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node) if (cm_node->listener) { i40iw_dec_refcnt_listen(cm_core, cm_node->listener, 0, true); } else { - if (!i40iw_listen_port_in_use(cm_core, cm_node->loc_port) && - cm_node->apbvt_set) { + if (!i40iw_port_in_use(cm_core, cm_node->loc_port, true) && cm_node->apbvt_set) { i40iw_manage_apbvt(cm_node->iwdev, cm_node->loc_port, I40IW_MANAGE_APBVT_DEL); - i40iw_get_addr_info(cm_node, &nfo); - if (cm_node->qhash_set) { - i40iw_manage_qhash(cm_node->iwdev, - &nfo, - I40IW_QHASH_TYPE_TCP_ESTABLISHED, - I40IW_QHASH_MANAGE_TYPE_DELETE, - NULL, - false); - cm_node->qhash_set = 0; - } + cm_node->apbvt_set = 0; + } + i40iw_get_addr_info(cm_node, &nfo); + if (cm_node->qhash_set) { + i40iw_manage_qhash(cm_node->iwdev, + &nfo, + I40IW_QHASH_TYPE_TCP_ESTABLISHED, + I40IW_QHASH_MANAGE_TYPE_DELETE, + NULL, + false); + cm_node->qhash_set = 
0; } } @@ -3255,7 +3272,8 @@ static void i40iw_init_tcp_ctx(struct i40iw_cm_node *cm_node, tcp_info->snd_mss = cpu_to_le32(((u32)cm_node->tcp_cntxt.mss)); if (cm_node->vlan_id < VLAN_TAG_PRESENT) { tcp_info->insert_vlan_tag = true; - tcp_info->vlan_tag = cpu_to_le16(cm_node->vlan_id); + tcp_info->vlan_tag = cpu_to_le16(((u16)cm_node->user_pri << I40IW_VLAN_PRIO_SHIFT) | + cm_node->vlan_id); } if (cm_node->ipv4) { tcp_info->src_port = cpu_to_le16(cm_node->loc_port); @@ -3737,10 +3755,8 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) struct sockaddr_in *raddr; struct sockaddr_in6 *laddr6; struct sockaddr_in6 *raddr6; - bool qhash_set = false; - int apbvt_set = 0; - int err = 0; - enum i40iw_status_code status; + int ret = 0; + unsigned long flags; ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn); if (!ibqp) @@ -3789,32 +3805,6 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) cm_info.user_pri = rt_tos2priority(cm_id->tos); i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "%s TOS:[%d] UP:[%d]\n", __func__, cm_id->tos, cm_info.user_pri); - if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) || - (!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32, - raddr6->sin6_addr.in6_u.u6_addr32, - sizeof(laddr6->sin6_addr.in6_u.u6_addr32)))) { - status = i40iw_manage_qhash(iwdev, - &cm_info, - I40IW_QHASH_TYPE_TCP_ESTABLISHED, - I40IW_QHASH_MANAGE_TYPE_ADD, - NULL, - true); - if (status) - return -EINVAL; - qhash_set = true; - } - status = i40iw_manage_apbvt(iwdev, cm_info.loc_port, I40IW_MANAGE_APBVT_ADD); - if (status) { - i40iw_manage_qhash(iwdev, - &cm_info, - I40IW_QHASH_TYPE_TCP_ESTABLISHED, - I40IW_QHASH_MANAGE_TYPE_DELETE, - NULL, - false); - return -EINVAL; - } - - apbvt_set = 1; cm_id->add_ref(cm_id); cm_node = i40iw_create_cm_node(&iwdev->cm_core, iwdev, conn_param->private_data_len, @@ -3822,17 +3812,40 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) &cm_info); if (IS_ERR(cm_node)) { - err = PTR_ERR(cm_node); - goto err_out; + ret = PTR_ERR(cm_node); + cm_id->rem_ref(cm_id); + return ret; + } + + if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) || + (!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32, + raddr6->sin6_addr.in6_u.u6_addr32, + sizeof(laddr6->sin6_addr.in6_u.u6_addr32)))) { + if (i40iw_manage_qhash(iwdev, &cm_info, I40IW_QHASH_TYPE_TCP_ESTABLISHED, + I40IW_QHASH_MANAGE_TYPE_ADD, NULL, true)) { + ret = -EINVAL; + goto err; + } + cm_node->qhash_set = true; } + spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags); + if (!test_and_set_bit(cm_info.loc_port, iwdev->cm_core.active_side_ports)) { + spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags); + if (i40iw_manage_apbvt(iwdev, cm_info.loc_port, I40IW_MANAGE_APBVT_ADD)) { + ret = -EINVAL; + goto err; + } + } else { + spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags); + } + + cm_node->apbvt_set = true; i40iw_record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord); if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO && !cm_node->ord_size) cm_node->ord_size = 1; - cm_node->apbvt_set = apbvt_set; - cm_node->qhash_set = qhash_set; iwqp->cm_node = cm_node; cm_node->iwqp = iwqp; iwqp->cm_id = cm_id; @@ -3840,11 +3853,9 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) if (cm_node->state != I40IW_CM_STATE_OFFLOADED) { cm_node->state = I40IW_CM_STATE_SYN_SENT; - err = i40iw_send_syn(cm_node, 0); - if (err) { - i40iw_rem_ref_cm_node(cm_node); - 
goto err_out; - } + ret = i40iw_send_syn(cm_node, 0); + if (ret) + goto err; } i40iw_debug(cm_node->dev, @@ -3853,9 +3864,10 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) cm_node->rem_port, cm_node, cm_node->cm_id); + return 0; -err_out: +err: if (cm_info.ipv4) i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, @@ -3867,22 +3879,10 @@ err_out: "Api - connect() FAILED: dest addr=%pI6", cm_info.rem_addr); - if (qhash_set) - i40iw_manage_qhash(iwdev, - &cm_info, - I40IW_QHASH_TYPE_TCP_ESTABLISHED, - I40IW_QHASH_MANAGE_TYPE_DELETE, - NULL, - false); - - if (apbvt_set && !i40iw_listen_port_in_use(&iwdev->cm_core, - cm_info.loc_port)) - i40iw_manage_apbvt(iwdev, - cm_info.loc_port, - I40IW_MANAGE_APBVT_DEL); + i40iw_rem_ref_cm_node(cm_node); cm_id->rem_ref(cm_id); iwdev->cm_core.stats_connect_errs++; - return err; + return ret; } /** diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.h b/drivers/infiniband/hw/i40iw/i40iw_cm.h index 2e52e38ffcf3..45abef76295b 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_cm.h +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.h @@ -71,6 +71,9 @@ #define I40IW_HW_IRD_SETTING_32 32 #define I40IW_HW_IRD_SETTING_64 64 +#define MAX_PORTS 65536 +#define I40IW_VLAN_PRIO_SHIFT 13 + enum ietf_mpa_flags { IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */ IETF_MPA_FLAGS_CRC = 0x40, /* receive Markers */ @@ -411,6 +414,8 @@ struct i40iw_cm_core { spinlock_t ht_lock; /* manage hash table */ spinlock_t listen_list_lock; /* listen list */ + unsigned long active_side_ports[BITS_TO_LONGS(MAX_PORTS)]; + u64 stats_nodes_created; u64 stats_nodes_destroyed; u64 stats_listen_created; diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c index cc742c3132c6..27590ae21881 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_main.c +++ b/drivers/infiniband/hw/i40iw/i40iw_main.c @@ -99,8 +99,6 @@ static struct notifier_block i40iw_net_notifier = { .notifier_call = i40iw_net_event }; -static atomic_t i40iw_notifiers_registered; - /** * i40iw_find_i40e_handler - find a handler given a client info * @ldev: pointer to a client info @@ -1376,11 +1374,20 @@ error: */ static void i40iw_register_notifiers(void) { - if (atomic_inc_return(&i40iw_notifiers_registered) == 1) { - register_inetaddr_notifier(&i40iw_inetaddr_notifier); - register_inet6addr_notifier(&i40iw_inetaddr6_notifier); - register_netevent_notifier(&i40iw_net_notifier); - } + register_inetaddr_notifier(&i40iw_inetaddr_notifier); + register_inet6addr_notifier(&i40iw_inetaddr6_notifier); + register_netevent_notifier(&i40iw_net_notifier); +} + +/** + * i40iw_unregister_notifiers - unregister tcp ip notifiers + */ + +static void i40iw_unregister_notifiers(void) +{ + unregister_netevent_notifier(&i40iw_net_notifier); + unregister_inetaddr_notifier(&i40iw_inetaddr_notifier); + unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier); } /** @@ -1400,6 +1407,11 @@ static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev, u32 i; u32 size; + if (!ldev->msix_count) { + i40iw_pr_err("No MSI-X vectors\n"); + return I40IW_ERR_CONFIG; + } + iwdev->msix_count = ldev->msix_count; size = sizeof(struct i40iw_msix_vector) * iwdev->msix_count; @@ -1462,12 +1474,6 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev) if (!iwdev->reset) i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx); /* fallthrough */ - case INET_NOTIFIER: - if (!atomic_dec_return(&i40iw_notifiers_registered)) { - unregister_netevent_notifier(&i40iw_net_notifier); - 
unregister_inetaddr_notifier(&i40iw_inetaddr_notifier); - unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier); - } /* fallthrough */ case PBLE_CHUNK_MEM: i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc); @@ -1550,7 +1556,7 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl, status = i40iw_save_msix_info(iwdev, ldev); if (status) - goto exit; + return status; iwdev->hw.dev_context = (void *)ldev->pcidev; iwdev->hw.hw_addr = ldev->hw_addr; status = i40iw_allocate_dma_mem(&iwdev->hw, @@ -1667,8 +1673,6 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client) break; iwdev->init_state = PBLE_CHUNK_MEM; iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM); - i40iw_register_notifiers(); - iwdev->init_state = INET_NOTIFIER; status = i40iw_add_mac_ip(iwdev); if (status) break; @@ -2018,6 +2022,8 @@ static int __init i40iw_init_module(void) i40iw_client.type = I40E_CLIENT_IWARP; spin_lock_init(&i40iw_handler_lock); ret = i40e_register_client(&i40iw_client); + i40iw_register_notifiers(); + return ret; } @@ -2029,6 +2035,7 @@ static int __init i40iw_init_module(void) */ static void __exit i40iw_exit_module(void) { + i40iw_unregister_notifiers(); i40e_unregister_client(&i40iw_client); } diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c index 62f1f45b8737..e52dbbb4165e 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_utils.c +++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c @@ -160,7 +160,7 @@ int i40iw_inetaddr_event(struct notifier_block *notifier, return NOTIFY_DONE; iwdev = &hdl->device; - if (iwdev->init_state < INET_NOTIFIER) + if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing) return NOTIFY_DONE; netdev = iwdev->ldev->netdev; @@ -217,7 +217,7 @@ int i40iw_inet6addr_event(struct notifier_block *notifier, return NOTIFY_DONE; iwdev = &hdl->device; - if (iwdev->init_state < INET_NOTIFIER) + if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing) return NOTIFY_DONE; netdev = iwdev->ldev->netdev; @@ -266,7 +266,7 @@ int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void * if (!iwhdl) return NOTIFY_DONE; iwdev = &iwhdl->device; - if (iwdev->init_state < INET_NOTIFIER) + if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing) return NOTIFY_DONE; p = (__be32 *)neigh->primary_key; i40iw_copy_ip_ntohl(local_ipaddr, p); diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 1aa411034a27..28b3d02d511b 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -1027,7 +1027,19 @@ int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED; iwqp->last_aeq = I40IW_AE_RESET_SENT; spin_unlock_irqrestore(&iwqp->lock, flags); + i40iw_cm_disconn(iwqp); } + } else { + spin_lock_irqsave(&iwqp->lock, flags); + if (iwqp->cm_id) { + if (atomic_inc_return(&iwqp->close_timer_started) == 1) { + iwqp->cm_id->add_ref(iwqp->cm_id); + i40iw_schedule_cm_timer(iwqp->cm_node, + (struct i40iw_puda_buf *)iwqp, + I40IW_TIMER_TYPE_CLOSE, 1, 0); + } + } + spin_unlock_irqrestore(&iwqp->lock, flags); } } return 0; diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c index 0ba5ba7540c8..e219093d2764 100644 --- a/drivers/infiniband/hw/mlx4/sysfs.c +++ b/drivers/infiniband/hw/mlx4/sysfs.c @@ -221,7 +221,7 @@ void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num, static int 
add_port_entries(struct mlx4_ib_dev *device, int port_num) { int i; - char buff[10]; + char buff[11]; struct mlx4_ib_iov_port *port = NULL; int ret = 0 ; struct ib_port_attr attr; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index ab3c562d5ba7..d6fbad8f34aa 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -778,13 +778,13 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, } if (MLX5_CAP_GEN(mdev, tag_matching)) { - props->xrq_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE; - props->xrq_caps.max_num_tags = + props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE; + props->tm_caps.max_num_tags = (1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1; - props->xrq_caps.flags = IB_TM_CAP_RC; - props->xrq_caps.max_ops = + props->tm_caps.flags = IB_TM_CAP_RC; + props->tm_caps.max_ops = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz); - props->xrq_caps.max_sge = MLX5_TM_MAX_SGE; + props->tm_caps.max_sge = MLX5_TM_MAX_SGE; } if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) { @@ -3837,11 +3837,13 @@ static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev) if (!dbg) return -ENOMEM; + dev->delay_drop.dbg = dbg; + dbg->dir_debugfs = debugfs_create_dir("delay_drop", dev->mdev->priv.dbg_root); if (!dbg->dir_debugfs) - return -ENOMEM; + goto out_debugfs; dbg->events_cnt_debugfs = debugfs_create_atomic_t("num_timeout_events", 0400, @@ -3865,8 +3867,6 @@ static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev) if (!dbg->timeout_debugfs) goto out_debugfs; - dev->delay_drop.dbg = dbg; - return 0; out_debugfs: diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c index 914f212e7ef6..f3dbd75a0a96 100644 --- a/drivers/infiniband/hw/mlx5/mem.c +++ b/drivers/infiniband/hw/mlx5/mem.c @@ -50,13 +50,9 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, { unsigned long tmp; unsigned long m; - int i, k; - u64 base = 0; - int p = 0; - int skip; - int mask; - u64 len; - u64 pfn; + u64 base = ~0, p = 0; + u64 len, pfn; + int i = 0; struct scatterlist *sg; int entry; unsigned long page_shift = umem->page_shift; @@ -76,33 +72,24 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, m = find_first_bit(&tmp, BITS_PER_LONG); if (max_page_shift) m = min_t(unsigned long, max_page_shift - page_shift, m); - skip = 1 << m; - mask = skip - 1; - i = 0; + for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { len = sg_dma_len(sg) >> page_shift; pfn = sg_dma_address(sg) >> page_shift; - for (k = 0; k < len; k++) { - if (!(i & mask)) { - tmp = (unsigned long)pfn; - m = min_t(unsigned long, m, find_first_bit(&tmp, BITS_PER_LONG)); - skip = 1 << m; - mask = skip - 1; - base = pfn; - p = 0; - } else { - if (base + p != pfn) { - tmp = (unsigned long)p; - m = find_first_bit(&tmp, BITS_PER_LONG); - skip = 1 << m; - mask = skip - 1; - base = pfn; - p = 0; - } - } - p++; - i++; + if (base + p != pfn) { + /* If either the offset or the new + * base are unaligned update m + */ + tmp = (unsigned long)(pfn | p); + if (!IS_ALIGNED(tmp, 1 << m)) + m = find_first_bit(&tmp, BITS_PER_LONG); + + base = pfn; + p = 0; } + + p += len; + i += len; } if (i) { diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 0e2789d9bb4d..37bbc543847a 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -47,7 +47,8 @@ enum { #define MLX5_UMR_ALIGN 2048 -static int clean_mr(struct mlx5_ib_mr *mr); +static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); 
+static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); static int mr_cache_max_order(struct mlx5_ib_dev *dev); static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); @@ -1270,8 +1271,9 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift, update_xlt_flags); + if (err) { - mlx5_ib_dereg_mr(&mr->ibmr); + dereg_mr(dev, mr); return ERR_PTR(err); } } @@ -1356,7 +1358,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, err = mr_umem_get(pd, addr, len, access_flags, &mr->umem, &npages, &page_shift, &ncont, &order); if (err < 0) { - clean_mr(mr); + clean_mr(dev, mr); return err; } } @@ -1410,7 +1412,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, if (err) { mlx5_ib_warn(dev, "Failed to rereg UMR\n"); ib_umem_release(mr->umem); - clean_mr(mr); + clean_mr(dev, mr); return err; } } @@ -1469,9 +1471,8 @@ mlx5_free_priv_descs(struct mlx5_ib_mr *mr) } } -static int clean_mr(struct mlx5_ib_mr *mr) +static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) { - struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); int allocated_from_cache = mr->allocated_from_cache; int err; @@ -1507,10 +1508,8 @@ static int clean_mr(struct mlx5_ib_mr *mr) return 0; } -int mlx5_ib_dereg_mr(struct ib_mr *ibmr) +static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) { - struct mlx5_ib_dev *dev = to_mdev(ibmr->device); - struct mlx5_ib_mr *mr = to_mmr(ibmr); int npages = mr->npages; struct ib_umem *umem = mr->umem; @@ -1539,7 +1538,7 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr) } #endif - clean_mr(mr); + clean_mr(dev, mr); if (umem) { ib_umem_release(umem); @@ -1549,6 +1548,14 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr) return 0; } +int mlx5_ib_dereg_mr(struct ib_mr *ibmr) +{ + struct mlx5_ib_dev *dev = to_mdev(ibmr->device); + struct mlx5_ib_mr *mr = to_mmr(ibmr); + + return dereg_mr(dev, mr); +} + struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, u32 max_num_sg) diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index f0dc5f4aa177..442b9bdc0f03 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -3232,7 +3232,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr, mr->ibmr.iova); set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_FMR_WQE_LENGTH_LOW_IDX, - mr->ibmr.length); + lower_32_bits(mr->ibmr.length)); set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_FMR_WQE_LENGTH_HIGH_IDX, 0); set_wqe_32bit_value(wqe->wqe_words, @@ -3274,7 +3274,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr, mr->npages * 8); nes_debug(NES_DBG_IW_TX, "SQ_REG_MR: iova_start: %llx, " - "length: %d, rkey: %0x, pgl_paddr: %llx, " + "length: %lld, rkey: %0x, pgl_paddr: %llx, " "page_list_len: %u, wqe_misc: %x\n", (unsigned long long) mr->ibmr.iova, mr->ibmr.length, diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c index dcb5942f9fb5..65b166cc7437 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c @@ -252,7 +252,10 @@ static int ocrdma_get_mbx_errno(u32 status) case OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES: err_num = -EAGAIN; break; + default: + err_num = -EFAULT; } + break; default: err_num = -EFAULT; } diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h index 663a0c301c43..984aa3484928 
100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h @@ -416,9 +416,34 @@ static inline enum ib_wc_status pvrdma_wc_status_to_ib( return (enum ib_wc_status)status; } -static inline int pvrdma_wc_opcode_to_ib(int opcode) -{ - return opcode; +static inline int pvrdma_wc_opcode_to_ib(unsigned int opcode) +{ + switch (opcode) { + case PVRDMA_WC_SEND: + return IB_WC_SEND; + case PVRDMA_WC_RDMA_WRITE: + return IB_WC_RDMA_WRITE; + case PVRDMA_WC_RDMA_READ: + return IB_WC_RDMA_READ; + case PVRDMA_WC_COMP_SWAP: + return IB_WC_COMP_SWAP; + case PVRDMA_WC_FETCH_ADD: + return IB_WC_FETCH_ADD; + case PVRDMA_WC_LOCAL_INV: + return IB_WC_LOCAL_INV; + case PVRDMA_WC_FAST_REG_MR: + return IB_WC_REG_MR; + case PVRDMA_WC_MASKED_COMP_SWAP: + return IB_WC_MASKED_COMP_SWAP; + case PVRDMA_WC_MASKED_FETCH_ADD: + return IB_WC_MASKED_FETCH_ADD; + case PVRDMA_WC_RECV: + return IB_WC_RECV; + case PVRDMA_WC_RECV_RDMA_WITH_IMM: + return IB_WC_RECV_RDMA_WITH_IMM; + default: + return IB_WC_SEND; + } } static inline int pvrdma_wc_flags_to_ib(int flags) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 14b62f7472b4..7774654c2ccb 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -823,12 +823,18 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) wc->status != IB_WC_WR_FLUSH_ERR) { struct ipoib_neigh *neigh; - if (wc->status != IB_WC_RNR_RETRY_EXC_ERR) - ipoib_warn(priv, "failed cm send event (status=%d, wrid=%d vend_err %x)\n", - wc->status, wr_id, wc->vendor_err); + /* IB_WC[_RNR]_RETRY_EXC_ERR error is part of the life cycle, + * so don't make waves. + */ + if (wc->status == IB_WC_RNR_RETRY_EXC_ERR || + wc->status == IB_WC_RETRY_EXC_ERR) + ipoib_dbg(priv, + "%s: failed cm send event (status=%d, wrid=%d vend_err 0x%x)\n", + __func__, wc->status, wr_id, wc->vendor_err); else - ipoib_dbg(priv, "failed cm send event (status=%d, wrid=%d vend_err %x)\n", - wc->status, wr_id, wc->vendor_err); + ipoib_warn(priv, + "%s: failed cm send event (status=%d, wrid=%d vend_err 0x%x)\n", + __func__, wc->status, wr_id, wc->vendor_err); spin_lock_irqsave(&priv->lock, flags); neigh = tx->neigh; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 2e075377242e..6cd61638b441 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -1000,19 +1000,6 @@ static inline int update_parent_pkey(struct ipoib_dev_priv *priv) */ priv->dev->broadcast[8] = priv->pkey >> 8; priv->dev->broadcast[9] = priv->pkey & 0xff; - - /* - * Update the broadcast address in the priv->broadcast object, - * in case it already exists, otherwise no one will do that. 
- */ - if (priv->broadcast) { - spin_lock_irq(&priv->lock); - memcpy(priv->broadcast->mcmember.mgid.raw, - priv->dev->broadcast + 4, - sizeof(union ib_gid)); - spin_unlock_irq(&priv->lock); - } - return 0; } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index bac95b509a9b..dcc77014018d 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -2180,6 +2180,7 @@ static struct net_device *ipoib_add_port(const char *format, { struct ipoib_dev_priv *priv; struct ib_port_attr attr; + struct rdma_netdev *rn; int result = -ENOMEM; priv = ipoib_intf_alloc(hca, port, format); @@ -2279,7 +2280,8 @@ register_failed: ipoib_dev_cleanup(priv->dev); device_init_failed: - free_netdev(priv->dev); + rn = netdev_priv(priv->dev); + rn->free_rdma_netdev(priv->dev); kfree(priv); alloc_mem_failed: @@ -2328,7 +2330,7 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data) return; list_for_each_entry_safe(priv, tmp, dev_list, list) { - struct rdma_netdev *rn = netdev_priv(priv->dev); + struct rdma_netdev *parent_rn = netdev_priv(priv->dev); ib_unregister_event_handler(&priv->event_handler); flush_workqueue(ipoib_workqueue); @@ -2350,10 +2352,15 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data) unregister_netdev(priv->dev); mutex_unlock(&priv->sysfs_mutex); - rn->free_rdma_netdev(priv->dev); + parent_rn->free_rdma_netdev(priv->dev); + + list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) { + struct rdma_netdev *child_rn; - list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) + child_rn = netdev_priv(cpriv->dev); + child_rn->free_rdma_netdev(cpriv->dev); kfree(cpriv); + } kfree(priv); } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c index 9927cd6b7082..55a9b71ed05a 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c @@ -141,14 +141,17 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) return restart_syscall(); } - priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name); - if (!priv) { + if (!down_write_trylock(&ppriv->vlan_rwsem)) { rtnl_unlock(); mutex_unlock(&ppriv->sysfs_mutex); - return -ENOMEM; + return restart_syscall(); } - down_write(&ppriv->vlan_rwsem); + priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name); + if (!priv) { + result = -ENOMEM; + goto out; + } /* * First ensure this isn't a duplicate. 
We check the parent device and @@ -175,8 +178,11 @@ out: rtnl_unlock(); mutex_unlock(&ppriv->sysfs_mutex); - if (result) { - free_netdev(priv->dev); + if (result && priv) { + struct rdma_netdev *rn; + + rn = netdev_priv(priv->dev); + rn->free_rdma_netdev(priv->dev); kfree(priv); } @@ -204,7 +210,12 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey) return restart_syscall(); } - down_write(&ppriv->vlan_rwsem); + if (!down_write_trylock(&ppriv->vlan_rwsem)) { + rtnl_unlock(); + mutex_unlock(&ppriv->sysfs_mutex); + return restart_syscall(); + } + list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) { if (priv->pkey == pkey && priv->child_type == IPOIB_LEGACY_CHILD) { @@ -224,7 +235,10 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey) mutex_unlock(&ppriv->sysfs_mutex); if (dev) { - free_netdev(dev); + struct rdma_netdev *rn; + + rn = netdev_priv(dev); + rn->free_rdma_netdev(priv->dev); kfree(priv); return 0; } diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index 9c3e9ab53a41..322209d5ff58 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c @@ -154,7 +154,7 @@ static void iser_dump_page_vec(struct iser_page_vec *page_vec) { int i; - iser_err("page vec npages %d data length %d\n", + iser_err("page vec npages %d data length %lld\n", page_vec->npages, page_vec->fake_mr.length); for (i = 0; i < page_vec->npages; i++) iser_err("vec[%d]: %llx\n", i, page_vec->pages[i]); diff --git a/drivers/input/ff-core.c b/drivers/input/ff-core.c index 8f2042432c85..66a46c84e28f 100644 --- a/drivers/input/ff-core.c +++ b/drivers/input/ff-core.c @@ -237,9 +237,15 @@ int input_ff_erase(struct input_dev *dev, int effect_id, struct file *file) EXPORT_SYMBOL_GPL(input_ff_erase); /* - * flush_effects - erase all effects owned by a file handle + * input_ff_flush - erase all effects owned by a file handle + * @dev: input device to erase effect from + * @file: purported owner of the effects + * + * This function erases all force-feedback effects associated with + * the given owner from specified device. Note that @file may be %NULL, + * in which case all effects will be erased. 
*/ -static int flush_effects(struct input_dev *dev, struct file *file) +int input_ff_flush(struct input_dev *dev, struct file *file) { struct ff_device *ff = dev->ff; int i; @@ -255,6 +261,7 @@ static int flush_effects(struct input_dev *dev, struct file *file) return 0; } +EXPORT_SYMBOL_GPL(input_ff_flush); /** * input_ff_event() - generic handler for force-feedback events @@ -343,7 +350,7 @@ int input_ff_create(struct input_dev *dev, unsigned int max_effects) mutex_init(&ff->mutex); dev->ff = ff; - dev->flush = flush_effects; + dev->flush = input_ff_flush; dev->event = input_ff_event; __set_bit(EV_FF, dev->evbit); diff --git a/drivers/input/joystick/adi.c b/drivers/input/joystick/adi.c index d09cefa37931..15a71acb6997 100644 --- a/drivers/input/joystick/adi.c +++ b/drivers/input/joystick/adi.c @@ -313,7 +313,7 @@ static void adi_close(struct input_dev *dev) static void adi_init_digital(struct gameport *gameport) { - int seq[] = { 4, -2, -3, 10, -6, -11, -7, -9, 11, 0 }; + static const int seq[] = { 4, -2, -3, 10, -6, -11, -7, -9, 11, 0 }; int i; for (i = 0; seq[i]; i++) { diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index f8e34ef643c7..d86e59515b9c 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -1764,10 +1764,12 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id struct usb_endpoint_descriptor *ep = &intf->cur_altsetting->endpoint[i].desc; - if (usb_endpoint_dir_in(ep)) - ep_irq_in = ep; - else - ep_irq_out = ep; + if (usb_endpoint_xfer_int(ep)) { + if (usb_endpoint_dir_in(ep)) + ep_irq_in = ep; + else + ep_irq_out = ep; + } } if (!ep_irq_in || !ep_irq_out) { diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index f47e836eaa0f..9f082a388388 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig @@ -581,6 +581,18 @@ config INPUT_PWM_BEEPER To compile this driver as a module, choose M here: the module will be called pwm-beeper. +config INPUT_PWM_VIBRA + tristate "PWM vibrator support" + depends on PWM + select INPUT_FF_MEMLESS + help + Say Y here to get support for PWM based vibrator devices. + + If unsure, say N. + + To compile this driver as a module, choose M here: the module will be + called pwm-vibra. + config INPUT_RK805_PWRKEY tristate "Rockchip RK805 PMIC power key support" depends on MFD_RK808 diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile index 1072e0760c19..03fd4262ada9 100644 --- a/drivers/input/misc/Makefile +++ b/drivers/input/misc/Makefile @@ -59,6 +59,7 @@ obj-$(CONFIG_INPUT_PM8XXX_VIBRATOR) += pm8xxx-vibrator.o obj-$(CONFIG_INPUT_PMIC8XXX_PWRKEY) += pmic8xxx-pwrkey.o obj-$(CONFIG_INPUT_POWERMATE) += powermate.o obj-$(CONFIG_INPUT_PWM_BEEPER) += pwm-beeper.o +obj-$(CONFIG_INPUT_PWM_VIBRA) += pwm-vibra.o obj-$(CONFIG_INPUT_RB532_BUTTON) += rb532_button.o obj-$(CONFIG_INPUT_REGULATOR_HAPTIC) += regulator-haptic.o obj-$(CONFIG_INPUT_RETU_PWRBUTTON) += retu-pwrbutton.o diff --git a/drivers/input/misc/pwm-vibra.c b/drivers/input/misc/pwm-vibra.c new file mode 100644 index 000000000000..55da191ae550 --- /dev/null +++ b/drivers/input/misc/pwm-vibra.c @@ -0,0 +1,267 @@ +/* + * PWM vibrator driver + * + * Copyright (C) 2017 Collabora Ltd. 
+ * + * Based on previous work from: + * Copyright (C) 2012 Dmitry Torokhov <dmitry.torokhov@gmail.com> + * + * Based on PWM beeper driver: + * Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include <linux/input.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/property.h> +#include <linux/pwm.h> +#include <linux/regulator/consumer.h> +#include <linux/slab.h> + +struct pwm_vibrator { + struct input_dev *input; + struct pwm_device *pwm; + struct pwm_device *pwm_dir; + struct regulator *vcc; + + struct work_struct play_work; + u16 level; + u32 direction_duty_cycle; +}; + +static int pwm_vibrator_start(struct pwm_vibrator *vibrator) +{ + struct device *pdev = vibrator->input->dev.parent; + struct pwm_state state; + int err; + + err = regulator_enable(vibrator->vcc); + if (err) { + dev_err(pdev, "failed to enable regulator: %d", err); + return err; + } + + pwm_get_state(vibrator->pwm, &state); + pwm_set_relative_duty_cycle(&state, vibrator->level, 0xffff); + state.enabled = true; + + err = pwm_apply_state(vibrator->pwm, &state); + if (err) { + dev_err(pdev, "failed to apply pwm state: %d", err); + return err; + } + + if (vibrator->pwm_dir) { + pwm_get_state(vibrator->pwm_dir, &state); + state.duty_cycle = vibrator->direction_duty_cycle; + state.enabled = true; + + err = pwm_apply_state(vibrator->pwm_dir, &state); + if (err) { + dev_err(pdev, "failed to apply dir-pwm state: %d", err); + pwm_disable(vibrator->pwm); + return err; + } + } + + return 0; +} + +static void pwm_vibrator_stop(struct pwm_vibrator *vibrator) +{ + regulator_disable(vibrator->vcc); + + if (vibrator->pwm_dir) + pwm_disable(vibrator->pwm_dir); + pwm_disable(vibrator->pwm); +} + +static void pwm_vibrator_play_work(struct work_struct *work) +{ + struct pwm_vibrator *vibrator = container_of(work, + struct pwm_vibrator, play_work); + + if (vibrator->level) + pwm_vibrator_start(vibrator); + else + pwm_vibrator_stop(vibrator); +} + +static int pwm_vibrator_play_effect(struct input_dev *dev, void *data, + struct ff_effect *effect) +{ + struct pwm_vibrator *vibrator = input_get_drvdata(dev); + + vibrator->level = effect->u.rumble.strong_magnitude; + if (!vibrator->level) + vibrator->level = effect->u.rumble.weak_magnitude; + + schedule_work(&vibrator->play_work); + + return 0; +} + +static void pwm_vibrator_close(struct input_dev *input) +{ + struct pwm_vibrator *vibrator = input_get_drvdata(input); + + cancel_work_sync(&vibrator->play_work); + pwm_vibrator_stop(vibrator); +} + +static int pwm_vibrator_probe(struct platform_device *pdev) +{ + struct pwm_vibrator *vibrator; + struct pwm_state state; + int err; + + vibrator = devm_kzalloc(&pdev->dev, sizeof(*vibrator), GFP_KERNEL); + if (!vibrator) + return -ENOMEM; + + vibrator->input = devm_input_allocate_device(&pdev->dev); + if (!vibrator->input) + return -ENOMEM; + + vibrator->vcc = devm_regulator_get(&pdev->dev, "vcc"); + err = PTR_ERR_OR_ZERO(vibrator->vcc); + if (err) { + if (err != -EPROBE_DEFER) + dev_err(&pdev->dev, "Failed to request regulator: %d", + err); + return err; + } + + vibrator->pwm = devm_pwm_get(&pdev->dev, "enable"); + err = PTR_ERR_OR_ZERO(vibrator->pwm); + if (err) { + if (err != -EPROBE_DEFER) + 
dev_err(&pdev->dev, "Failed to request main pwm: %d", + err); + return err; + } + + INIT_WORK(&vibrator->play_work, pwm_vibrator_play_work); + + /* Sync up PWM state and ensure it is off. */ + pwm_init_state(vibrator->pwm, &state); + state.enabled = false; + err = pwm_apply_state(vibrator->pwm, &state); + if (err) { + dev_err(&pdev->dev, "failed to apply initial PWM state: %d", + err); + return err; + } + + vibrator->pwm_dir = devm_pwm_get(&pdev->dev, "direction"); + err = PTR_ERR_OR_ZERO(vibrator->pwm_dir); + switch (err) { + case 0: + /* Sync up PWM state and ensure it is off. */ + pwm_init_state(vibrator->pwm_dir, &state); + state.enabled = false; + err = pwm_apply_state(vibrator->pwm_dir, &state); + if (err) { + dev_err(&pdev->dev, "failed to apply initial PWM state: %d", + err); + return err; + } + + vibrator->direction_duty_cycle = + pwm_get_period(vibrator->pwm_dir) / 2; + device_property_read_u32(&pdev->dev, "direction-duty-cycle-ns", + &vibrator->direction_duty_cycle); + break; + + case -ENODATA: + /* Direction PWM is optional */ + vibrator->pwm_dir = NULL; + break; + + default: + dev_err(&pdev->dev, "Failed to request direction pwm: %d", err); + /* Fall through */ + + case -EPROBE_DEFER: + return err; + } + + vibrator->input->name = "pwm-vibrator"; + vibrator->input->id.bustype = BUS_HOST; + vibrator->input->dev.parent = &pdev->dev; + vibrator->input->close = pwm_vibrator_close; + + input_set_drvdata(vibrator->input, vibrator); + input_set_capability(vibrator->input, EV_FF, FF_RUMBLE); + + err = input_ff_create_memless(vibrator->input, NULL, + pwm_vibrator_play_effect); + if (err) { + dev_err(&pdev->dev, "Couldn't create FF dev: %d", err); + return err; + } + + err = input_register_device(vibrator->input); + if (err) { + dev_err(&pdev->dev, "Couldn't register input dev: %d", err); + return err; + } + + platform_set_drvdata(pdev, vibrator); + + return 0; +} + +static int __maybe_unused pwm_vibrator_suspend(struct device *dev) +{ + struct pwm_vibrator *vibrator = dev_get_drvdata(dev); + + cancel_work_sync(&vibrator->play_work); + if (vibrator->level) + pwm_vibrator_stop(vibrator); + + return 0; +} + +static int __maybe_unused pwm_vibrator_resume(struct device *dev) +{ + struct pwm_vibrator *vibrator = dev_get_drvdata(dev); + + if (vibrator->level) + pwm_vibrator_start(vibrator); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(pwm_vibrator_pm_ops, + pwm_vibrator_suspend, pwm_vibrator_resume); + +#ifdef CONFIG_OF +static const struct of_device_id pwm_vibra_dt_match_table[] = { + { .compatible = "pwm-vibrator" }, + {}, +}; +MODULE_DEVICE_TABLE(of, pwm_vibra_dt_match_table); +#endif + +static struct platform_driver pwm_vibrator_driver = { + .probe = pwm_vibrator_probe, + .driver = { + .name = "pwm-vibrator", + .pm = &pwm_vibrator_pm_ops, + .of_match_table = of_match_ptr(pwm_vibra_dt_match_table), + }, +}; +module_platform_driver(pwm_vibrator_driver); + +MODULE_AUTHOR("Sebastian Reichel <sre@kernel.org>"); +MODULE_DESCRIPTION("PWM vibrator driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:pwm-vibrator"); diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index 022be0e22eba..443151de90c6 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c @@ -98,14 +98,15 @@ static int uinput_request_reserve_slot(struct uinput_device *udev, uinput_request_alloc_id(udev, request)); } -static void uinput_request_done(struct uinput_device *udev, - struct uinput_request *request) +static void uinput_request_release_slot(struct uinput_device *udev, + unsigned 
int id) { /* Mark slot as available */ - udev->requests[request->id] = NULL; - wake_up(&udev->requests_waitq); + spin_lock(&udev->requests_lock); + udev->requests[id] = NULL; + spin_unlock(&udev->requests_lock); - complete(&request->done); + wake_up(&udev->requests_waitq); } static int uinput_request_send(struct uinput_device *udev, @@ -138,20 +139,22 @@ static int uinput_request_send(struct uinput_device *udev, static int uinput_request_submit(struct uinput_device *udev, struct uinput_request *request) { - int error; + int retval; - error = uinput_request_reserve_slot(udev, request); - if (error) - return error; + retval = uinput_request_reserve_slot(udev, request); + if (retval) + return retval; - error = uinput_request_send(udev, request); - if (error) { - uinput_request_done(udev, request); - return error; - } + retval = uinput_request_send(udev, request); + if (retval) + goto out; wait_for_completion(&request->done); - return request->retval; + retval = request->retval; + + out: + uinput_request_release_slot(udev, request->id); + return retval; } /* @@ -169,7 +172,7 @@ static void uinput_flush_requests(struct uinput_device *udev) request = udev->requests[i]; if (request) { request->retval = -ENODEV; - uinput_request_done(udev, request); + complete(&request->done); } } @@ -230,6 +233,18 @@ static int uinput_dev_erase_effect(struct input_dev *dev, int effect_id) return uinput_request_submit(udev, &request); } +static int uinput_dev_flush(struct input_dev *dev, struct file *file) +{ + /* + * If we are called with file == NULL that means we are tearing + * down the device, and therefore we can not handle FF erase + * requests: either we are handling UI_DEV_DESTROY (and holding + * the udev->mutex), or the file descriptor is closed and there is + * nobody on the other side anymore. + */ + return file ? input_ff_flush(dev, file) : 0; +} + static void uinput_destroy_device(struct uinput_device *udev) { const char *name, *phys; @@ -297,6 +312,12 @@ static int uinput_create_device(struct uinput_device *udev) dev->ff->playback = uinput_dev_playback; dev->ff->set_gain = uinput_dev_set_gain; dev->ff->set_autocenter = uinput_dev_set_autocenter; + /* + * The standard input_ff_flush() implementation does + * not quite work for uinput as we can't reasonably + * handle FF requests during device teardown. + */ + dev->flush = uinput_dev_flush; } error = input_register_device(udev->dev); @@ -939,7 +960,7 @@ static long uinput_ioctl_handler(struct file *file, unsigned int cmd, } req->retval = ff_up.retval; - uinput_request_done(udev, req); + complete(&req->done); goto out; case UI_END_FF_ERASE: @@ -955,7 +976,7 @@ static long uinput_ioctl_handler(struct file *file, unsigned int cmd, } req->retval = ff_erase.retval; - uinput_request_done(udev, req); + complete(&req->done); goto out; } diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c index 15b1330606c1..e19eb60b3d2f 100644 --- a/drivers/input/mouse/elan_i2c_i2c.c +++ b/drivers/input/mouse/elan_i2c_i2c.c @@ -598,7 +598,7 @@ static int elan_i2c_write_fw_block(struct i2c_client *client, } /* Wait for F/W to update one page ROM data. 
*/ - msleep(20); + msleep(35); error = elan_i2c_read_cmd(client, ETP_I2C_IAP_CTRL_CMD, val); if (error) { diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 6428d6f4d568..b84cd978fce2 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -700,7 +700,9 @@ static int elantech_debounce_check_v2(struct psmouse *psmouse) * When we encounter packet that matches this exactly, it means the * hardware is in debounce status. Just ignore the whole packet. */ - const u8 debounce_packet[] = { 0x84, 0xff, 0xff, 0x02, 0xff, 0xff }; + static const u8 debounce_packet[] = { + 0x84, 0xff, 0xff, 0x02, 0xff, 0xff + }; unsigned char *packet = psmouse->packet; return !memcmp(packet, debounce_packet, sizeof(debounce_packet)); @@ -741,7 +743,9 @@ static int elantech_packet_check_v2(struct psmouse *psmouse) static int elantech_packet_check_v3(struct psmouse *psmouse) { struct elantech_data *etd = psmouse->private; - const u8 debounce_packet[] = { 0xc4, 0xff, 0xff, 0x02, 0xff, 0xff }; + static const u8 debounce_packet[] = { + 0xc4, 0xff, 0xff, 0x02, 0xff, 0xff + }; unsigned char *packet = psmouse->packet; /* diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index ae81e57e13b9..6cbbdc6e9687 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -840,6 +840,13 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = { }, }, { + /* Gigabyte P57 - Elantech touchpad */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "P57"), + }, + }, + { /* Schenker XMG C504 - Elantech touchpad */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "XMG"), diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c index f872817e81e4..5bf63f76ddda 100644 --- a/drivers/input/touchscreen/edt-ft5x06.c +++ b/drivers/input/touchscreen/edt-ft5x06.c @@ -593,7 +593,7 @@ static int edt_ft5x06_work_mode(struct edt_ft5x06_ts_data *tsdata) tsdata->gain); edt_ft5x06_register_write(tsdata, reg_addr->reg_offset, tsdata->offset); - if (reg_addr->reg_report_rate) + if (reg_addr->reg_report_rate != NO_REGISTER) edt_ft5x06_register_write(tsdata, reg_addr->reg_report_rate, tsdata->report_rate); @@ -874,6 +874,7 @@ edt_ft5x06_ts_set_regs(struct edt_ft5x06_ts_data *tsdata) case M09: reg_addr->reg_threshold = M09_REGISTER_THRESHOLD; + reg_addr->reg_report_rate = NO_REGISTER; reg_addr->reg_gain = M09_REGISTER_GAIN; reg_addr->reg_offset = M09_REGISTER_OFFSET; reg_addr->reg_num_x = M09_REGISTER_NUM_X; diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c index 240b16f3ee97..32d2762448aa 100644 --- a/drivers/input/touchscreen/goodix.c +++ b/drivers/input/touchscreen/goodix.c @@ -267,6 +267,12 @@ static void goodix_process_events(struct goodix_ts_data *ts) if (touch_num < 0) return; + /* + * Bit 4 of the first byte reports the status of the capacitive + * Windows/Home button. 
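[Editor's note] The edt-ft5x06 hunk above stops treating a zero register address as "no register" and instead compares against an explicit NO_REGISTER sentinel, which the M09 register map now sets for its missing report-rate register. A self-contained sketch of that sentinel pattern (register numbers here are illustrative, not taken from a datasheet):

#include <stdint.h>
#include <stdio.h>

#define NO_REGISTER 0xff	/* sentinel: this controller has no such register */

struct reg_map {
	uint8_t reg_threshold;
	uint8_t reg_report_rate;	/* 0x00 could be a real address */
};

static void write_report_rate(const struct reg_map *map, uint8_t rate)
{
	/* Comparing against the sentinel keeps address 0x00 usable. */
	if (map->reg_report_rate != NO_REGISTER)
		printf("write reg 0x%02x <- %u\n", map->reg_report_rate, rate);
	else
		printf("no report-rate register on this controller\n");
}

int main(void)
{
	struct reg_map m06 = { .reg_threshold = 0x26, .reg_report_rate = 0x08 };
	struct reg_map m09 = { .reg_threshold = 0x80, .reg_report_rate = NO_REGISTER };

	write_report_rate(&m06, 8);
	write_report_rate(&m09, 8);
	return 0;
}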
+ */ + input_report_key(ts->input_dev, KEY_LEFTMETA, point_data[0] & BIT(4)); + for (i = 0; i < touch_num; i++) goodix_ts_report_touch(ts, &point_data[1 + GOODIX_CONTACT_SIZE * i]); @@ -612,6 +618,9 @@ static int goodix_request_input_dev(struct goodix_ts_data *ts) ts->input_dev->id.product = ts->id; ts->input_dev->id.version = ts->version; + /* Capacitive Windows/Home button on some devices */ + input_set_capability(ts->input_dev, EV_KEY, KEY_LEFTMETA); + error = input_register_device(ts->input_dev); if (error) { dev_err(&ts->client->dev, diff --git a/drivers/input/touchscreen/htcpen.c b/drivers/input/touchscreen/htcpen.c index 92e2243fb77d..8fd909285877 100644 --- a/drivers/input/touchscreen/htcpen.c +++ b/drivers/input/touchscreen/htcpen.c @@ -219,7 +219,7 @@ static struct isa_driver htcpen_isa_driver = { } }; -static struct dmi_system_id htcshift_dmi_table[] __initdata = { +static const struct dmi_system_id htcshift_dmi_table[] __initconst = { { .ident = "Shift", .matches = { diff --git a/drivers/input/touchscreen/surface3_spi.c b/drivers/input/touchscreen/surface3_spi.c index e12fb9b63f31..5db0f1c4ef38 100644 --- a/drivers/input/touchscreen/surface3_spi.c +++ b/drivers/input/touchscreen/surface3_spi.c @@ -173,7 +173,7 @@ static void surface3_spi_process_pen(struct surface3_ts_data *ts_data, u8 *data) static void surface3_spi_process(struct surface3_ts_data *ts_data) { - const char header[] = { + static const char header[] = { 0xff, 0xff, 0xff, 0xff, 0xa5, 0x5a, 0xe7, 0x7e, 0x01 }; u8 *data = ts_data->rd_buf; diff --git a/drivers/input/touchscreen/ucb1400_ts.c b/drivers/input/touchscreen/ucb1400_ts.c index c1e23cfc6155..1a86cbd9326f 100644 --- a/drivers/input/touchscreen/ucb1400_ts.c +++ b/drivers/input/touchscreen/ucb1400_ts.c @@ -414,7 +414,7 @@ static int __maybe_unused ucb1400_ts_suspend(struct device *dev) mutex_lock(&idev->mutex); if (idev->users) - ucb1400_ts_start(ucb); + ucb1400_ts_stop(ucb); mutex_unlock(&idev->mutex); return 0; @@ -428,7 +428,7 @@ static int __maybe_unused ucb1400_ts_resume(struct device *dev) mutex_lock(&idev->mutex); if (idev->users) - ucb1400_ts_stop(ucb); + ucb1400_ts_start(ucb); mutex_unlock(&idev->mutex); return 0; diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 49bd2ab8c507..f3a21343e636 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -278,7 +278,7 @@ config EXYNOS_IOMMU_DEBUG config IPMMU_VMSA bool "Renesas VMSA-compatible IPMMU" depends on ARM || IOMMU_DMA - depends on ARCH_RENESAS || COMPILE_TEST + depends on ARCH_RENESAS || (COMPILE_TEST && !GENERIC_ATOMIC64) select IOMMU_API select IOMMU_IO_PGTABLE_LPAE select ARM_DMA_USE_IOMMU @@ -373,7 +373,8 @@ config MTK_IOMMU_V1 config QCOM_IOMMU # Note: iommu drivers cannot (yet?) 
be built as modules bool "Qualcomm IOMMU Support" - depends on ARCH_QCOM || COMPILE_TEST + depends on ARCH_QCOM || (COMPILE_TEST && !GENERIC_ATOMIC64) + depends on HAS_DMA select IOMMU_API select IOMMU_IO_PGTABLE_LPAE select ARM_DMA_USE_IOMMU diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 382de42b8359..6fe2d0346073 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -874,7 +874,7 @@ static bool copy_device_table(void) hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4); entry = (((u64) hi) << 32) + lo; if (last_entry && last_entry != entry) { - pr_err("IOMMU:%d should use the same dev table as others!/n", + pr_err("IOMMU:%d should use the same dev table as others!\n", iommu->index); return false; } @@ -882,7 +882,7 @@ static bool copy_device_table(void) old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12; if (old_devtb_size != dev_table_size) { - pr_err("The device table size of IOMMU:%d is not expected!/n", + pr_err("The device table size of IOMMU:%d is not expected!\n", iommu->index); return false; } @@ -890,7 +890,7 @@ static bool copy_device_table(void) old_devtb_phys = entry & PAGE_MASK; if (old_devtb_phys >= 0x100000000ULL) { - pr_err("The address of old device table is above 4G, not trustworthy!/n"); + pr_err("The address of old device table is above 4G, not trustworthy!\n"); return false; } old_devtb = memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB); @@ -901,7 +901,7 @@ static bool copy_device_table(void) old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag, get_order(dev_table_size)); if (old_dev_tbl_cpy == NULL) { - pr_err("Failed to allocate memory for copying old device table!/n"); + pr_err("Failed to allocate memory for copying old device table!\n"); return false; } diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index ca5ebaeafd6a..57c920c1372d 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@ -497,7 +497,7 @@ static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg) #define dmar_parse_one_rhsa dmar_res_noop #endif -static void __init +static void dmar_table_print_dmar_entry(struct acpi_dmar_header *header) { struct acpi_dmar_hardware_unit *drhd; diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index d665d0dc16e8..6961fc393f0b 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -245,7 +245,7 @@ static void __arm_v7s_free_table(void *table, int lvl, static void __arm_v7s_pte_sync(arm_v7s_iopte *ptep, int num_entries, struct io_pgtable_cfg *cfg) { - if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) + if (cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) return; dma_sync_single_for_device(cfg->iommu_dev, __arm_v7s_dma_addr(ptep), diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index bd515be5b380..16d33ac19db0 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -371,7 +371,8 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova, int ret; spin_lock_irqsave(&dom->pgtlock, flags); - ret = dom->iop->map(dom->iop, iova, paddr, size, prot); + ret = dom->iop->map(dom->iop, iova, paddr & DMA_BIT_MASK(32), + size, prot); spin_unlock_irqrestore(&dom->pgtlock, flags); return ret; diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c index e60e3dba85a0..50947ebb6d17 100644 --- a/drivers/iommu/of_iommu.c +++ b/drivers/iommu/of_iommu.c @@ -157,10 +157,7 @@ static int of_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data) err = 
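[Editor's note] The mtk_iommu hunk masks the physical address to its low 32 bits with DMA_BIT_MASK(32) before handing it to the io-pgtable code, presumably because the page-table input format on this hardware generation only carries a 32-bit address. The masking itself is plain bit arithmetic; a runnable check with a made-up address above 4 GiB:

#include <stdint.h>
#include <stdio.h>

/* Same shape as the kernel macro of the same name. */
#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	uint64_t paddr = 0x100200000ULL;	/* hypothetical address above 4 GiB */

	printf("mask      = 0x%llx\n", (unsigned long long)DMA_BIT_MASK(32));
	printf("truncated = 0x%llx\n", (unsigned long long)(paddr & DMA_BIT_MASK(32)));
	return 0;
}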
of_iommu_xlate(info->dev, &iommu_spec); of_node_put(iommu_spec.np); - if (err) - return err; - - return info->np == pdev->bus->dev.of_node; + return err; } const struct iommu_ops *of_iommu_configure(struct device *dev, diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 519149ec9053..b5df99c6f680 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -1042,7 +1042,7 @@ static int get_cpu_number(struct device_node *dn) { const __be32 *cell; u64 hwid; - int i; + int cpu; cell = of_get_property(dn, "reg", NULL); if (!cell) @@ -1056,9 +1056,9 @@ static int get_cpu_number(struct device_node *dn) if (hwid & ~MPIDR_HWID_BITMASK) return -1; - for (i = 0; i < num_possible_cpus(); i++) - if (cpu_logical_map(i) == hwid) - return i; + for_each_possible_cpu(cpu) + if (cpu_logical_map(cpu) == hwid) + return cpu; return -1; } diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c index 2370e6d9e603..cd0bcc3b7e33 100644 --- a/drivers/irqchip/irq-gic-v4.c +++ b/drivers/irqchip/irq-gic-v4.c @@ -173,7 +173,9 @@ int its_map_vlpi(int irq, struct its_vlpi_map *map) { struct its_cmd_info info = { .cmd_type = MAP_VLPI, - .map = map, + { + .map = map, + }, }; /* @@ -189,7 +191,9 @@ int its_get_vlpi(int irq, struct its_vlpi_map *map) { struct its_cmd_info info = { .cmd_type = GET_VLPI, - .map = map, + { + .map = map, + }, }; return irq_set_vcpu_affinity(irq, &info); @@ -205,7 +209,9 @@ int its_prop_update_vlpi(int irq, u8 config, bool inv) { struct its_cmd_info info = { .cmd_type = inv ? PROP_UPDATE_AND_INV_VLPI : PROP_UPDATE_VLPI, - .config = config, + { + .config = config, + }, }; return irq_set_vcpu_affinity(irq, &info); diff --git a/drivers/irqchip/irq-mips-cpu.c b/drivers/irqchip/irq-mips-cpu.c index 14461cbfab2f..66f97fde13d8 100644 --- a/drivers/irqchip/irq-mips-cpu.c +++ b/drivers/irqchip/irq-mips-cpu.c @@ -101,7 +101,7 @@ static void mips_mt_send_ipi(struct irq_data *d, unsigned int cpu) local_irq_save(flags); /* We can only send IPIs to VPEs within the local core */ - WARN_ON(cpu_data[cpu].core != current_cpu_data.core); + WARN_ON(!cpus_are_siblings(smp_processor_id(), cpu)); vpflags = dvpe(); settc(cpu_vpe_id(&cpu_data[cpu])); diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index b3a60da088db..c90976d7e53c 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -12,27 +12,38 @@ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irqchip.h> -#include <linux/irqchip/mips-gic.h> #include <linux/of_address.h> +#include <linux/percpu.h> #include <linux/sched.h> #include <linux/smp.h> -#include <asm/mips-cm.h> +#include <asm/mips-cps.h> #include <asm/setup.h> #include <asm/traps.h> #include <dt-bindings/interrupt-controller/mips-gic.h> -unsigned int gic_present; +#define GIC_MAX_INTRS 256 +#define GIC_MAX_LONGS BITS_TO_LONGS(GIC_MAX_INTRS) -struct gic_pcpu_mask { - DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS); -}; +/* Add 2 to convert GIC CPU pin to core interrupt */ +#define GIC_CPU_PIN_OFFSET 2 + +/* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */ +#define GIC_PIN_TO_VEC_OFFSET 1 + +/* Convert between local/shared IRQ number and GIC HW IRQ number. 
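[Editor's note] The irq-gic-v4 hunks wrap the .map and .config initializers in an extra set of braces. Those fields sit in an anonymous union inside struct its_cmd_info, and some older compilers only accept the designated initializer when the union's own braces are spelled out. A standalone illustration (the struct here is a stand-in, not the kernel's definition):

#include <stdio.h>

struct cmd_info {
	int cmd_type;
	union {			/* anonymous union, as in the driver */
		void *map;
		unsigned char config;
	};
};

int main(void)
{
	int dummy;
	struct cmd_info info = {
		.cmd_type = 1,
		{
			.map = &dummy,	/* braced form, portable to old gcc */
		},
	};

	printf("cmd_type=%d map=%p\n", info.cmd_type, info.map);
	return 0;
}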
*/ +#define GIC_LOCAL_HWIRQ_BASE 0 +#define GIC_LOCAL_TO_HWIRQ(x) (GIC_LOCAL_HWIRQ_BASE + (x)) +#define GIC_HWIRQ_TO_LOCAL(x) ((x) - GIC_LOCAL_HWIRQ_BASE) +#define GIC_SHARED_HWIRQ_BASE GIC_NUM_LOCAL_INTRS +#define GIC_SHARED_TO_HWIRQ(x) (GIC_SHARED_HWIRQ_BASE + (x)) +#define GIC_HWIRQ_TO_SHARED(x) ((x) - GIC_SHARED_HWIRQ_BASE) + +void __iomem *mips_gic_base; -static unsigned long __gic_base_addr; +DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks); -static void __iomem *gic_base; -static struct gic_pcpu_mask pcpu_masks[NR_CPUS]; static DEFINE_SPINLOCK(gic_lock); static struct irq_domain *gic_irq_domain; static struct irq_domain *gic_ipi_domain; @@ -44,202 +55,13 @@ static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller; DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS); DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS); -static void __gic_irq_dispatch(void); - -static inline u32 gic_read32(unsigned int reg) -{ - return __raw_readl(gic_base + reg); -} - -static inline u64 gic_read64(unsigned int reg) -{ - return __raw_readq(gic_base + reg); -} - -static inline unsigned long gic_read(unsigned int reg) -{ - if (!mips_cm_is64) - return gic_read32(reg); - else - return gic_read64(reg); -} - -static inline void gic_write32(unsigned int reg, u32 val) -{ - return __raw_writel(val, gic_base + reg); -} - -static inline void gic_write64(unsigned int reg, u64 val) -{ - return __raw_writeq(val, gic_base + reg); -} - -static inline void gic_write(unsigned int reg, unsigned long val) -{ - if (!mips_cm_is64) - return gic_write32(reg, (u32)val); - else - return gic_write64(reg, (u64)val); -} - -static inline void gic_update_bits(unsigned int reg, unsigned long mask, - unsigned long val) -{ - unsigned long regval; - - regval = gic_read(reg); - regval &= ~mask; - regval |= val; - gic_write(reg, regval); -} - -static inline void gic_reset_mask(unsigned int intr) -{ - gic_write(GIC_REG(SHARED, GIC_SH_RMASK) + GIC_INTR_OFS(intr), - 1ul << GIC_INTR_BIT(intr)); -} - -static inline void gic_set_mask(unsigned int intr) -{ - gic_write(GIC_REG(SHARED, GIC_SH_SMASK) + GIC_INTR_OFS(intr), - 1ul << GIC_INTR_BIT(intr)); -} - -static inline void gic_set_polarity(unsigned int intr, unsigned int pol) -{ - gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_POLARITY) + - GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr), - (unsigned long)pol << GIC_INTR_BIT(intr)); -} - -static inline void gic_set_trigger(unsigned int intr, unsigned int trig) -{ - gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_TRIGGER) + - GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr), - (unsigned long)trig << GIC_INTR_BIT(intr)); -} - -static inline void gic_set_dual_edge(unsigned int intr, unsigned int dual) -{ - gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_DUAL) + GIC_INTR_OFS(intr), - 1ul << GIC_INTR_BIT(intr), - (unsigned long)dual << GIC_INTR_BIT(intr)); -} - -static inline void gic_map_to_pin(unsigned int intr, unsigned int pin) -{ - gic_write32(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) + - GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin); -} - -static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe) -{ - gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_VPE_BASE) + - GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe), - GIC_SH_MAP_TO_VPE_REG_BIT(vpe)); -} - -#ifdef CONFIG_CLKSRC_MIPS_GIC -u64 notrace gic_read_count(void) -{ - unsigned int hi, hi2, lo; - - if (mips_cm_is64) - return (u64)gic_read(GIC_REG(SHARED, GIC_SH_COUNTER)); - - do { - hi = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32)); - lo = gic_read32(GIC_REG(SHARED, 
GIC_SH_COUNTER_31_00)); - hi2 = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32)); - } while (hi2 != hi); - - return (((u64) hi) << 32) + lo; -} - -unsigned int gic_get_count_width(void) +static void gic_clear_pcpu_masks(unsigned int intr) { - unsigned int bits, config; - - config = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG)); - bits = 32 + 4 * ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >> - GIC_SH_CONFIG_COUNTBITS_SHF); - - return bits; -} - -void notrace gic_write_compare(u64 cnt) -{ - if (mips_cm_is64) { - gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt); - } else { - gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI), - (int)(cnt >> 32)); - gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO), - (int)(cnt & 0xffffffff)); - } -} - -void notrace gic_write_cpu_compare(u64 cnt, int cpu) -{ - unsigned long flags; - - local_irq_save(flags); - - gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), mips_cm_vp_id(cpu)); - - if (mips_cm_is64) { - gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE), cnt); - } else { - gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI), - (int)(cnt >> 32)); - gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO), - (int)(cnt & 0xffffffff)); - } - - local_irq_restore(flags); -} - -u64 gic_read_compare(void) -{ - unsigned int hi, lo; - - if (mips_cm_is64) - return (u64)gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE)); - - hi = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI)); - lo = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO)); - - return (((u64) hi) << 32) + lo; -} - -void gic_start_count(void) -{ - u32 gicconfig; - - /* Start the counter */ - gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG)); - gicconfig &= ~(1 << GIC_SH_CONFIG_COUNTSTOP_SHF); - gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig); -} - -void gic_stop_count(void) -{ - u32 gicconfig; - - /* Stop the counter */ - gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG)); - gicconfig |= 1 << GIC_SH_CONFIG_COUNTSTOP_SHF; - gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig); -} - -#endif - -unsigned gic_read_local_vp_id(void) -{ - unsigned long ident; + unsigned int i; - ident = gic_read(GIC_REG(VPE_LOCAL, GIC_VP_IDENT)); - return ident & GIC_VP_IDENT_VCNUM_MSK; + /* Clear the interrupt's bit in all pcpu_masks */ + for_each_possible_cpu(i) + clear_bit(intr, per_cpu_ptr(pcpu_masks, i)); } static bool gic_local_irq_is_routable(int intr) @@ -250,17 +72,17 @@ static bool gic_local_irq_is_routable(int intr) if (cpu_has_veic) return true; - vpe_ctl = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_CTL)); + vpe_ctl = read_gic_vl_ctl(); switch (intr) { case GIC_LOCAL_INT_TIMER: - return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK; + return vpe_ctl & GIC_VX_CTL_TIMER_ROUTABLE; case GIC_LOCAL_INT_PERFCTR: - return vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK; + return vpe_ctl & GIC_VX_CTL_PERFCNT_ROUTABLE; case GIC_LOCAL_INT_FDC: - return vpe_ctl & GIC_VPE_CTL_FDC_RTBL_MSK; + return vpe_ctl & GIC_VX_CTL_FDC_ROUTABLE; case GIC_LOCAL_INT_SWINT0: case GIC_LOCAL_INT_SWINT1: - return vpe_ctl & GIC_VPE_CTL_SWINT_RTBL_MSK; + return vpe_ctl & GIC_VX_CTL_SWINT_ROUTABLE; default: return true; } @@ -272,15 +94,14 @@ static void gic_bind_eic_interrupt(int irq, int set) irq -= GIC_PIN_TO_VEC_OFFSET; /* Set irq to use shadow set */ - gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_EIC_SHADOW_SET_BASE) + - GIC_VPE_EIC_SS(irq), set); + write_gic_vl_eic_shadow_set(irq, set); } static void gic_send_ipi(struct irq_data *d, unsigned int cpu) { irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d)); - gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(hwirq)); + 
write_gic_wedge(GIC_WEDGE_RW | hwirq); } int gic_get_c0_compare_int(void) @@ -316,47 +137,22 @@ int gic_get_c0_fdc_int(void) GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC)); } -int gic_get_usm_range(struct resource *gic_usm_res) -{ - if (!gic_present) - return -1; - - gic_usm_res->start = __gic_base_addr + USM_VISIBLE_SECTION_OFS; - gic_usm_res->end = gic_usm_res->start + (USM_VISIBLE_SECTION_SIZE - 1); - - return 0; -} - static void gic_handle_shared_int(bool chained) { - unsigned int i, intr, virq, gic_reg_step = mips_cm_is64 ? 8 : 4; + unsigned int intr, virq; unsigned long *pcpu_mask; - unsigned long pending_reg, intrmask_reg; DECLARE_BITMAP(pending, GIC_MAX_INTRS); - DECLARE_BITMAP(intrmask, GIC_MAX_INTRS); /* Get per-cpu bitmaps */ - pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask; - - pending_reg = GIC_REG(SHARED, GIC_SH_PEND); - intrmask_reg = GIC_REG(SHARED, GIC_SH_MASK); - - for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) { - pending[i] = gic_read(pending_reg); - intrmask[i] = gic_read(intrmask_reg); - pending_reg += gic_reg_step; - intrmask_reg += gic_reg_step; + pcpu_mask = this_cpu_ptr(pcpu_masks); - if (!IS_ENABLED(CONFIG_64BIT) || mips_cm_is64) - continue; - - pending[i] |= (u64)gic_read(pending_reg) << 32; - intrmask[i] |= (u64)gic_read(intrmask_reg) << 32; - pending_reg += gic_reg_step; - intrmask_reg += gic_reg_step; - } + if (mips_cm_is64) + __ioread64_copy(pending, addr_gic_pend(), + DIV_ROUND_UP(gic_shared_intrs, 64)); + else + __ioread32_copy(pending, addr_gic_pend(), + DIV_ROUND_UP(gic_shared_intrs, 32)); - bitmap_and(pending, pending, intrmask, gic_shared_intrs); bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs); for_each_set_bit(intr, pending, gic_shared_intrs) { @@ -371,19 +167,29 @@ static void gic_handle_shared_int(bool chained) static void gic_mask_irq(struct irq_data *d) { - gic_reset_mask(GIC_HWIRQ_TO_SHARED(d->hwirq)); + unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq); + + write_gic_rmask(intr); + gic_clear_pcpu_masks(intr); } static void gic_unmask_irq(struct irq_data *d) { - gic_set_mask(GIC_HWIRQ_TO_SHARED(d->hwirq)); + unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq); + unsigned int cpu; + + write_gic_smask(intr); + + gic_clear_pcpu_masks(intr); + cpu = cpumask_first(irq_data_get_effective_affinity_mask(d)); + set_bit(intr, per_cpu_ptr(pcpu_masks, cpu)); } static void gic_ack_irq(struct irq_data *d) { unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq); - gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_CLR(irq)); + write_gic_wedge(irq); } static int gic_set_type(struct irq_data *d, unsigned int type) @@ -395,34 +201,34 @@ static int gic_set_type(struct irq_data *d, unsigned int type) spin_lock_irqsave(&gic_lock, flags); switch (type & IRQ_TYPE_SENSE_MASK) { case IRQ_TYPE_EDGE_FALLING: - gic_set_polarity(irq, GIC_POL_NEG); - gic_set_trigger(irq, GIC_TRIG_EDGE); - gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE); + change_gic_pol(irq, GIC_POL_FALLING_EDGE); + change_gic_trig(irq, GIC_TRIG_EDGE); + change_gic_dual(irq, GIC_DUAL_SINGLE); is_edge = true; break; case IRQ_TYPE_EDGE_RISING: - gic_set_polarity(irq, GIC_POL_POS); - gic_set_trigger(irq, GIC_TRIG_EDGE); - gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE); + change_gic_pol(irq, GIC_POL_RISING_EDGE); + change_gic_trig(irq, GIC_TRIG_EDGE); + change_gic_dual(irq, GIC_DUAL_SINGLE); is_edge = true; break; case IRQ_TYPE_EDGE_BOTH: /* polarity is irrelevant in this case */ - gic_set_trigger(irq, GIC_TRIG_EDGE); - gic_set_dual_edge(irq, GIC_TRIG_DUAL_ENABLE); + change_gic_trig(irq, GIC_TRIG_EDGE); + 
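[Editor's note] After this rewrite a shared interrupt is dispatched by ANDing the hardware pending bits with the current CPU's pcpu_masks entry; the unmask and set-affinity paths keep exactly one CPU's bit set per interrupt, so each interrupt fires only on the CPU it is routed to. The dispatch itself is ordinary bitmap arithmetic; a small single-word userspace model (the kernel uses multi-word bitmaps and its bitmap API):

#include <stdio.h>

static unsigned long pending_hw   = 0x0000000000900004UL; /* bits the GIC reports pending */
static unsigned long pcpu_mask[2] = {
	0x0000000000000004UL,	/* CPU0 owns shared interrupt 2 */
	0x0000000000900000UL,	/* CPU1 owns interrupts 20 and 23 */
};

static void handle_shared_int(int cpu)
{
	unsigned long pending = pending_hw & pcpu_mask[cpu];

	while (pending) {
		int intr = __builtin_ctzl(pending);

		printf("cpu%d: dispatch shared interrupt %d\n", cpu, intr);
		pending &= pending - 1;	/* clear the lowest set bit */
	}
}

int main(void)
{
	handle_shared_int(0);
	handle_shared_int(1);
	return 0;
}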
change_gic_dual(irq, GIC_DUAL_DUAL); is_edge = true; break; case IRQ_TYPE_LEVEL_LOW: - gic_set_polarity(irq, GIC_POL_NEG); - gic_set_trigger(irq, GIC_TRIG_LEVEL); - gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE); + change_gic_pol(irq, GIC_POL_ACTIVE_LOW); + change_gic_trig(irq, GIC_TRIG_LEVEL); + change_gic_dual(irq, GIC_DUAL_SINGLE); is_edge = false; break; case IRQ_TYPE_LEVEL_HIGH: default: - gic_set_polarity(irq, GIC_POL_POS); - gic_set_trigger(irq, GIC_TRIG_LEVEL); - gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE); + change_gic_pol(irq, GIC_POL_ACTIVE_HIGH); + change_gic_trig(irq, GIC_TRIG_LEVEL); + change_gic_dual(irq, GIC_DUAL_SINGLE); is_edge = false; break; } @@ -443,32 +249,28 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool force) { unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq); - cpumask_t tmp = CPU_MASK_NONE; - unsigned long flags; - int i, cpu; + unsigned long flags; + unsigned int cpu; - cpumask_and(&tmp, cpumask, cpu_online_mask); - if (cpumask_empty(&tmp)) + cpu = cpumask_first_and(cpumask, cpu_online_mask); + if (cpu >= NR_CPUS) return -EINVAL; - cpu = cpumask_first(&tmp); - /* Assumption : cpumask refers to a single CPU */ spin_lock_irqsave(&gic_lock, flags); /* Re-route this IRQ */ - gic_map_to_vpe(irq, mips_cm_vp_id(cpu)); + write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu))); /* Update the pcpu_masks */ - for (i = 0; i < min(gic_vpes, NR_CPUS); i++) - clear_bit(irq, pcpu_masks[i].pcpu_mask); - set_bit(irq, pcpu_masks[cpu].pcpu_mask); + gic_clear_pcpu_masks(irq); + if (read_gic_mask(irq)) + set_bit(irq, per_cpu_ptr(pcpu_masks, cpu)); - cpumask_copy(irq_data_get_affinity_mask(d), cpumask); irq_data_update_effective_affinity(d, cpumask_of(cpu)); spin_unlock_irqrestore(&gic_lock, flags); - return IRQ_SET_MASK_OK_NOCOPY; + return IRQ_SET_MASK_OK; } #endif @@ -499,8 +301,8 @@ static void gic_handle_local_int(bool chained) unsigned long pending, masked; unsigned int intr, virq; - pending = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_PEND)); - masked = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_MASK)); + pending = read_gic_vl_pend(); + masked = read_gic_vl_mask(); bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS); @@ -518,14 +320,14 @@ static void gic_mask_local_irq(struct irq_data *d) { int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq); - gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr); + write_gic_vl_rmask(BIT(intr)); } static void gic_unmask_local_irq(struct irq_data *d) { int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq); - gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr); + write_gic_vl_smask(BIT(intr)); } static struct irq_chip gic_local_irq_controller = { @@ -542,9 +344,8 @@ static void gic_mask_local_irq_all_vpes(struct irq_data *d) spin_lock_irqsave(&gic_lock, flags); for (i = 0; i < gic_vpes; i++) { - gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), - mips_cm_vp_id(i)); - gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr); + write_gic_vl_other(mips_cm_vp_id(i)); + write_gic_vo_rmask(BIT(intr)); } spin_unlock_irqrestore(&gic_lock, flags); } @@ -557,9 +358,8 @@ static void gic_unmask_local_irq_all_vpes(struct irq_data *d) spin_lock_irqsave(&gic_lock, flags); for (i = 0; i < gic_vpes; i++) { - gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), - mips_cm_vp_id(i)); - gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr); + write_gic_vl_other(mips_cm_vp_id(i)); + write_gic_vo_smask(BIT(intr)); } spin_unlock_irqrestore(&gic_lock, flags); } @@ -582,103 +382,54 @@ static void gic_irq_dispatch(struct irq_desc *desc) 
gic_handle_shared_int(true); } -static void __init gic_basic_init(void) -{ - unsigned int i; - - board_bind_eic_interrupt = &gic_bind_eic_interrupt; - - /* Setup defaults */ - for (i = 0; i < gic_shared_intrs; i++) { - gic_set_polarity(i, GIC_POL_POS); - gic_set_trigger(i, GIC_TRIG_LEVEL); - gic_reset_mask(i); - } - - for (i = 0; i < gic_vpes; i++) { - unsigned int j; - - gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), - mips_cm_vp_id(i)); - for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) { - if (!gic_local_irq_is_routable(j)) - continue; - gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j); - } - } -} - static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw) { int intr = GIC_HWIRQ_TO_LOCAL(hw); - int ret = 0; int i; unsigned long flags; + u32 val; if (!gic_local_irq_is_routable(intr)) return -EPERM; + if (intr > GIC_LOCAL_INT_FDC) { + pr_err("Invalid local IRQ %d\n", intr); + return -EINVAL; + } + + if (intr == GIC_LOCAL_INT_TIMER) { + /* CONFIG_MIPS_CMP workaround (see __gic_init) */ + val = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin; + } else { + val = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin; + } + spin_lock_irqsave(&gic_lock, flags); for (i = 0; i < gic_vpes; i++) { - u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin; - - gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), - mips_cm_vp_id(i)); - - switch (intr) { - case GIC_LOCAL_INT_WD: - gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val); - break; - case GIC_LOCAL_INT_COMPARE: - gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP), - val); - break; - case GIC_LOCAL_INT_TIMER: - /* CONFIG_MIPS_CMP workaround (see __gic_init) */ - val = GIC_MAP_TO_PIN_MSK | timer_cpu_pin; - gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP), - val); - break; - case GIC_LOCAL_INT_PERFCTR: - gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP), - val); - break; - case GIC_LOCAL_INT_SWINT0: - gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP), - val); - break; - case GIC_LOCAL_INT_SWINT1: - gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP), - val); - break; - case GIC_LOCAL_INT_FDC: - gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val); - break; - default: - pr_err("Invalid local IRQ %d\n", intr); - ret = -EINVAL; - break; - } + write_gic_vl_other(mips_cm_vp_id(i)); + write_gic_vo_map(intr, val); } spin_unlock_irqrestore(&gic_lock, flags); - return ret; + return 0; } static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq, - irq_hw_number_t hw, unsigned int vpe) + irq_hw_number_t hw, unsigned int cpu) { int intr = GIC_HWIRQ_TO_SHARED(hw); + struct irq_data *data; unsigned long flags; - int i; + + data = irq_get_irq_data(virq); spin_lock_irqsave(&gic_lock, flags); - gic_map_to_pin(intr, gic_cpu_pin); - gic_map_to_vpe(intr, mips_cm_vp_id(vpe)); - for (i = 0; i < min(gic_vpes, NR_CPUS); i++) - clear_bit(intr, pcpu_masks[i].pcpu_mask); - set_bit(intr, pcpu_masks[vpe].pcpu_mask); + write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin); + write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu))); + gic_clear_pcpu_masks(intr); + set_bit(intr, per_cpu_ptr(pcpu_masks, cpu)); + irq_data_update_effective_affinity(data, cpumask_of(cpu)); spin_unlock_irqrestore(&gic_lock, flags); return 0; @@ -885,34 +636,69 @@ static const struct irq_domain_ops gic_ipi_domain_ops = { .match = gic_ipi_domain_match, }; -static void __init __gic_init(unsigned long gic_base_addr, - unsigned long gic_addrspace_size, - unsigned int cpu_vec, unsigned int irqbase, - struct device_node *node) + +static int __init gic_of_init(struct device_node 
*node, + struct device_node *parent) { - unsigned int gicconfig, cpu; - unsigned int v[2]; + unsigned int cpu_vec, i, j, gicconfig, cpu, v[2]; + unsigned long reserved; + phys_addr_t gic_base; + struct resource res; + size_t gic_len; - __gic_base_addr = gic_base_addr; + /* Find the first available CPU vector. */ + i = 0; + reserved = (C_SW0 | C_SW1) >> __ffs(C_SW0); + while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors", + i++, &cpu_vec)) + reserved |= BIT(cpu_vec); - gic_base = ioremap_nocache(gic_base_addr, gic_addrspace_size); + cpu_vec = find_first_zero_bit(&reserved, hweight_long(ST0_IM)); + if (cpu_vec == hweight_long(ST0_IM)) { + pr_err("No CPU vectors available for GIC\n"); + return -ENODEV; + } - gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG)); - gic_shared_intrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >> - GIC_SH_CONFIG_NUMINTRS_SHF; - gic_shared_intrs = ((gic_shared_intrs + 1) * 8); + if (of_address_to_resource(node, 0, &res)) { + /* + * Probe the CM for the GIC base address if not specified + * in the device-tree. + */ + if (mips_cm_present()) { + gic_base = read_gcr_gic_base() & + ~CM_GCR_GIC_BASE_GICEN; + gic_len = 0x20000; + } else { + pr_err("Failed to get GIC memory range\n"); + return -ENODEV; + } + } else { + gic_base = res.start; + gic_len = resource_size(&res); + } + + if (mips_cm_present()) { + write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN); + /* Ensure GIC region is enabled before trying to access it */ + __sync(); + } + + mips_gic_base = ioremap_nocache(gic_base, gic_len); - gic_vpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >> - GIC_SH_CONFIG_NUMVPES_SHF; + gicconfig = read_gic_config(); + gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS; + gic_shared_intrs >>= __ffs(GIC_CONFIG_NUMINTERRUPTS); + gic_shared_intrs = (gic_shared_intrs + 1) * 8; + + gic_vpes = gicconfig & GIC_CONFIG_PVPS; + gic_vpes >>= __ffs(GIC_CONFIG_PVPS); gic_vpes = gic_vpes + 1; if (cpu_has_veic) { /* Set EIC mode for all VPEs */ for_each_present_cpu(cpu) { - gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), - mips_cm_vp_id(cpu)); - gic_write(GIC_REG(VPE_OTHER, GIC_VPE_CTL), - GIC_VPE_CTL_EIC_MODE_MSK); + write_gic_vl_other(mips_cm_vp_id(cpu)); + write_gic_vo_ctl(GIC_VX_CTL_EIC); } /* Always use vector 1 in EIC mode */ @@ -937,9 +723,7 @@ static void __init __gic_init(unsigned long gic_base_addr, */ if (IS_ENABLED(CONFIG_MIPS_CMP) && gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) { - timer_cpu_pin = gic_read32(GIC_REG(VPE_LOCAL, - GIC_VPE_TIMER_MAP)) & - GIC_MAP_MSK; + timer_cpu_pin = read_gic_vl_timer_map() & GIC_MAP_PIN_MAP; irq_set_chained_handler(MIPS_CPU_IRQ_BASE + GIC_CPU_PIN_OFFSET + timer_cpu_pin, @@ -950,17 +734,21 @@ static void __init __gic_init(unsigned long gic_base_addr, } gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS + - gic_shared_intrs, irqbase, + gic_shared_intrs, 0, &gic_irq_domain_ops, NULL); - if (!gic_irq_domain) - panic("Failed to add GIC IRQ domain"); + if (!gic_irq_domain) { + pr_err("Failed to add GIC IRQ domain"); + return -ENXIO; + } gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain, IRQ_DOMAIN_FLAG_IPI_PER_CPU, GIC_NUM_LOCAL_INTRS + gic_shared_intrs, node, &gic_ipi_domain_ops, NULL); - if (!gic_ipi_domain) - panic("Failed to add GIC IPI domain"); + if (!gic_ipi_domain) { + pr_err("Failed to add GIC IPI domain"); + return -ENXIO; + } irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI); @@ -975,64 +763,25 @@ static void __init __gic_init(unsigned long gic_base_addr, } bitmap_copy(ipi_available, 
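[Editor's note] The rewritten gic_of_init() above picks the CPU interrupt pin by collecting reserved vectors into a bitmap (the two software interrupts plus anything listed in the "mti,reserved-cpu-vectors" property) and taking the first clear bit. A runnable model of that selection, with the reserved set hard-coded instead of read from the device tree:

#include <stdio.h>

#define NUM_CPU_VECTORS	8	/* MIPS exposes 8 maskable interrupt lines */

static int first_free_vector(unsigned long reserved)
{
	int vec;

	for (vec = 0; vec < NUM_CPU_VECTORS; vec++)
		if (!(reserved & (1UL << vec)))
			return vec;
	return -1;	/* nothing left for the GIC */
}

int main(void)
{
	/* Software interrupts 0 and 1 are always reserved; pretend the
	 * device tree also reserves vector 2. */
	unsigned long reserved = (1UL << 0) | (1UL << 1) | (1UL << 2);
	int vec = first_free_vector(reserved);

	if (vec < 0)
		printf("No CPU vectors available for GIC\n");
	else
		printf("GIC routed to CPU vector %d\n", vec);
	return 0;
}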
ipi_resrv, GIC_MAX_INTRS); - gic_basic_init(); -} - -void __init gic_init(unsigned long gic_base_addr, - unsigned long gic_addrspace_size, - unsigned int cpu_vec, unsigned int irqbase) -{ - __gic_init(gic_base_addr, gic_addrspace_size, cpu_vec, irqbase, NULL); -} -static int __init gic_of_init(struct device_node *node, - struct device_node *parent) -{ - struct resource res; - unsigned int cpu_vec, i = 0, reserved = 0; - phys_addr_t gic_base; - size_t gic_len; + board_bind_eic_interrupt = &gic_bind_eic_interrupt; - /* Find the first available CPU vector. */ - while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors", - i++, &cpu_vec)) - reserved |= BIT(cpu_vec); - for (cpu_vec = 2; cpu_vec < 8; cpu_vec++) { - if (!(reserved & BIT(cpu_vec))) - break; - } - if (cpu_vec == 8) { - pr_err("No CPU vectors available for GIC\n"); - return -ENODEV; + /* Setup defaults */ + for (i = 0; i < gic_shared_intrs; i++) { + change_gic_pol(i, GIC_POL_ACTIVE_HIGH); + change_gic_trig(i, GIC_TRIG_LEVEL); + write_gic_rmask(i); } - if (of_address_to_resource(node, 0, &res)) { - /* - * Probe the CM for the GIC base address if not specified - * in the device-tree. - */ - if (mips_cm_present()) { - gic_base = read_gcr_gic_base() & - ~CM_GCR_GIC_BASE_GICEN_MSK; - gic_len = 0x20000; - } else { - pr_err("Failed to get GIC memory range\n"); - return -ENODEV; + for (i = 0; i < gic_vpes; i++) { + write_gic_vl_other(mips_cm_vp_id(i)); + for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) { + if (!gic_local_irq_is_routable(j)) + continue; + write_gic_vo_rmask(BIT(j)); } - } else { - gic_base = res.start; - gic_len = resource_size(&res); } - if (mips_cm_present()) { - write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK); - /* Ensure GIC region is enabled before trying to access it */ - __sync(); - } - gic_present = true; - - __gic_init(gic_base, gic_len, cpu_vec, 0, node); - return 0; } IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init); diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c index 6c44609fd83a..cd2b3c69771a 100644 --- a/drivers/isdn/i4l/isdn_ppp.c +++ b/drivers/isdn/i4l/isdn_ppp.c @@ -825,7 +825,6 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count) isdn_net_local *lp; struct ippp_struct *is; int proto; - unsigned char protobuf[4]; is = file->private_data; @@ -839,24 +838,28 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count) if (!lp) printk(KERN_DEBUG "isdn_ppp_write: lp == NULL\n"); else { - /* - * Don't reset huptimer for - * LCP packets. (Echo requests). - */ - if (copy_from_user(protobuf, buf, 4)) - return -EFAULT; - proto = PPP_PROTOCOL(protobuf); - if (proto != PPP_LCP) - lp->huptimer = 0; + if (lp->isdn_device < 0 || lp->isdn_channel < 0) { + unsigned char protobuf[4]; + /* + * Don't reset huptimer for + * LCP packets. (Echo requests). + */ + if (copy_from_user(protobuf, buf, 4)) + return -EFAULT; + + proto = PPP_PROTOCOL(protobuf); + if (proto != PPP_LCP) + lp->huptimer = 0; - if (lp->isdn_device < 0 || lp->isdn_channel < 0) return 0; + } if ((dev->drv[lp->isdn_device]->flags & DRV_FLAG_RUNNING) && lp->dialstate == 0 && (lp->flags & ISDN_NET_CONNECTED)) { unsigned short hl; struct sk_buff *skb; + unsigned char *cpy_buf; /* * we need to reserve enough space in front of * sk_buff. 
old call to dev_alloc_skb only reserved @@ -869,11 +872,21 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count) return count; } skb_reserve(skb, hl); - if (copy_from_user(skb_put(skb, count), buf, count)) + cpy_buf = skb_put(skb, count); + if (copy_from_user(cpy_buf, buf, count)) { kfree_skb(skb); return -EFAULT; } + + /* + * Don't reset huptimer for + * LCP packets. (Echo requests). + */ + proto = PPP_PROTOCOL(cpy_buf); + if (proto != PPP_LCP) + lp->huptimer = 0; + if (is->debug & 0x40) { printk(KERN_DEBUG "ppp xmit: len %d\n", (int) skb->len); isdn_ppp_frame_log("xmit", skb->data, skb->len, 32, is->unit, lp->ppp_slot); diff --git a/drivers/leds/leds-as3645a.c b/drivers/leds/leds-as3645a.c index bbbbe0898233..9a257f969300 100644 --- a/drivers/leds/leds-as3645a.c +++ b/drivers/leds/leds-as3645a.c @@ -112,6 +112,10 @@ #define AS_PEAK_mA_TO_REG(a) \ ((min_t(u32, AS_PEAK_mA_MAX, a) - 1250) / 250) +/* LED numbers for Devicetree */ +#define AS_LED_FLASH 0 +#define AS_LED_INDICATOR 1 + enum as_mode { AS_MODE_EXT_TORCH = 0 << AS_CONTROL_MODE_SETTING_SHIFT, AS_MODE_INDICATOR = 1 << AS_CONTROL_MODE_SETTING_SHIFT, @@ -491,10 +495,29 @@ static int as3645a_parse_node(struct as3645a *flash, struct device_node *node) { struct as3645a_config *cfg = &flash->cfg; + struct device_node *child; const char *name; int rval; - flash->flash_node = of_get_child_by_name(node, "flash"); + for_each_child_of_node(node, child) { + u32 id = 0; + + of_property_read_u32(child, "reg", &id); + + switch (id) { + case AS_LED_FLASH: + flash->flash_node = of_node_get(child); + break; + case AS_LED_INDICATOR: + flash->indicator_node = of_node_get(child); + break; + default: + dev_warn(&flash->client->dev, + "unknown LED %u encountered, ignoring\n", id); + break; + } + } + if (!flash->flash_node) { dev_err(&flash->client->dev, "can't find flash node\n"); return -ENODEV; @@ -534,11 +557,10 @@ static int as3645a_parse_node(struct as3645a *flash, of_property_read_u32(flash->flash_node, "voltage-reference", &cfg->voltage_reference); - of_property_read_u32(flash->flash_node, "peak-current-limit", + of_property_read_u32(flash->flash_node, "ams,input-max-microamp", &cfg->peak); cfg->peak = AS_PEAK_mA_TO_REG(cfg->peak); - flash->indicator_node = of_get_child_by_name(node, "indicator"); if (!flash->indicator_node) { dev_warn(&flash->client->dev, "can't find indicator node\n"); @@ -721,6 +743,7 @@ static int as3645a_remove(struct i2c_client *client) as3645a_set_control(flash, AS_MODE_EXT_TORCH, false); v4l2_flash_release(flash->vf); + v4l2_flash_release(flash->vfind); led_classdev_flash_unregister(&flash->fled); led_classdev_unregister(&flash->iled_cdev); diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c index 0f9ed1ea0e89..492789f56896 100644 --- a/drivers/leds/leds-clevo-mail.c +++ b/drivers/leds/leds-clevo-mail.c @@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id) * detected as working, but in reality it is not) as low as * possible. 
*/ -static struct dmi_system_id clevo_mail_led_dmi_table[] __initdata = { +static const struct dmi_system_id clevo_mail_led_dmi_table[] __initconst = { { .callback = clevo_mail_led_dmi_callback, .ident = "Clevo D410J", diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c index 732eb86bc1a5..a9db8674cd02 100644 --- a/drivers/leds/leds-ss4200.c +++ b/drivers/leds/leds-ss4200.c @@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection"); * detected as working, but in reality it is not) as low as * possible. */ -static struct dmi_system_id nas_led_whitelist[] __initdata = { +static const struct dmi_system_id nas_led_whitelist[] __initconst = { { .callback = ss4200_led_dmi_callback, .ident = "Intel SS4200-E", diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 9601225e0ae9..d216a8f7bc22 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -64,6 +64,12 @@ #define DM_BUFIO_BLOCK_SIZE_GFP_LIMIT (PAGE_SIZE << (MAX_ORDER - 1)) /* + * Align buffer writes to this boundary. + * Tests show that SSDs have the highest IOPS when using 4k writes. + */ +#define DM_BUFIO_WRITE_ALIGN 4096 + +/* * dm_buffer->list_mode */ #define LIST_CLEAN 0 @@ -149,6 +155,10 @@ struct dm_buffer { blk_status_t write_error; unsigned long state; unsigned long last_accessed; + unsigned dirty_start; + unsigned dirty_end; + unsigned write_start; + unsigned write_end; struct dm_bufio_client *c; struct list_head write_list; struct bio bio; @@ -560,7 +570,7 @@ static void dmio_complete(unsigned long error, void *context) } static void use_dmio(struct dm_buffer *b, int rw, sector_t sector, - unsigned n_sectors, bio_end_io_t *end_io) + unsigned n_sectors, unsigned offset, bio_end_io_t *end_io) { int r; struct dm_io_request io_req = { @@ -578,10 +588,10 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t sector, if (b->data_mode != DATA_MODE_VMALLOC) { io_req.mem.type = DM_IO_KMEM; - io_req.mem.ptr.addr = b->data; + io_req.mem.ptr.addr = (char *)b->data + offset; } else { io_req.mem.type = DM_IO_VMA; - io_req.mem.ptr.vma = b->data; + io_req.mem.ptr.vma = (char *)b->data + offset; } b->bio.bi_end_io = end_io; @@ -609,10 +619,10 @@ static void inline_endio(struct bio *bio) } static void use_inline_bio(struct dm_buffer *b, int rw, sector_t sector, - unsigned n_sectors, bio_end_io_t *end_io) + unsigned n_sectors, unsigned offset, bio_end_io_t *end_io) { char *ptr; - int len; + unsigned len; bio_init(&b->bio, b->bio_vec, DM_BUFIO_INLINE_VECS); b->bio.bi_iter.bi_sector = sector; @@ -625,29 +635,20 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t sector, b->bio.bi_private = end_io; bio_set_op_attrs(&b->bio, rw, 0); - /* - * We assume that if len >= PAGE_SIZE ptr is page-aligned. - * If len < PAGE_SIZE the buffer doesn't cross page boundary. - */ - ptr = b->data; + ptr = (char *)b->data + offset; len = n_sectors << SECTOR_SHIFT; - if (len >= PAGE_SIZE) - BUG_ON((unsigned long)ptr & (PAGE_SIZE - 1)); - else - BUG_ON((unsigned long)ptr & (len - 1)); - do { - if (!bio_add_page(&b->bio, virt_to_page(ptr), - len < PAGE_SIZE ? 
len : PAGE_SIZE, + unsigned this_step = min((unsigned)(PAGE_SIZE - offset_in_page(ptr)), len); + if (!bio_add_page(&b->bio, virt_to_page(ptr), this_step, offset_in_page(ptr))) { BUG_ON(b->c->block_size <= PAGE_SIZE); - use_dmio(b, rw, sector, n_sectors, end_io); + use_dmio(b, rw, sector, n_sectors, offset, end_io); return; } - len -= PAGE_SIZE; - ptr += PAGE_SIZE; + len -= this_step; + ptr += this_step; } while (len > 0); submit_bio(&b->bio); @@ -657,18 +658,33 @@ static void submit_io(struct dm_buffer *b, int rw, bio_end_io_t *end_io) { unsigned n_sectors; sector_t sector; - - if (rw == WRITE && b->c->write_callback) - b->c->write_callback(b); + unsigned offset, end; sector = (b->block << b->c->sectors_per_block_bits) + b->c->start; - n_sectors = 1 << b->c->sectors_per_block_bits; + + if (rw != WRITE) { + n_sectors = 1 << b->c->sectors_per_block_bits; + offset = 0; + } else { + if (b->c->write_callback) + b->c->write_callback(b); + offset = b->write_start; + end = b->write_end; + offset &= -DM_BUFIO_WRITE_ALIGN; + end += DM_BUFIO_WRITE_ALIGN - 1; + end &= -DM_BUFIO_WRITE_ALIGN; + if (unlikely(end > b->c->block_size)) + end = b->c->block_size; + + sector += offset >> SECTOR_SHIFT; + n_sectors = (end - offset) >> SECTOR_SHIFT; + } if (n_sectors <= ((DM_BUFIO_INLINE_VECS * PAGE_SIZE) >> SECTOR_SHIFT) && b->data_mode != DATA_MODE_VMALLOC) - use_inline_bio(b, rw, sector, n_sectors, end_io); + use_inline_bio(b, rw, sector, n_sectors, offset, end_io); else - use_dmio(b, rw, sector, n_sectors, end_io); + use_dmio(b, rw, sector, n_sectors, offset, end_io); } /*---------------------------------------------------------------- @@ -720,6 +736,9 @@ static void __write_dirty_buffer(struct dm_buffer *b, clear_bit(B_DIRTY, &b->state); wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); + b->write_start = b->dirty_start; + b->write_end = b->dirty_end; + if (!write_list) submit_io(b, WRITE, write_endio); else @@ -1221,19 +1240,37 @@ void dm_bufio_release(struct dm_buffer *b) } EXPORT_SYMBOL_GPL(dm_bufio_release); -void dm_bufio_mark_buffer_dirty(struct dm_buffer *b) +void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b, + unsigned start, unsigned end) { struct dm_bufio_client *c = b->c; + BUG_ON(start >= end); + BUG_ON(end > b->c->block_size); + dm_bufio_lock(c); BUG_ON(test_bit(B_READING, &b->state)); - if (!test_and_set_bit(B_DIRTY, &b->state)) + if (!test_and_set_bit(B_DIRTY, &b->state)) { + b->dirty_start = start; + b->dirty_end = end; __relink_lru(b, LIST_DIRTY); + } else { + if (start < b->dirty_start) + b->dirty_start = start; + if (end > b->dirty_end) + b->dirty_end = end; + } dm_bufio_unlock(c); } +EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty); + +void dm_bufio_mark_buffer_dirty(struct dm_buffer *b) +{ + dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size); +} EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty); void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c) @@ -1398,6 +1435,8 @@ retry: wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); set_bit(B_DIRTY, &b->state); + b->dirty_start = 0; + b->dirty_end = c->block_size; __unlink_buffer(b); __link_buffer(b, new_block, LIST_DIRTY); } else { diff --git a/drivers/md/dm-bufio.h b/drivers/md/dm-bufio.h index b6d8f53ec15b..be732d3f8611 100644 --- a/drivers/md/dm-bufio.h +++ b/drivers/md/dm-bufio.h @@ -94,6 +94,15 @@ void dm_bufio_release(struct dm_buffer *b); void dm_bufio_mark_buffer_dirty(struct dm_buffer *b); /* + * Mark a part of the buffer dirty. 
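[Editor's note] submit_io() above widens the dirty byte range to DM_BUFIO_WRITE_ALIGN boundaries: the start is rounded down with x & -ALIGN and the end rounded up with (x + ALIGN - 1) & -ALIGN, which is valid because the alignment is a power of two, then clamped to the block size. A runnable check of that arithmetic with made-up numbers:

#include <stdio.h>

#define WRITE_ALIGN	4096u	/* must be a power of two */

int main(void)
{
	unsigned dirty_start = 5200, dirty_end = 9000, block_size = 65536;
	unsigned start = dirty_start & -WRITE_ALIGN;			/* round down */
	unsigned end   = (dirty_end + WRITE_ALIGN - 1) & -WRITE_ALIGN;	/* round up */

	if (end > block_size)
		end = block_size;

	printf("write [%u, %u) covers dirty [%u, %u)\n",
	       start, end, dirty_start, dirty_end);
	return 0;
}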
+ * + * The specified part of the buffer is scheduled to be written. dm-bufio may + * write the specified part of the buffer or it may write a larger superset. + */ +void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b, + unsigned start, unsigned end); + +/* * Initiate writing of dirty buffers, without waiting for completion. */ void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c); diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index dcac25c2be7a..8785134c9f1f 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -2306,7 +2306,7 @@ static void init_features(struct cache_features *cf) static int parse_features(struct cache_args *ca, struct dm_arg_set *as, char **error) { - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {0, 2, "Invalid number of cache feature arguments"}, }; @@ -2348,7 +2348,7 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as, static int parse_policy(struct cache_args *ca, struct dm_arg_set *as, char **error) { - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {0, 1024, "Invalid number of policy arguments"}, }; diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 54aef8ed97db..a55ffd4f5933 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -2529,7 +2529,7 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar { struct crypt_config *cc = ti->private; struct dm_arg_set as; - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {0, 6, "Invalid number of feature args"}, }; unsigned int opt_params, val; diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index 7146c2d9762d..b82cb1ab1eaa 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c @@ -51,7 +51,7 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc, unsigned argc; const char *arg_name; - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {0, 6, "Invalid number of feature args"}, {1, UINT_MAX, "Invalid corrupt bio byte"}, {0, 255, "Invalid corrupt value to write into bio byte (0-255)"}, @@ -178,7 +178,7 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc, */ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv) { - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {0, UINT_MAX, "Invalid up interval"}, {0, UINT_MAX, "Invalid down interval"}, }; diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 27c0f223f8ea..096fe9b66c50 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -225,6 +225,8 @@ struct dm_integrity_c { struct alg_spec internal_hash_alg; struct alg_spec journal_crypt_alg; struct alg_spec journal_mac_alg; + + atomic64_t number_of_mismatches; }; struct dm_integrity_range { @@ -298,7 +300,7 @@ static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...) 
/* * DM Integrity profile, protection is performed layer above (dm-crypt) */ -static struct blk_integrity_profile dm_integrity_profile = { +static const struct blk_integrity_profile dm_integrity_profile = { .name = "DM-DIF-EXT-TAG", .generate_fn = NULL, .verify_fn = NULL, @@ -310,6 +312,8 @@ static void dm_integrity_dtr(struct dm_target *ti); static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err) { + if (err == -EILSEQ) + atomic64_inc(&ic->number_of_mismatches); if (!cmpxchg(&ic->failed, 0, err)) DMERR("Error on %s: %d", msg, err); } @@ -770,13 +774,13 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi unsigned i; io_comp.ic = ic; - io_comp.comp = COMPLETION_INITIALIZER_ONSTACK(io_comp.comp); + init_completion(&io_comp.comp); if (commit_start + commit_sections <= ic->journal_sections) { io_comp.in_flight = (atomic_t)ATOMIC_INIT(1); if (ic->journal_io) { crypt_comp_1.ic = ic; - crypt_comp_1.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_1.comp); + init_completion(&crypt_comp_1.comp); crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0); encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1); wait_for_completion_io(&crypt_comp_1.comp); @@ -792,18 +796,18 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi to_end = ic->journal_sections - commit_start; if (ic->journal_io) { crypt_comp_1.ic = ic; - crypt_comp_1.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_1.comp); + init_completion(&crypt_comp_1.comp); crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0); encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1); if (try_wait_for_completion(&crypt_comp_1.comp)) { rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp); - crypt_comp_1.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_1.comp); + reinit_completion(&crypt_comp_1.comp); crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0); encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1); wait_for_completion_io(&crypt_comp_1.comp); } else { crypt_comp_2.ic = ic; - crypt_comp_2.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_2.comp); + init_completion(&crypt_comp_2.comp); crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0); encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2); wait_for_completion_io(&crypt_comp_1.comp); @@ -1041,7 +1045,7 @@ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, se memcpy(tag, dp, to_copy); } else if (op == TAG_WRITE) { memcpy(dp, tag, to_copy); - dm_bufio_mark_buffer_dirty(b); + dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy); } else { /* e.g.: op == TAG_CMP */ if (unlikely(memcmp(dp, tag, to_copy))) { @@ -1275,6 +1279,7 @@ again: DMERR("Checksum failed at sector 0x%llx", (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size))); r = -EILSEQ; + atomic64_inc(&ic->number_of_mismatches); } if (likely(checksums != checksums_onstack)) kfree(checksums); @@ -1676,7 +1681,7 @@ sleep: dio->in_flight = (atomic_t)ATOMIC_INIT(2); if (need_sync_io) { - read_comp = COMPLETION_INITIALIZER_ONSTACK(read_comp); + init_completion(&read_comp); dio->completion = &read_comp; } else dio->completion = NULL; @@ -1700,7 +1705,11 @@ sleep: if (need_sync_io) { wait_for_completion_io(&read_comp); - integrity_metadata(&dio->work); + if (likely(!bio->bi_status)) + integrity_metadata(&dio->work); + else + dec_in_flight(dio); + } else { INIT_WORK(&dio->work, integrity_metadata); queue_work(ic->metadata_wq, 
&dio->work); @@ -1834,7 +1843,7 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start, comp.ic = ic; comp.in_flight = (atomic_t)ATOMIC_INIT(1); - comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp); + init_completion(&comp.comp); i = write_start; for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) { @@ -2061,7 +2070,7 @@ static void replay_journal(struct dm_integrity_c *ic) if (ic->journal_io) { struct journal_completion crypt_comp; crypt_comp.ic = ic; - crypt_comp.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp.comp); + init_completion(&crypt_comp.comp); crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0); encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp); wait_for_completion(&crypt_comp.comp); @@ -2233,7 +2242,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type, switch (type) { case STATUSTYPE_INFO: - result[0] = '\0'; + DMEMIT("%llu", (unsigned long long)atomic64_read(&ic->number_of_mismatches)); break; case STATUSTYPE_TABLE: { @@ -2634,7 +2643,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error) memset(iv, 0x00, ivsize); skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, iv); - comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp); + init_completion(&comp.comp); comp.in_flight = (atomic_t)ATOMIC_INIT(1); if (do_crypt(true, req, &comp)) wait_for_completion(&comp.comp); @@ -2691,7 +2700,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error) sg_init_one(&sg, crypt_data, crypt_len); skcipher_request_set_crypt(req, &sg, &sg, crypt_len, iv); - comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp); + init_completion(&comp.comp); comp.in_flight = (atomic_t)ATOMIC_INIT(1); if (do_crypt(true, req, &comp)) wait_for_completion(&comp.comp); @@ -2778,7 +2787,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) int r; unsigned extra_args; struct dm_arg_set as; - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {0, 9, "Invalid number of feature args"}, }; unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec; @@ -2806,6 +2815,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) bio_list_init(&ic->flush_bio_list); init_waitqueue_head(&ic->copy_to_journal_wait); init_completion(&ic->crypto_backoff); + atomic64_set(&ic->number_of_mismatches, 0); r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev); if (r) { @@ -3202,7 +3212,7 @@ static void dm_integrity_dtr(struct dm_target *ti) static struct target_type integrity_target = { .name = "integrity", - .version = {1, 0, 0}, + .version = {1, 1, 0}, .module = THIS_MODULE, .features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY, .ctr = dm_integrity_ctr, diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index e06f0ef7d2ec..8756a6850431 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -1629,7 +1629,7 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para *---------------------------------------------------------------*/ static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags) { - static struct { + static const struct { int cmd; int flags; ioctl_fn fn; diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index 405eca206d67..d5f8eff7c11d 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c @@ -184,20 +184,6 @@ static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff, 
return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i); } -static void linear_dax_flush(struct dm_target *ti, pgoff_t pgoff, void *addr, - size_t size) -{ - struct linear_c *lc = ti->private; - struct block_device *bdev = lc->dev->bdev; - struct dax_device *dax_dev = lc->dev->dax_dev; - sector_t dev_sector, sector = pgoff * PAGE_SECTORS; - - dev_sector = linear_map_sector(ti, sector); - if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(size, PAGE_SIZE), &pgoff)) - return; - dax_flush(dax_dev, pgoff, addr, size); -} - static struct target_type linear_target = { .name = "linear", .version = {1, 4, 0}, @@ -212,7 +198,6 @@ static struct target_type linear_target = { .iterate_devices = linear_iterate_devices, .direct_access = linear_dax_direct_access, .dax_copy_from_iter = linear_dax_copy_from_iter, - .dax_flush = linear_dax_flush, }; int __init dm_linear_init(void) diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c index 534a254eb977..8b80a9ce9ea9 100644 --- a/drivers/md/dm-log-writes.c +++ b/drivers/md/dm-log-writes.c @@ -100,6 +100,7 @@ struct log_writes_c { struct dm_dev *logdev; u64 logged_entries; u32 sectorsize; + u32 sectorshift; atomic_t io_blocks; atomic_t pending_blocks; sector_t next_sector; @@ -128,6 +129,18 @@ struct per_bio_data { struct pending_block *block; }; +static inline sector_t bio_to_dev_sectors(struct log_writes_c *lc, + sector_t sectors) +{ + return sectors >> (lc->sectorshift - SECTOR_SHIFT); +} + +static inline sector_t dev_to_bio_sectors(struct log_writes_c *lc, + sector_t sectors) +{ + return sectors << (lc->sectorshift - SECTOR_SHIFT); +} + static void put_pending_block(struct log_writes_c *lc) { if (atomic_dec_and_test(&lc->pending_blocks)) { @@ -253,7 +266,7 @@ static int log_one_block(struct log_writes_c *lc, if (!block->vec_cnt) goto out; - sector++; + sector += dev_to_bio_sectors(lc, 1); atomic_inc(&lc->io_blocks); bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt, BIO_MAX_PAGES)); @@ -354,10 +367,9 @@ static int log_writes_kthread(void *arg) goto next; sector = lc->next_sector; - if (block->flags & LOG_DISCARD_FLAG) - lc->next_sector++; - else - lc->next_sector += block->nr_sectors + 1; + if (!(block->flags & LOG_DISCARD_FLAG)) + lc->next_sector += dev_to_bio_sectors(lc, block->nr_sectors); + lc->next_sector += dev_to_bio_sectors(lc, 1); /* * Apparently the size of the device may not be known @@ -399,7 +411,7 @@ next: if (!try_to_freeze()) { set_current_state(TASK_INTERRUPTIBLE); if (!kthread_should_stop() && - !atomic_read(&lc->pending_blocks)) + list_empty(&lc->logging_blocks)) schedule(); __set_current_state(TASK_RUNNING); } @@ -435,7 +447,6 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv) INIT_LIST_HEAD(&lc->unflushed_blocks); INIT_LIST_HEAD(&lc->logging_blocks); init_waitqueue_head(&lc->wait); - lc->sectorsize = 1 << SECTOR_SHIFT; atomic_set(&lc->io_blocks, 0); atomic_set(&lc->pending_blocks, 0); @@ -455,6 +466,8 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad; } + lc->sectorsize = bdev_logical_block_size(lc->dev->bdev); + lc->sectorshift = ilog2(lc->sectorsize); lc->log_kthread = kthread_run(log_writes_kthread, lc, "log-write"); if (IS_ERR(lc->log_kthread)) { ret = PTR_ERR(lc->log_kthread); @@ -464,8 +477,12 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad; } - /* We put the super at sector 0, start logging at sector 1 */ - lc->next_sector = 1; + /* + * next_sector is in 512b sectors to correspond to what bi_sector 
expects. + * The super starts at sector 0, and the next_sector is the next logical + * one based on the sectorsize of the device. + */ + lc->next_sector = lc->sectorsize >> SECTOR_SHIFT; lc->logging_enabled = true; lc->end_sector = logdev_last_sector(lc); lc->device_supports_discard = true; @@ -599,8 +616,8 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio) if (discard_bio) block->flags |= LOG_DISCARD_FLAG; - block->sector = bio->bi_iter.bi_sector; - block->nr_sectors = bio_sectors(bio); + block->sector = bio_to_dev_sectors(lc, bio->bi_iter.bi_sector); + block->nr_sectors = bio_to_dev_sectors(lc, bio_sectors(bio)); /* We don't need the data, just submit */ if (discard_bio) { @@ -767,9 +784,12 @@ static void log_writes_io_hints(struct dm_target *ti, struct queue_limits *limit if (!q || !blk_queue_discard(q)) { lc->device_supports_discard = false; - limits->discard_granularity = 1 << SECTOR_SHIFT; + limits->discard_granularity = lc->sectorsize; limits->max_discard_sectors = (UINT_MAX >> SECTOR_SHIFT); } + limits->logical_block_size = bdev_logical_block_size(lc->dev->bdev); + limits->physical_block_size = bdev_physical_block_size(lc->dev->bdev); + limits->io_min = limits->physical_block_size; } static struct target_type log_writes_target = { diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 96aedaac2c64..11f273d2f018 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -632,6 +632,10 @@ static void process_queued_bios(struct work_struct *work) case DM_MAPIO_REMAPPED: generic_make_request(bio); break; + case 0: + break; + default: + WARN_ONCE(true, "__multipath_map_bio() returned %d\n", r); } } blk_finish_plug(&plug); @@ -698,7 +702,7 @@ static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg, struct path_selector_type *pst; unsigned ps_argc; - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {0, 1024, "invalid number of path selector args"}, }; @@ -822,7 +826,7 @@ retain: static struct priority_group *parse_priority_group(struct dm_arg_set *as, struct multipath *m) { - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {1, 1024, "invalid number of paths"}, {0, 1024, "invalid number of selector args"} }; @@ -898,7 +902,7 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m) int ret; struct dm_target *ti = m->ti; - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {0, 1024, "invalid number of hardware handler args"}, }; @@ -950,7 +954,7 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m) struct dm_target *ti = m->ti; const char *arg_name; - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {0, 8, "invalid number of feature args"}, {1, 50, "pg_init_retries must be between 1 and 50"}, {0, 60000, "pg_init_delay_msecs must be between 0 and 60000"}, @@ -1019,7 +1023,7 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m) static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv) { /* target arguments */ - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {0, 1024, "invalid number of priority groups"}, {0, 1024, "invalid initial priority group number"}, }; @@ -1379,6 +1383,7 @@ static void pg_init_done(void *data, int errors) case SCSI_DH_RETRY: /* Wait before retrying. 
*/ delay_retry = 1; + /* fall through */ case SCSI_DH_IMM_RETRY: case SCSI_DH_RES_TEMP_UNAVAIL: if (pg_init_limit_reached(m, pgpath)) diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 5bfe285ea9d1..1ac58c5651b7 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -3238,7 +3238,7 @@ static int raid_map(struct dm_target *ti, struct bio *bio) if (unlikely(bio_end_sector(bio) > mddev->array_sectors)) return DM_MAPIO_REQUEUE; - mddev->pers->make_request(mddev, bio); + md_handle_request(mddev, bio); return DM_MAPIO_SUBMITTED; } diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index c6ebc5b1e00e..eadfcfd106ff 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -117,9 +117,9 @@ static void end_clone_bio(struct bio *clone) struct dm_rq_clone_bio_info *info = container_of(clone, struct dm_rq_clone_bio_info, clone); struct dm_rq_target_io *tio = info->tio; - struct bio *bio = info->orig; unsigned int nr_bytes = info->orig->bi_iter.bi_size; blk_status_t error = clone->bi_status; + bool is_last = !clone->bi_next; bio_put(clone); @@ -137,28 +137,23 @@ static void end_clone_bio(struct bio *clone) * when the request is completed. */ tio->error = error; - return; + goto exit; } /* * I/O for the bio successfully completed. * Notice the data completion to the upper layer. */ - - /* - * bios are processed from the head of the list. - * So the completing bio should always be rq->bio. - * If it's not, something wrong is happening. - */ - if (tio->orig->bio != bio) - DMERR("bio completion is going in the middle of the request"); + tio->completed += nr_bytes; /* * Update the original request. * Do not use blk_end_request() here, because it may complete * the original request before the clone, and break the ordering. */ - blk_update_request(tio->orig, BLK_STS_OK, nr_bytes); + if (is_last) + exit: + blk_update_request(tio->orig, BLK_STS_OK, tio->completed); } static struct dm_rq_target_io *tio_from_request(struct request *rq) @@ -237,14 +232,14 @@ static void dm_end_request(struct request *clone, blk_status_t error) /* * Requeue the original request of a clone. */ -static void dm_old_requeue_request(struct request *rq) +static void dm_old_requeue_request(struct request *rq, unsigned long delay_ms) { struct request_queue *q = rq->q; unsigned long flags; spin_lock_irqsave(q->queue_lock, flags); blk_requeue_request(q, rq); - blk_run_queue_async(q); + blk_delay_queue(q, delay_ms); spin_unlock_irqrestore(q->queue_lock, flags); } @@ -270,6 +265,7 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_ struct mapped_device *md = tio->md; struct request *rq = tio->orig; int rw = rq_data_dir(rq); + unsigned long delay_ms = delay_requeue ? 100 : 0; rq_end_stats(md, rq); if (tio->clone) { @@ -278,9 +274,9 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_ } if (!rq->q->mq_ops) - dm_old_requeue_request(rq); + dm_old_requeue_request(rq, delay_ms); else - dm_mq_delay_requeue_request(rq, delay_requeue ? 
100/*ms*/ : 0); + dm_mq_delay_requeue_request(rq, delay_ms); rq_completed(md, rw, false); } @@ -455,6 +451,7 @@ static void init_tio(struct dm_rq_target_io *tio, struct request *rq, tio->clone = NULL; tio->orig = rq; tio->error = 0; + tio->completed = 0; /* * Avoid initializing info for blk-mq; it passes * target-specific data through info.ptr diff --git a/drivers/md/dm-rq.h b/drivers/md/dm-rq.h index 9813922e4fe5..f43c45460aac 100644 --- a/drivers/md/dm-rq.h +++ b/drivers/md/dm-rq.h @@ -29,6 +29,7 @@ struct dm_rq_target_io { struct dm_stats_aux stats_aux; unsigned long duration_jiffies; unsigned n_sectors; + unsigned completed; }; /* diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index ab50d7c4377f..b5e892149c54 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c @@ -351,25 +351,6 @@ static size_t stripe_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff, return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i); } -static void stripe_dax_flush(struct dm_target *ti, pgoff_t pgoff, void *addr, - size_t size) -{ - sector_t dev_sector, sector = pgoff * PAGE_SECTORS; - struct stripe_c *sc = ti->private; - struct dax_device *dax_dev; - struct block_device *bdev; - uint32_t stripe; - - stripe_map_sector(sc, sector, &stripe, &dev_sector); - dev_sector += sc->stripe[stripe].physical_start; - dax_dev = sc->stripe[stripe].dev->dax_dev; - bdev = sc->stripe[stripe].dev->bdev; - - if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(size, PAGE_SIZE), &pgoff)) - return; - dax_flush(dax_dev, pgoff, addr, size); -} - /* * Stripe status: * @@ -489,7 +470,6 @@ static struct target_type stripe_target = { .io_hints = stripe_io_hints, .direct_access = stripe_dax_direct_access, .dax_copy_from_iter = stripe_dax_copy_from_iter, - .dax_flush = stripe_dax_flush, }; int __init dm_stripe_init(void) diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c index 2dcea4c56f37..4c8de1ff78ca 100644 --- a/drivers/md/dm-switch.c +++ b/drivers/md/dm-switch.c @@ -251,7 +251,7 @@ static void switch_dtr(struct dm_target *ti) */ static int switch_ctr(struct dm_target *ti, unsigned argc, char **argv) { - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {1, (KMALLOC_MAX_SIZE - sizeof(struct switch_ctx)) / sizeof(struct switch_path), "Invalid number of paths"}, {1, UINT_MAX, "Invalid region size"}, {0, 0, "Invalid number of optional args"}, diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 28a4071cdf85..ef7b8f201f73 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -806,7 +806,8 @@ int dm_table_add_target(struct dm_table *t, const char *type, /* * Target argument parsing helpers. 
*/ -static int validate_next_arg(struct dm_arg *arg, struct dm_arg_set *arg_set, +static int validate_next_arg(const struct dm_arg *arg, + struct dm_arg_set *arg_set, unsigned *value, char **error, unsigned grouped) { const char *arg_str = dm_shift_arg(arg_set); @@ -824,14 +825,14 @@ static int validate_next_arg(struct dm_arg *arg, struct dm_arg_set *arg_set, return 0; } -int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set, +int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set, unsigned *value, char **error) { return validate_next_arg(arg, arg_set, value, error, 0); } EXPORT_SYMBOL(dm_read_arg); -int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set, +int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set, unsigned *value, char **error) { return validate_next_arg(arg, arg_set, value, error, 1); diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 69d88aee3055..1e25705209c2 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -3041,7 +3041,7 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf, unsigned argc; const char *arg_name; - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {0, 4, "Invalid number of pool feature arguments"}, }; diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index 1c5b6185c79d..bda3caca23ca 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -839,7 +839,7 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v) struct dm_target *ti = v->ti; const char *arg_name; - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {0, DM_VERITY_OPTS_MAX, "Invalid number of feature args"}, }; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 04ae795e8a5f..6e54145969c5 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -987,24 +987,6 @@ static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, return ret; } -static void dm_dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, - size_t size) -{ - struct mapped_device *md = dax_get_private(dax_dev); - sector_t sector = pgoff * PAGE_SECTORS; - struct dm_target *ti; - int srcu_idx; - - ti = dm_dax_get_live_target(md, sector, &srcu_idx); - - if (!ti) - goto out; - if (ti->type->dax_flush) - ti->type->dax_flush(ti, pgoff, addr, size); - out: - dm_put_live_table(md, srcu_idx); -} - /* * A target may call dm_accept_partial_bio only from the map routine. It is * allowed for all bio types except REQ_PREFLUSH. @@ -2992,7 +2974,6 @@ static const struct block_device_operations dm_blk_dops = { static const struct dax_operations dm_dax_ops = { .direct_access = dm_dax_direct_access, .copy_from_iter = dm_dax_copy_from_iter, - .flush = dm_dax_flush, }; /* diff --git a/drivers/md/md.c b/drivers/md/md.c index 08fcaebc61bd..0ff1bbf6c90e 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -266,6 +266,37 @@ static DEFINE_SPINLOCK(all_mddevs_lock); * call has finished, the bio has been linked into some internal structure * and so is visible to ->quiesce(), so we don't need the refcount any more. 
*/ +void md_handle_request(struct mddev *mddev, struct bio *bio) +{ +check_suspended: + rcu_read_lock(); + if (mddev->suspended) { + DEFINE_WAIT(__wait); + for (;;) { + prepare_to_wait(&mddev->sb_wait, &__wait, + TASK_UNINTERRUPTIBLE); + if (!mddev->suspended) + break; + rcu_read_unlock(); + schedule(); + rcu_read_lock(); + } + finish_wait(&mddev->sb_wait, &__wait); + } + atomic_inc(&mddev->active_io); + rcu_read_unlock(); + + if (!mddev->pers->make_request(mddev, bio)) { + atomic_dec(&mddev->active_io); + wake_up(&mddev->sb_wait); + goto check_suspended; + } + + if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended) + wake_up(&mddev->sb_wait); +} +EXPORT_SYMBOL(md_handle_request); + static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) { const int rw = bio_data_dir(bio); @@ -285,23 +316,6 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) bio_endio(bio); return BLK_QC_T_NONE; } -check_suspended: - rcu_read_lock(); - if (mddev->suspended) { - DEFINE_WAIT(__wait); - for (;;) { - prepare_to_wait(&mddev->sb_wait, &__wait, - TASK_UNINTERRUPTIBLE); - if (!mddev->suspended) - break; - rcu_read_unlock(); - schedule(); - rcu_read_lock(); - } - finish_wait(&mddev->sb_wait, &__wait); - } - atomic_inc(&mddev->active_io); - rcu_read_unlock(); /* * save the sectors now since our bio can @@ -310,20 +324,14 @@ check_suspended: sectors = bio_sectors(bio); /* bio could be mergeable after passing to underlayer */ bio->bi_opf &= ~REQ_NOMERGE; - if (!mddev->pers->make_request(mddev, bio)) { - atomic_dec(&mddev->active_io); - wake_up(&mddev->sb_wait); - goto check_suspended; - } + + md_handle_request(mddev, bio); cpu = part_stat_lock(); part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors); part_stat_unlock(); - if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended) - wake_up(&mddev->sb_wait); - return BLK_QC_T_NONE; } @@ -439,16 +447,22 @@ static void md_submit_flush_data(struct work_struct *ws) struct mddev *mddev = container_of(ws, struct mddev, flush_work); struct bio *bio = mddev->flush_bio; + /* + * must reset flush_bio before calling into md_handle_request to avoid a + * deadlock, because other bios passed md_handle_request suspend check + * could wait for this and below md_handle_request could wait for those + * bios because of suspend check + */ + mddev->flush_bio = NULL; + wake_up(&mddev->sb_wait); + if (bio->bi_iter.bi_size == 0) /* an empty barrier - all done */ bio_endio(bio); else { bio->bi_opf &= ~REQ_PREFLUSH; - mddev->pers->make_request(mddev, bio); + md_handle_request(mddev, bio); } - - mddev->flush_bio = NULL; - wake_up(&mddev->sb_wait); } void md_flush_request(struct mddev *mddev, struct bio *bio) diff --git a/drivers/md/md.h b/drivers/md/md.h index 561d22b9a9a8..d8287d3cd1bf 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -692,6 +692,7 @@ extern void md_stop_writes(struct mddev *mddev); extern int md_rdev_init(struct md_rdev *rdev); extern void md_rdev_clear(struct md_rdev *rdev); +extern void md_handle_request(struct mddev *mddev, struct bio *bio); extern void mddev_suspend(struct mddev *mddev); extern void mddev_resume(struct mddev *mddev); extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 4188a4881148..928e24a07133 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -811,6 +811,14 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh 
spin_unlock(&head->batch_head->batch_lock); goto unlock_out; } + /* + * We must assign batch_head of this stripe within the + * batch_lock, otherwise clear_batch_ready of batch head + * stripe could clear BATCH_READY bit of this stripe and + * this stripe->batch_head doesn't get assigned, which + * could confuse clear_batch_ready for this stripe + */ + sh->batch_head = head->batch_head; /* * at this point, head's BATCH_READY could be cleared, but we @@ -818,8 +826,6 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh */ list_add(&sh->batch_list, &head->batch_list); spin_unlock(&head->batch_head->batch_lock); - - sh->batch_head = head->batch_head; } else { head->batch_head = head; sh->batch_head = head->batch_head; @@ -4599,7 +4605,8 @@ static void break_stripe_batch_list(struct stripe_head *head_sh, set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS | (1 << STRIPE_PREREAD_ACTIVE) | - (1 << STRIPE_DEGRADED)), + (1 << STRIPE_DEGRADED) | + (1 << STRIPE_ON_UNPLUG_LIST)), head_sh->state & (1 << STRIPE_INSYNC)); sh->check_state = head_sh->check_state; @@ -6568,14 +6575,17 @@ static ssize_t raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) { struct r5conf *conf; - unsigned long new; + unsigned int new; int err; struct r5worker_group *new_groups, *old_groups; int group_cnt, worker_cnt_per_group; if (len >= PAGE_SIZE) return -EINVAL; - if (kstrtoul(page, 10, &new)) + if (kstrtouint(page, 10, &new)) + return -EINVAL; + /* 8192 should be big enough */ + if (new > 8192) return -EINVAL; err = mddev_lock(mddev); diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c index dd769e40416f..eed6c397d840 100644 --- a/drivers/media/cec/cec-adap.c +++ b/drivers/media/cec/cec-adap.c @@ -181,7 +181,10 @@ static void cec_queue_msg_fh(struct cec_fh *fh, const struct cec_msg *msg) { static const struct cec_event ev_lost_msgs = { .event = CEC_EVENT_LOST_MSGS, - .lost_msgs.lost_msgs = 1, + .flags = 0, + { + .lost_msgs = { 1 }, + }, }; struct cec_msg_entry *entry; diff --git a/drivers/media/pci/cx25821/cx25821-audio-upstream.c b/drivers/media/pci/cx25821/cx25821-audio-upstream.c index b94eb1c0023d..ada26d4acfb4 100644 --- a/drivers/media/pci/cx25821/cx25821-audio-upstream.c +++ b/drivers/media/pci/cx25821/cx25821-audio-upstream.c @@ -277,7 +277,7 @@ static int cx25821_get_audio_data(struct cx25821_dev *dev, p = (char *)dev->_audiodata_buf_virt_addr + frame_offset; for (i = 0; i < dev->_audio_lines_count; i++) { - int n = kernel_read(file, file_offset, mybuf, AUDIO_LINE_SIZE); + int n = kernel_read(file, mybuf, AUDIO_LINE_SIZE, &file_offset); if (n < AUDIO_LINE_SIZE) { pr_info("Done: exit %s() since no more bytes to read from Audio file\n", __func__); @@ -290,7 +290,6 @@ static int cx25821_get_audio_data(struct cx25821_dev *dev, memcpy(p, mybuf, n); p += n; } - file_offset += n; } dev->_audioframe_count++; fput(file); @@ -318,7 +317,7 @@ static int cx25821_openfile_audio(struct cx25821_dev *dev, { char *p = (void *)dev->_audiodata_buf_virt_addr; struct file *file; - loff_t offset; + loff_t file_offset = 0; int i, j; file = filp_open(dev->_audiofilename, O_RDONLY | O_LARGEFILE, 0); @@ -328,11 +327,11 @@ static int cx25821_openfile_audio(struct cx25821_dev *dev, return PTR_ERR(file); } - for (j = 0, offset = 0; j < NUM_AUDIO_FRAMES; j++) { + for (j = 0; j < NUM_AUDIO_FRAMES; j++) { for (i = 0; i < dev->_audio_lines_count; i++) { char buf[AUDIO_LINE_SIZE]; - int n = kernel_read(file, offset, buf, - AUDIO_LINE_SIZE); + loff_t offset = 
file_offset; + int n = kernel_read(file, buf, AUDIO_LINE_SIZE, &file_offset); if (n < AUDIO_LINE_SIZE) { pr_info("Done: exit %s() since no more bytes to read from Audio file\n", @@ -344,8 +343,6 @@ static int cx25821_openfile_audio(struct cx25821_dev *dev, if (p) memcpy(p + offset, buf, n); - - offset += n; } dev->_audioframe_count++; } diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c index 895f655780a7..55d824b3a808 100644 --- a/drivers/mfd/kempld-core.c +++ b/drivers/mfd/kempld-core.c @@ -494,7 +494,7 @@ static struct platform_driver kempld_driver = { .remove = kempld_remove, }; -static struct dmi_system_id kempld_dmi_table[] __initdata = { +static const struct dmi_system_id kempld_dmi_table[] __initconst = { { .ident = "BBD6", .matches = { diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index d18b3d9292fd..3ba04f371380 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -1279,7 +1279,7 @@ ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf, } /* use bounce buffer for copy */ - tbuf = (void *)__get_free_page(GFP_TEMPORARY); + tbuf = (void *)__get_free_page(GFP_KERNEL); if (!tbuf) return -ENOMEM; diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index affa7370ba82..74c663b1c0a7 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -242,6 +242,12 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT; + /* + * mmc_init_request() depends on card->bouncesz so it must be calculated + * before blk_init_allocated_queue() starts allocating requests. + */ + card->bouncesz = mmc_queue_calc_bouncesz(host); + mq->card = card; mq->queue = blk_alloc_queue(GFP_KERNEL); if (!mq->queue) @@ -265,7 +271,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, if (mmc_can_erase(card)) mmc_queue_setup_discard(mq->queue, card); - card->bouncesz = mmc_queue_calc_bouncesz(host); if (card->bouncesz) { blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512); blk_queue_max_segments(mq->queue, card->bouncesz / 512); diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 02179ed2a40d..8c15637178ff 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -5,7 +5,7 @@ comment "MMC/SD/SDIO Host Controller Drivers" config MMC_DEBUG - bool "MMC host drivers debugginG" + bool "MMC host drivers debugging" depends on MMC != n help This is an option for use by developers; most people should diff --git a/drivers/mmc/host/cavium-thunderx.c b/drivers/mmc/host/cavium-thunderx.c index b9cc95998799..eee08d81b242 100644 --- a/drivers/mmc/host/cavium-thunderx.c +++ b/drivers/mmc/host/cavium-thunderx.c @@ -7,6 +7,7 @@ * * Copyright (C) 2016 Cavium Inc. 
*/ +#include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <linux/mmc/mmc.h> @@ -149,8 +150,11 @@ error: for (i = 0; i < CAVIUM_MAX_MMC; i++) { if (host->slot[i]) cvm_mmc_of_slot_remove(host->slot[i]); - if (host->slot_pdev[i]) + if (host->slot_pdev[i]) { + get_device(&host->slot_pdev[i]->dev); of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL); + put_device(&host->slot_pdev[i]->dev); + } } clk_disable_unprepare(host->clk); return ret; diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index bbaddf18a1b3..d0ccc6729fd2 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -392,6 +392,7 @@ static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = { enum { INTEL_DSM_FNS = 0, + INTEL_DSM_V18_SWITCH = 3, INTEL_DSM_DRV_STRENGTH = 9, INTEL_DSM_D3_RETUNE = 10, }; @@ -557,6 +558,19 @@ static void intel_hs400_enhanced_strobe(struct mmc_host *mmc, sdhci_writel(host, val, INTEL_HS400_ES_REG); } +static void sdhci_intel_voltage_switch(struct sdhci_host *host) +{ + struct sdhci_pci_slot *slot = sdhci_priv(host); + struct intel_host *intel_host = sdhci_pci_priv(slot); + struct device *dev = &slot->chip->pdev->dev; + u32 result = 0; + int err; + + err = intel_dsm(intel_host, dev, INTEL_DSM_V18_SWITCH, &result); + pr_debug("%s: %s DSM error %d result %u\n", + mmc_hostname(host->mmc), __func__, err, result); +} + static const struct sdhci_ops sdhci_intel_byt_ops = { .set_clock = sdhci_set_clock, .set_power = sdhci_intel_set_power, @@ -565,6 +579,7 @@ static const struct sdhci_ops sdhci_intel_byt_ops = { .reset = sdhci_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, .hw_reset = sdhci_pci_hw_reset, + .voltage_switch = sdhci_intel_voltage_switch, }; static void byt_read_dsm(struct sdhci_pci_slot *slot) diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index 12cf8288d663..a7293e186e03 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -129,50 +129,6 @@ static int tmio_mmc_next_sg(struct tmio_mmc_host *host) #define CMDREQ_TIMEOUT 5000 -#ifdef CONFIG_MMC_DEBUG - -#define STATUS_TO_TEXT(a, status, i) \ - do { \ - if ((status) & TMIO_STAT_##a) { \ - if ((i)++) \ - printk(KERN_DEBUG " | "); \ - printk(KERN_DEBUG #a); \ - } \ - } while (0) - -static void pr_debug_status(u32 status) -{ - int i = 0; - - pr_debug("status: %08x = ", status); - STATUS_TO_TEXT(CARD_REMOVE, status, i); - STATUS_TO_TEXT(CARD_INSERT, status, i); - STATUS_TO_TEXT(SIGSTATE, status, i); - STATUS_TO_TEXT(WRPROTECT, status, i); - STATUS_TO_TEXT(CARD_REMOVE_A, status, i); - STATUS_TO_TEXT(CARD_INSERT_A, status, i); - STATUS_TO_TEXT(SIGSTATE_A, status, i); - STATUS_TO_TEXT(CMD_IDX_ERR, status, i); - STATUS_TO_TEXT(STOPBIT_ERR, status, i); - STATUS_TO_TEXT(ILL_FUNC, status, i); - STATUS_TO_TEXT(CMD_BUSY, status, i); - STATUS_TO_TEXT(CMDRESPEND, status, i); - STATUS_TO_TEXT(DATAEND, status, i); - STATUS_TO_TEXT(CRCFAIL, status, i); - STATUS_TO_TEXT(DATATIMEOUT, status, i); - STATUS_TO_TEXT(CMDTIMEOUT, status, i); - STATUS_TO_TEXT(RXOVERFLOW, status, i); - STATUS_TO_TEXT(TXUNDERRUN, status, i); - STATUS_TO_TEXT(RXRDY, status, i); - STATUS_TO_TEXT(TXRQ, status, i); - STATUS_TO_TEXT(ILL_ACCESS, status, i); - printk("\n"); -} - -#else -#define pr_debug_status(s) do { } while (0) -#endif - static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) { struct tmio_mmc_host *host = mmc_priv(mmc); @@ -762,9 +718,6 @@ irqreturn_t tmio_mmc_irq(int irq, void 
*devid) status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS); ireg = status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask; - pr_debug_status(status); - pr_debug_status(ireg); - /* Clear the status except the interrupt status */ sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, TMIO_MASK_IRQ); diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c index 3e33ab66eb24..77b1d8013295 100644 --- a/drivers/mtd/maps/lantiq-flash.c +++ b/drivers/mtd/maps/lantiq-flash.c @@ -114,12 +114,6 @@ ltq_mtd_probe(struct platform_device *pdev) struct cfi_private *cfi; int err; - if (of_machine_is_compatible("lantiq,falcon") && - (ltq_boot_select() != BS_FLASH)) { - dev_err(&pdev->dev, "invalid bootstrap options\n"); - return -ENODEV; - } - ltq_mtd = devm_kzalloc(&pdev->dev, sizeof(struct ltq_mtd), GFP_KERNEL); if (!ltq_mtd) return -ENOMEM; diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index 5736b0c90b33..a308e707392d 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c @@ -581,6 +581,14 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent, slave->mtd.erasesize = parent->erasesize; } + /* + * Slave erasesize might differ from the master one if the master + * exposes several regions with different erasesize. Adjust + * wr_alignment accordingly. + */ + if (!(slave->mtd.flags & MTD_NO_ERASE)) + wr_alignment = slave->mtd.erasesize; + tmp = slave->offset; remainder = do_div(tmp, wr_alignment); if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) { diff --git a/drivers/mtd/nand/atmel/pmecc.c b/drivers/mtd/nand/atmel/pmecc.c index 146af8218314..8268636675ef 100644 --- a/drivers/mtd/nand/atmel/pmecc.c +++ b/drivers/mtd/nand/atmel/pmecc.c @@ -363,7 +363,7 @@ atmel_pmecc_create_user(struct atmel_pmecc *pmecc, size += (req->ecc.strength + 1) * sizeof(u16); /* Reserve space for mu, dmu and delta. 
*/ size = ALIGN(size, sizeof(s32)); - size += (req->ecc.strength + 1) * sizeof(s32); + size += (req->ecc.strength + 1) * sizeof(s32) * 3; user = kzalloc(size, GFP_KERNEL); if (!user) diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c index c3bb358ef01e..5796468db653 100644 --- a/drivers/mtd/nand/lpc32xx_mlc.c +++ b/drivers/mtd/nand/lpc32xx_mlc.c @@ -707,7 +707,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev) } res = clk_prepare_enable(host->clk); if (res) - goto err_exit1; + goto err_put_clk; nand_chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl; nand_chip->dev_ready = lpc32xx_nand_device_ready; @@ -814,6 +814,7 @@ err_exit3: dma_release_channel(host->dma_chan); err_exit2: clk_disable_unprepare(host->clk); +err_put_clk: clk_put(host->clk); err_exit1: lpc32xx_wp_enable(host); diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index bcc8cef1c615..12edaae17d81 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -2668,7 +2668,7 @@ static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len, static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops) { - int chipnr, realpage, page, blockmask, column; + int chipnr, realpage, page, column; struct nand_chip *chip = mtd_to_nand(mtd); uint32_t writelen = ops->len; @@ -2704,7 +2704,6 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, realpage = (int)(to >> chip->page_shift); page = realpage & chip->pagemask; - blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1; /* Invalidate the page cache, when we write to the cached page */ if (to <= ((loff_t)chip->pagebuf << chip->page_shift) && diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c index fec613221958..246b4393118e 100644 --- a/drivers/mtd/nand/nandsim.c +++ b/drivers/mtd/nand/nandsim.c @@ -1356,7 +1356,7 @@ static ssize_t read_file(struct nandsim *ns, struct file *file, void *buf, size_ if (err) return err; noreclaim_flag = memalloc_noreclaim_save(); - tx = kernel_read(file, pos, buf, count); + tx = kernel_read(file, buf, count, &pos); memalloc_noreclaim_restore(noreclaim_flag); put_pages(ns); return tx; @@ -1372,7 +1372,7 @@ static ssize_t write_file(struct nandsim *ns, struct file *file, void *buf, size if (err) return err; noreclaim_flag = memalloc_noreclaim_save(); - tx = kernel_write(file, buf, count, pos); + tx = kernel_write(file, buf, count, &pos); memalloc_noreclaim_restore(noreclaim_flag); put_pages(ns); return tx; diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index cf1d4a15e10a..19c000722cbc 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -1784,7 +1784,7 @@ spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, * @nor: pointer to a 'struct spi_nor' * @addr: offset in the SFDP area to start reading data from * @len: number of bytes to read - * @buf: buffer where the SFDP data are copied into + * @buf: buffer where the SFDP data are copied into (dma-safe memory) * * Whatever the actual numbers of bytes for address and dummy cycles are * for (Fast) Read commands, the Read SFDP (5Ah) instruction is always @@ -1829,6 +1829,36 @@ read_err: return ret; } +/** + * spi_nor_read_sfdp_dma_unsafe() - read Serial Flash Discoverable Parameters. 
+ * @nor: pointer to a 'struct spi_nor' + * @addr: offset in the SFDP area to start reading data from + * @len: number of bytes to read + * @buf: buffer where the SFDP data are copied into + * + * Wrap spi_nor_read_sfdp() using a kmalloc'ed bounce buffer as @buf is now not + * guaranteed to be dma-safe. + * + * Return: -ENOMEM if kmalloc() fails, the return code of spi_nor_read_sfdp() + * otherwise. + */ +static int spi_nor_read_sfdp_dma_unsafe(struct spi_nor *nor, u32 addr, + size_t len, void *buf) +{ + void *dma_safe_buf; + int ret; + + dma_safe_buf = kmalloc(len, GFP_KERNEL); + if (!dma_safe_buf) + return -ENOMEM; + + ret = spi_nor_read_sfdp(nor, addr, len, dma_safe_buf); + memcpy(buf, dma_safe_buf, len); + kfree(dma_safe_buf); + + return ret; +} + struct sfdp_parameter_header { u8 id_lsb; u8 minor; @@ -2101,7 +2131,7 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor, bfpt_header->length * sizeof(u32)); addr = SFDP_PARAM_HEADER_PTP(bfpt_header); memset(&bfpt, 0, sizeof(bfpt)); - err = spi_nor_read_sfdp(nor, addr, len, &bfpt); + err = spi_nor_read_sfdp_dma_unsafe(nor, addr, len, &bfpt); if (err < 0) return err; @@ -2127,6 +2157,15 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor, params->size = bfpt.dwords[BFPT_DWORD(2)]; if (params->size & BIT(31)) { params->size &= ~BIT(31); + + /* + * Prevent overflows on params->size. Anyway, a NOR of 2^64 + * bits is unlikely to exist so this error probably means + * the BFPT we are reading is corrupted/wrong. + */ + if (params->size > 63) + return -EINVAL; + params->size = 1ULL << params->size; } else { params->size++; @@ -2243,7 +2282,7 @@ static int spi_nor_parse_sfdp(struct spi_nor *nor, int i, err; /* Get the SFDP header. */ - err = spi_nor_read_sfdp(nor, 0, sizeof(header), &header); + err = spi_nor_read_sfdp_dma_unsafe(nor, 0, sizeof(header), &header); if (err < 0) return err; diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c index c3963f880448..b210fdb31c98 100644 --- a/drivers/mtd/ubi/block.c +++ b/drivers/mtd/ubi/block.c @@ -383,7 +383,7 @@ int ubiblock_create(struct ubi_volume_info *vi) /* Initialize the gendisk of this ubiblock device */ gd = alloc_disk(1); if (!gd) { - pr_err("UBI: block: alloc_disk failed"); + pr_err("UBI: block: alloc_disk failed\n"); ret = -ENODEV; goto out_free_dev; } @@ -607,7 +607,7 @@ static void __init ubiblock_create_from_param(void) desc = open_volume_desc(p->name, p->ubi_num, p->vol_id); if (IS_ERR(desc)) { pr_err( - "UBI: block: can't open volume on ubi%d_%d, err=%ld", + "UBI: block: can't open volume on ubi%d_%d, err=%ld\n", p->ubi_num, p->vol_id, PTR_ERR(desc)); continue; } @@ -618,7 +618,7 @@ static void __init ubiblock_create_from_param(void) ret = ubiblock_create(&vi); if (ret) { pr_err( - "UBI: block: can't add '%s' volume on ubi%d_%d, err=%d", + "UBI: block: can't add '%s' volume on ubi%d_%d, err=%d\n", vi.name, p->ubi_num, p->vol_id, ret); continue; } diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index d854521962ef..842550b5712a 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -825,7 +825,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, for (i = 0; i < UBI_MAX_DEVICES; i++) { ubi = ubi_devices[i]; if (ubi && mtd->index == ubi->mtd->index) { - pr_err("ubi: mtd%d is already attached to ubi%d", + pr_err("ubi: mtd%d is already attached to ubi%d\n", mtd->index, i); return -EEXIST; } @@ -840,7 +840,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, * no sense to attach emulated MTD devices, so we prohibit this. 
*/ if (mtd->type == MTD_UBIVOLUME) { - pr_err("ubi: refuse attaching mtd%d - it is already emulated on top of UBI", + pr_err("ubi: refuse attaching mtd%d - it is already emulated on top of UBI\n", mtd->index); return -EINVAL; } @@ -851,7 +851,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, if (!ubi_devices[ubi_num]) break; if (ubi_num == UBI_MAX_DEVICES) { - pr_err("ubi: only %d UBI devices may be created", + pr_err("ubi: only %d UBI devices may be created\n", UBI_MAX_DEVICES); return -ENFILE; } @@ -861,7 +861,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, /* Make sure ubi_num is not busy */ if (ubi_devices[ubi_num]) { - pr_err("ubi: ubi%i already exists", ubi_num); + pr_err("ubi: ubi%i already exists\n", ubi_num); return -EEXIST; } } @@ -1166,7 +1166,7 @@ static int __init ubi_init(void) BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64); if (mtd_devs > UBI_MAX_DEVICES) { - pr_err("UBI error: too many MTD devices, maximum is %d", + pr_err("UBI error: too many MTD devices, maximum is %d\n", UBI_MAX_DEVICES); return -EINVAL; } @@ -1178,7 +1178,7 @@ static int __init ubi_init(void) err = misc_register(&ubi_ctrl_cdev); if (err) { - pr_err("UBI error: cannot register device"); + pr_err("UBI error: cannot register device\n"); goto out; } @@ -1205,7 +1205,7 @@ static int __init ubi_init(void) mtd = open_mtd_device(p->name); if (IS_ERR(mtd)) { err = PTR_ERR(mtd); - pr_err("UBI error: cannot open mtd %s, error %d", + pr_err("UBI error: cannot open mtd %s, error %d\n", p->name, err); /* See comment below re-ubi_is_module(). */ if (ubi_is_module()) @@ -1218,7 +1218,7 @@ static int __init ubi_init(void) p->vid_hdr_offs, p->max_beb_per1024); mutex_unlock(&ubi_devices_mutex); if (err < 0) { - pr_err("UBI error: cannot attach mtd%d", + pr_err("UBI error: cannot attach mtd%d\n", mtd->index); put_mtd_device(mtd); @@ -1242,7 +1242,7 @@ static int __init ubi_init(void) err = ubiblock_init(); if (err) { - pr_err("UBI error: block: cannot initialize, error %d", err); + pr_err("UBI error: block: cannot initialize, error %d\n", err); /* See comment above re-ubi_is_module(). */ if (ubi_is_module()) @@ -1265,7 +1265,7 @@ out_dev_unreg: misc_deregister(&ubi_ctrl_cdev); out: class_unregister(&ubi_class); - pr_err("UBI error: cannot initialize UBI, error %d", err); + pr_err("UBI error: cannot initialize UBI, error %d\n", err); return err; } late_initcall(ubi_init); diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c index b44c8d348e78..5a832bc79b1b 100644 --- a/drivers/mtd/ubi/fastmap.c +++ b/drivers/mtd/ubi/fastmap.c @@ -1667,7 +1667,7 @@ err: ret = invalidate_fastmap(ubi); if (ret < 0) { - ubi_err(ubi, "Unable to invalidiate current fastmap!"); + ubi_err(ubi, "Unable to invalidate current fastmap!"); ubi_ro_mode(ubi); } else { return_fm_pebs(ubi, old_fm); diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h index 22ed3f627506..bfceae5a890e 100644 --- a/drivers/mtd/ubi/ubi-media.h +++ b/drivers/mtd/ubi/ubi-media.h @@ -229,7 +229,7 @@ struct ubi_ec_hdr { * copy. UBI also calculates data CRC when the data is moved and stores it at * the @data_crc field of the copy (P1). So when UBI needs to pick one physical * eraseblock of two (P or P1), the @copy_flag of the newer one (P1) is - * examined. If it is cleared, the situation* is simple and the newer one is + * examined. If it is cleared, the situation is simple and the newer one is * picked. If it is set, the data CRC of the copy (P1) is examined. 
If the CRC * checksum is correct, this physical eraseblock is selected (P1). Otherwise * the older one (P) is selected. @@ -389,7 +389,7 @@ struct ubi_vtbl_record { #define UBI_FM_POOL_MAGIC 0x67AF4D08 #define UBI_FM_EBA_MAGIC 0xf0c040a8 -/* A fastmap supber block can be located between PEB 0 and +/* A fastmap super block can be located between PEB 0 and * UBI_FM_MAX_START */ #define UBI_FM_MAX_START 64 diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index fc63992ab0e0..c99dc59d729b 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -4289,7 +4289,7 @@ static int bond_check_params(struct bond_params *params) int bond_mode = BOND_MODE_ROUNDROBIN; int xmit_hashtype = BOND_XMIT_POLICY_LAYER2; int lacp_fast = 0; - int tlb_dynamic_lb = 0; + int tlb_dynamic_lb; /* Convert string parameters. */ if (mode) { @@ -4601,16 +4601,13 @@ static int bond_check_params(struct bond_params *params) } ad_user_port_key = valptr->value; - if ((bond_mode == BOND_MODE_TLB) || (bond_mode == BOND_MODE_ALB)) { - bond_opt_initstr(&newval, "default"); - valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB), - &newval); - if (!valptr) { - pr_err("Error: No tlb_dynamic_lb default value"); - return -EINVAL; - } - tlb_dynamic_lb = valptr->value; + bond_opt_initstr(&newval, "default"); + valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB), &newval); + if (!valptr) { + pr_err("Error: No tlb_dynamic_lb default value"); + return -EINVAL; } + tlb_dynamic_lb = valptr->value; if (lp_interval == 0) { pr_warn("Warning: ip_interval must be between 1 and %d, so it was reset to %d\n", diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index a12d603d41c6..5931aa2fe997 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -754,6 +754,9 @@ static int bond_option_mode_set(struct bonding *bond, bond->params.miimon); } + if (newval->value == BOND_MODE_ALB) + bond->params.tlb_dynamic_lb = 1; + /* don't cache arp_validate between modes */ bond->params.arp_validate = BOND_ARP_VALIDATE_NONE; bond->params.mode = newval->value; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index a6572b51435a..83eec9a8c275 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -432,6 +432,27 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv) netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n"); } +static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv, + u64 *tx_bytes, u64 *tx_packets) +{ + struct bcm_sysport_tx_ring *ring; + u64 bytes = 0, packets = 0; + unsigned int start; + unsigned int q; + + for (q = 0; q < priv->netdev->num_tx_queues; q++) { + ring = &priv->tx_rings[q]; + do { + start = u64_stats_fetch_begin_irq(&priv->syncp); + bytes = ring->bytes; + packets = ring->packets; + } while (u64_stats_fetch_retry_irq(&priv->syncp, start)); + + *tx_bytes += bytes; + *tx_packets += packets; + } +} + static void bcm_sysport_get_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { @@ -439,11 +460,16 @@ static void bcm_sysport_get_stats(struct net_device *dev, struct bcm_sysport_stats64 *stats64 = &priv->stats64; struct u64_stats_sync *syncp = &priv->syncp; struct bcm_sysport_tx_ring *ring; + u64 tx_bytes = 0, tx_packets = 0; unsigned int start; int i, j; - if (netif_running(dev)) + if (netif_running(dev)) { 
bcm_sysport_update_mib_counters(priv); + bcm_sysport_update_tx_stats(priv, &tx_bytes, &tx_packets); + stats64->tx_bytes = tx_bytes; + stats64->tx_packets = tx_packets; + } for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) { const struct bcm_sysport_stats *s; @@ -461,12 +487,13 @@ static void bcm_sysport_get_stats(struct net_device *dev, continue; p += s->stat_offset; - if (s->stat_sizeof == sizeof(u64)) + if (s->stat_sizeof == sizeof(u64) && + s->type == BCM_SYSPORT_STAT_NETDEV64) { do { start = u64_stats_fetch_begin_irq(syncp); data[i] = *(u64 *)p; } while (u64_stats_fetch_retry_irq(syncp, start)); - else + } else data[i] = *(u32 *)p; j++; } @@ -1716,30 +1743,12 @@ static void bcm_sysport_get_stats64(struct net_device *dev, { struct bcm_sysport_priv *priv = netdev_priv(dev); struct bcm_sysport_stats64 *stats64 = &priv->stats64; - struct bcm_sysport_tx_ring *ring; - u64 tx_packets = 0, tx_bytes = 0; unsigned int start; - unsigned int q; netdev_stats_to_stats64(stats, &dev->stats); - for (q = 0; q < dev->num_tx_queues; q++) { - ring = &priv->tx_rings[q]; - do { - start = u64_stats_fetch_begin_irq(&priv->syncp); - tx_bytes = ring->bytes; - tx_packets = ring->packets; - } while (u64_stats_fetch_retry_irq(&priv->syncp, start)); - - stats->tx_bytes += tx_bytes; - stats->tx_packets += tx_packets; - } - - /* lockless update tx_bytes and tx_packets */ - u64_stats_update_begin(&priv->syncp); - stats64->tx_bytes = stats->tx_bytes; - stats64->tx_packets = stats->tx_packets; - u64_stats_update_end(&priv->syncp); + bcm_sysport_update_tx_stats(priv, &stats->tx_bytes, + &stats->tx_packets); do { start = u64_stats_fetch_begin_irq(&priv->syncp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index ccd699fb2d70..7dd3d131043a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -750,6 +750,10 @@ int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid, { int rc = 0; + if (!is_classid_clsact_ingress(cls_flower->common.classid) || + cls_flower->common.chain_index) + return -EOPNOTSUPP; + switch (cls_flower->command) { case TC_CLSFLOWER_REPLACE: rc = bnxt_tc_add_flow(bp, src_fid, cls_flower); diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index af33dc15c55f..656e6af70f0a 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -11536,11 +11536,11 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq, tg3_napi_enable(tp); for (i = 0; i < tp->irq_cnt; i++) { - struct tg3_napi *tnapi = &tp->napi[i]; err = tg3_request_irq(tp, i); if (err) { for (i--; i >= 0; i--) { - tnapi = &tp->napi[i]; + struct tg3_napi *tnapi = &tp->napi[i]; + free_irq(tnapi->irq_vec, tnapi); } goto out_napi_fini; diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 674cf9d13b98..8984c4938881 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -930,6 +930,14 @@ static inline bool is_ipv4_pkt(struct sk_buff *skb) return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4; } +static inline bool is_ipv6_ext_hdr(struct sk_buff *skb) +{ + if (ip_hdr(skb)->version == 6) + return ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr); + else + return false; +} + #define be_error_recovering(adapter) \ (adapter->flags & BE_FLAGS_TRY_RECOVERY) diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 
319eee36649b..0e3d9f39a807 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -5089,6 +5089,20 @@ static netdev_features_t be_features_check(struct sk_buff *skb, struct be_adapter *adapter = netdev_priv(dev); u8 l4_hdr = 0; + if (skb_is_gso(skb)) { + /* IPv6 TSO requests with extension hdrs are a problem + * to Lancer and BE3 HW. Disable TSO6 feature. + */ + if (!skyhawk_chip(adapter) && is_ipv6_ext_hdr(skb)) + features &= ~NETIF_F_TSO6; + + /* Lancer cannot handle the packet with MSS less than 256. + * Disable the GSO support in such cases + */ + if (lancer_chip(adapter) && skb_shinfo(skb)->gso_size < 256) + features &= ~NETIF_F_GSO_MASK; + } + /* The code below restricts offload features for some tunneled and * Q-in-Q packets. * Offload features for normal (non tunnel) packets are unchanged. diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 38c7b21e5d63..ede1876a9a19 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -374,8 +374,8 @@ struct bufdesc_ex { #define FEC_ENET_TS_AVAIL ((uint)0x00010000) #define FEC_ENET_TS_TIMER ((uint)0x00008000) -#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII | FEC_ENET_TS_TIMER) -#define FEC_NAPI_IMASK (FEC_ENET_MII | FEC_ENET_TS_TIMER) +#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII) +#define FEC_NAPI_IMASK FEC_ENET_MII #define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF)) /* ENET interrupt coalescing macro define */ diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 56f56d6ada9c..3dc2d771a222 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1559,14 +1559,14 @@ fec_enet_collect_events(struct fec_enet_private *fep, uint int_events) if (int_events == 0) return false; - if (int_events & FEC_ENET_RXF) + if (int_events & FEC_ENET_RXF_0) fep->work_rx |= (1 << 2); if (int_events & FEC_ENET_RXF_1) fep->work_rx |= (1 << 0); if (int_events & FEC_ENET_RXF_2) fep->work_rx |= (1 << 1); - if (int_events & FEC_ENET_TXF) + if (int_events & FEC_ENET_TXF_0) fep->work_tx |= (1 << 2); if (int_events & FEC_ENET_TXF_1) fep->work_tx |= (1 << 0); @@ -1604,8 +1604,8 @@ fec_enet_interrupt(int irq, void *dev_id) } if (fep->ptp_clock) - fec_ptp_check_pps_event(fep); - + if (fec_ptp_check_pps_event(fep)) + ret = IRQ_HANDLED; return ret; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index 59efbd605416..5bcb2238acb2 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -37,20 +37,15 @@ static bool hnae3_client_match(enum hnae3_client_type client_type, } static int hnae3_match_n_instantiate(struct hnae3_client *client, - struct hnae3_ae_dev *ae_dev, - bool is_reg, bool *matched) + struct hnae3_ae_dev *ae_dev, bool is_reg) { int ret; - *matched = false; - /* check if this client matches the type of ae_dev */ if (!(hnae3_client_match(client->type, ae_dev->dev_type) && hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) { return 0; } - /* there is a match of client and dev */ - *matched = true; /* now, (un-)instantiate client by calling lower layer */ if (is_reg) { @@ -69,7 +64,6 @@ int hnae3_register_client(struct hnae3_client *client) { struct hnae3_client *client_tmp; struct hnae3_ae_dev *ae_dev; - bool matched; int ret = 0; mutex_lock(&hnae3_common_lock); @@ 
-86,7 +80,7 @@ int hnae3_register_client(struct hnae3_client *client) /* if the client could not be initialized on current port, for * any error reasons, move on to next available port */ - ret = hnae3_match_n_instantiate(client, ae_dev, true, &matched); + ret = hnae3_match_n_instantiate(client, ae_dev, true); if (ret) dev_err(&ae_dev->pdev->dev, "match and instantiation failed for port\n"); @@ -102,12 +96,11 @@ EXPORT_SYMBOL(hnae3_register_client); void hnae3_unregister_client(struct hnae3_client *client) { struct hnae3_ae_dev *ae_dev; - bool matched; mutex_lock(&hnae3_common_lock); /* un-initialize the client on every matched port */ list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { - hnae3_match_n_instantiate(client, ae_dev, false, &matched); + hnae3_match_n_instantiate(client, ae_dev, false); } list_del(&client->node); @@ -124,7 +117,6 @@ int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) const struct pci_device_id *id; struct hnae3_ae_dev *ae_dev; struct hnae3_client *client; - bool matched; int ret = 0; mutex_lock(&hnae3_common_lock); @@ -151,13 +143,10 @@ int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) * initialize the figure out client instance */ list_for_each_entry(client, &hnae3_client_list, node) { - ret = hnae3_match_n_instantiate(client, ae_dev, true, - &matched); + ret = hnae3_match_n_instantiate(client, ae_dev, true); if (ret) dev_err(&ae_dev->pdev->dev, "match and instantiation failed\n"); - if (matched) - break; } } @@ -175,7 +164,6 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo) const struct pci_device_id *id; struct hnae3_ae_dev *ae_dev; struct hnae3_client *client; - bool matched; mutex_lock(&hnae3_common_lock); /* Check if there are matched ae_dev */ @@ -187,12 +175,8 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo) /* check the client list for the match with this ae_dev type and * un-initialize the figure out client instance */ - list_for_each_entry(client, &hnae3_client_list, node) { - hnae3_match_n_instantiate(client, ae_dev, false, - &matched); - if (matched) - break; - } + list_for_each_entry(client, &hnae3_client_list, node) + hnae3_match_n_instantiate(client, ae_dev, false); ae_algo->ops->uninit_ae_dev(ae_dev); hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); @@ -212,7 +196,6 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) const struct pci_device_id *id; struct hnae3_ae_algo *ae_algo; struct hnae3_client *client; - bool matched; int ret = 0; mutex_lock(&hnae3_common_lock); @@ -246,13 +229,10 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) * initialize the figure out client instance */ list_for_each_entry(client, &hnae3_client_list, node) { - ret = hnae3_match_n_instantiate(client, ae_dev, true, - &matched); + ret = hnae3_match_n_instantiate(client, ae_dev, true); if (ret) dev_err(&ae_dev->pdev->dev, "match and instantiation failed\n"); - if (matched) - break; } out_err: @@ -270,7 +250,6 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev) const struct pci_device_id *id; struct hnae3_ae_algo *ae_algo; struct hnae3_client *client; - bool matched; mutex_lock(&hnae3_common_lock); /* Check if there are matched ae_algo */ @@ -279,12 +258,8 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev) if (!id) continue; - list_for_each_entry(client, &hnae3_client_list, node) { - hnae3_match_n_instantiate(client, ae_dev, false, - &matched); - if (matched) - break; - } + list_for_each_entry(client, &hnae3_client_list, node) + hnae3_match_n_instantiate(client, ae_dev, false); 
ae_algo->ops->uninit_ae_dev(ae_dev); hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index b2f28ae81273..1a01cadfe5f3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -49,7 +49,17 @@ #define HNAE3_CLASS_NAME_SIZE 16 #define HNAE3_DEV_INITED_B 0x0 -#define HNAE_DEV_SUPPORT_ROCE_B 0x1 +#define HNAE3_DEV_SUPPORT_ROCE_B 0x1 +#define HNAE3_DEV_SUPPORT_DCB_B 0x2 + +#define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\ + BIT(HNAE3_DEV_SUPPORT_ROCE_B)) + +#define hnae3_dev_roce_supported(hdev) \ + hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B) + +#define hnae3_dev_dcb_supported(hdev) \ + hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B) #define ring_ptr_move_fw(ring, p) \ ((ring)->p = ((ring)->p + 1) % (ring)->desc_num) @@ -366,12 +376,12 @@ struct hnae3_ae_algo { struct hnae3_tc_info { u16 tqp_offset; /* TQP offset from base TQP */ u16 tqp_count; /* Total TQPs */ - u8 up; /* user priority */ u8 tc; /* TC index */ bool enable; /* If this TC is enable or not */ }; #define HNAE3_MAX_TC 8 +#define HNAE3_MAX_USER_PRIO 8 struct hnae3_knic_private_info { struct net_device *netdev; /* Set by KNIC client when init instance */ u16 rss_size; /* Allocated RSS queues */ @@ -379,6 +389,7 @@ struct hnae3_knic_private_info { u16 num_desc; u8 num_tc; /* Total number of enabled TCs */ + u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */ struct hnae3_tc_info tc_info[HNAE3_MAX_TC]; /* Idx of array is HW TC */ u16 num_tqps; /* total number of TQPs in this handle */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 91ae0135ee50..758cf3948131 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -238,7 +238,7 @@ struct hclge_tqp_map { u8 rsv[18]; }; -#define HCLGE_VECTOR_ELEMENTS_PER_CMD 11 +#define HCLGE_VECTOR_ELEMENTS_PER_CMD 10 enum hclge_int_type { HCLGE_INT_TX, @@ -252,8 +252,12 @@ struct hclge_ctrl_vector_chain { #define HCLGE_INT_TYPE_S 0 #define HCLGE_INT_TYPE_M 0x3 #define HCLGE_TQP_ID_S 2 -#define HCLGE_TQP_ID_M (0x3fff << HCLGE_TQP_ID_S) +#define HCLGE_TQP_ID_M (0x7ff << HCLGE_TQP_ID_S) +#define HCLGE_INT_GL_IDX_S 13 +#define HCLGE_INT_GL_IDX_M (0x3 << HCLGE_INT_GL_IDX_S) __le16 tqp_type_and_id[HCLGE_VECTOR_ELEMENTS_PER_CMD]; + u8 vfid; + u8 rsv; }; #define HCLGE_TC_NUM 8 @@ -266,7 +270,8 @@ struct hclge_tx_buff_alloc { struct hclge_rx_priv_buff { __le16 buf_num[HCLGE_TC_NUM]; - u8 rsv[8]; + __le16 shared_buf; + u8 rsv[6]; }; struct hclge_query_version { @@ -684,6 +689,7 @@ struct hclge_reset_tqp_queue { #define HCLGE_DEFAULT_TX_BUF 0x4000 /* 16k bytes */ #define HCLGE_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */ #define HCLGE_DEFAULT_DV 0xA000 /* 40k byte */ +#define HCLGE_DEFAULT_NON_DCB_DV 0x7800 /* 30K byte */ #define HCLGE_TYPE_CRQ 0 #define HCLGE_TYPE_CSQ 1 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index bb45365fb817..e0685e630afe 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -46,17 +46,7 @@ static const struct pci_device_id ae_algo_pci_tbl[] = { {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0}, {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0}, 
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0}, - /* Required last entry */ - {0, } -}; - -static const struct pci_device_id roce_pci_tbl[] = { - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0}, - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0}, - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0}, - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0}, - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0}, - /* Required last entry */ + /* required last entry */ {0, } }; @@ -894,7 +884,7 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev) hdev->num_tqps = __le16_to_cpu(req->tqp_num); hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S; - if (hnae_get_bit(hdev->ae_dev->flag, HNAE_DEV_SUPPORT_ROCE_B)) { + if (hnae3_dev_roce_supported(hdev)) { hdev->num_roce_msix = hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number), HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); @@ -1063,9 +1053,9 @@ static int hclge_configure(struct hclge_dev *hdev) hdev->base_tqp_pid = 0; hdev->rss_size_max = 1; hdev->rx_buf_len = cfg.rx_buf_len; - for (i = 0; i < ETH_ALEN; i++) - hdev->hw.mac.mac_addr[i] = cfg.mac_addr[i]; + ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr); hdev->hw.mac.media_type = cfg.media_type; + hdev->hw.mac.phy_addr = cfg.phy_addr; hdev->num_desc = cfg.tqp_desc_num; hdev->tm_info.num_pg = 1; hdev->tm_info.num_tc = cfg.tc_num; @@ -1454,7 +1444,11 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, u32 rx_all) tc_num = hclge_get_tc_num(hdev); pfc_enable_num = hclge_get_pfc_enalbe_num(hdev); - shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV; + if (hnae3_dev_dcb_supported(hdev)) + shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV; + else + shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV; + shared_buf_tc = pfc_enable_num * hdev->mps + (tc_num - pfc_enable_num) * hdev->mps / 2 + hdev->mps; @@ -1495,6 +1489,16 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size) struct hclge_priv_buf *priv; int i; + /* When DCB is not supported, rx private + * buffer is not allocated. 
+ */ + if (!hnae3_dev_dcb_supported(hdev)) { + if (!hclge_is_rx_buf_ok(hdev, rx_all)) + return -ENOMEM; + + return 0; + } + /* step 1, try to alloc private buffer for all enabled tc */ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { priv = &hdev->priv_buf[i]; @@ -1510,6 +1514,11 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size) priv->wl.high = 2 * hdev->mps; priv->buf_size = priv->wl.high; } + } else { + priv->enable = 0; + priv->wl.low = 0; + priv->wl.high = 0; + priv->buf_size = 0; } } @@ -1522,8 +1531,15 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size) for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { priv = &hdev->priv_buf[i]; - if (hdev->hw_tc_map & BIT(i)) - priv->enable = 1; + priv->enable = 0; + priv->wl.low = 0; + priv->wl.high = 0; + priv->buf_size = 0; + + if (!(hdev->hw_tc_map & BIT(i))) + continue; + + priv->enable = 1; if (hdev->tm_info.hw_pfc_map & BIT(i)) { priv->wl.low = 128; @@ -1616,6 +1632,10 @@ static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev) cpu_to_le16(true << HCLGE_TC0_PRI_BUF_EN_B); } + req->shared_buf = + cpu_to_le16((hdev->s_buf.buf_size >> HCLGE_BUF_UNIT_S) | + (1 << HCLGE_TC0_PRI_BUF_EN_B)); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(&hdev->pdev->dev, @@ -1782,18 +1802,22 @@ int hclge_buffer_alloc(struct hclge_dev *hdev) return ret; } - ret = hclge_rx_priv_wl_config(hdev); - if (ret) { - dev_err(&hdev->pdev->dev, - "could not configure rx private waterline %d\n", ret); - return ret; - } + if (hnae3_dev_dcb_supported(hdev)) { + ret = hclge_rx_priv_wl_config(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "could not configure rx private waterline %d\n", + ret); + return ret; + } - ret = hclge_common_thrd_config(hdev); - if (ret) { - dev_err(&hdev->pdev->dev, - "could not configure common threshold %d\n", ret); - return ret; + ret = hclge_common_thrd_config(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "could not configure common threshold %d\n", + ret); + return ret; + } } ret = hclge_common_wl_config(hdev); @@ -2582,6 +2606,7 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev) u16 tc_valid[HCLGE_MAX_TC_NUM]; u16 tc_size[HCLGE_MAX_TC_NUM]; u32 *rss_indir = NULL; + u16 rss_size = 0, roundup_size; const u8 *key; int i, ret, j; @@ -2596,7 +2621,13 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev) for (j = 0; j < hdev->num_vmdq_vport + 1; j++) { for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) { vport[j].rss_indirection_tbl[i] = - i % hdev->rss_size_max; + i % vport[j].alloc_rss_size; + + /* vport 0 is for PF */ + if (j != 0) + continue; + + rss_size = vport[j].alloc_rss_size; rss_indir[i] = vport[j].rss_indirection_tbl[i]; } } @@ -2613,42 +2644,31 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev) if (ret) goto err; + /* Each TC have the same queue size, and tc_size set to hardware is + * the log2 of roundup power of two of rss_size, the acutal queue + * size is limited by indirection table. 
+ */ + if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) { + dev_err(&hdev->pdev->dev, + "Configure rss tc size failed, invalid TC_SIZE = %d\n", + rss_size); + return -EINVAL; + } + + roundup_size = roundup_pow_of_two(rss_size); + roundup_size = ilog2(roundup_size); + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { - if (hdev->hw_tc_map & BIT(i)) - tc_valid[i] = 1; - else - tc_valid[i] = 0; + tc_valid[i] = 0; - switch (hdev->rss_size_max) { - case HCLGE_RSS_TC_SIZE_0: - tc_size[i] = 0; - break; - case HCLGE_RSS_TC_SIZE_1: - tc_size[i] = 1; - break; - case HCLGE_RSS_TC_SIZE_2: - tc_size[i] = 2; - break; - case HCLGE_RSS_TC_SIZE_3: - tc_size[i] = 3; - break; - case HCLGE_RSS_TC_SIZE_4: - tc_size[i] = 4; - break; - case HCLGE_RSS_TC_SIZE_5: - tc_size[i] = 5; - break; - case HCLGE_RSS_TC_SIZE_6: - tc_size[i] = 6; - break; - case HCLGE_RSS_TC_SIZE_7: - tc_size[i] = 7; - break; - default: - break; - } - tc_offset[i] = hdev->rss_size_max * i; + if (!(hdev->hw_tc_map & BIT(i))) + continue; + + tc_valid[i] = 1; + tc_size[i] = roundup_size; + tc_offset[i] = rss_size * i; } + ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); err: @@ -2679,7 +2699,11 @@ int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id, hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M, HCLGE_TQP_ID_S, node->tqp_index); + hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_GL_IDX_M, + HCLGE_INT_GL_IDX_S, + hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]); + req->vfid = vport->vport_id; if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; @@ -2763,8 +2787,12 @@ static int hclge_unmap_ring_from_vector( hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M, HCLGE_TQP_ID_S, node->tqp_index); + hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_GL_IDX_M, + HCLGE_INT_GL_IDX_S, + hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]); + req->vfid = vport->vport_id; if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; @@ -2778,7 +2806,7 @@ static int hclge_unmap_ring_from_vector( } i = 0; hclge_cmd_setup_basic_desc(&desc, - HCLGE_OPC_ADD_RING_TO_VECTOR, + HCLGE_OPC_DEL_RING_TO_VECTOR, false); req->int_vector_id = vector_id; } @@ -3665,6 +3693,7 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev) { #define HCLGE_VLAN_TYPE_VF_TABLE 0 #define HCLGE_VLAN_TYPE_PORT_TABLE 1 + struct hnae3_handle *handle; int ret; ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_VF_TABLE, @@ -3674,8 +3703,11 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev) ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_PORT_TABLE, true); + if (ret) + return ret; - return ret; + handle = &hdev->vport[0].nic; + return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); } static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) @@ -3920,8 +3952,7 @@ static int hclge_init_client_instance(struct hnae3_client *client, goto err; if (hdev->roce_client && - hnae_get_bit(hdev->ae_dev->flag, - HNAE_DEV_SUPPORT_ROCE_B)) { + hnae3_dev_roce_supported(hdev)) { struct hnae3_client *rc = hdev->roce_client; ret = hclge_init_roce_base_info(vport); @@ -3944,8 +3975,7 @@ static int hclge_init_client_instance(struct hnae3_client *client, break; case HNAE3_CLIENT_ROCE: - if (hnae_get_bit(hdev->ae_dev->flag, - 
HNAE_DEV_SUPPORT_ROCE_B)) { + if (hnae3_dev_roce_supported(hdev)) { hdev->roce_client = client; vport->roce.client = client; } @@ -4057,7 +4087,6 @@ static void hclge_pci_uninit(struct hclge_dev *hdev) static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) { struct pci_dev *pdev = ae_dev->pdev; - const struct pci_device_id *id; struct hclge_dev *hdev; int ret; @@ -4072,10 +4101,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) hdev->ae_dev = ae_dev; ae_dev->priv = hdev; - id = pci_match_id(roce_pci_tbl, ae_dev->pdev); - if (id) - hnae_set_bit(ae_dev->flag, HNAE_DEV_SUPPORT_ROCE_B, 1); - ret = hclge_pci_init(hdev); if (ret) { dev_err(&pdev->dev, "PCI init failed\n"); @@ -4138,12 +4163,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } - ret = hclge_rss_init_hw(hdev); - if (ret) { - dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); - return ret; - } - ret = hclge_init_vlan_config(hdev); if (ret) { dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); @@ -4156,6 +4175,12 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } + ret = hclge_rss_init_hw(hdev); + if (ret) { + dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); + return ret; + } + setup_timer(&hdev->service_timer, hclge_service_timer, (unsigned long)hdev); INIT_WORK(&hdev->service_task, hclge_service_task); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index edb10ad075eb..9fcfd9395424 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -176,7 +176,6 @@ struct hclge_pg_info { struct hclge_tc_info { u8 tc_id; u8 tc_sch_mode; /* 0: sp; 1: dwrr */ - u8 up; u8 pgid; u32 bw_limit; }; @@ -197,6 +196,7 @@ struct hclge_tm_info { u8 num_tc; u8 num_pg; /* It must be 1 if vNET-Base schd */ u8 pg_dwrr[HCLGE_PG_NUM]; + u8 prio_tc[HNAE3_MAX_USER_PRIO]; struct hclge_pg_info pg_info[HCLGE_PG_NUM]; struct hclge_tc_info tc_info[HNAE3_MAX_TC]; enum hclge_fc_mode fc_mode; @@ -477,6 +477,7 @@ struct hclge_vport { u8 rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */ /* User configured lookup table entries */ u8 rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE]; + u16 alloc_rss_size; u16 qs_offset; u16 bw_limit; /* VSI BW Limit (0 = disabled) */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 1c577d268f00..73a75d7cc551 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -128,9 +128,7 @@ static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id) { u8 tc; - for (tc = 0; tc < hdev->tm_info.num_tc; tc++) - if (hdev->tm_info.tc_info[tc].up == pri_id) - break; + tc = hdev->tm_info.prio_tc[pri_id]; if (tc >= hdev->tm_info.num_tc) return -EINVAL; @@ -158,7 +156,7 @@ static int hclge_up_to_tc_map(struct hclge_dev *hdev) hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false); - for (pri_id = 0; pri_id < hdev->tm_info.num_tc; pri_id++) { + for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) { ret = hclge_fill_pri_array(hdev, pri, pri_id); if (ret) return ret; @@ -280,11 +278,11 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev, shap_cfg_cmd->pg_id = pg_id; - hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_B, ir_b); - hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_U, ir_u); - 
hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_S, ir_s); - hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, BS_B, bs_b); - hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, BS_S, bs_s); + hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_B, ir_b); + hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_U, ir_u); + hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_S, ir_s); + hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, BS_B, bs_b); + hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, BS_S, bs_s); return hclge_cmd_send(&hdev->hw, &desc, 1); } @@ -307,11 +305,11 @@ static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev, shap_cfg_cmd->pri_id = pri_id; - hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_B, ir_b); - hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_U, ir_u); - hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_S, ir_s); - hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, BS_B, bs_b); - hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, BS_S, bs_s); + hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_B, ir_b); + hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_U, ir_u); + hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_S, ir_s); + hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, BS_B, bs_b); + hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, BS_S, bs_s); return hclge_cmd_send(&hdev->hw, &desc, 1); } @@ -397,6 +395,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) kinfo->num_tqps / kinfo->num_tc); vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id; vport->dwrr = 100; /* 100 percent as init */ + vport->alloc_rss_size = kinfo->rss_size; for (i = 0; i < kinfo->num_tc; i++) { if (hdev->hw_tc_map & BIT(i)) { @@ -404,16 +403,17 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size; kinfo->tc_info[i].tqp_count = kinfo->rss_size; kinfo->tc_info[i].tc = i; - kinfo->tc_info[i].up = hdev->tm_info.tc_info[i].up; } else { /* Set to default queue if TC is disable */ kinfo->tc_info[i].enable = false; kinfo->tc_info[i].tqp_offset = 0; kinfo->tc_info[i].tqp_count = 1; kinfo->tc_info[i].tc = 0; - kinfo->tc_info[i].up = 0; } } + + memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc, + FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc)); } static void hclge_tm_vport_info_update(struct hclge_dev *hdev) @@ -435,12 +435,15 @@ static void hclge_tm_tc_info_init(struct hclge_dev *hdev) for (i = 0; i < hdev->tm_info.num_tc; i++) { hdev->tm_info.tc_info[i].tc_id = i; hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR; - hdev->tm_info.tc_info[i].up = i; hdev->tm_info.tc_info[i].pgid = 0; hdev->tm_info.tc_info[i].bw_limit = hdev->tm_info.pg_info[0].bw_limit; } + for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) + hdev->tm_info.prio_tc[i] = + (i >= hdev->tm_info.num_tc) ? 
0 : i; + hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE; } @@ -976,6 +979,10 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev) if (ret) return ret; + /* Only DCB-supported dev supports qset back pressure setting */ + if (!hnae3_dev_dcb_supported(hdev)) + return 0; + for (i = 0; i < hdev->tm_info.num_tc; i++) { ret = hclge_tm_qs_bp_cfg(hdev, i); if (ret) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index 7e67337dfaf2..85158b0d73fe 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -94,10 +94,10 @@ struct hclge_bp_to_qs_map_cmd { u32 rsvd1; }; -#define hclge_tm_set_feild(dest, string, val) \ +#define hclge_tm_set_field(dest, string, val) \ hnae_set_field((dest), (HCLGE_TM_SHAP_##string##_MSK), \ (HCLGE_TM_SHAP_##string##_LSH), val) -#define hclge_tm_get_feild(src, string) \ +#define hclge_tm_get_field(src, string) \ hnae_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \ (HCLGE_TM_SHAP_##string##_LSH)) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index 1c3e29447891..35369e1c8036 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -41,11 +41,16 @@ static struct hnae3_client client; static const struct pci_device_id hns3_pci_tbl[] = { {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0}, {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0}, - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0}, - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0}, - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0}, - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0}, - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), + HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), + HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), + HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), + HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), + HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, /* required last entry */ {0, } }; @@ -1348,6 +1353,7 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } ae_dev->pdev = pdev; + ae_dev->flag = ent->driver_data; ae_dev->dev_type = HNAE3_DEV_KNIC; pci_set_drvdata(pdev, ae_dev); @@ -2705,10 +2711,11 @@ static void hns3_init_mac_addr(struct net_device *netdev) eth_hw_addr_random(netdev); dev_warn(priv->dev, "using random MAC address %pM\n", netdev->dev_addr); - /* Also copy this new MAC address into hdev */ - if (h->ae_algo->ops->set_mac_addr) - h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr); } + + if (h->ae_algo->ops->set_mac_addr) + h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr); + } static void hns3_nic_set_priv_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c index 2c74baa2398a..fff09dcf9e34 100644 --- a/drivers/net/ethernet/ibm/emac/mal.c +++ b/drivers/net/ethernet/ibm/emac/mal.c @@ -402,7 +402,7 @@ static int mal_poll(struct napi_struct *napi, int budget) unsigned long flags; MAL_DBG2(mal, "poll(%d)" NL, budget); - again: + /* Process TX skbs */ list_for_each(l, &mal->poll_list) { struct mal_commac *mc = @@ -451,7 +451,6 @@ static int mal_poll(struct napi_struct *napi, int budget) 
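/*
 * Illustrative sketch, not part of the diff: the hns3 changes earlier in this
 * diff (hns3_pci_tbl and hns3_probe) drop the separate roce_pci_tbl lookup and
 * instead carry capability bits in the PCI match table's driver_data, copying
 * them into ae_dev->flag at probe time where helpers such as
 * hnae3_dev_roce_supported() can test them. Below is a minimal userspace model
 * of that pattern; the names, bit values, and device ids are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

#define SUPPORT_ROCE_BIT 1u   /* bit index, mirrors HNAE3_DEV_SUPPORT_ROCE_B */
#define SUPPORT_DCB_BIT  2u
#define SUPPORT_ROCE_DCB ((1u << SUPPORT_ROCE_BIT) | (1u << SUPPORT_DCB_BIT))

struct id_entry { uint16_t device; unsigned long driver_data; };
struct ae_dev   { unsigned long flag; };

static const struct id_entry id_tbl[] = {
	{ 0x1000, 0 },                /* hypothetical plain GE device */
	{ 0x1001, SUPPORT_ROCE_DCB }, /* hypothetical RDMA/DCB-capable device */
	{ 0 }                         /* required last entry */
};

static int dev_roce_supported(const struct ae_dev *d)
{
	return !!(d->flag & (1u << SUPPORT_ROCE_BIT));
}

static void probe(struct ae_dev *dev, const struct id_entry *ent)
{
	/* capability bits travel with the match entry, no second table needed */
	dev->flag = ent->driver_data;
}

int main(void)
{
	struct ae_dev dev = { 0 };

	probe(&dev, &id_tbl[1]);
	printf("roce supported: %d\n", dev_roce_supported(&dev));
	return 0;
}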
spin_lock_irqsave(&mal->lock, flags); mal_disable_eob_irq(mal); spin_unlock_irqrestore(&mal->lock, flags); - goto again; } mc->ops->poll_tx(mc->dev); } diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 8a835e82256a..eef35bf3e849 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -4193,7 +4193,7 @@ static struct pci_driver skge_driver = { .driver.pm = SKGE_PM_OPS, }; -static struct dmi_system_id skge_32bit_dma_boards[] = { +static const struct dmi_system_id skge_32bit_dma_boards[] = { { .ident = "Gigabyte nForce boards", .matches = { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index ed7cd6c48019..696b99e65a5a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -575,15 +575,14 @@ static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp, } static struct mlxsw_sp_span_entry * -mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port) +mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port) { - struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; int i; for (i = 0; i < mlxsw_sp->span.entries_count; i++) { struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i]; - if (curr->used && curr->local_port == port->local_port) + if (curr->used && curr->local_port == local_port) return curr; } return NULL; @@ -594,7 +593,8 @@ static struct mlxsw_sp_span_entry { struct mlxsw_sp_span_entry *span_entry; - span_entry = mlxsw_sp_span_entry_find(port); + span_entry = mlxsw_sp_span_entry_find(port->mlxsw_sp, + port->local_port); if (span_entry) { /* Already exists, just take a reference */ span_entry->ref_count++; @@ -783,12 +783,13 @@ err_port_bind: } static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from, - struct mlxsw_sp_port *to, + u8 destination_port, enum mlxsw_sp_span_type type) { struct mlxsw_sp_span_entry *span_entry; - span_entry = mlxsw_sp_span_entry_find(to); + span_entry = mlxsw_sp_span_entry_find(from->mlxsw_sp, + destination_port); if (!span_entry) { netdev_err(from->dev, "no span entry found\n"); return; @@ -1563,14 +1564,12 @@ static void mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_port_mall_mirror_tc_entry *mirror) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; enum mlxsw_sp_span_type span_type; - struct mlxsw_sp_port *to_port; - to_port = mlxsw_sp->ports[mirror->to_local_port]; span_type = mirror->ingress ? 
MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; - mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type); + mlxsw_sp_span_mirror_remove(mlxsw_sp_port, mirror->to_local_port, + span_type); } static int @@ -2545,7 +2544,9 @@ out: return err; } -#define MLXSW_SP_QSFP_I2C_ADDR 0x50 +#define MLXSW_SP_I2C_ADDR_LOW 0x50 +#define MLXSW_SP_I2C_ADDR_HIGH 0x51 +#define MLXSW_SP_EEPROM_PAGE_LENGTH 256 static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port, u16 offset, u16 size, void *data, @@ -2554,12 +2555,25 @@ static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; char eeprom_tmp[MLXSW_SP_REG_MCIA_EEPROM_SIZE]; char mcia_pl[MLXSW_REG_MCIA_LEN]; + u16 i2c_addr; int status; int err; size = min_t(u16, size, MLXSW_SP_REG_MCIA_EEPROM_SIZE); + + if (offset < MLXSW_SP_EEPROM_PAGE_LENGTH && + offset + size > MLXSW_SP_EEPROM_PAGE_LENGTH) + /* Cross pages read, read until offset 256 in low page */ + size = MLXSW_SP_EEPROM_PAGE_LENGTH - offset; + + i2c_addr = MLXSW_SP_I2C_ADDR_LOW; + if (offset >= MLXSW_SP_EEPROM_PAGE_LENGTH) { + i2c_addr = MLXSW_SP_I2C_ADDR_HIGH; + offset -= MLXSW_SP_EEPROM_PAGE_LENGTH; + } + mlxsw_reg_mcia_pack(mcia_pl, mlxsw_sp_port->mapping.module, - 0, 0, offset, size, MLXSW_SP_QSFP_I2C_ADDR); + 0, 0, offset, size, i2c_addr); err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcia), mcia_pl); if (err) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index f0fb898533fb..2cfb3f5d092d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -4868,7 +4868,8 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, struct fib_notifier_info *info = ptr; struct mlxsw_sp_router *router; - if (!net_eq(info->net, &init_net)) + if (!net_eq(info->net, &init_net) || + (info->family != AF_INET && info->family != AF_INET6)) return NOTIFY_DONE; fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC); diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index d396183108f7..a18b4d2b1d3e 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -44,6 +44,16 @@ #include "../nfp_net.h" #include "../nfp_port.h" +#define NFP_FLOWER_WHITELIST_DISSECTOR \ + (BIT(FLOW_DISSECTOR_KEY_CONTROL) | \ + BIT(FLOW_DISSECTOR_KEY_BASIC) | \ + BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \ + BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \ + BIT(FLOW_DISSECTOR_KEY_PORTS) | \ + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \ + BIT(FLOW_DISSECTOR_KEY_VLAN) | \ + BIT(FLOW_DISSECTOR_KEY_IP)) + static int nfp_flower_xmit_flow(struct net_device *netdev, struct nfp_fl_payload *nfp_flow, u8 mtype) @@ -112,6 +122,9 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls, u8 key_layer; int key_size; + if (flow->dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR) + return -EOPNOTSUPP; + if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) { struct flow_dissector_key_control *mask_enc_ctl = diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index f055b1774d65..f8fa63b66739 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -74,6 +74,45 @@ static const struct pci_device_id nfp_pci_device_ids[] = { }; MODULE_DEVICE_TABLE(pci, 
nfp_pci_device_ids); +static bool nfp_board_ready(struct nfp_pf *pf) +{ + const char *cp; + long state; + int err; + + cp = nfp_hwinfo_lookup(pf->hwinfo, "board.state"); + if (!cp) + return false; + + err = kstrtol(cp, 0, &state); + if (err < 0) + return false; + + return state == 15; +} + +static int nfp_pf_board_state_wait(struct nfp_pf *pf) +{ + const unsigned long wait_until = jiffies + 10 * HZ; + + while (!nfp_board_ready(pf)) { + if (time_is_before_eq_jiffies(wait_until)) { + nfp_err(pf->cpp, "NFP board initialization timeout\n"); + return -EINVAL; + } + + nfp_info(pf->cpp, "waiting for board initialization\n"); + if (msleep_interruptible(500)) + return -ERESTARTSYS; + + /* Refresh cached information */ + kfree(pf->hwinfo); + pf->hwinfo = nfp_hwinfo_read(pf->cpp); + } + + return 0; +} + static int nfp_pcie_sriov_read_nfd_limit(struct nfp_pf *pf) { int err; @@ -312,6 +351,10 @@ static int nfp_nsp_init(struct pci_dev *pdev, struct nfp_pf *pf) struct nfp_nsp *nsp; int err; + err = nfp_resource_wait(pf->cpp, NFP_RESOURCE_NSP, 30); + if (err) + return err; + nsp = nfp_nsp_open(pf->cpp); if (IS_ERR(nsp)) { err = PTR_ERR(nsp); @@ -425,6 +468,10 @@ static int nfp_pci_probe(struct pci_dev *pdev, nfp_hwinfo_lookup(pf->hwinfo, "assembly.revision"), nfp_hwinfo_lookup(pf->hwinfo, "cpld.version")); + err = nfp_pf_board_state_wait(pf); + if (err) + goto err_hwinfo_free; + err = devlink_register(devlink, &pdev->dev); if (err) goto err_hwinfo_free; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index 5abb9ba31e7d..ff373acd28f3 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -64,23 +64,6 @@ #define NFP_PF_CSR_SLICE_SIZE (32 * 1024) -static int nfp_is_ready(struct nfp_pf *pf) -{ - const char *cp; - long state; - int err; - - cp = nfp_hwinfo_lookup(pf->hwinfo, "board.state"); - if (!cp) - return 0; - - err = kstrtol(cp, 0, &state); - if (err < 0) - return 0; - - return state == 15; -} - /** * nfp_net_get_mac_addr() - Get the MAC address. * @pf: NFP PF handle @@ -725,12 +708,6 @@ int nfp_net_pci_probe(struct nfp_pf *pf) INIT_WORK(&pf->port_refresh_work, nfp_net_refresh_vnics); - /* Verify that the board has completed initialization */ - if (!nfp_is_ready(pf)) { - nfp_err(pf->cpp, "NFP is not ready for NIC operation.\n"); - return -EINVAL; - } - if (!pf->rtbl) { nfp_err(pf->cpp, "No %s, giving up.\n", pf->fw_loaded ? 
"symbol table" : "firmware found"); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h index 1a8d04a1e113..3ce51f03126f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h @@ -97,6 +97,8 @@ nfp_resource_acquire(struct nfp_cpp *cpp, const char *name); void nfp_resource_release(struct nfp_resource *res); +int nfp_resource_wait(struct nfp_cpp *cpp, const char *name, unsigned int secs); + u32 nfp_resource_cpp_id(struct nfp_resource *res); const char *nfp_resource_name(struct nfp_resource *res); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c index 072612263dab..b1dd13ff282b 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c @@ -250,6 +250,51 @@ void nfp_resource_release(struct nfp_resource *res) } /** + * nfp_resource_wait() - Wait for resource to appear + * @cpp: NFP CPP handle + * @name: Name of the resource + * @secs: Number of seconds to wait + * + * Wait for resource to appear in the resource table, grab and release + * its lock. The wait is jiffies-based, don't expect fine granularity. + * + * Return: 0 on success, errno otherwise. + */ +int nfp_resource_wait(struct nfp_cpp *cpp, const char *name, unsigned int secs) +{ + unsigned long warn_at = jiffies + NFP_MUTEX_WAIT_FIRST_WARN * HZ; + unsigned long err_at = jiffies + secs * HZ; + struct nfp_resource *res; + + while (true) { + res = nfp_resource_acquire(cpp, name); + if (!IS_ERR(res)) { + nfp_resource_release(res); + return 0; + } + + if (PTR_ERR(res) != -ENOENT) { + nfp_err(cpp, "error waiting for resource %s: %ld\n", + name, PTR_ERR(res)); + return PTR_ERR(res); + } + if (time_is_before_eq_jiffies(err_at)) { + nfp_err(cpp, "timeout waiting for resource %s\n", name); + return -ETIMEDOUT; + } + if (time_is_before_eq_jiffies(warn_at)) { + warn_at = jiffies + NFP_MUTEX_WAIT_NEXT_WARN * HZ; + nfp_info(cpp, "waiting for NFP resource %s\n", name); + } + if (msleep_interruptible(10)) { + nfp_err(cpp, "wait for resource %s interrupted\n", + name); + return -ERESTARTSYS; + } + } +} + +/** * nfp_resource_cpp_id() - Return the cpp_id of a resource handle * @res: NFP Resource handle * diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c index 89ab786da25f..4a67c55aa9f1 100644 --- a/drivers/net/ethernet/nuvoton/w90p910_ether.c +++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c @@ -11,6 +11,7 @@ #include <linux/module.h> #include <linux/init.h> +#include <linux/interrupt.h> #include <linux/mii.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index eaca4578435d..8f6ccc0c39e5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -1244,7 +1244,6 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn, if (!dcbx_info) return -ENOMEM; - memset(dcbx_info, 0, sizeof(*dcbx_info)); rc = qed_dcbx_query_params(p_hwfn, dcbx_info, QED_DCBX_OPERATIONAL_MIB); if (rc) { kfree(dcbx_info); diff --git a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c index bbe24639aa5a..c8c6231b87f3 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c @@ 
-88,6 +88,8 @@ static void emac_set_msglevel(struct net_device *netdev, u32 data) static int emac_get_sset_count(struct net_device *netdev, int sset) { switch (sset) { + case ETH_SS_PRIV_FLAGS: + return 1; case ETH_SS_STATS: return EMAC_STATS_LEN; default: @@ -100,6 +102,10 @@ static void emac_get_strings(struct net_device *netdev, u32 stringset, u8 *data) unsigned int i; switch (stringset) { + case ETH_SS_PRIV_FLAGS: + strcpy(data, "single-pause-mode"); + break; + case ETH_SS_STATS: for (i = 0; i < EMAC_STATS_LEN; i++) { strlcpy(data, emac_ethtool_stat_strings[i], @@ -230,6 +236,27 @@ static int emac_get_regs_len(struct net_device *netdev) return EMAC_MAX_REG_SIZE * sizeof(u32); } +#define EMAC_PRIV_ENABLE_SINGLE_PAUSE BIT(0) + +static int emac_set_priv_flags(struct net_device *netdev, u32 flags) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + + adpt->single_pause_mode = !!(flags & EMAC_PRIV_ENABLE_SINGLE_PAUSE); + + if (netif_running(netdev)) + return emac_reinit_locked(adpt); + + return 0; +} + +static u32 emac_get_priv_flags(struct net_device *netdev) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + + return adpt->single_pause_mode ? EMAC_PRIV_ENABLE_SINGLE_PAUSE : 0; +} + static const struct ethtool_ops emac_ethtool_ops = { .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, @@ -253,6 +280,9 @@ static const struct ethtool_ops emac_ethtool_ops = { .get_regs_len = emac_get_regs_len, .get_regs = emac_get_regs, + + .set_priv_flags = emac_set_priv_flags, + .get_priv_flags = emac_get_priv_flags, }; void emac_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index bcd4708b3745..0ea3ca09c689 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c @@ -551,6 +551,28 @@ static void emac_mac_start(struct emac_adapter *adpt) mac &= ~(HUGEN | VLAN_STRIP | TPAUSE | SIMR | HUGE | MULTI_ALL | DEBUG_MODE | SINGLE_PAUSE_MODE); + /* Enable single-pause-frame mode if requested. + * + * If enabled, the EMAC will send a single pause frame when the RX + * queue is full. This normally leads to packet loss because + * the pause frame disables the remote MAC only for 33ms (the quanta), + * and then the remote MAC continues sending packets even though + * the RX queue is still full. + * + * If disabled, the EMAC sends a pause frame every 31ms until the RX + * queue is no longer full. Normally, this is the preferred + * method of operation. However, when the system is hung (e.g. + * cores are halted), the EMAC interrupt handler is never called + * and so the RX queue fills up quickly and stays full. The resuling + * non-stop "flood" of pause frames sometimes has the effect of + * disabling nearby switches. In some cases, other nearby switches + * are also affected, shutting down the entire network. + * + * The user can enable or disable single-pause-frame mode + * via ethtool. + */ + mac |= adpt->single_pause_mode ? 
SINGLE_PAUSE_MODE : 0; + writel_relaxed(csr1, adpt->csr + EMAC_EMAC_WRAPPER_CSR1); writel_relaxed(mac, adpt->base + EMAC_MAC_CTRL); diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 60850bfa3d32..759543512117 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -443,6 +443,9 @@ static void emac_init_adapter(struct emac_adapter *adpt) /* default to automatic flow control */ adpt->automatic = true; + + /* Disable single-pause-frame mode by default */ + adpt->single_pause_mode = false; } /* Get the clock */ diff --git a/drivers/net/ethernet/qualcomm/emac/emac.h b/drivers/net/ethernet/qualcomm/emac/emac.h index 8ee4ec6aef2e..d7c9f44209d4 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.h +++ b/drivers/net/ethernet/qualcomm/emac/emac.h @@ -363,6 +363,9 @@ struct emac_adapter { bool tx_flow_control; bool rx_flow_control; + /* True == use single-pause-frame mode. */ + bool single_pause_mode; + /* Ring parameter */ u8 tpd_burst; u8 rfd_burst; diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c index 557c9bf1a469..86b8c758f94e 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c @@ -84,6 +84,10 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb) if (((int)skb->len - (int)packet_len) < 0) return NULL; + /* Some hardware can send us empty frames. Catch them */ + if (ntohs(maph->pkt_len) == 0) + return NULL; + skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC); if (!skbn) return NULL; @@ -94,11 +98,5 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb) memcpy(skbn->data, skb->data, packet_len); skb_pull(skb, packet_len); - /* Some hardware can send us empty frames. Catch them */ - if (ntohs(maph->pkt_len) == 0) { - kfree_skb(skb); - return NULL; - } - return skbn; } diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index ca22f2898664..d24b47b8e0b2 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -2135,11 +2135,12 @@ static int rtl8139_poll(struct napi_struct *napi, int budget) if (likely(RTL_R16(IntrStatus) & RxAckBits)) work_done += rtl8139_rx(dev, tp, budget); - if (work_done < budget && napi_complete_done(napi, work_done)) { + if (work_done < budget) { unsigned long flags; spin_lock_irqsave(&tp->lock, flags); - RTL_W16_F(IntrMask, rtl8139_intr_mask); + if (napi_complete_done(napi, work_done)) + RTL_W16_F(IntrMask, rtl8139_intr_mask); spin_unlock_irqrestore(&tp->lock, flags); } spin_unlock(&tp->rx_lock); diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 0b6a39b003a4..012fb66eed8d 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -2595,6 +2595,11 @@ static int smsc911x_suspend(struct device *dev) struct net_device *ndev = dev_get_drvdata(dev); struct smsc911x_data *pdata = netdev_priv(ndev); + if (netif_running(ndev)) { + netif_stop_queue(ndev); + netif_device_detach(ndev); + } + /* enable wake on LAN, energy detection and the external PME * signal. */ smsc911x_reg_write(pdata, PMT_CTRL, @@ -2628,7 +2633,15 @@ static int smsc911x_resume(struct device *dev) while (!(smsc911x_reg_read(pdata, PMT_CTRL) & PMT_CTRL_READY_) && --to) udelay(1000); - return (to == 0) ? 
-EIO : 0; + if (to == 0) + return -EIO; + + if (netif_running(ndev)) { + netif_device_attach(ndev); + netif_start_queue(ndev); + } + + return 0; } static const struct dev_pm_ops smsc911x_pm_ops = { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index a366b3747eeb..8a280b48e3a9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -315,6 +315,7 @@ static int stmmac_dt_phy(struct plat_stmmacenet_data *plat, { .compatible = "allwinner,sun8i-h3-emac" }, { .compatible = "allwinner,sun8i-v3s-emac" }, { .compatible = "allwinner,sun50i-a64-emac" }, + {}, }; /* If phy-handle property is passed from DT, use it as the PHY */ diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index acd29d60174a..83e6f76eb965 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -2598,7 +2598,7 @@ static struct platform_driver rhine_driver_platform = { } }; -static struct dmi_system_id rhine_dmi_table[] __initdata = { +static const struct dmi_system_id rhine_dmi_table[] __initconst = { { .ident = "EPIA-M", .matches = { diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index ec546da86683..5176be76ca7d 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -150,6 +150,8 @@ struct netvsc_device_info { u32 num_chn; u32 send_sections; u32 recv_sections; + u32 send_section_size; + u32 recv_section_size; }; enum rndis_device_state { @@ -204,6 +206,8 @@ int netvsc_recv_callback(struct net_device *net, const struct ndis_pkt_8021q_info *vlan); void netvsc_channel_cb(void *context); int netvsc_poll(struct napi_struct *napi, int budget); + +void rndis_set_subchannel(struct work_struct *w); bool rndis_filter_opened(const struct netvsc_device *nvdev); int rndis_filter_open(struct netvsc_device *nvdev); int rndis_filter_close(struct netvsc_device *nvdev); @@ -782,6 +786,7 @@ struct netvsc_device { u32 num_chn; atomic_t open_chn; + struct work_struct subchan_work; wait_queue_head_t subchan_open; struct rndis_device *extension; diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 0062b802676f..8d5077fb0492 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -76,11 +76,9 @@ static struct netvsc_device *alloc_net_device(void) net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; - net_device->recv_section_size = NETVSC_RECV_SECTION_SIZE; - net_device->send_section_size = NETVSC_SEND_SECTION_SIZE; - init_completion(&net_device->channel_init_wait); init_waitqueue_head(&net_device->subchan_open); + INIT_WORK(&net_device->subchan_work, rndis_set_subchannel); return net_device; } @@ -261,7 +259,7 @@ static int netvsc_init_buf(struct hv_device *device, int ret = 0; /* Get receive buffer area. */ - buf_size = device_info->recv_sections * net_device->recv_section_size; + buf_size = device_info->recv_sections * device_info->recv_section_size; buf_size = roundup(buf_size, PAGE_SIZE); net_device->recv_buf = vzalloc(buf_size); @@ -343,7 +341,7 @@ static int netvsc_init_buf(struct hv_device *device, goto cleanup; /* Now setup the send buffer. 
*/ - buf_size = device_info->send_sections * net_device->send_section_size; + buf_size = device_info->send_sections * device_info->send_section_size; buf_size = round_up(buf_size, PAGE_SIZE); net_device->send_buf = vzalloc(buf_size); @@ -557,6 +555,8 @@ void netvsc_device_remove(struct hv_device *device) = rtnl_dereference(net_device_ctx->nvdev); int i; + cancel_work_sync(&net_device->subchan_work); + netvsc_disconnect_vsp(device); RCU_INIT_POINTER(net_device_ctx->nvdev, NULL); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 165ba4b3b423..a32ae02e1b6c 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -49,7 +49,7 @@ #define NETVSC_MIN_TX_SECTIONS 10 #define NETVSC_DEFAULT_TX 192 /* ~1M */ #define NETVSC_MIN_RX_SECTIONS 10 /* ~64K */ -#define NETVSC_DEFAULT_RX 2048 /* ~4M */ +#define NETVSC_DEFAULT_RX 10485 /* Max ~16M */ #define LINKCHANGE_INT (2 * HZ) #define VF_TAKEOVER_INT (HZ / 10) @@ -848,15 +848,14 @@ static int netvsc_set_channels(struct net_device *net, device_info.num_chn = count; device_info.ring_size = ring_size; device_info.send_sections = nvdev->send_section_cnt; + device_info.send_section_size = nvdev->send_section_size; device_info.recv_sections = nvdev->recv_section_cnt; + device_info.recv_section_size = nvdev->recv_section_size; rndis_filter_device_remove(dev, nvdev); nvdev = rndis_filter_device_add(dev, &device_info); - if (!IS_ERR(nvdev)) { - netif_set_real_num_tx_queues(net, nvdev->num_chn); - netif_set_real_num_rx_queues(net, nvdev->num_chn); - } else { + if (IS_ERR(nvdev)) { ret = PTR_ERR(nvdev); device_info.num_chn = orig; nvdev = rndis_filter_device_add(dev, &device_info); @@ -966,7 +965,9 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) device_info.ring_size = ring_size; device_info.num_chn = nvdev->num_chn; device_info.send_sections = nvdev->send_section_cnt; + device_info.send_section_size = nvdev->send_section_size; device_info.recv_sections = nvdev->recv_section_cnt; + device_info.recv_section_size = nvdev->recv_section_size; rndis_filter_device_remove(hdev, nvdev); @@ -1488,7 +1489,9 @@ static int netvsc_set_ringparam(struct net_device *ndev, device_info.num_chn = nvdev->num_chn; device_info.ring_size = ring_size; device_info.send_sections = new_tx; + device_info.send_section_size = nvdev->send_section_size; device_info.recv_sections = new_rx; + device_info.recv_section_size = nvdev->recv_section_size; netif_device_detach(ndev); was_opened = rndis_filter_opened(nvdev); @@ -1937,7 +1940,9 @@ static int netvsc_probe(struct hv_device *dev, device_info.ring_size = ring_size; device_info.num_chn = VRSS_CHANNEL_DEFAULT; device_info.send_sections = NETVSC_DEFAULT_TX; + device_info.send_section_size = NETVSC_SEND_SECTION_SIZE; device_info.recv_sections = NETVSC_DEFAULT_RX; + device_info.recv_section_size = NETVSC_RECV_SECTION_SIZE; nvdev = rndis_filter_device_add(dev, &device_info); if (IS_ERR(nvdev)) { @@ -1954,9 +1959,6 @@ static int netvsc_probe(struct hv_device *dev, NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; net->vlan_features = net->features; - netif_set_real_num_tx_queues(net, nvdev->num_chn); - netif_set_real_num_rx_queues(net, nvdev->num_chn); - netdev_lockdep_set_classes(net); /* MTU range: 68 - 1500 or 65521 */ @@ -2012,9 +2014,10 @@ static int netvsc_remove(struct hv_device *dev) if (vf_netdev) netvsc_unregister_vf(vf_netdev); + unregister_netdevice(net); + rndis_filter_device_remove(dev, rtnl_dereference(ndev_ctx->nvdev)); - unregister_netdevice(net); 
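/*
 * Illustrative sketch, not part of the diff: the netvsc changes above add the
 * per-section sizes to device_info so that paths which recreate the device
 * (set_channels, change_mtu, set_ringparam) keep the currently configured
 * section size instead of falling back to the compile-time default. The helper
 * below only models the buffer-size arithmetic used in netvsc_init_buf():
 * sections * section_size, rounded up to a page. The constants are
 * hypothetical, chosen for illustration.
 */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096u

struct device_info {
	unsigned int recv_sections;
	unsigned int recv_section_size;
};

static size_t roundup_to(size_t v, size_t mult)
{
	return ((v + mult - 1) / mult) * mult;
}

static size_t recv_buf_bytes(const struct device_info *info)
{
	return roundup_to((size_t)info->recv_sections * info->recv_section_size,
			  PAGE_SIZE);
}

int main(void)
{
	/* hypothetical values: 2048 sections of 1536 bytes each */
	struct device_info info = { .recv_sections = 2048,
				    .recv_section_size = 1536 };

	printf("receive buffer: %zu bytes\n", recv_buf_bytes(&info));
	return 0;
}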
rtnl_unlock(); hv_set_drvdata(dev, NULL); diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 69c40b8fccc3..065b204d8e17 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -1039,8 +1039,6 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) /* Set the channel before opening.*/ nvchan->channel = new_sc; - netif_napi_add(ndev, &nvchan->napi, - netvsc_poll, NAPI_POLL_WEIGHT); ret = vmbus_open(new_sc, nvscdev->ring_size * PAGE_SIZE, nvscdev->ring_size * PAGE_SIZE, NULL, 0, @@ -1048,10 +1046,86 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) if (ret == 0) napi_enable(&nvchan->napi); else - netif_napi_del(&nvchan->napi); + netdev_notice(ndev, "sub channel open failed: %d\n", ret); - atomic_inc(&nvscdev->open_chn); - wake_up(&nvscdev->subchan_open); + if (atomic_inc_return(&nvscdev->open_chn) == nvscdev->num_chn) + wake_up(&nvscdev->subchan_open); +} + +/* Open sub-channels after completing the handling of the device probe. + * This breaks overlap of processing the host message for the + * new primary channel with the initialization of sub-channels. + */ +void rndis_set_subchannel(struct work_struct *w) +{ + struct netvsc_device *nvdev + = container_of(w, struct netvsc_device, subchan_work); + struct nvsp_message *init_packet = &nvdev->channel_init_pkt; + struct net_device_context *ndev_ctx; + struct rndis_device *rdev; + struct net_device *ndev; + struct hv_device *hv_dev; + int i, ret; + + if (!rtnl_trylock()) { + schedule_work(w); + return; + } + + rdev = nvdev->extension; + if (!rdev) + goto unlock; /* device was removed */ + + ndev = rdev->ndev; + ndev_ctx = netdev_priv(ndev); + hv_dev = ndev_ctx->device_ctx; + + memset(init_packet, 0, sizeof(struct nvsp_message)); + init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL; + init_packet->msg.v5_msg.subchn_req.op = NVSP_SUBCHANNEL_ALLOCATE; + init_packet->msg.v5_msg.subchn_req.num_subchannels = + nvdev->num_chn - 1; + ret = vmbus_sendpacket(hv_dev->channel, init_packet, + sizeof(struct nvsp_message), + (unsigned long)init_packet, + VM_PKT_DATA_INBAND, + VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); + if (ret) { + netdev_err(ndev, "sub channel allocate send failed: %d\n", ret); + goto failed; + } + + wait_for_completion(&nvdev->channel_init_wait); + if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) { + netdev_err(ndev, "sub channel request failed\n"); + goto failed; + } + + nvdev->num_chn = 1 + + init_packet->msg.v5_msg.subchn_comp.num_subchannels; + + /* wait for all sub channels to open */ + wait_event(nvdev->subchan_open, + atomic_read(&nvdev->open_chn) == nvdev->num_chn); + + /* ignore failues from setting rss parameters, still have channels */ + rndis_filter_set_rss_param(rdev, netvsc_hash_key); + + netif_set_real_num_tx_queues(ndev, nvdev->num_chn); + netif_set_real_num_rx_queues(ndev, nvdev->num_chn); + + rtnl_unlock(); + return; + +failed: + /* fallback to only primary channel */ + for (i = 1; i < nvdev->num_chn; i++) + netif_napi_del(&nvdev->chan_table[i].napi); + + nvdev->max_chn = 1; + nvdev->num_chn = 1; +unlock: + rtnl_unlock(); } struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, @@ -1063,7 +1137,6 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, struct rndis_device *rndis_device; struct ndis_offload hwcaps; struct ndis_offload_params offloads; - struct nvsp_message *init_packet; struct ndis_recv_scale_cap rsscap; u32 rsscap_size = sizeof(struct ndis_recv_scale_cap); unsigned int 
gso_max_size = GSO_MAX_SIZE; @@ -1215,9 +1288,7 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, net_device->num_chn); atomic_set(&net_device->open_chn, 1); - - if (net_device->num_chn == 1) - return net_device; + vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open); for (i = 1; i < net_device->num_chn; i++) { ret = netvsc_alloc_recv_comp_ring(net_device, i); @@ -1228,38 +1299,15 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, } } - vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open); + for (i = 1; i < net_device->num_chn; i++) + netif_napi_add(net, &net_device->chan_table[i].napi, + netvsc_poll, NAPI_POLL_WEIGHT); - init_packet = &net_device->channel_init_pkt; - memset(init_packet, 0, sizeof(struct nvsp_message)); - init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL; - init_packet->msg.v5_msg.subchn_req.op = NVSP_SUBCHANNEL_ALLOCATE; - init_packet->msg.v5_msg.subchn_req.num_subchannels = - net_device->num_chn - 1; - ret = vmbus_sendpacket(dev->channel, init_packet, - sizeof(struct nvsp_message), - (unsigned long)init_packet, - VM_PKT_DATA_INBAND, - VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); - if (ret) - goto out; - - wait_for_completion(&net_device->channel_init_wait); - if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) { - ret = -ENODEV; - goto out; - } + if (net_device->num_chn > 1) + schedule_work(&net_device->subchan_work); - net_device->num_chn = 1 + - init_packet->msg.v5_msg.subchn_comp.num_subchannels; - - /* wait for all sub channels to open */ - wait_event(net_device->subchan_open, - atomic_read(&net_device->open_chn) == net_device->num_chn); - - /* ignore failues from setting rss parameters, still have channels */ - rndis_filter_set_rss_param(rndis_device, netvsc_hash_key); out: + /* if unavailable, just proceed with one queue */ if (ret) { net_device->max_chn = 1; net_device->num_chn = 1; @@ -1280,10 +1328,10 @@ void rndis_filter_device_remove(struct hv_device *dev, /* Halt and release the rndis device */ rndis_filter_halt_device(rndis_dev); - kfree(rndis_dev); net_dev->extension = NULL; netvsc_device_remove(dev); + kfree(rndis_dev); } int rndis_filter_open(struct netvsc_device *nvdev) diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index a9d16a3af514..cd931cf9dcc2 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -160,15 +160,6 @@ config MDIO_XGENE endif -menuconfig PHYLIB - tristate "PHY Device support and infrastructure" - depends on NETDEVICES - select MDIO_DEVICE - help - Ethernet controllers are usually attached to PHY - devices. This option provides infrastructure for - managing PHY devices. - config PHYLINK tristate depends on NETDEVICES @@ -179,6 +170,15 @@ config PHYLINK configuration links, PHYs, and Serdes links with MAC level autonegotiation modes. +menuconfig PHYLIB + tristate "PHY Device support and infrastructure" + depends on NETDEVICES + select MDIO_DEVICE + help + Ethernet controllers are usually attached to PHY + devices. This option provides infrastructure for + managing PHY devices. + if PHYLIB config SWPHY diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index e842d2cd1ee7..2b1e67bc1e73 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -373,7 +373,8 @@ void phy_ethtool_ksettings_get(struct phy_device *phydev, cmd->base.port = PORT_BNC; else cmd->base.port = PORT_MII; - + cmd->base.transceiver = phy_is_internal(phydev) ? 
+ XCVR_INTERNAL : XCVR_EXTERNAL; cmd->base.phy_address = phydev->mdio.addr; cmd->base.autoneg = phydev->autoneg; cmd->base.eth_tp_mdix_ctrl = phydev->mdix_ctrl; diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 8cf0c5901f95..67f25ac29025 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -879,7 +879,7 @@ void phy_attached_print(struct phy_device *phydev, const char *fmt, ...) { const char *drv_name = phydev->drv ? phydev->drv->name : "unbound"; char *irq_str; - char irq_num[4]; + char irq_num[8]; switch(phydev->irq) { case PHY_POLL: diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c index d15dd3938ba8..2e5150b0b8d5 100644 --- a/drivers/net/phy/xilinx_gmii2rgmii.c +++ b/drivers/net/phy/xilinx_gmii2rgmii.c @@ -44,7 +44,7 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev) priv->phy_drv->read_status(phydev); val = mdiobus_read(phydev->mdio.bus, priv->addr, XILINX_GMII2RGMII_REG); - val &= XILINX_GMII2RGMII_SPEED_MASK; + val &= ~XILINX_GMII2RGMII_SPEED_MASK; if (phydev->speed == SPEED_1000) val |= BMCR_SPEED1000; diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index b99a7fb09f8e..0161f77641fa 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -1265,30 +1265,45 @@ static int lan78xx_ethtool_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee, u8 *data) { struct lan78xx_net *dev = netdev_priv(netdev); + int ret; + + ret = usb_autopm_get_interface(dev->intf); + if (ret) + return ret; ee->magic = LAN78XX_EEPROM_MAGIC; - return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data); + ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data); + + usb_autopm_put_interface(dev->intf); + + return ret; } static int lan78xx_ethtool_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee, u8 *data) { struct lan78xx_net *dev = netdev_priv(netdev); + int ret; + + ret = usb_autopm_get_interface(dev->intf); + if (ret) + return ret; - /* Allow entire eeprom update only */ - if ((ee->magic == LAN78XX_EEPROM_MAGIC) && - (ee->offset == 0) && - (ee->len == 512) && - (data[0] == EEPROM_INDICATOR)) - return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data); + /* Invalid EEPROM_INDICATOR at offset zero will result in a failure + * to load data from EEPROM + */ + if (ee->magic == LAN78XX_EEPROM_MAGIC) + ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data); else if ((ee->magic == LAN78XX_OTP_MAGIC) && (ee->offset == 0) && (ee->len == 512) && (data[0] == OTP_INDICATOR_1)) - return lan78xx_write_raw_otp(dev, ee->offset, ee->len, data); + ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data); + + usb_autopm_put_interface(dev->intf); - return -EINVAL; + return ret; } static void lan78xx_get_strings(struct net_device *netdev, u32 stringset, @@ -2434,7 +2449,6 @@ static int lan78xx_reset(struct lan78xx_net *dev) /* LAN7801 only has RGMII mode */ if (dev->chipid == ID_REV_CHIP_ID_7801_) buf &= ~MAC_CR_GMII_EN_; - buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_; ret = lan78xx_write_reg(dev, MAC_CR, buf); ret = lan78xx_read_reg(dev, MAC_TX, &buf); diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 340c13484e5c..309b88acd3d0 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -526,7 +526,7 @@ static void smsc95xx_set_multicast(struct net_device *netdev) static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex, u16 lcladv, u16 rmtadv) { - u32 flow, afc_cfg = 0; + u32 flow 
= 0, afc_cfg; int ret = smsc95xx_read_reg(dev, AFC_CFG, &afc_cfg); if (ret < 0) @@ -537,20 +537,19 @@ static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex, if (cap & FLOW_CTRL_RX) flow = 0xFFFF0002; - else - flow = 0; - if (cap & FLOW_CTRL_TX) + if (cap & FLOW_CTRL_TX) { afc_cfg |= 0xF; - else + flow |= 0xFFFF0000; + } else { afc_cfg &= ~0xF; + } netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s\n", cap & FLOW_CTRL_RX ? "enabled" : "disabled", cap & FLOW_CTRL_TX ? "enabled" : "disabled"); } else { netif_dbg(dev, link, dev->net, "half duplex\n"); - flow = 0; afc_cfg |= 0xF; } diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 7e19051f3230..9b243e6f3008 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -957,12 +957,12 @@ static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev, { const struct ipv6hdr *iph = ipv6_hdr(skb); struct flowi6 fl6 = { + .flowi6_iif = ifindex, + .flowi6_mark = skb->mark, + .flowi6_proto = iph->nexthdr, .daddr = iph->daddr, .saddr = iph->saddr, .flowlabel = ip6_flowinfo(iph), - .flowi6_mark = skb->mark, - .flowi6_proto = iph->nexthdr, - .flowi6_iif = ifindex, }; struct net *net = dev_net(vrf_dev); struct rt6_info *rt6; diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index 1427a386a033..3e4d1e7998da 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -1417,6 +1417,15 @@ static int btt_claim_class(struct device *dev) struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); struct nd_namespace_index *nsindex; + /* + * If any of the DIMMs do not support labels the only + * possible BTT format is v1. + */ + if (!ndd) { + loop_bitmask = 0; + break; + } + nsindex = to_namespace_index(ndd, ndd->ns_current); if (nsindex == NULL) loop_bitmask |= 1; diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index e9aa453da50c..39dfd7affa31 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -262,16 +262,9 @@ static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, return copy_from_iter_flushcache(addr, bytes, i); } -static void pmem_dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, - void *addr, size_t size) -{ - arch_wb_cache_pmem(addr, size); -} - static const struct dax_operations pmem_dax_ops = { .direct_access = pmem_dax_direct_access, .copy_from_iter = pmem_copy_from_iter, - .flush = pmem_dax_flush, }; static const struct attribute_group *pmem_attribute_groups[] = { diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index acc816b67582..bb2aad078637 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -134,8 +134,6 @@ static inline bool nvme_req_needs_retry(struct request *req) return false; if (nvme_req(req)->status & NVME_SC_DNR) return false; - if (jiffies - req->start_time >= req->timeout) - return false; if (nvme_req(req)->retries >= nvme_max_retries) return false; return true; @@ -2590,7 +2588,7 @@ static void nvme_async_event_work(struct work_struct *work) container_of(work, struct nvme_ctrl, async_event_work); spin_lock_irq(&ctrl->lock); - while (ctrl->event_limit > 0) { + while (ctrl->state == NVME_CTRL_LIVE && ctrl->event_limit > 0) { int aer_idx = --ctrl->event_limit; spin_unlock_irq(&ctrl->lock); @@ -2677,7 +2675,8 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, /*FALLTHRU*/ case NVME_SC_ABORT_REQ: ++ctrl->event_limit; - queue_work(nvme_wq, &ctrl->async_event_work); + if (ctrl->state == NVME_CTRL_LIVE) + queue_work(nvme_wq, &ctrl->async_event_work); 
break; default: break; @@ -2692,7 +2691,7 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, nvme_queue_scan(ctrl); break; case NVME_AER_NOTICE_FW_ACT_STARTING: - schedule_work(&ctrl->fw_act_work); + queue_work(nvme_wq, &ctrl->fw_act_work); break; default: dev_warn(ctrl->device, "async event result %08x\n", result); diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index 47307752dc65..555c976cc2ee 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -565,6 +565,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, opts->queue_size = NVMF_DEF_QUEUE_SIZE; opts->nr_io_queues = num_online_cpus(); opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY; + opts->kato = NVME_DEFAULT_KATO; options = o = kstrdup(buf, GFP_KERNEL); if (!options) @@ -655,21 +656,22 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, goto out; } - if (opts->discovery_nqn) { - pr_err("Discovery controllers cannot accept keep_alive_tmo != 0\n"); - ret = -EINVAL; - goto out; - } - if (token < 0) { pr_err("Invalid keep_alive_tmo %d\n", token); ret = -EINVAL; goto out; - } else if (token == 0) { + } else if (token == 0 && !opts->discovery_nqn) { /* Allowed for debug */ pr_warn("keep_alive_tmo 0 won't execute keep alives!!!\n"); } opts->kato = token; + + if (opts->discovery_nqn && opts->kato) { + pr_err("Discovery controllers cannot accept KATO != 0\n"); + ret = -EINVAL; + goto out; + } + break; case NVMF_OPT_CTRL_LOSS_TMO: if (match_int(args, &token)) { @@ -762,8 +764,6 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, uuid_copy(&opts->host->id, &hostid); out: - if (!opts->discovery_nqn && !opts->kato) - opts->kato = NVME_DEFAULT_KATO; kfree(options); return ret; } diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index d2e882c0f496..af075e998944 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -1376,7 +1376,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) if (atomic_read(&op->state) == FCPOP_STATE_ABORTED) status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1); else if (freq->status) - status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1); + status = cpu_to_le16(NVME_SC_INTERNAL << 1); /* * For the linux implementation, if we have an unsuccesful @@ -1404,7 +1404,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) */ if (freq->transferred_length != be32_to_cpu(op->cmd_iu.data_len)) { - status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1); + status = cpu_to_le16(NVME_SC_INTERNAL << 1); goto done; } result.u64 = 0; @@ -1421,7 +1421,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) freq->transferred_length || op->rsp_iu.status_code || sqe->common.command_id != cqe->command_id)) { - status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1); + status = cpu_to_le16(NVME_SC_INTERNAL << 1); goto done; } result = cqe->result; @@ -1429,7 +1429,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) break; default: - status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1); + status = cpu_to_le16(NVME_SC_INTERNAL << 1); goto done; } @@ -1989,16 +1989,17 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, * as well as those by FC-NVME spec. */ WARN_ON_ONCE(sqe->common.metadata); - WARN_ON_ONCE(sqe->common.dptr.prp1); - WARN_ON_ONCE(sqe->common.dptr.prp2); sqe->common.flags |= NVME_CMD_SGL_METABUF; /* - * format SQE DPTR field per FC-NVME rules - * type=data block descr; subtype=offset; - * offset is currently 0. 
+ * format SQE DPTR field per FC-NVME rules: + * type=0x5 Transport SGL Data Block Descriptor + * subtype=0xA Transport-specific value + * address=0 + * length=length of the data series */ - sqe->rw.dptr.sgl.type = NVME_SGL_FMT_OFFSET; + sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | + NVME_SGL_FMT_TRANSPORT_A; sqe->rw.dptr.sgl.length = cpu_to_le32(data_len); sqe->rw.dptr.sgl.addr = 0; diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 4a2121335f48..cb73bc8cad3b 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -24,6 +24,7 @@ #include <linux/mm.h> #include <linux/module.h> #include <linux/mutex.h> +#include <linux/once.h> #include <linux/pci.h> #include <linux/poison.h> #include <linux/t10-pi.h> @@ -540,6 +541,20 @@ static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi) } #endif +static void nvme_print_sgl(struct scatterlist *sgl, int nents) +{ + int i; + struct scatterlist *sg; + + for_each_sg(sgl, sg, nents, i) { + dma_addr_t phys = sg_phys(sg); + pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d " + "dma_address:%pad dma_length:%d\n", + i, &phys, sg->offset, sg->length, &sg_dma_address(sg), + sg_dma_len(sg)); + } +} + static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); @@ -622,19 +637,10 @@ static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req) return BLK_STS_OK; bad_sgl: - if (WARN_ONCE(1, "Invalid SGL for payload:%d nents:%d\n", - blk_rq_payload_bytes(req), iod->nents)) { - for_each_sg(iod->sg, sg, iod->nents, i) { - dma_addr_t phys = sg_phys(sg); - pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d " - "dma_address:%pad dma_length:%d\n", i, &phys, - sg->offset, sg->length, - &sg_dma_address(sg), - sg_dma_len(sg)); - } - } + WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents), + "Invalid SGL for payload:%d nents:%d\n", + blk_rq_payload_bytes(req), iod->nents); return BLK_STS_IOERR; - } static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, @@ -1313,11 +1319,11 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid) if (result < 0) goto release_cq; + nvme_init_queue(nvmeq, qid); result = queue_request_irq(nvmeq); if (result < 0) goto release_sq; - nvme_init_queue(nvmeq, qid); return result; release_sq: @@ -1464,6 +1470,7 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev) return result; nvmeq->cq_vector = 0; + nvme_init_queue(nvmeq, 0); result = queue_request_irq(nvmeq); if (result) { nvmeq->cq_vector = -1; @@ -2156,7 +2163,6 @@ static void nvme_reset_work(struct work_struct *work) if (result) goto out; - nvme_init_queue(dev->queues[0], 0); result = nvme_alloc_admin_tags(dev); if (result) goto out; diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 58983000964b..92a03ff5fb4d 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -942,7 +942,12 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work) } changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); - WARN_ON_ONCE(!changed); + if (!changed) { + /* state change failure is ok if we're in DELETING state */ + WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING); + return; + } + ctrl->ctrl.nr_reconnects = 0; nvme_start_ctrl(&ctrl->ctrl); @@ -962,7 +967,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work) struct nvme_rdma_ctrl *ctrl = container_of(work, struct nvme_rdma_ctrl, err_work); - nvme_stop_ctrl(&ctrl->ctrl); + 
nvme_stop_keep_alive(&ctrl->ctrl); if (ctrl->ctrl.queue_count > 1) { nvme_stop_queues(&ctrl->ctrl); diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 7c23eaf8e563..1b208beeef50 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -390,10 +390,10 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status) if (status) nvmet_set_status(req, status); - /* XXX: need to fill in something useful for sq_head */ - req->rsp->sq_head = 0; - if (likely(req->sq)) /* may happen during early failure */ - req->rsp->sq_id = cpu_to_le16(req->sq->qid); + if (req->sq->size) + req->sq->sqhd = (req->sq->sqhd + 1) % req->sq->size; + req->rsp->sq_head = cpu_to_le16(req->sq->sqhd); + req->rsp->sq_id = cpu_to_le16(req->sq->qid); req->rsp->command_id = req->cmd->common.command_id; if (req->ns) @@ -420,6 +420,7 @@ void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid, u16 size) { + sq->sqhd = 0; sq->qid = qid; sq->size = size; diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c index 859a66725291..db3bf6b8bf9e 100644 --- a/drivers/nvme/target/fabrics-cmd.c +++ b/drivers/nvme/target/fabrics-cmd.c @@ -109,9 +109,14 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req) pr_warn("queue already connected!\n"); return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR; } + if (!sqsize) { + pr_warn("queue size zero!\n"); + return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; + } - nvmet_cq_setup(ctrl, req->cq, qid, sqsize); - nvmet_sq_setup(ctrl, req->sq, qid, sqsize); + /* note: convert queue size from 0's-based value to 1's-based value */ + nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1); + nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1); return 0; } diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index 421e43bf1dd7..58e010bdda3e 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -148,7 +148,7 @@ struct nvmet_fc_tgt_assoc { u32 a_id; struct nvmet_fc_tgtport *tgtport; struct list_head a_list; - struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES]; + struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1]; struct kref ref; }; @@ -608,7 +608,7 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc, unsigned long flags; int ret; - if (qid >= NVMET_NR_QUEUES) + if (qid > NVMET_NR_QUEUES) return NULL; queue = kzalloc((sizeof(*queue) + @@ -783,6 +783,9 @@ nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport, u16 qid = nvmet_fc_getqueueid(connection_id); unsigned long flags; + if (qid > NVMET_NR_QUEUES) + return NULL; + spin_lock_irqsave(&tgtport->lock, flags); list_for_each_entry(assoc, &tgtport->assoc_list, a_list) { if (association_id == assoc->association_id) { @@ -888,7 +891,7 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc) int i; spin_lock_irqsave(&tgtport->lock, flags); - for (i = NVMET_NR_QUEUES - 1; i >= 0; i--) { + for (i = NVMET_NR_QUEUES; i >= 0; i--) { queue = assoc->queues[i]; if (queue) { if (!nvmet_fc_tgt_q_get(queue)) @@ -1910,8 +1913,7 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport, spin_lock_irqsave(&fod->flock, flags); fod->writedataactive = false; spin_unlock_irqrestore(&fod->flock, flags); - nvmet_req_complete(&fod->req, - NVME_SC_FC_TRANSPORT_ERROR); + nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); } else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ { fcpreq->fcp_error = ret; fcpreq->transferred_length = 0; @@ -1929,8 +1931,7 @@ 
__nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort) /* if in the middle of an io and we need to tear down */ if (abort) { if (fcpreq->op == NVMET_FCOP_WRITEDATA) { - nvmet_req_complete(&fod->req, - NVME_SC_FC_TRANSPORT_ERROR); + nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); return true; } @@ -1968,8 +1969,7 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod) fod->abort = true; spin_unlock(&fod->flock); - nvmet_req_complete(&fod->req, - NVME_SC_FC_TRANSPORT_ERROR); + nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); return; } @@ -2533,13 +2533,17 @@ nvmet_fc_remove_port(struct nvmet_port *port) { struct nvmet_fc_tgtport *tgtport = port->priv; unsigned long flags; + bool matched = false; spin_lock_irqsave(&nvmet_fc_tgtlock, flags); if (tgtport->port == port) { - nvmet_fc_tgtport_put(tgtport); + matched = true; tgtport->port = NULL; } spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); + + if (matched) + nvmet_fc_tgtport_put(tgtport); } static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = { diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index 1cb9847ec261..7b75d9de55ab 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c @@ -224,8 +224,6 @@ struct fcloop_nport { struct fcloop_lport *lport; struct list_head nport_list; struct kref ref; - struct completion rport_unreg_done; - struct completion tport_unreg_done; u64 node_name; u64 port_name; u32 port_role; @@ -576,7 +574,7 @@ fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport, tfcp_req->aborted = true; spin_unlock(&tfcp_req->reqlock); - tfcp_req->status = NVME_SC_FC_TRANSPORT_ABORTED; + tfcp_req->status = NVME_SC_INTERNAL; /* * nothing more to do. If io wasn't active, the transport should @@ -631,6 +629,32 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport, } static void +fcloop_nport_free(struct kref *ref) +{ + struct fcloop_nport *nport = + container_of(ref, struct fcloop_nport, ref); + unsigned long flags; + + spin_lock_irqsave(&fcloop_lock, flags); + list_del(&nport->nport_list); + spin_unlock_irqrestore(&fcloop_lock, flags); + + kfree(nport); +} + +static void +fcloop_nport_put(struct fcloop_nport *nport) +{ + kref_put(&nport->ref, fcloop_nport_free); +} + +static int +fcloop_nport_get(struct fcloop_nport *nport) +{ + return kref_get_unless_zero(&nport->ref); +} + +static void fcloop_localport_delete(struct nvme_fc_local_port *localport) { struct fcloop_lport *lport = localport->private; @@ -644,8 +668,7 @@ fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport) { struct fcloop_rport *rport = remoteport->private; - /* release any threads waiting for the unreg to complete */ - complete(&rport->nport->rport_unreg_done); + fcloop_nport_put(rport->nport); } static void @@ -653,8 +676,7 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport) { struct fcloop_tport *tport = targetport->private; - /* release any threads waiting for the unreg to complete */ - complete(&tport->nport->tport_unreg_done); + fcloop_nport_put(tport->nport); } #define FCLOOP_HW_QUEUES 4 @@ -722,6 +744,7 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr, goto out_free_opts; } + memset(&pinfo, 0, sizeof(pinfo)); pinfo.node_name = opts->wwnn; pinfo.port_name = opts->wwpn; pinfo.port_role = opts->roles; @@ -804,32 +827,6 @@ fcloop_delete_local_port(struct device *dev, struct device_attribute *attr, return ret ? 
ret : count; } -static void -fcloop_nport_free(struct kref *ref) -{ - struct fcloop_nport *nport = - container_of(ref, struct fcloop_nport, ref); - unsigned long flags; - - spin_lock_irqsave(&fcloop_lock, flags); - list_del(&nport->nport_list); - spin_unlock_irqrestore(&fcloop_lock, flags); - - kfree(nport); -} - -static void -fcloop_nport_put(struct fcloop_nport *nport) -{ - kref_put(&nport->ref, fcloop_nport_free); -} - -static int -fcloop_nport_get(struct fcloop_nport *nport) -{ - return kref_get_unless_zero(&nport->ref); -} - static struct fcloop_nport * fcloop_alloc_nport(const char *buf, size_t count, bool remoteport) { @@ -938,6 +935,7 @@ fcloop_create_remote_port(struct device *dev, struct device_attribute *attr, if (!nport) return -EIO; + memset(&pinfo, 0, sizeof(pinfo)); pinfo.node_name = nport->node_name; pinfo.port_name = nport->port_name; pinfo.port_role = nport->port_role; @@ -979,24 +977,12 @@ __unlink_remote_port(struct fcloop_nport *nport) } static int -__wait_remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport) +__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport) { - int ret; - if (!rport) return -EALREADY; - init_completion(&nport->rport_unreg_done); - - ret = nvme_fc_unregister_remoteport(rport->remoteport); - if (ret) - return ret; - - wait_for_completion(&nport->rport_unreg_done); - - fcloop_nport_put(nport); - - return ret; + return nvme_fc_unregister_remoteport(rport->remoteport); } static ssize_t @@ -1029,7 +1015,7 @@ fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr, if (!nport) return -ENOENT; - ret = __wait_remoteport_unreg(nport, rport); + ret = __remoteport_unreg(nport, rport); return ret ? ret : count; } @@ -1086,24 +1072,12 @@ __unlink_target_port(struct fcloop_nport *nport) } static int -__wait_targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport) +__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport) { - int ret; - if (!tport) return -EALREADY; - init_completion(&nport->tport_unreg_done); - - ret = nvmet_fc_unregister_targetport(tport->targetport); - if (ret) - return ret; - - wait_for_completion(&nport->tport_unreg_done); - - fcloop_nport_put(nport); - - return ret; + return nvmet_fc_unregister_targetport(tport->targetport); } static ssize_t @@ -1136,7 +1110,7 @@ fcloop_delete_target_port(struct device *dev, struct device_attribute *attr, if (!nport) return -ENOENT; - ret = __wait_targetport_unreg(nport, tport); + ret = __targetport_unreg(nport, tport); return ret ? 
ret : count; } @@ -1223,11 +1197,11 @@ static void __exit fcloop_exit(void) spin_unlock_irqrestore(&fcloop_lock, flags); - ret = __wait_targetport_unreg(nport, tport); + ret = __targetport_unreg(nport, tport); if (ret) pr_warn("%s: Failed deleting target port\n", __func__); - ret = __wait_remoteport_unreg(nport, rport); + ret = __remoteport_unreg(nport, rport); if (ret) pr_warn("%s: Failed deleting remote port\n", __func__); diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 7d261ab894f4..7b8e20adf760 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -74,6 +74,7 @@ struct nvmet_sq { struct percpu_ref ref; u16 qid; u16 size; + u16 sqhd; struct completion free_done; struct completion confirm_done; }; diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c index 4ddc6e8f9fe7..f9308c2f22e6 100644 --- a/drivers/pci/endpoint/functions/pci-epf-test.c +++ b/drivers/pci/endpoint/functions/pci-epf-test.c @@ -251,9 +251,8 @@ err: return ret; } -static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test) +static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq) { - u8 irq; u8 msi_count; struct pci_epf *epf = epf_test->epf; struct pci_epc *epc = epf->epc; @@ -262,7 +261,6 @@ static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test) reg->status |= STATUS_IRQ_RAISED; msi_count = pci_epc_get_msi(epc); - irq = (reg->command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT; if (irq > msi_count || msi_count <= 0) pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0); else @@ -289,6 +287,8 @@ static void pci_epf_test_cmd_handler(struct work_struct *work) reg->command = 0; reg->status = 0; + irq = (command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT; + if (command & COMMAND_RAISE_LEGACY_IRQ) { reg->status = STATUS_IRQ_RAISED; pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0); @@ -301,7 +301,7 @@ static void pci_epf_test_cmd_handler(struct work_struct *work) reg->status |= STATUS_WRITE_FAIL; else reg->status |= STATUS_WRITE_SUCCESS; - pci_epf_test_raise_irq(epf_test); + pci_epf_test_raise_irq(epf_test, irq); goto reset_handler; } @@ -311,7 +311,7 @@ static void pci_epf_test_cmd_handler(struct work_struct *work) reg->status |= STATUS_READ_SUCCESS; else reg->status |= STATUS_READ_FAIL; - pci_epf_test_raise_irq(epf_test); + pci_epf_test_raise_irq(epf_test, irq); goto reset_handler; } @@ -321,13 +321,12 @@ static void pci_epf_test_cmd_handler(struct work_struct *work) reg->status |= STATUS_COPY_SUCCESS; else reg->status |= STATUS_COPY_FAIL; - pci_epf_test_raise_irq(epf_test); + pci_epf_test_raise_irq(epf_test, irq); goto reset_handler; } if (command & COMMAND_RAISE_MSI_IRQ) { msi_count = pci_epc_get_msi(epc); - irq = (command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT; if (irq > msi_count || msi_count <= 0) goto reset_handler; reg->status = STATUS_IRQ_RAISED; diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 1eecfa301f7f..8e075ea2743e 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -686,7 +686,7 @@ static ssize_t driver_override_store(struct device *dev, const char *buf, size_t count) { struct pci_dev *pdev = to_pci_dev(dev); - char *driver_override, *old = pdev->driver_override, *cp; + char *driver_override, *old, *cp; /* We need to keep extra room for a newline */ if (count >= (PAGE_SIZE - 1)) @@ -700,12 +700,15 @@ static ssize_t driver_override_store(struct device *dev, if (cp) *cp = '\0'; + device_lock(dev); + old = pdev->driver_override; if (strlen(driver_override)) 
{ pdev->driver_override = driver_override; } else { kfree(driver_override); pdev->driver_override = NULL; } + device_unlock(dev); kfree(old); @@ -716,8 +719,12 @@ static ssize_t driver_override_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev = to_pci_dev(dev); + ssize_t len; - return snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override); + device_lock(dev); + len = snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override); + device_unlock(dev); + return len; } static DEVICE_ATTR_RW(driver_override); diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index b0002daa50f3..6078dfc11b11 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -52,7 +52,6 @@ static void pci_pme_list_scan(struct work_struct *work); static LIST_HEAD(pci_pme_list); static DEFINE_MUTEX(pci_pme_list_mutex); static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan); -static DEFINE_MUTEX(pci_bridge_mutex); struct pci_pme_device { struct list_head list; @@ -1351,16 +1350,10 @@ static void pci_enable_bridge(struct pci_dev *dev) if (bridge) pci_enable_bridge(bridge); - /* - * Hold pci_bridge_mutex to prevent a race when enabling two - * devices below the bridge simultaneously. The race may cause a - * PCI_COMMAND_MEMORY update to be lost (see changelog). - */ - mutex_lock(&pci_bridge_mutex); if (pci_is_enabled(dev)) { if (!dev->is_busmaster) pci_set_master(dev); - goto end; + return; } retval = pci_enable_device(dev); @@ -1368,8 +1361,6 @@ static void pci_enable_bridge(struct pci_dev *dev) dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n", retval); pci_set_master(dev); -end: - mutex_unlock(&pci_bridge_mutex); } static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags) @@ -1394,7 +1385,7 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags) return 0; /* already enabled */ bridge = pci_upstream_bridge(dev); - if (bridge && !pci_is_enabled(bridge)) + if (bridge) pci_enable_bridge(bridge); /* only skip sriov related */ diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index be635f017756..083276e03c38 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -260,7 +260,7 @@ static int __init dmi_pcie_pme_disable_msi(const struct dmi_system_id *d) return 0; } -static struct dmi_system_id __initdata pcie_portdrv_dmi_table[] = { +static const struct dmi_system_id pcie_portdrv_dmi_table[] __initconst = { /* * Boxes that should not use MSI for PCIe PME signaling. */ diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index a2afb44fad10..a4d33619a7bb 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -1707,7 +1707,7 @@ static int dmi_disable_ioapicreroute(const struct dmi_system_id *d) return 0; } -static struct dmi_system_id boot_interrupt_dmi_table[] = { +static const struct dmi_system_id boot_interrupt_dmi_table[] = { /* * Systems to exclude from boot interrupt reroute quirks */ diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c index 944674ee3464..19e17829f515 100644 --- a/drivers/pcmcia/db1xxx_ss.c +++ b/drivers/pcmcia/db1xxx_ss.c @@ -131,22 +131,27 @@ static irqreturn_t db1000_pcmcia_stschgirq(int irq, void *data) return IRQ_HANDLED; } +/* Db/Pb1200 have separate per-socket insertion and ejection + * interrupts which stay asserted as long as the card is + * inserted/missing. The one which caused us to be called + * needs to be disabled and the other one enabled. 
+ */ static irqreturn_t db1200_pcmcia_cdirq(int irq, void *data) { + disable_irq_nosync(irq); + return IRQ_WAKE_THREAD; +} + +static irqreturn_t db1200_pcmcia_cdirq_fn(int irq, void *data) +{ struct db1x_pcmcia_sock *sock = data; - /* Db/Pb1200 have separate per-socket insertion and ejection - * interrupts which stay asserted as long as the card is - * inserted/missing. The one which caused us to be called - * needs to be disabled and the other one enabled. - */ - if (irq == sock->insert_irq) { - disable_irq_nosync(sock->insert_irq); + /* Wait a bit for the signals to stop bouncing. */ + msleep(100); + if (irq == sock->insert_irq) enable_irq(sock->eject_irq); - } else { - disable_irq_nosync(sock->eject_irq); + else enable_irq(sock->insert_irq); - } pcmcia_parse_events(&sock->socket, SS_DETECT); @@ -172,13 +177,13 @@ static int db1x_pcmcia_setup_irqs(struct db1x_pcmcia_sock *sock) */ if ((sock->board_type == BOARD_TYPE_DB1200) || (sock->board_type == BOARD_TYPE_DB1300)) { - ret = request_irq(sock->insert_irq, db1200_pcmcia_cdirq, - 0, "pcmcia_insert", sock); + ret = request_threaded_irq(sock->insert_irq, db1200_pcmcia_cdirq, + db1200_pcmcia_cdirq_fn, 0, "pcmcia_insert", sock); if (ret) goto out1; - ret = request_irq(sock->eject_irq, db1200_pcmcia_cdirq, - 0, "pcmcia_eject", sock); + ret = request_threaded_irq(sock->eject_irq, db1200_pcmcia_cdirq, + db1200_pcmcia_cdirq_fn, 0, "pcmcia_eject", sock); if (ret) { free_irq(sock->insert_irq, sock); goto out1; diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c index 0a9b78705ee8..3303dd8d8eb5 100644 --- a/drivers/perf/arm_pmu_acpi.c +++ b/drivers/perf/arm_pmu_acpi.c @@ -235,6 +235,7 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn) ret = armpmu_register(pmu); if (ret) { pr_warn("Failed to register PMU for CPU%d\n", cpu); + kfree(pmu->name); return ret; } } diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index 441912c10b82..5c8d452e35e2 100644 --- a/drivers/phy/Kconfig +++ b/drivers/phy/Kconfig @@ -44,6 +44,7 @@ source "drivers/phy/allwinner/Kconfig" source "drivers/phy/amlogic/Kconfig" source "drivers/phy/broadcom/Kconfig" source "drivers/phy/hisilicon/Kconfig" +source "drivers/phy/lantiq/Kconfig" source "drivers/phy/marvell/Kconfig" source "drivers/phy/mediatek/Kconfig" source "drivers/phy/motorola/Kconfig" diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile index 06f3c500030d..3a52dcb09566 100644 --- a/drivers/phy/Makefile +++ b/drivers/phy/Makefile @@ -6,9 +6,9 @@ obj-$(CONFIG_GENERIC_PHY) += phy-core.o obj-$(CONFIG_PHY_LPC18XX_USB_OTG) += phy-lpc18xx-usb-otg.o obj-$(CONFIG_PHY_XGENE) += phy-xgene.o obj-$(CONFIG_PHY_PISTACHIO_USB) += phy-pistachio-usb.o - obj-$(CONFIG_ARCH_SUNXI) += allwinner/ obj-$(CONFIG_ARCH_MESON) += amlogic/ +obj-$(CONFIG_LANTIQ) += lantiq/ obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/ obj-$(CONFIG_ARCH_RENESAS) += renesas/ obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/ diff --git a/drivers/phy/lantiq/Kconfig b/drivers/phy/lantiq/Kconfig new file mode 100644 index 000000000000..326d88a6417d --- /dev/null +++ b/drivers/phy/lantiq/Kconfig @@ -0,0 +1,9 @@ +# +# Phy drivers for Lantiq / Intel platforms +# +config PHY_LANTIQ_RCU_USB2 + tristate "Lantiq XWAY SoC RCU based USB PHY" + depends on OF && (SOC_TYPE_XWAY || COMPILE_TEST) + select GENERIC_PHY + help + Support for the USB PHY(s) on the Lantiq / Intel XWAY family SoCs. 
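For orientation, the PHY added above is exposed through the generic PHY framework (GENERIC_PHY is selected by the new Kconfig entry), so a USB host-controller driver consumes it with the standard phy_* calls rather than touching the RCU registers itself. A minimal consumer sketch in C follows; the "usb2-phy" con_id, the probe function name, and the trimmed error handling are illustrative assumptions, not part of this patch:

#include <linux/err.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>

static int example_usb_probe(struct platform_device *pdev)
{
	struct phy *phy;
	int ret;

	/* Look up the PHY referenced by the controller's DT node (con_id is assumed) */
	phy = devm_phy_get(&pdev->dev, "usb2-phy");
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	/* Runs ltq_rcu_usb2_phy_init(): host-mode and DMA endianness register setup */
	ret = phy_init(phy);
	if (ret)
		return ret;

	/* Runs ltq_rcu_usb2_phy_power_on(): deasserts the PHY reset, enables the gate clock */
	ret = phy_power_on(phy);
	if (ret)
		phy_exit(phy);

	return ret;
}

Keeping phy_init() (one-time register setup) separate from phy_power_on() (reset and clock handling) mirrors the phy_ops implemented later in the new phy-lantiq-rcu-usb2.c.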
diff --git a/drivers/phy/lantiq/Makefile b/drivers/phy/lantiq/Makefile new file mode 100644 index 000000000000..f73eb56a5416 --- /dev/null +++ b/drivers/phy/lantiq/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_PHY_LANTIQ_RCU_USB2) += phy-lantiq-rcu-usb2.o diff --git a/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c b/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c new file mode 100644 index 000000000000..986224fca9e9 --- /dev/null +++ b/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c @@ -0,0 +1,254 @@ +/* + * Lantiq XWAY SoC RCU module based USB 1.1/2.0 PHY driver + * + * Copyright (C) 2016 Martin Blumenstingl <martin.blumenstingl@googlemail.com> + * Copyright (C) 2017 Hauke Mehrtens <hauke@hauke-m.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/property.h> +#include <linux/regmap.h> +#include <linux/reset.h> + +/* Transmitter HS Pre-Emphasis Enable */ +#define RCU_CFG1_TX_PEE BIT(0) +/* Disconnect Threshold */ +#define RCU_CFG1_DIS_THR_MASK 0x00038000 +#define RCU_CFG1_DIS_THR_SHIFT 15 + +struct ltq_rcu_usb2_bits { + u8 hostmode; + u8 slave_endianness; + u8 host_endianness; + bool have_ana_cfg; +}; + +struct ltq_rcu_usb2_priv { + struct regmap *regmap; + unsigned int phy_reg_offset; + unsigned int ana_cfg1_reg_offset; + const struct ltq_rcu_usb2_bits *reg_bits; + struct device *dev; + struct phy *phy; + struct clk *phy_gate_clk; + struct reset_control *ctrl_reset; + struct reset_control *phy_reset; +}; + +static const struct ltq_rcu_usb2_bits xway_rcu_usb2_reg_bits = { + .hostmode = 11, + .slave_endianness = 9, + .host_endianness = 10, + .have_ana_cfg = false, +}; + +static const struct ltq_rcu_usb2_bits xrx100_rcu_usb2_reg_bits = { + .hostmode = 11, + .slave_endianness = 17, + .host_endianness = 10, + .have_ana_cfg = false, +}; + +static const struct ltq_rcu_usb2_bits xrx200_rcu_usb2_reg_bits = { + .hostmode = 11, + .slave_endianness = 9, + .host_endianness = 10, + .have_ana_cfg = true, +}; + +static const struct of_device_id ltq_rcu_usb2_phy_of_match[] = { + { + .compatible = "lantiq,ase-usb2-phy", + .data = &xway_rcu_usb2_reg_bits, + }, + { + .compatible = "lantiq,danube-usb2-phy", + .data = &xway_rcu_usb2_reg_bits, + }, + { + .compatible = "lantiq,xrx100-usb2-phy", + .data = &xrx100_rcu_usb2_reg_bits, + }, + { + .compatible = "lantiq,xrx200-usb2-phy", + .data = &xrx200_rcu_usb2_reg_bits, + }, + { + .compatible = "lantiq,xrx300-usb2-phy", + .data = &xrx200_rcu_usb2_reg_bits, + }, + { }, +}; +MODULE_DEVICE_TABLE(of, ltq_rcu_usb2_phy_of_match); + +static int ltq_rcu_usb2_phy_init(struct phy *phy) +{ + struct ltq_rcu_usb2_priv *priv = phy_get_drvdata(phy); + + if (priv->reg_bits->have_ana_cfg) { + regmap_update_bits(priv->regmap, priv->ana_cfg1_reg_offset, + RCU_CFG1_TX_PEE, RCU_CFG1_TX_PEE); + regmap_update_bits(priv->regmap, priv->ana_cfg1_reg_offset, + RCU_CFG1_DIS_THR_MASK, 7 << RCU_CFG1_DIS_THR_SHIFT); + } + + /* Configure core to host mode */ + regmap_update_bits(priv->regmap, priv->phy_reg_offset, + BIT(priv->reg_bits->hostmode), 0); + + /* Select DMA endianness (Host-endian: big-endian) */ + regmap_update_bits(priv->regmap, priv->phy_reg_offset, + BIT(priv->reg_bits->slave_endianness), 
0); + regmap_update_bits(priv->regmap, priv->phy_reg_offset, + BIT(priv->reg_bits->host_endianness), + BIT(priv->reg_bits->host_endianness)); + + return 0; +} + +static int ltq_rcu_usb2_phy_power_on(struct phy *phy) +{ + struct ltq_rcu_usb2_priv *priv = phy_get_drvdata(phy); + struct device *dev = priv->dev; + int ret; + + reset_control_deassert(priv->phy_reset); + + ret = clk_prepare_enable(priv->phy_gate_clk); + if (ret) + dev_err(dev, "failed to enable PHY gate\n"); + + return ret; +} + +static int ltq_rcu_usb2_phy_power_off(struct phy *phy) +{ + struct ltq_rcu_usb2_priv *priv = phy_get_drvdata(phy); + + reset_control_assert(priv->phy_reset); + + clk_disable_unprepare(priv->phy_gate_clk); + + return 0; +} + +static struct phy_ops ltq_rcu_usb2_phy_ops = { + .init = ltq_rcu_usb2_phy_init, + .power_on = ltq_rcu_usb2_phy_power_on, + .power_off = ltq_rcu_usb2_phy_power_off, + .owner = THIS_MODULE, +}; + +static int ltq_rcu_usb2_of_parse(struct ltq_rcu_usb2_priv *priv, + struct platform_device *pdev) +{ + struct device *dev = priv->dev; + const __be32 *offset; + int ret; + + priv->reg_bits = of_device_get_match_data(dev); + + priv->regmap = syscon_node_to_regmap(dev->of_node->parent); + if (IS_ERR(priv->regmap)) { + dev_err(dev, "Failed to lookup RCU regmap\n"); + return PTR_ERR(priv->regmap); + } + + offset = of_get_address(dev->of_node, 0, NULL, NULL); + if (!offset) { + dev_err(dev, "Failed to get RCU PHY reg offset\n"); + return -ENOENT; + } + priv->phy_reg_offset = __be32_to_cpu(*offset); + + if (priv->reg_bits->have_ana_cfg) { + offset = of_get_address(dev->of_node, 1, NULL, NULL); + if (!offset) { + dev_err(dev, "Failed to get RCU ANA CFG1 reg offset\n"); + return -ENOENT; + } + priv->ana_cfg1_reg_offset = __be32_to_cpu(*offset); + } + + priv->phy_gate_clk = devm_clk_get(dev, "phy"); + if (IS_ERR(priv->phy_gate_clk)) { + dev_err(dev, "Unable to get USB phy gate clk\n"); + return PTR_ERR(priv->phy_gate_clk); + } + + priv->ctrl_reset = devm_reset_control_get_shared(dev, "ctrl"); + if (IS_ERR(priv->ctrl_reset)) { + if (PTR_ERR(priv->ctrl_reset) != -EPROBE_DEFER) + dev_err(dev, "failed to get 'ctrl' reset\n"); + return PTR_ERR(priv->ctrl_reset); + } + + priv->phy_reset = devm_reset_control_get_optional(dev, "phy"); + if (IS_ERR(priv->phy_reset)) + return PTR_ERR(priv->phy_reset); + + return 0; +} + +static int ltq_rcu_usb2_phy_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct ltq_rcu_usb2_priv *priv; + struct phy_provider *provider; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->dev = dev; + + ret = ltq_rcu_usb2_of_parse(priv, pdev); + if (ret) + return ret; + + /* Reset USB core through reset controller */ + reset_control_deassert(priv->ctrl_reset); + + reset_control_assert(priv->phy_reset); + + priv->phy = devm_phy_create(dev, dev->of_node, <q_rcu_usb2_phy_ops); + if (IS_ERR(priv->phy)) { + dev_err(dev, "failed to create PHY\n"); + return PTR_ERR(priv->phy); + } + + phy_set_drvdata(priv->phy, priv); + + provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + if (IS_ERR(provider)) + return PTR_ERR(provider); + + dev_set_drvdata(priv->dev, priv); + return 0; +} + +static struct platform_driver ltq_rcu_usb2_phy_driver = { + .probe = ltq_rcu_usb2_phy_probe, + .driver = { + .name = "lantiq-rcu-usb2-phy", + .of_match_table = ltq_rcu_usb2_phy_of_match, + } +}; +module_platform_driver(ltq_rcu_usb2_phy_driver); + +MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>"); 
+MODULE_DESCRIPTION("Lantiq XWAY USB2 PHY driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c index b8b6ab072cd0..71b944748304 100644 --- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c +++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c @@ -550,9 +550,9 @@ static int armada_37xx_irq_set_wake(struct irq_data *d, unsigned int on) spin_lock_irqsave(&info->irq_lock, flags); val = readl(info->base + reg); if (on) - val |= d->mask; + val |= (BIT(d->hwirq % GPIO_PER_REG)); else - val &= ~d->mask; + val &= ~(BIT(d->hwirq % GPIO_PER_REG)); writel(val, info->base + reg); spin_unlock_irqrestore(&info->irq_lock, flags); @@ -571,10 +571,10 @@ static int armada_37xx_irq_set_type(struct irq_data *d, unsigned int type) val = readl(info->base + reg); switch (type) { case IRQ_TYPE_EDGE_RISING: - val &= ~d->mask; + val &= ~(BIT(d->hwirq % GPIO_PER_REG)); break; case IRQ_TYPE_EDGE_FALLING: - val |= d->mask; + val |= (BIT(d->hwirq % GPIO_PER_REG)); break; default: spin_unlock_irqrestore(&info->irq_lock, flags); @@ -624,11 +624,27 @@ static void armada_37xx_irq_handler(struct irq_desc *desc) chained_irq_exit(chip, desc); } +static unsigned int armada_37xx_irq_startup(struct irq_data *d) +{ + struct gpio_chip *chip = irq_data_get_irq_chip_data(d); + int irq = d->hwirq - chip->irq_base; + /* + * The mask field is a "precomputed bitmask for accessing the + * chip registers" which was introduced for the generic + * irqchip framework. As we don't use this framework, we can + * reuse this field for our own usage. + */ + d->mask = BIT(irq % GPIO_PER_REG); + + armada_37xx_irq_unmask(d); + + return 0; +} + static int armada_37xx_irqchip_register(struct platform_device *pdev, struct armada_37xx_pinctrl *info) { struct device_node *np = info->dev->of_node; - int nrirqs = info->data->nr_pins; struct gpio_chip *gc = &info->gpio_chip; struct irq_chip *irqchip = &info->irq_chip; struct resource res; @@ -666,8 +682,8 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev, irqchip->irq_unmask = armada_37xx_irq_unmask; irqchip->irq_set_wake = armada_37xx_irq_set_wake; irqchip->irq_set_type = armada_37xx_irq_set_type; + irqchip->irq_startup = armada_37xx_irq_startup; irqchip->name = info->data->name; - ret = gpiochip_irqchip_add(gc, irqchip, 0, handle_edge_irq, IRQ_TYPE_NONE); if (ret) { @@ -680,19 +696,6 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev, * controller. But we do not take advantage of this and use * the chained irq with all of them. */ - for (i = 0; i < nrirqs; i++) { - struct irq_data *d = irq_get_irq_data(gc->irq_base + i); - - /* - * The mask field is a "precomputed bitmask for - * accessing the chip registers" which was introduced - * for the generic irqchip framework. As we don't use - * this framework, we can reuse this field for our own - * usage. 
- */ - d->mask = BIT(i % GPIO_PER_REG); - } - for (i = 0; i < nr_irq_parent; i++) { int irq = irq_of_parse_and_map(np, i); diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index 38af1ec2df0c..3f6b34febbf1 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c @@ -36,6 +36,7 @@ #include <linux/pinctrl/pinconf.h> #include <linux/pinctrl/pinconf-generic.h> +#include "core.h" #include "pinctrl-utils.h" #include "pinctrl-amd.h" @@ -725,6 +726,69 @@ static const struct pinconf_ops amd_pinconf_ops = { .pin_config_group_set = amd_pinconf_group_set, }; +#ifdef CONFIG_PM_SLEEP +static bool amd_gpio_should_save(struct amd_gpio *gpio_dev, unsigned int pin) +{ + const struct pin_desc *pd = pin_desc_get(gpio_dev->pctrl, pin); + + if (!pd) + return false; + + /* + * Only restore the pin if it is actually in use by the kernel (or + * by userspace). + */ + if (pd->mux_owner || pd->gpio_owner || + gpiochip_line_is_irq(&gpio_dev->gc, pin)) + return true; + + return false; +} + +int amd_gpio_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct amd_gpio *gpio_dev = platform_get_drvdata(pdev); + struct pinctrl_desc *desc = gpio_dev->pctrl->desc; + int i; + + for (i = 0; i < desc->npins; i++) { + int pin = desc->pins[i].number; + + if (!amd_gpio_should_save(gpio_dev, pin)) + continue; + + gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin*4); + } + + return 0; +} + +int amd_gpio_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct amd_gpio *gpio_dev = platform_get_drvdata(pdev); + struct pinctrl_desc *desc = gpio_dev->pctrl->desc; + int i; + + for (i = 0; i < desc->npins; i++) { + int pin = desc->pins[i].number; + + if (!amd_gpio_should_save(gpio_dev, pin)) + continue; + + writel(gpio_dev->saved_regs[i], gpio_dev->base + pin*4); + } + + return 0; +} + +static const struct dev_pm_ops amd_gpio_pm_ops = { + SET_LATE_SYSTEM_SLEEP_PM_OPS(amd_gpio_suspend, + amd_gpio_resume) +}; +#endif + static struct pinctrl_desc amd_pinctrl_desc = { .pins = kerncz_pins, .npins = ARRAY_SIZE(kerncz_pins), @@ -764,6 +828,14 @@ static int amd_gpio_probe(struct platform_device *pdev) return irq_base; } +#ifdef CONFIG_PM_SLEEP + gpio_dev->saved_regs = devm_kcalloc(&pdev->dev, amd_pinctrl_desc.npins, + sizeof(*gpio_dev->saved_regs), + GFP_KERNEL); + if (!gpio_dev->saved_regs) + return -ENOMEM; +#endif + gpio_dev->pdev = pdev; gpio_dev->gc.direction_input = amd_gpio_direction_input; gpio_dev->gc.direction_output = amd_gpio_direction_output; @@ -853,6 +925,9 @@ static struct platform_driver amd_gpio_driver = { .driver = { .name = "amd_gpio", .acpi_match_table = ACPI_PTR(amd_gpio_acpi_match), +#ifdef CONFIG_PM_SLEEP + .pm = &amd_gpio_pm_ops, +#endif }, .probe = amd_gpio_probe, .remove = amd_gpio_remove, diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h index 5b1cb965c767..8fa453a59da5 100644 --- a/drivers/pinctrl/pinctrl-amd.h +++ b/drivers/pinctrl/pinctrl-amd.h @@ -97,6 +97,7 @@ struct amd_gpio { unsigned int hwbank_num; struct resource *res; struct platform_device *pdev; + u32 *saved_regs; }; /* KERNCZ configuration*/ diff --git a/drivers/pinctrl/sprd/Kconfig b/drivers/pinctrl/sprd/Kconfig index 6f4a7f9ac6fd..bc7f3fab22f1 100644 --- a/drivers/pinctrl/sprd/Kconfig +++ b/drivers/pinctrl/sprd/Kconfig @@ -4,6 +4,8 @@ config PINCTRL_SPRD bool "Spreadtrum pinctrl driver" + depends on OF + depends on ARCH_SPRD || COMPILE_TEST select PINMUX select PINCONF select GENERIC_PINCONF @@ 
-13,5 +15,6 @@ config PINCTRL_SPRD config PINCTRL_SPRD_SC9860 bool "Spreadtrum SC9860 pinctrl driver" + depends on PINCTRL_SPRD help Say Y here to enable Spreadtrum SC9860 pinctrl driver diff --git a/drivers/pinctrl/sprd/pinctrl-sprd.c b/drivers/pinctrl/sprd/pinctrl-sprd.c index 7e7b9ac7e836..63529911445c 100644 --- a/drivers/pinctrl/sprd/pinctrl-sprd.c +++ b/drivers/pinctrl/sprd/pinctrl-sprd.c @@ -353,13 +353,13 @@ static const struct pinctrl_ops sprd_pctrl_ops = { .dt_free_map = pinctrl_utils_free_map, }; -int sprd_pmx_get_function_count(struct pinctrl_dev *pctldev) +static int sprd_pmx_get_function_count(struct pinctrl_dev *pctldev) { return PIN_FUNC_MAX; } -const char *sprd_pmx_get_function_name(struct pinctrl_dev *pctldev, - unsigned int selector) +static const char *sprd_pmx_get_function_name(struct pinctrl_dev *pctldev, + unsigned int selector) { switch (selector) { case PIN_FUNC_1: @@ -375,10 +375,10 @@ const char *sprd_pmx_get_function_name(struct pinctrl_dev *pctldev, } } -int sprd_pmx_get_function_groups(struct pinctrl_dev *pctldev, - unsigned int selector, - const char * const **groups, - unsigned int * const num_groups) +static int sprd_pmx_get_function_groups(struct pinctrl_dev *pctldev, + unsigned int selector, + const char * const **groups, + unsigned int * const num_groups) { struct sprd_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); struct sprd_pinctrl_soc_info *info = pctl->info; @@ -400,7 +400,7 @@ static int sprd_pmx_set_mux(struct pinctrl_dev *pctldev, unsigned long reg; unsigned int val = 0; - if (group_selector > info->ngroups) + if (group_selector >= info->ngroups) return -EINVAL; switch (func_selector) { @@ -734,7 +734,7 @@ static int sprd_pinconf_group_get(struct pinctrl_dev *pctldev, struct sprd_pin_group *grp; unsigned int pin_id; - if (selector > info->ngroups) + if (selector >= info->ngroups) return -EINVAL; grp = &info->groups[selector]; @@ -753,7 +753,7 @@ static int sprd_pinconf_group_set(struct pinctrl_dev *pctldev, struct sprd_pin_group *grp; int ret, i; - if (selector > info->ngroups) + if (selector >= info->ngroups) return -EINVAL; grp = &info->groups[selector]; @@ -813,7 +813,7 @@ static void sprd_pinconf_group_dbg_show(struct pinctrl_dev *pctldev, const char *name; int i, ret; - if (selector > info->ngroups) + if (selector >= info->ngroups) return; grp = &info->groups[selector]; @@ -1100,12 +1100,16 @@ int sprd_pinctrl_remove(struct platform_device *pdev) void sprd_pinctrl_shutdown(struct platform_device *pdev) { - struct pinctrl *pinctl = devm_pinctrl_get(&pdev->dev); + struct pinctrl *pinctl; struct pinctrl_state *state; + pinctl = devm_pinctrl_get(&pdev->dev); + if (IS_ERR(pinctl)) + return; state = pinctrl_lookup_state(pinctl, "shutdown"); - if (!IS_ERR(state)) - pinctrl_select_state(pinctl, state); + if (IS_ERR(state)) + return; + pinctrl_select_state(pinctl, state); } MODULE_DESCRIPTION("SPREADTRUM Pin Controller Driver"); diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier.h b/drivers/pinctrl/uniphier/pinctrl-uniphier.h index c075ecb8e5db..0a3d2ac27503 100644 --- a/drivers/pinctrl/uniphier/pinctrl-uniphier.h +++ b/drivers/pinctrl/uniphier/pinctrl-uniphier.h @@ -17,7 +17,7 @@ #define __PINCTRL_UNIPHIER_H__ #include <linux/bitops.h> -#include <linux/bug.h> +#include <linux/build_bug.h> #include <linux/kernel.h> #include <linux/types.h> diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c index e8a44a9bc916..d8599736a41a 100644 --- a/drivers/platform/chrome/chromeos_laptop.c +++ 
b/drivers/platform/chrome/chromeos_laptop.c @@ -518,7 +518,7 @@ static struct chromeos_laptop cr48 = { .callback = chromeos_laptop_dmi_matched, \ .driver_data = (void *)&board_ -static struct dmi_system_id chromeos_laptop_dmi_table[] __initdata = { +static const struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = { { .ident = "Samsung Series 5 550", .matches = { diff --git a/drivers/platform/chrome/chromeos_pstore.c b/drivers/platform/chrome/chromeos_pstore.c index 308a853ac4f1..b0693fdec8c6 100644 --- a/drivers/platform/chrome/chromeos_pstore.c +++ b/drivers/platform/chrome/chromeos_pstore.c @@ -14,7 +14,7 @@ #include <linux/platform_device.h> #include <linux/pstore_ram.h> -static struct dmi_system_id chromeos_pstore_dmi_table[] __initdata = { +static const struct dmi_system_id chromeos_pstore_dmi_table[] __initconst = { { /* * Today all Chromebooks/boxes ship with Google_* as version and diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c index 2b6436d1b6a4..1baf720faf69 100644 --- a/drivers/platform/chrome/cros_ec_lpc.c +++ b/drivers/platform/chrome/cros_ec_lpc.c @@ -329,7 +329,7 @@ static const struct acpi_device_id cros_ec_lpc_acpi_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, cros_ec_lpc_acpi_device_ids); -static struct dmi_system_id cros_ec_lpc_dmi_table[] __initdata = { +static const struct dmi_system_id cros_ec_lpc_dmi_table[] __initconst = { { /* * Today all Chromebooks/boxes ship with Google_* as version and diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c index a8e4a539e704..6bcb750e1865 100644 --- a/drivers/platform/x86/compal-laptop.c +++ b/drivers/platform/x86/compal-laptop.c @@ -805,7 +805,7 @@ static int dmi_check_cb_extra(const struct dmi_system_id *id) return 1; } -static struct dmi_system_id __initdata compal_dmi_table[] = { +static const struct dmi_system_id compal_dmi_table[] __initconst = { { .ident = "FL90/IFL90", .matches = { diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c index 85de30f93a9c..56a8195096a2 100644 --- a/drivers/platform/x86/fujitsu-laptop.c +++ b/drivers/platform/x86/fujitsu-laptop.c @@ -254,10 +254,12 @@ static int bl_update_status(struct backlight_device *b) { struct acpi_device *device = bl_get_data(b); - if (b->props.power == FB_BLANK_POWERDOWN) - call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x3); - else - call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x0); + if (fext) { + if (b->props.power == FB_BLANK_POWERDOWN) + call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x3); + else + call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x0); + } return set_lcd_level(device, b->props.brightness); } diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c index 458e6c948c11..c26baf77938e 100644 --- a/drivers/platform/x86/hdaps.c +++ b/drivers/platform/x86/hdaps.c @@ -514,7 +514,7 @@ static int __init hdaps_dmi_match_invert(const struct dmi_system_id *id) "ThinkPad T42p", so the order of the entries matters. If your ThinkPad is not recognized, please update to latest BIOS. This is especially the case for some R52 ThinkPads. 
*/ -static struct dmi_system_id __initdata hdaps_whitelist[] = { +static const struct dmi_system_id hdaps_whitelist[] __initconst = { HDAPS_DMI_MATCH_INVERT("IBM", "ThinkPad R50p", HDAPS_BOTH_AXES), HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R50"), HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R51"), diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c index 610ac8391caa..18d55cee5bcd 100644 --- a/drivers/platform/x86/ibm_rtl.c +++ b/drivers/platform/x86/ibm_rtl.c @@ -227,7 +227,7 @@ static void rtl_teardown_sysfs(void) { } -static struct dmi_system_id __initdata ibm_rtl_dmi_table[] = { +static const struct dmi_system_id ibm_rtl_dmi_table[] __initconst = { { \ .matches = { \ DMI_MATCH(DMI_SYS_VENDOR, "IBM"), \ diff --git a/drivers/platform/x86/intel_oaktrail.c b/drivers/platform/x86/intel_oaktrail.c index 6aa33c4a809f..5747f63c8d9f 100644 --- a/drivers/platform/x86/intel_oaktrail.c +++ b/drivers/platform/x86/intel_oaktrail.c @@ -299,7 +299,7 @@ static int dmi_check_cb(const struct dmi_system_id *id) return 0; } -static struct dmi_system_id __initdata oaktrail_dmi_table[] = { +static const struct dmi_system_id oaktrail_dmi_table[] __initconst = { { .ident = "OakTrail platform", .matches = { diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c index 8f98c211b440..4f3de2a8c4df 100644 --- a/drivers/platform/x86/mlx-platform.c +++ b/drivers/platform/x86/mlx-platform.c @@ -247,7 +247,7 @@ static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi) return 1; }; -static struct dmi_system_id mlxplat_dmi_table[] __initdata = { +static const struct dmi_system_id mlxplat_dmi_table[] __initconst = { { .callback = mlxplat_dmi_default_matched, .matches = { diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c index 61b9014d2610..d5bfcc602090 100644 --- a/drivers/platform/x86/msi-laptop.c +++ b/drivers/platform/x86/msi-laptop.c @@ -605,7 +605,7 @@ static int dmi_check_cb(const struct dmi_system_id *dmi) return 1; } -static struct dmi_system_id __initdata msi_dmi_table[] = { +static const struct dmi_system_id msi_dmi_table[] __initconst = { { .ident = "MSI S270", .matches = { diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c index 0c703feaeb88..d3cb26f6df73 100644 --- a/drivers/platform/x86/samsung-laptop.c +++ b/drivers/platform/x86/samsung-laptop.c @@ -1567,7 +1567,7 @@ static int __init samsung_dmi_matched(const struct dmi_system_id *d) return 0; } -static struct dmi_system_id __initdata samsung_dmi_table[] = { +static const struct dmi_system_id samsung_dmi_table[] __initconst = { { .matches = { DMI_MATCH(DMI_SYS_VENDOR, diff --git a/drivers/platform/x86/samsung-q10.c b/drivers/platform/x86/samsung-q10.c index e6aac725a0af..a2fb7fbc3273 100644 --- a/drivers/platform/x86/samsung-q10.c +++ b/drivers/platform/x86/samsung-q10.c @@ -95,7 +95,7 @@ static int __init dmi_check_callback(const struct dmi_system_id *id) return 1; } -static struct dmi_system_id __initdata samsungq10_dmi_table[] = { +static const struct dmi_system_id samsungq10_dmi_table[] __initconst = { { .ident = "Samsung Q10", .matches = { diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index bfae79534f44..a16cea2be9c3 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c @@ -4880,7 +4880,7 @@ static struct acpi_driver sony_pic_driver = { .drv.pm = &sony_pic_pm, }; -static struct dmi_system_id __initdata sonypi_dmi_table[] = { +static 
const struct dmi_system_id sonypi_dmi_table[] __initconst = { { .ident = "Sony Vaio", .matches = { diff --git a/drivers/platform/x86/toshiba-wmi.c b/drivers/platform/x86/toshiba-wmi.c index 440528676170..03d7620cd6d7 100644 --- a/drivers/platform/x86/toshiba-wmi.c +++ b/drivers/platform/x86/toshiba-wmi.c @@ -64,7 +64,7 @@ static void toshiba_wmi_notify(u32 value, void *context) kfree(response.pointer); } -static struct dmi_system_id toshiba_wmi_dmi_table[] __initdata = { +static const struct dmi_system_id toshiba_wmi_dmi_table[] __initconst = { { .ident = "Toshiba laptop", .matches = { diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c index 0ced908e7aa8..e681140b85d8 100644 --- a/drivers/pnp/pnpbios/core.c +++ b/drivers/pnp/pnpbios/core.c @@ -495,7 +495,7 @@ static int __init exploding_pnp_bios(const struct dmi_system_id *d) return 0; } -static struct dmi_system_id pnpbios_dmi_table[] __initdata = { +static const struct dmi_system_id pnpbios_dmi_table[] __initconst = { { /* PnPBIOS GPF on boot */ .callback = exploding_pnp_bios, .ident = "Higraded P14H", diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig index 52d5251660b9..e0c393214264 100644 --- a/drivers/reset/Kconfig +++ b/drivers/reset/Kconfig @@ -47,6 +47,12 @@ config RESET_IMX7 help This enables the reset controller driver for i.MX7 SoCs. +config RESET_LANTIQ + bool "Lantiq XWAY Reset Driver" if COMPILE_TEST + default SOC_TYPE_XWAY + help + This enables the reset controller driver for Lantiq / Intel XWAY SoCs. + config RESET_LPC18XX bool "LPC18xx/43xx Reset Driver" if COMPILE_TEST default ARCH_LPC18XX diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile index b62783f50fe5..d368367110e5 100644 --- a/drivers/reset/Makefile +++ b/drivers/reset/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_RESET_ATH79) += reset-ath79.o obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o obj-$(CONFIG_RESET_HSDK_V1) += reset-hsdk-v1.o obj-$(CONFIG_RESET_IMX7) += reset-imx7.o +obj-$(CONFIG_RESET_LANTIQ) += reset-lantiq.o obj-$(CONFIG_RESET_LPC18XX) += reset-lpc18xx.o obj-$(CONFIG_RESET_MESON) += reset-meson.o obj-$(CONFIG_RESET_OXNAS) += reset-oxnas.o diff --git a/drivers/reset/reset-lantiq.c b/drivers/reset/reset-lantiq.c new file mode 100644 index 000000000000..11a582e50d30 --- /dev/null +++ b/drivers/reset/reset-lantiq.c @@ -0,0 +1,212 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + * Copyright (C) 2010 John Crispin <blogic@phrozen.org> + * Copyright (C) 2013-2015 Lantiq Beteiligungs-GmbH & Co.KG + * Copyright (C) 2016 Martin Blumenstingl <martin.blumenstingl@googlemail.com> + * Copyright (C) 2017 Hauke Mehrtens <hauke@hauke-m.de> + */ + +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/regmap.h> +#include <linux/reset-controller.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/property.h> + +#define LANTIQ_RCU_RESET_TIMEOUT 10000 + +struct lantiq_rcu_reset_priv { + struct reset_controller_dev rcdev; + struct device *dev; + struct regmap *regmap; + u32 reset_offset; + u32 status_offset; +}; + +static struct lantiq_rcu_reset_priv *to_lantiq_rcu_reset_priv( + struct reset_controller_dev *rcdev) +{ + return container_of(rcdev, struct lantiq_rcu_reset_priv, rcdev); +} + +static int lantiq_rcu_reset_status(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct lantiq_rcu_reset_priv *priv = to_lantiq_rcu_reset_priv(rcdev); + unsigned int status = (id >> 8) & 0x1f; + u32 val; + int ret; + + ret = regmap_read(priv->regmap, priv->status_offset, &val); + if (ret) + return ret; + + return !!(val & BIT(status)); +} + +static int lantiq_rcu_reset_status_timeout(struct reset_controller_dev *rcdev, + unsigned long id, bool assert) +{ + int ret; + int retry = LANTIQ_RCU_RESET_TIMEOUT; + + do { + ret = lantiq_rcu_reset_status(rcdev, id); + if (ret < 0) + return ret; + if (ret == assert) + return 0; + usleep_range(20, 40); + } while (--retry); + + return -ETIMEDOUT; +} + +static int lantiq_rcu_reset_update(struct reset_controller_dev *rcdev, + unsigned long id, bool assert) +{ + struct lantiq_rcu_reset_priv *priv = to_lantiq_rcu_reset_priv(rcdev); + unsigned int set = id & 0x1f; + u32 val = assert ? BIT(set) : 0; + int ret; + + ret = regmap_update_bits(priv->regmap, priv->reset_offset, BIT(set), + val); + if (ret) { + dev_err(priv->dev, "Failed to set reset bit %u\n", set); + return ret; + } + + + ret = lantiq_rcu_reset_status_timeout(rcdev, id, assert); + if (ret) + dev_err(priv->dev, "Failed to %s bit %u\n", + assert ? 
"assert" : "deassert", set); + + return ret; +} + +static int lantiq_rcu_reset_assert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + return lantiq_rcu_reset_update(rcdev, id, true); +} + +static int lantiq_rcu_reset_deassert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + return lantiq_rcu_reset_update(rcdev, id, false); +} + +static int lantiq_rcu_reset_reset(struct reset_controller_dev *rcdev, + unsigned long id) +{ + int ret; + + ret = lantiq_rcu_reset_assert(rcdev, id); + if (ret) + return ret; + + return lantiq_rcu_reset_deassert(rcdev, id); +} + +static const struct reset_control_ops lantiq_rcu_reset_ops = { + .assert = lantiq_rcu_reset_assert, + .deassert = lantiq_rcu_reset_deassert, + .status = lantiq_rcu_reset_status, + .reset = lantiq_rcu_reset_reset, +}; + +static int lantiq_rcu_reset_of_parse(struct platform_device *pdev, + struct lantiq_rcu_reset_priv *priv) +{ + struct device *dev = &pdev->dev; + const __be32 *offset; + + priv->regmap = syscon_node_to_regmap(dev->of_node->parent); + if (IS_ERR(priv->regmap)) { + dev_err(&pdev->dev, "Failed to lookup RCU regmap\n"); + return PTR_ERR(priv->regmap); + } + + offset = of_get_address(dev->of_node, 0, NULL, NULL); + if (!offset) { + dev_err(&pdev->dev, "Failed to get RCU reset offset\n"); + return -ENOENT; + } + priv->reset_offset = __be32_to_cpu(*offset); + + offset = of_get_address(dev->of_node, 1, NULL, NULL); + if (!offset) { + dev_err(&pdev->dev, "Failed to get RCU status offset\n"); + return -ENOENT; + } + priv->status_offset = __be32_to_cpu(*offset); + + return 0; +} + +static int lantiq_rcu_reset_xlate(struct reset_controller_dev *rcdev, + const struct of_phandle_args *reset_spec) +{ + unsigned int status, set; + + set = reset_spec->args[0]; + status = reset_spec->args[1]; + + if (set >= rcdev->nr_resets || status >= rcdev->nr_resets) + return -EINVAL; + + return (status << 8) | set; +} + +static int lantiq_rcu_reset_probe(struct platform_device *pdev) +{ + struct lantiq_rcu_reset_priv *priv; + int err; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->dev = &pdev->dev; + platform_set_drvdata(pdev, priv); + + err = lantiq_rcu_reset_of_parse(pdev, priv); + if (err) + return err; + + priv->rcdev.ops = &lantiq_rcu_reset_ops; + priv->rcdev.owner = THIS_MODULE; + priv->rcdev.of_node = pdev->dev.of_node; + priv->rcdev.nr_resets = 32; + priv->rcdev.of_xlate = lantiq_rcu_reset_xlate; + priv->rcdev.of_reset_n_cells = 2; + + return reset_controller_register(&priv->rcdev); +} + +static const struct of_device_id lantiq_rcu_reset_dt_ids[] = { + { .compatible = "lantiq,danube-reset", }, + { .compatible = "lantiq,xrx200-reset", }, + { }, +}; +MODULE_DEVICE_TABLE(of, lantiq_rcu_reset_dt_ids); + +static struct platform_driver lantiq_rcu_reset_driver = { + .probe = lantiq_rcu_reset_probe, + .driver = { + .name = "lantiq-reset", + .of_match_table = lantiq_rcu_reset_dt_ids, + }, +}; +module_platform_driver(lantiq_rcu_reset_driver); + +MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>"); +MODULE_DESCRIPTION("Lantiq XWAY RCU Reset Controller Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index ea19b4ff87a2..29f35e29d480 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -1644,7 +1644,9 @@ void dasd_generic_handle_state_change(struct dasd_device *device) dasd_schedule_device_bh(device); if (device->block) { dasd_schedule_block_bh(device->block); - 
blk_mq_run_hw_queues(device->block->request_queue, true); + if (device->block->request_queue) + blk_mq_run_hw_queues(device->block->request_queue, + true); } } EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change); @@ -3759,7 +3761,9 @@ int dasd_generic_path_operational(struct dasd_device *device) dasd_schedule_device_bh(device); if (device->block) { dasd_schedule_block_bh(device->block); - blk_mq_run_hw_queues(device->block->request_queue, true); + if (device->block->request_queue) + blk_mq_run_hw_queues(device->block->request_queue, + true); } if (!device->stopped) @@ -4025,7 +4029,9 @@ int dasd_generic_restore_device(struct ccw_device *cdev) if (device->block) { dasd_schedule_block_bh(device->block); - blk_mq_run_hw_queues(device->block->request_queue, true); + if (device->block->request_queue) + blk_mq_run_hw_queues(device->block->request_queue, + true); } clear_bit(DASD_FLAG_SUSPENDED, &device->flags); diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index 2e7fd966c515..eb51893c74a4 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c @@ -249,7 +249,7 @@ static void scm_request_requeue(struct scm_request *scmrq) static void scm_request_finish(struct scm_request *scmrq) { struct scm_blk_dev *bdev = scmrq->bdev; - int *error; + blk_status_t *error; int i; for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) { @@ -415,7 +415,7 @@ void scm_blk_irq(struct scm_device *scmdev, void *data, blk_status_t error) static void scm_blk_request_done(struct request *req) { - int *error = blk_mq_rq_to_pdu(req); + blk_status_t *error = blk_mq_rq_to_pdu(req); blk_mq_end_request(req, *error); } @@ -450,7 +450,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) atomic_set(&bdev->queued_reqs, 0); bdev->tag_set.ops = &scm_mq_ops; - bdev->tag_set.cmd_size = sizeof(int); + bdev->tag_set.cmd_size = sizeof(blk_status_t); bdev->tag_set.nr_hw_queues = nr_requests; bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests; bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 489b583f263d..e5c32f4b5287 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -1225,10 +1225,16 @@ static int device_is_disconnected(struct ccw_device *cdev) static int recovery_check(struct device *dev, void *data) { struct ccw_device *cdev = to_ccwdev(dev); + struct subchannel *sch; int *redo = data; spin_lock_irq(cdev->ccwlock); switch (cdev->private->state) { + case DEV_STATE_ONLINE: + sch = to_subchannel(cdev->dev.parent); + if ((sch->schib.pmcw.pam & sch->opm) == sch->vpm) + break; + /* fall through */ case DEV_STATE_DISCONNECTED: CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n", cdev->private->dev_id.ssid, @@ -1260,7 +1266,7 @@ static void recovery_work_func(struct work_struct *unused) } spin_unlock_irq(&recovery_lock); } else - CIO_MSG_EVENT(4, "recovery: end\n"); + CIO_MSG_EVENT(3, "recovery: end\n"); } static DECLARE_WORK(recovery_work, recovery_work_func); @@ -1274,11 +1280,11 @@ static void recovery_func(unsigned long data) schedule_work(&recovery_work); } -static void ccw_device_schedule_recovery(void) +void ccw_device_schedule_recovery(void) { unsigned long flags; - CIO_MSG_EVENT(4, "recovery: schedule\n"); + CIO_MSG_EVENT(3, "recovery: schedule\n"); spin_lock_irqsave(&recovery_lock, flags); if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) { recovery_phase = 0; diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h index 
ec497af99dd8..69cb70f080a5 100644 --- a/drivers/s390/cio/device.h +++ b/drivers/s390/cio/device.h @@ -134,6 +134,7 @@ void ccw_device_set_disconnected(struct ccw_device *cdev); void ccw_device_set_notoper(struct ccw_device *cdev); void ccw_device_set_timeout(struct ccw_device *, int); +void ccw_device_schedule_recovery(void); /* Channel measurement facility related */ void retry_set_schib(struct ccw_device *cdev); diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 12016e32e519..f98ea674c3d8 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -476,6 +476,17 @@ static void create_fake_irb(struct irb *irb, int type) } } +static void ccw_device_handle_broken_paths(struct ccw_device *cdev) +{ + struct subchannel *sch = to_subchannel(cdev->dev.parent); + u8 broken_paths = (sch->schib.pmcw.pam & sch->opm) ^ sch->vpm; + + if (broken_paths && (cdev->private->path_broken_mask != broken_paths)) + ccw_device_schedule_recovery(); + + cdev->private->path_broken_mask = broken_paths; +} + void ccw_device_verify_done(struct ccw_device *cdev, int err) { struct subchannel *sch; @@ -508,6 +519,7 @@ callback: memset(&cdev->private->irb, 0, sizeof(struct irb)); } ccw_device_report_path_events(cdev); + ccw_device_handle_broken_paths(cdev); break; case -ETIME: case -EUSERS: diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h index 220f49145b2f..9a1b56b2df3e 100644 --- a/drivers/s390/cio/io_sch.h +++ b/drivers/s390/cio/io_sch.h @@ -131,6 +131,8 @@ struct ccw_device_private { not operable */ u8 path_gone_mask; /* mask of paths, that became unavailable */ u8 path_new_mask; /* mask of paths, that became available */ + u8 path_broken_mask; /* mask of paths, which were found to be + unusable */ struct { unsigned int fast:1; /* post with "channel end" */ unsigned int repall:1; /* report every interrupt status */ diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index a64285ab0728..af3e4d3f9735 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -699,13 +699,13 @@ static void _aac_probe_container1(void * context, struct fib * fibptr) int status; dresp = (struct aac_mount *) fib_data(fibptr); - if (!(fibptr->dev->supplement_adapter_info.supported_options2 & - AAC_OPTION_VARIABLE_BLOCK_SIZE)) + if (!aac_supports_2T(fibptr->dev)) { dresp->mnt[0].capacityhigh = 0; - if ((le32_to_cpu(dresp->status) != ST_OK) || - (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) { - _aac_probe_container2(context, fibptr); - return; + if ((le32_to_cpu(dresp->status) == ST_OK) && + (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) { + _aac_probe_container2(context, fibptr); + return; + } } scsicmd = (struct scsi_cmnd *) context; diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 92fabf2b0c24..403a639574e5 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -2701,6 +2701,11 @@ static inline int aac_is_src(struct aac_dev *dev) return 0; } +static inline int aac_supports_2T(struct aac_dev *dev) +{ + return (dev->adapter_info.options & AAC_OPT_NEW_COMM_64); +} + char * get_container_type(unsigned type); extern int numacb; extern char aac_driver_version[]; diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 87cc4a93e637..62beb2596466 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -906,12 +906,14 @@ static int aac_eh_dev_reset(struct scsi_cmnd *cmd) bus = aac_logical_to_phys(scmd_channel(cmd)); cid = 
scmd_id(cmd); - info = &aac->hba_map[bus][cid]; - if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS || - info->devtype != AAC_DEVTYPE_NATIVE_RAW) + + if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS) return FAILED; - if (info->reset_state > 0) + info = &aac->hba_map[bus][cid]; + + if (info->devtype != AAC_DEVTYPE_NATIVE_RAW && + info->reset_state > 0) return FAILED; pr_err("%s: Host adapter reset request. SCSI hang ?\n", @@ -962,12 +964,14 @@ static int aac_eh_target_reset(struct scsi_cmnd *cmd) bus = aac_logical_to_phys(scmd_channel(cmd)); cid = scmd_id(cmd); - info = &aac->hba_map[bus][cid]; - if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS || - info->devtype != AAC_DEVTYPE_NATIVE_RAW) + + if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS) return FAILED; - if (info->reset_state > 0) + info = &aac->hba_map[bus][cid]; + + if (info->devtype != AAC_DEVTYPE_NATIVE_RAW && + info->reset_state > 0) return FAILED; pr_err("%s: Host adapter reset request. SCSI hang ?\n", diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c index 48c2b2b34b72..0c9361c87ec8 100644 --- a/drivers/scsi/aacraid/src.c +++ b/drivers/scsi/aacraid/src.c @@ -740,6 +740,8 @@ static void aac_send_iop_reset(struct aac_dev *dev) aac_set_intx_mode(dev); src_writel(dev, MUnit.IDR, IOP_SRC_RESET_MASK); + + msleep(5000); } static void aac_send_hardware_soft_reset(struct aac_dev *dev) diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c index 690816f3c6af..421fe869a11e 100644 --- a/drivers/scsi/arm/acornscsi.c +++ b/drivers/scsi/arm/acornscsi.c @@ -2725,9 +2725,9 @@ int acornscsi_abort(struct scsi_cmnd *SCpnt) * Params : SCpnt - command causing reset * Returns : one of SCSI_RESET_ macros */ -int acornscsi_host_reset(struct Scsi_Host *shpnt) +int acornscsi_host_reset(struct scsi_cmnd *SCpnt) { - AS_Host *host = (AS_Host *)shpnt->hostdata; + AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata; struct scsi_cmnd *SCptr; host->stats.resets += 1; @@ -2741,7 +2741,7 @@ int acornscsi_host_reset(struct Scsi_Host *shpnt) printk(KERN_WARNING "acornscsi_reset: "); print_sbic_status(asr, ssr, host->scsi.phase); - for (devidx = 0; devidx < 9; devidx ++) { + for (devidx = 0; devidx < 9; devidx++) acornscsi_dumplog(host, devidx); } #endif diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 7e7ae786121b..100bc4c8798d 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -6131,6 +6131,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) "Extents and RPI headers enabled.\n"); } mempool_free(mboxq, phba->mbox_mem_pool); + rc = -EIO; goto out_free_bsmbx; } diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index 79ba3ce063a4..23bdb1ca106e 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -884,7 +884,7 @@ out_err: wcqe->total_data_placed); nCmd->transferred_length = 0; nCmd->rcv_rsplen = 0; - nCmd->status = NVME_SC_FC_TRANSPORT_ERROR; + nCmd->status = NVME_SC_INTERNAL; } } diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index 1f59e7a74c7b..6b33a1f24f56 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -180,7 +180,7 @@ static void qla_nvme_sp_done(void *ptr, int res) goto rel; if (unlikely(res == QLA_FUNCTION_FAILED)) - fd->status = NVME_SC_FC_TRANSPORT_ERROR; + fd->status = NVME_SC_INTERNAL; else fd->status = 0; diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 38942050b265..dab876c65473 100644 --- 
a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -580,7 +580,8 @@ int scsi_check_sense(struct scsi_cmnd *scmd) if (sshdr.asc == 0x20 || /* Invalid command operation code */ sshdr.asc == 0x21 || /* Logical block address out of range */ sshdr.asc == 0x24 || /* Invalid field in cdb */ - sshdr.asc == 0x26) { /* Parameter value invalid */ + sshdr.asc == 0x26 || /* Parameter value invalid */ + sshdr.asc == 0x27) { /* Write protected */ set_host_byte(scmd, DID_TARGET_FAILURE); } return SUCCESS; diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 3c6bc0081fcb..cbd4495d0ff9 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -2739,7 +2739,8 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel, list_for_each_entry(rport, &fc_host->rports, peers) { - if ((rport->port_state == FC_PORTSTATE_BLOCKED) && + if ((rport->port_state == FC_PORTSTATE_BLOCKED || + rport->port_state == FC_PORTSTATE_NOTPRESENT) && (rport->channel == channel)) { switch (fc_host->tgtid_bind_type) { @@ -2876,7 +2877,6 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel, memcpy(&rport->port_name, &ids->port_name, sizeof(rport->port_name)); rport->port_id = ids->port_id; - rport->roles = ids->roles; rport->port_state = FC_PORTSTATE_ONLINE; rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT; @@ -2885,15 +2885,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel, fci->f->dd_fcrport_size); spin_unlock_irqrestore(shost->host_lock, flags); - if (ids->roles & FC_PORT_ROLE_FCP_TARGET) { - scsi_target_unblock(&rport->dev, SDEV_RUNNING); - - /* initiate a scan of the target */ - spin_lock_irqsave(shost->host_lock, flags); - rport->flags |= FC_RPORT_SCAN_PENDING; - scsi_queue_work(shost, &rport->scan_work); - spin_unlock_irqrestore(shost->host_lock, flags); - } + fc_remote_port_rolechg(rport, ids->roles); return rport; } } @@ -3571,7 +3563,7 @@ fc_vport_sched_delete(struct work_struct *work) static enum blk_eh_timer_return fc_bsg_job_timeout(struct request *req) { - struct bsg_job *job = (void *) req->special; + struct bsg_job *job = blk_mq_rq_to_pdu(req); struct Scsi_Host *shost = fc_bsg_to_shost(job); struct fc_rport *rport = fc_bsg_to_rport(job); struct fc_internal *i = to_fc_internal(shost->transportt); diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 8934f19bce8e..0190aeff5f7f 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -3689,7 +3689,7 @@ iscsi_if_rx(struct sk_buff *skb) uint32_t group; nlh = nlmsg_hdr(skb); - if (nlh->nlmsg_len < sizeof(*nlh) || + if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) || skb->len < nlh->nlmsg_len) { break; } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 11c1738c2100..fb9f8b5f4673 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2915,8 +2915,6 @@ static void sd_read_block_limits(struct scsi_disk *sdkp) sd_config_discard(sdkp, SD_LBP_WS16); else if (sdkp->lbpws10) sd_config_discard(sdkp, SD_LBP_WS10); - else if (sdkp->lbpu && sdkp->max_unmap_blocks) - sd_config_discard(sdkp, SD_LBP_UNMAP); else sd_config_discard(sdkp, SD_LBP_DISABLE); } diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index cf0e71db9e51..0419c2298eab 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -828,6 +828,39 @@ static int max_sectors_bytes(struct request_queue *q) return max_sectors << 9; } +static void +sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo) +{ + Sg_request *srp; + int val; + unsigned int ms; + + val = 
0; + list_for_each_entry(srp, &sfp->rq_list, entry) { + if (val > SG_MAX_QUEUE) + break; + rinfo[val].req_state = srp->done + 1; + rinfo[val].problem = + srp->header.masked_status & + srp->header.host_status & + srp->header.driver_status; + if (srp->done) + rinfo[val].duration = + srp->header.duration; + else { + ms = jiffies_to_msecs(jiffies); + rinfo[val].duration = + (ms > srp->header.duration) ? + (ms - srp->header.duration) : 0; + } + rinfo[val].orphan = srp->orphan; + rinfo[val].sg_io_owned = srp->sg_io_owned; + rinfo[val].pack_id = srp->header.pack_id; + rinfo[val].usr_ptr = srp->header.usr_ptr; + val++; + } +} + static long sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) { @@ -1012,38 +1045,13 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) return -EFAULT; else { sg_req_info_t *rinfo; - unsigned int ms; - rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE, - GFP_KERNEL); + rinfo = kzalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE, + GFP_KERNEL); if (!rinfo) return -ENOMEM; read_lock_irqsave(&sfp->rq_list_lock, iflags); - val = 0; - list_for_each_entry(srp, &sfp->rq_list, entry) { - if (val >= SG_MAX_QUEUE) - break; - memset(&rinfo[val], 0, SZ_SG_REQ_INFO); - rinfo[val].req_state = srp->done + 1; - rinfo[val].problem = - srp->header.masked_status & - srp->header.host_status & - srp->header.driver_status; - if (srp->done) - rinfo[val].duration = - srp->header.duration; - else { - ms = jiffies_to_msecs(jiffies); - rinfo[val].duration = - (ms > srp->header.duration) ? - (ms - srp->header.duration) : 0; - } - rinfo[val].orphan = srp->orphan; - rinfo[val].sg_io_owned = srp->sg_io_owned; - rinfo[val].pack_id = srp->header.pack_id; - rinfo[val].usr_ptr = srp->header.usr_ptr; - val++; - } + sg_fill_request_table(sfp, rinfo); read_unlock_irqrestore(&sfp->rq_list_lock, iflags); result = __copy_to_user(p, rinfo, SZ_SG_REQ_INFO * SG_MAX_QUEUE); diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile index 280a6a91a9e2..2fcaff864584 100644 --- a/drivers/soc/Makefile +++ b/drivers/soc/Makefile @@ -9,6 +9,7 @@ obj-$(CONFIG_ARCH_DOVE) += dove/ obj-$(CONFIG_MACH_DOVE) += dove/ obj-y += fsl/ obj-$(CONFIG_ARCH_MXC) += imx/ +obj-$(CONFIG_SOC_XWAY) += lantiq/ obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/ obj-$(CONFIG_ARCH_MESON) += amlogic/ obj-$(CONFIG_ARCH_QCOM) += qcom/ diff --git a/drivers/soc/lantiq/Makefile b/drivers/soc/lantiq/Makefile new file mode 100644 index 000000000000..be9e866d53e5 --- /dev/null +++ b/drivers/soc/lantiq/Makefile @@ -0,0 +1,2 @@ +obj-y += fpi-bus.o +obj-$(CONFIG_XRX200_PHY_FW) += gphy.o diff --git a/drivers/soc/lantiq/fpi-bus.c b/drivers/soc/lantiq/fpi-bus.c new file mode 100644 index 000000000000..a671c9984c4c --- /dev/null +++ b/drivers/soc/lantiq/fpi-bus.c @@ -0,0 +1,87 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + * Copyright (C) 2011-2015 John Crispin <blogic@phrozen.org> + * Copyright (C) 2015 Martin Blumenstingl <martin.blumenstingl@googlemail.com> + * Copyright (C) 2017 Hauke Mehrtens <hauke@hauke-m.de> + */ + +#include <linux/device.h> +#include <linux/err.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/property.h> +#include <linux/regmap.h> + +#include <lantiq_soc.h> + +#define XBAR_ALWAYS_LAST 0x430 +#define XBAR_FPI_BURST_EN BIT(1) +#define XBAR_AHB_BURST_EN BIT(2) + +#define RCU_VR9_BE_AHB1S 0x00000008 + +static int ltq_fpi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct resource *res_xbar; + struct regmap *rcu_regmap; + void __iomem *xbar_membase; + u32 rcu_ahb_endianness_reg_offset; + int ret; + + res_xbar = platform_get_resource(pdev, IORESOURCE_MEM, 0); + xbar_membase = devm_ioremap_resource(dev, res_xbar); + if (IS_ERR(xbar_membase)) + return PTR_ERR(xbar_membase); + + /* RCU configuration is optional */ + rcu_regmap = syscon_regmap_lookup_by_phandle(np, "lantiq,rcu"); + if (IS_ERR(rcu_regmap)) + return PTR_ERR(rcu_regmap); + + ret = device_property_read_u32(dev, "lantiq,offset-endianness", + &rcu_ahb_endianness_reg_offset); + if (ret) { + dev_err(&pdev->dev, "Failed to get RCU reg offset\n"); + return ret; + } + + ret = regmap_update_bits(rcu_regmap, rcu_ahb_endianness_reg_offset, + RCU_VR9_BE_AHB1S, RCU_VR9_BE_AHB1S); + if (ret) { + dev_warn(&pdev->dev, + "Failed to configure RCU AHB endianness\n"); + return ret; + } + + /* disable fpi burst */ + ltq_w32_mask(XBAR_FPI_BURST_EN, 0, xbar_membase + XBAR_ALWAYS_LAST); + + return of_platform_populate(dev->of_node, NULL, NULL, dev); +} + +static const struct of_device_id ltq_fpi_match[] = { + { .compatible = "lantiq,xrx200-fpi" }, + {}, +}; +MODULE_DEVICE_TABLE(of, ltq_fpi_match); + +static struct platform_driver ltq_fpi_driver = { + .probe = ltq_fpi_probe, + .driver = { + .name = "fpi-xway", + .of_match_table = ltq_fpi_match, + }, +}; + +module_platform_driver(ltq_fpi_driver); + +MODULE_DESCRIPTION("Lantiq FPI bus driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/soc/lantiq/gphy.c b/drivers/soc/lantiq/gphy.c new file mode 100644 index 000000000000..8d8659463b3e --- /dev/null +++ b/drivers/soc/lantiq/gphy.c @@ -0,0 +1,260 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + * Copyright (C) 2012 John Crispin <blogic@phrozen.org> + * Copyright (C) 2016 Martin Blumenstingl <martin.blumenstingl@googlemail.com> + * Copyright (C) 2017 Hauke Mehrtens <hauke@hauke-m.de> + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/firmware.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/reboot.h> +#include <linux/regmap.h> +#include <linux/reset.h> +#include <linux/of_device.h> +#include <linux/of_platform.h> +#include <linux/property.h> +#include <dt-bindings/mips/lantiq_rcu_gphy.h> + +#include <lantiq_soc.h> + +#define XRX200_GPHY_FW_ALIGN (16 * 1024) + +struct xway_gphy_priv { + struct clk *gphy_clk_gate; + struct reset_control *gphy_reset; + struct reset_control *gphy_reset2; + struct notifier_block gphy_reboot_nb; + void __iomem *membase; + char *fw_name; +}; + +struct xway_gphy_match_data { + char *fe_firmware_name; + char *ge_firmware_name; +}; + +static const struct xway_gphy_match_data xrx200a1x_gphy_data = { + .fe_firmware_name = "lantiq/xrx200_phy22f_a14.bin", + .ge_firmware_name = "lantiq/xrx200_phy11g_a14.bin", +}; + +static const struct xway_gphy_match_data xrx200a2x_gphy_data = { + .fe_firmware_name = "lantiq/xrx200_phy22f_a22.bin", + .ge_firmware_name = "lantiq/xrx200_phy11g_a22.bin", +}; + +static const struct xway_gphy_match_data xrx300_gphy_data = { + .fe_firmware_name = "lantiq/xrx300_phy22f_a21.bin", + .ge_firmware_name = "lantiq/xrx300_phy11g_a21.bin", +}; + +static const struct of_device_id xway_gphy_match[] = { + { .compatible = "lantiq,xrx200a1x-gphy", .data = &xrx200a1x_gphy_data }, + { .compatible = "lantiq,xrx200a2x-gphy", .data = &xrx200a2x_gphy_data }, + { .compatible = "lantiq,xrx300-gphy", .data = &xrx300_gphy_data }, + { .compatible = "lantiq,xrx330-gphy", .data = &xrx300_gphy_data }, + {}, +}; +MODULE_DEVICE_TABLE(of, xway_gphy_match); + +static struct xway_gphy_priv *to_xway_gphy_priv(struct notifier_block *nb) +{ + return container_of(nb, struct xway_gphy_priv, gphy_reboot_nb); +} + +static int xway_gphy_reboot_notify(struct notifier_block *reboot_nb, + unsigned long code, void *unused) +{ + struct xway_gphy_priv *priv = to_xway_gphy_priv(reboot_nb); + + if (priv) { + reset_control_assert(priv->gphy_reset); + reset_control_assert(priv->gphy_reset2); + } + + return NOTIFY_DONE; +} + +static int xway_gphy_load(struct device *dev, struct xway_gphy_priv *priv, + dma_addr_t *dev_addr) +{ + const struct firmware *fw; + void *fw_addr; + dma_addr_t dma_addr; + size_t size; + int ret; + + ret = request_firmware(&fw, priv->fw_name, dev); + if (ret) { + dev_err(dev, "failed to load firmware: %s, error: %i\n", + priv->fw_name, ret); + return ret; + } + + /* + * GPHY cores need the firmware code in a persistent and contiguous + * memory area with a 16 kB boundary aligned start address. 
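/*
 * Illustrative sketch, not taken from this patch: the allocation done just
 * below relies on the usual over-allocate-and-align idiom. Requesting
 * size + align bytes guarantees that an align-boundary window of at least
 * size bytes fits inside the buffer, and PTR_ALIGN()/ALIGN() then round the
 * CPU and DMA addresses up to that window. Names here are placeholders, and
 * the sketch assumes the CPU and DMA views of the coherent buffer share the
 * same offset, as in the driver code that follows.
 */
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/kernel.h>

static void *example_alloc_fw_buf(struct device *dev, size_t size,
				  size_t align, dma_addr_t *dma)
{
	void *cpu = dmam_alloc_coherent(dev, size + align, dma, GFP_KERNEL);

	if (!cpu)
		return NULL;

	*dma = ALIGN(*dma, align);	/* device-visible start address */
	return PTR_ALIGN(cpu, align);	/* CPU-visible start address */
}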
+ */ + size = fw->size + XRX200_GPHY_FW_ALIGN; + + fw_addr = dmam_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL); + if (fw_addr) { + fw_addr = PTR_ALIGN(fw_addr, XRX200_GPHY_FW_ALIGN); + *dev_addr = ALIGN(dma_addr, XRX200_GPHY_FW_ALIGN); + memcpy(fw_addr, fw->data, fw->size); + } else { + dev_err(dev, "failed to alloc firmware memory\n"); + ret = -ENOMEM; + } + + release_firmware(fw); + + return ret; +} + +static int xway_gphy_of_probe(struct platform_device *pdev, + struct xway_gphy_priv *priv) +{ + struct device *dev = &pdev->dev; + const struct xway_gphy_match_data *gphy_fw_name_cfg; + u32 gphy_mode; + int ret; + struct resource *res_gphy; + + gphy_fw_name_cfg = of_device_get_match_data(dev); + + priv->gphy_clk_gate = devm_clk_get(dev, NULL); + if (IS_ERR(priv->gphy_clk_gate)) { + dev_err(dev, "Failed to lookup gate clock\n"); + return PTR_ERR(priv->gphy_clk_gate); + } + + res_gphy = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->membase = devm_ioremap_resource(dev, res_gphy); + if (IS_ERR(priv->membase)) + return PTR_ERR(priv->membase); + + priv->gphy_reset = devm_reset_control_get(dev, "gphy"); + if (IS_ERR(priv->gphy_reset)) { + if (PTR_ERR(priv->gphy_reset) != -EPROBE_DEFER) + dev_err(dev, "Failed to lookup gphy reset\n"); + return PTR_ERR(priv->gphy_reset); + } + + priv->gphy_reset2 = devm_reset_control_get_optional(dev, "gphy2"); + if (IS_ERR(priv->gphy_reset2)) + return PTR_ERR(priv->gphy_reset2); + + ret = device_property_read_u32(dev, "lantiq,gphy-mode", &gphy_mode); + /* Default to GE mode */ + if (ret) + gphy_mode = GPHY_MODE_GE; + + switch (gphy_mode) { + case GPHY_MODE_FE: + priv->fw_name = gphy_fw_name_cfg->fe_firmware_name; + break; + case GPHY_MODE_GE: + priv->fw_name = gphy_fw_name_cfg->ge_firmware_name; + break; + default: + dev_err(dev, "Unknown GPHY mode %d\n", gphy_mode); + return -EINVAL; + } + + return 0; +} + +static int xway_gphy_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct xway_gphy_priv *priv; + dma_addr_t fw_addr = 0; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + ret = xway_gphy_of_probe(pdev, priv); + if (ret) + return ret; + + ret = clk_prepare_enable(priv->gphy_clk_gate); + if (ret) + return ret; + + ret = xway_gphy_load(dev, priv, &fw_addr); + if (ret) { + clk_disable_unprepare(priv->gphy_clk_gate); + return ret; + } + + reset_control_assert(priv->gphy_reset); + reset_control_assert(priv->gphy_reset2); + + iowrite32be(fw_addr, priv->membase); + + reset_control_deassert(priv->gphy_reset); + reset_control_deassert(priv->gphy_reset2); + + /* assert the gphy reset because it can hang after a reboot: */ + priv->gphy_reboot_nb.notifier_call = xway_gphy_reboot_notify; + priv->gphy_reboot_nb.priority = -1; + + ret = register_reboot_notifier(&priv->gphy_reboot_nb); + if (ret) + dev_warn(dev, "Failed to register reboot notifier\n"); + + platform_set_drvdata(pdev, priv); + + return ret; +} + +static int xway_gphy_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct xway_gphy_priv *priv = platform_get_drvdata(pdev); + int ret; + + reset_control_assert(priv->gphy_reset); + reset_control_assert(priv->gphy_reset2); + + iowrite32be(0, priv->membase); + + clk_disable_unprepare(priv->gphy_clk_gate); + + ret = unregister_reboot_notifier(&priv->gphy_reboot_nb); + if (ret) + dev_warn(dev, "Failed to unregister reboot notifier\n"); + + return 0; +} + +static struct platform_driver xway_gphy_driver = { + .probe = xway_gphy_probe, + .remove = 
xway_gphy_remove, + .driver = { + .name = "xway-rcu-gphy", + .of_match_table = xway_gphy_match, + }, +}; + +module_platform_driver(xway_gphy_driver); + +MODULE_FIRMWARE("lantiq/xrx300_phy11g_a21.bin"); +MODULE_FIRMWARE("lantiq/xrx300_phy22f_a21.bin"); +MODULE_FIRMWARE("lantiq/xrx200_phy11g_a14.bin"); +MODULE_FIRMWARE("lantiq/xrx200_phy11g_a22.bin"); +MODULE_FIRMWARE("lantiq/xrx200_phy22f_a14.bin"); +MODULE_FIRMWARE("lantiq/xrx200_phy22f_a22.bin"); +MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>"); +MODULE_DESCRIPTION("Lantiq XWAY GPHY Firmware Loader"); +MODULE_LICENSE("GPL"); diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c index 6ba270e0494d..0f695df14c9d 100644 --- a/drivers/staging/android/ashmem.c +++ b/drivers/staging/android/ashmem.c @@ -294,19 +294,9 @@ static int ashmem_release(struct inode *ignored, struct file *file) return 0; } -/** - * ashmem_read() - Reads a set of bytes from an Ashmem-enabled file - * @file: The associated backing file. - * @buf: The buffer of data being written to - * @len: The number of bytes being read - * @pos: The position of the first byte to read. - * - * Return: 0 if successful, or another return code if not. - */ -static ssize_t ashmem_read(struct file *file, char __user *buf, - size_t len, loff_t *pos) +static ssize_t ashmem_read_iter(struct kiocb *iocb, struct iov_iter *iter) { - struct ashmem_area *asma = file->private_data; + struct ashmem_area *asma = iocb->ki_filp->private_data; int ret = 0; mutex_lock(&ashmem_mutex); @@ -320,20 +310,17 @@ static ssize_t ashmem_read(struct file *file, char __user *buf, goto out_unlock; } - mutex_unlock(&ashmem_mutex); - /* * asma and asma->file are used outside the lock here. We assume * once asma->file is set it will never be changed, and will not * be destroyed until all references to the file are dropped and * ashmem_release is called. 
*/ - ret = __vfs_read(asma->file, buf, len, pos); - if (ret >= 0) - /** Update backing file pos, since f_ops->read() doesn't */ - asma->file->f_pos = *pos; - return ret; - + mutex_unlock(&ashmem_mutex); + ret = vfs_iter_read(asma->file, iter, &iocb->ki_pos, 0); + mutex_lock(&ashmem_mutex); + if (ret > 0) + asma->file->f_pos = iocb->ki_pos; out_unlock: mutex_unlock(&ashmem_mutex); return ret; @@ -834,7 +821,7 @@ static const struct file_operations ashmem_fops = { .owner = THIS_MODULE, .open = ashmem_open, .release = ashmem_release, - .read = ashmem_read, + .read_iter = ashmem_read_iter, .llseek = ashmem_llseek, .mmap = ashmem_mmap, .unlocked_ioctl = ashmem_ioctl, diff --git a/drivers/staging/comedi/drivers/serial2002.c b/drivers/staging/comedi/drivers/serial2002.c index 0d33e520f635..cc18e25103ca 100644 --- a/drivers/staging/comedi/drivers/serial2002.c +++ b/drivers/staging/comedi/drivers/serial2002.c @@ -106,16 +106,8 @@ static long serial2002_tty_ioctl(struct file *f, unsigned int op, static int serial2002_tty_write(struct file *f, unsigned char *buf, int count) { - const char __user *p = (__force const char __user *)buf; - int result; - loff_t offset = 0; - mm_segment_t oldfs; - - oldfs = get_fs(); - set_fs(KERNEL_DS); - result = __vfs_write(f, p, count, &offset); - set_fs(oldfs); - return result; + loff_t pos = 0; + return kernel_write(f, buf, count, &pos); } static void serial2002_tty_read_poll_wait(struct file *f, int timeout) @@ -148,19 +140,14 @@ static int serial2002_tty_read(struct file *f, int timeout) { unsigned char ch; int result; + loff_t pos = 0; result = -1; if (!IS_ERR(f)) { - mm_segment_t oldfs; - char __user *p = (__force char __user *)&ch; - loff_t offset = 0; - - oldfs = get_fs(); - set_fs(KERNEL_DS); if (f->f_op->poll) { serial2002_tty_read_poll_wait(f, timeout); - if (__vfs_read(f, p, 1, &offset) == 1) + if (kernel_read(f, &ch, 1, &pos) == 1) result = ch; } else { /* Device does not support poll, busy wait */ @@ -171,14 +158,13 @@ static int serial2002_tty_read(struct file *f, int timeout) if (retries >= timeout) break; - if (__vfs_read(f, p, 1, &offset) == 1) { + if (kernel_read(f, &ch, 1, &pos) == 1) { result = ch; break; } usleep_range(100, 1000); } } - set_fs(oldfs); } return result; } diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.c b/drivers/staging/lustre/lnet/libcfs/tracefile.c index 68f283a2744c..f916b475e767 100644 --- a/drivers/staging/lustre/lnet/libcfs/tracefile.c +++ b/drivers/staging/lustre/lnet/libcfs/tracefile.c @@ -731,8 +731,7 @@ int cfs_tracefile_dump_all_pages(char *filename) __LASSERT_TAGE_INVARIANT(tage); buf = kmap(tage->page); - rc = vfs_write(filp, (__force const char __user *)buf, - tage->used, &filp->f_pos); + rc = kernel_write(filp, buf, tage->used, &filp->f_pos); kunmap(tage->page); if (rc != (int)tage->used) { @@ -976,7 +975,6 @@ static int tracefiled(void *arg) struct tracefiled_ctl *tctl = arg; struct cfs_trace_page *tage; struct cfs_trace_page *tmp; - mm_segment_t __oldfs; struct file *filp; char *buf; int last_loop = 0; @@ -1014,8 +1012,6 @@ static int tracefiled(void *arg) __LASSERT(list_empty(&pc.pc_pages)); goto end_loop; } - __oldfs = get_fs(); - set_fs(get_ds()); list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) { static loff_t f_pos; @@ -1028,8 +1024,7 @@ static int tracefiled(void *arg) f_pos = i_size_read(file_inode(filp)); buf = kmap(tage->page); - rc = vfs_write(filp, (__force const char __user *)buf, - tage->used, &f_pos); + rc = kernel_write(filp, buf, tage->used, &f_pos); kunmap(tage->page); if (rc 
!= (int)tage->used) { @@ -1040,7 +1035,6 @@ static int tracefiled(void *arg) break; } } - set_fs(__oldfs); filp_close(filp, NULL); put_pages_on_daemon_list(&pc); diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c index d855129768f8..25393e3a0fe8 100644 --- a/drivers/staging/lustre/lustre/llite/llite_lib.c +++ b/drivers/staging/lustre/lustre/llite/llite_lib.c @@ -210,7 +210,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, data->ocd_ibits_known = MDS_INODELOCK_FULL; data->ocd_version = LUSTRE_VERSION_CODE; - if (sb->s_flags & MS_RDONLY) + if (sb_rdonly(sb)) data->ocd_connect_flags |= OBD_CONNECT_RDONLY; if (sbi->ll_flags & LL_SBI_USER_XATTR) data->ocd_connect_flags |= OBD_CONNECT_XATTR; @@ -2031,7 +2031,7 @@ int ll_remount_fs(struct super_block *sb, int *flags, char *data) int err; __u32 read_only; - if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) { + if ((bool)(*flags & MS_RDONLY) != sb_rdonly(sb)) { read_only = *flags & MS_RDONLY; err = obd_set_info_async(NULL, sbi->ll_md_exp, sizeof(KEY_READ_ONLY), diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c index 4897dbd3286d..5cc2b3255207 100644 --- a/drivers/staging/lustre/lustre/llite/namei.c +++ b/drivers/staging/lustre/lustre/llite/namei.c @@ -561,8 +561,7 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry, } } - if (it->it_op & IT_OPEN && it->it_flags & FMODE_WRITE && - dentry->d_sb->s_flags & MS_RDONLY) + if (it->it_op & IT_OPEN && it->it_flags & FMODE_WRITE && sb_rdonly(dentry->d_sb)) return ERR_PTR(-EROFS); if (it->it_op & IT_CREAT) diff --git a/drivers/staging/lustre/lustre/obdclass/kernelcomm.c b/drivers/staging/lustre/lustre/obdclass/kernelcomm.c index 8f0707a27a83..4f0a42633d5a 100644 --- a/drivers/staging/lustre/lustre/obdclass/kernelcomm.c +++ b/drivers/staging/lustre/lustre/obdclass/kernelcomm.c @@ -52,7 +52,6 @@ int libcfs_kkuc_msg_put(struct file *filp, void *payload) struct kuc_hdr *kuch = (struct kuc_hdr *)payload; ssize_t count = kuch->kuc_msglen; loff_t offset = 0; - mm_segment_t fs; int rc = -ENXIO; if (IS_ERR_OR_NULL(filp)) @@ -63,18 +62,14 @@ int libcfs_kkuc_msg_put(struct file *filp, void *payload) return rc; } - fs = get_fs(); - set_fs(KERNEL_DS); while (count > 0) { - rc = vfs_write(filp, (void __force __user *)payload, - count, &offset); + rc = kernel_write(filp, payload, count, &offset); if (rc < 0) break; count -= rc; payload += rc; rc = 0; } - set_fs(fs); if (rc < 0) CWARN("message send failed (%d)\n", rc); diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index a91b7c25ffd4..928127642574 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -896,13 +896,14 @@ static int core_alua_write_tpg_metadata( u32 md_buf_len) { struct file *file = filp_open(path, O_RDWR | O_CREAT | O_TRUNC, 0600); + loff_t pos = 0; int ret; if (IS_ERR(file)) { pr_err("filp_open(%s) for ALUA metadata failed\n", path); return -ENODEV; } - ret = kernel_write(file, md_buf, md_buf_len, 0); + ret = kernel_write(file, md_buf, md_buf_len, &pos); if (ret < 0) pr_err("Error writing ALUA metadata file: %s\n", path); fput(file); diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 24cf11d9e50a..c629817a8854 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -443,7 +443,7 @@ fd_do_prot_fill(struct se_device *se_dev, sector_t lba, 
sector_t nolb, for (prot = 0; prot < prot_length;) { sector_t len = min_t(sector_t, bufsize, prot_length - prot); - ssize_t ret = kernel_write(prot_fd, buf, len, pos + prot); + ssize_t ret = kernel_write(prot_fd, buf, len, &pos); if (ret != len) { pr_err("vfs_write to prot file failed: %zd\n", ret); diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 6d5def64db61..dd2cd8048582 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -1974,6 +1974,7 @@ static int __core_scsi3_write_aptpl_to_file( char path[512]; u32 pr_aptpl_buf_len; int ret; + loff_t pos = 0; memset(path, 0, 512); @@ -1993,7 +1994,7 @@ static int __core_scsi3_write_aptpl_to_file( pr_aptpl_buf_len = (strlen(buf) + 1); /* Add extra for NULL */ - ret = kernel_write(file, buf, pr_aptpl_buf_len, 0); + ret = kernel_write(file, buf, pr_aptpl_buf_len, &pos); if (ret < 0) pr_debug("Error writing APTPL metadata file: %s\n", path); diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c index ae8cfc81ffc5..d9123f995705 100644 --- a/drivers/tty/serial/pch_uart.c +++ b/drivers/tty/serial/pch_uart.c @@ -371,7 +371,7 @@ static const struct file_operations port_regs_ops = { }; #endif /* CONFIG_DEBUG_FS */ -static struct dmi_system_id pch_uart_dmi_table[] = { +static const struct dmi_system_id pch_uart_dmi_table[] = { { .ident = "CM-iTC", { diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c index f95bddd6513f..d6bd0244b008 100644 --- a/drivers/usb/gadget/function/f_mass_storage.c +++ b/drivers/usb/gadget/function/f_mass_storage.c @@ -686,9 +686,8 @@ static int do_read(struct fsg_common *common) /* Perform the read */ file_offset_tmp = file_offset; - nread = vfs_read(curlun->filp, - (char __user *)bh->buf, - amount, &file_offset_tmp); + nread = kernel_read(curlun->filp, bh->buf, amount, + &file_offset_tmp); VLDBG(curlun, "file read %u @ %llu -> %d\n", amount, (unsigned long long)file_offset, (int)nread); if (signal_pending(current)) @@ -883,8 +882,8 @@ static int do_write(struct fsg_common *common) /* Perform the write */ file_offset_tmp = file_offset; - nwritten = vfs_write(curlun->filp, (char __user *)bh->buf, - amount, &file_offset_tmp); + nwritten = kernel_write(curlun->filp, bh->buf, amount, + &file_offset_tmp); VLDBG(curlun, "file write %u @ %llu -> %d\n", amount, (unsigned long long)file_offset, (int)nwritten); if (signal_pending(current)) @@ -1021,9 +1020,8 @@ static int do_verify(struct fsg_common *common) /* Perform the read */ file_offset_tmp = file_offset; - nread = vfs_read(curlun->filp, - (char __user *) bh->buf, - amount, &file_offset_tmp); + nread = kernel_read(curlun->filp, bh->buf, amount, + &file_offset_tmp); VLDBG(curlun, "file read %u @ %llu -> %d\n", amount, (unsigned long long) file_offset, (int) nread); @@ -2453,13 +2451,6 @@ static int fsg_main_thread(void *common_) /* Allow the thread to be frozen */ set_freezable(); - /* - * Arrange for userspace references to be interpreted as kernel - * pointers. That way we can pass a kernel pointer to a routine - * that expects a __user pointer and it will work okay. 
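/*
 * Illustrative sketch, not taken from any of these patches: several hunks in
 * this series (serial2002, lustre tracefile, target_core, f_mass_storage)
 * drop the old set_fs(KERNEL_DS) + vfs_read()/vfs_write() trick described in
 * the comment above and instead call kernel_read()/kernel_write(), which
 * accept a kernel pointer and an explicit file position. The function and
 * buffer names below are placeholders for illustration only.
 */
#include <linux/fs.h>

static ssize_t example_write_blob(struct file *filp, const void *buf,
				  size_t len)
{
	loff_t pos = 0;

	/* writes from a kernel buffer; pos is advanced by the VFS */
	return kernel_write(filp, buf, len, &pos);
}

static ssize_t example_read_blob(struct file *filp, void *buf, size_t len)
{
	loff_t pos = 0;

	/* reads into a kernel buffer without widening the address limit */
	return kernel_read(filp, buf, len, &pos);
}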
- */ - set_fs(get_ds()); - /* The main loop */ while (common->state != FSG_STATE_TERMINATED) { if (exception_in_progress(common) || signal_pending(current)) { diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c index 84a110a719cb..96312c3afc07 100644 --- a/drivers/video/backlight/kb3886_bl.c +++ b/drivers/video/backlight/kb3886_bl.c @@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo; static unsigned long kb3886bl_flags; #define KB3886BL_SUSPENDED 0x01 -static struct dmi_system_id kb3886bl_device_table[] __initdata = { +static const struct dmi_system_id kb3886bl_device_table[] __initconst = { { .ident = "Sahara Touch-iT", .matches = { diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig index 2111d06f8c81..7f1f1fbcef9e 100644 --- a/drivers/video/console/Kconfig +++ b/drivers/video/console/Kconfig @@ -117,7 +117,7 @@ config DUMMY_CONSOLE_ROWS Select 25 if you use a 640x480 resolution by default. config FRAMEBUFFER_CONSOLE - tristate "Framebuffer Console support" + bool "Framebuffer Console support" depends on FB && !UML select VT_HW_CONSOLE_BINDING select CRC32 diff --git a/drivers/video/console/Makefile b/drivers/video/console/Makefile index 43bfa485db96..eb2cbec52643 100644 --- a/drivers/video/console/Makefile +++ b/drivers/video/console/Makefile @@ -7,13 +7,5 @@ obj-$(CONFIG_SGI_NEWPORT_CONSOLE) += newport_con.o obj-$(CONFIG_STI_CONSOLE) += sticon.o sticore.o obj-$(CONFIG_VGA_CONSOLE) += vgacon.o obj-$(CONFIG_MDA_CONSOLE) += mdacon.o -obj-$(CONFIG_FRAMEBUFFER_CONSOLE) += fbcon.o bitblit.o softcursor.o -ifeq ($(CONFIG_FB_TILEBLITTING),y) -obj-$(CONFIG_FRAMEBUFFER_CONSOLE) += tileblit.o -endif -ifeq ($(CONFIG_FRAMEBUFFER_CONSOLE_ROTATION),y) -obj-$(CONFIG_FRAMEBUFFER_CONSOLE) += fbcon_rotate.o fbcon_cw.o fbcon_ud.o \ - fbcon_ccw.o -endif obj-$(CONFIG_FB_STI) += sticore.o diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index dc06cb6a15dc..445b1dc5d441 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -398,9 +398,8 @@ static const char *vgacon_startup(void) #endif } - /* boot_params.screen_info initialized? */ - if ((screen_info.orig_video_mode == 0) && - (screen_info.orig_video_lines == 0) && + /* boot_params.screen_info reasonably initialized? 
*/ + if ((screen_info.orig_video_lines == 0) || (screen_info.orig_video_cols == 0)) goto no_vga; diff --git a/drivers/video/fbdev/68328fb.c b/drivers/video/fbdev/68328fb.c index c0c6b88d3839..d48e96088f76 100644 --- a/drivers/video/fbdev/68328fb.c +++ b/drivers/video/fbdev/68328fb.c @@ -72,7 +72,7 @@ static struct fb_var_screeninfo mc68x328fb_default __initdata = { .vmode = FB_VMODE_NONINTERLACED, }; -static struct fb_fix_screeninfo mc68x328fb_fix __initdata = { +static const struct fb_fix_screeninfo mc68x328fb_fix __initconst = { .id = "68328fb", .type = FB_TYPE_PACKED_PIXELS, .xpanstep = 1, diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index 5c6696bb56da..5e58f5ec0a28 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -2173,7 +2173,7 @@ config FB_PS3_DEFAULT_SIZE_M config FB_XILINX tristate "Xilinx frame buffer support" - depends on FB && (XILINX_VIRTEX || MICROBLAZE || ARCH_ZYNQ) + depends on FB && (XILINX_VIRTEX || MICROBLAZE || ARCH_ZYNQ || ARCH_ZYNQMP) select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c index ffc2c33c6cef..36d25190b48c 100644 --- a/drivers/video/fbdev/amba-clcd.c +++ b/drivers/video/fbdev/amba-clcd.c @@ -1035,7 +1035,7 @@ static struct clcd_vendor_data vendor_nomadik = { .init_panel = nomadik_clcd_init_panel, }; -static struct amba_id clcdfb_id_table[] = { +static const struct amba_id clcdfb_id_table[] = { { .id = 0x00041110, .mask = 0x000ffffe, diff --git a/drivers/video/fbdev/arkfb.c b/drivers/video/fbdev/arkfb.c index 6a317de7082c..13ba371e70aa 100644 --- a/drivers/video/fbdev/arkfb.c +++ b/drivers/video/fbdev/arkfb.c @@ -1157,7 +1157,7 @@ fail: /* List of boards that we are trying to support */ -static struct pci_device_id ark_devices[] = { +static const struct pci_device_id ark_devices[] = { {PCI_DEVICE(0xEDD8, 0xA099)}, {0, 0, 0, 0, 0, 0, 0} }; diff --git a/drivers/video/fbdev/asiliantfb.c b/drivers/video/fbdev/asiliantfb.c index 91eea4583382..ea31054a28ca 100644 --- a/drivers/video/fbdev/asiliantfb.c +++ b/drivers/video/fbdev/asiliantfb.c @@ -592,7 +592,7 @@ static void asiliantfb_remove(struct pci_dev *dp) framebuffer_release(p); } -static struct pci_device_id asiliantfb_pci_tbl[] = { +static const struct pci_device_id asiliantfb_pci_tbl[] = { { PCI_VENDOR_ID_CT, PCI_DEVICE_ID_CT_69000, PCI_ANY_ID, PCI_ANY_ID }, { 0 } }; diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c index 669ecc755fa9..e06358da4b99 100644 --- a/drivers/video/fbdev/atmel_lcdfb.c +++ b/drivers/video/fbdev/atmel_lcdfb.c @@ -320,7 +320,7 @@ static inline void atmel_lcdfb_power_control(struct atmel_lcdfb_info *sinfo, int } } -static struct fb_fix_screeninfo atmel_lcdfb_fix __initdata = { +static const struct fb_fix_screeninfo atmel_lcdfb_fix __initconst = { .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_TRUECOLOR, .xpanstep = 0, diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c index fa07242a78d2..db18474607c9 100644 --- a/drivers/video/fbdev/aty/aty128fb.c +++ b/drivers/video/fbdev/aty/aty128fb.c @@ -116,7 +116,7 @@ static const struct fb_var_screeninfo default_var = { /* default modedb mode */ /* 640x480, 60 Hz, Non-Interlaced (25.172 MHz dotclock) */ -static struct fb_videomode defaultmode = { +static const struct fb_videomode defaultmode = { .refresh = 60, .xres = 640, .yres = 480, @@ -166,7 +166,7 @@ static int aty128_pci_resume(struct pci_dev *pdev); static int 
aty128_do_resume(struct pci_dev *pdev); /* supported Rage128 chipsets */ -static struct pci_device_id aty128_pci_tbl[] = { +static const struct pci_device_id aty128_pci_tbl[] = { { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_LE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_M3_pci }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_LF, diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c index b55fdac9c9f5..3ec72f19114b 100644 --- a/drivers/video/fbdev/aty/atyfb_base.c +++ b/drivers/video/fbdev/aty/atyfb_base.c @@ -274,7 +274,7 @@ static struct fb_var_screeninfo default_var = { 0, FB_VMODE_NONINTERLACED }; -static struct fb_videomode defmode = { +static const struct fb_videomode defmode = { /* 640x480 @ 60 Hz, 31.5 kHz hsync */ NULL, 60, 640, 480, 39721, 40, 24, 32, 11, 96, 2, 0, FB_VMODE_NONINTERLACED @@ -1855,7 +1855,7 @@ static int atyfb_ioctl(struct fb_info *info, u_int cmd, u_long arg) #if defined(DEBUG) && defined(CONFIG_FB_ATY_CT) case ATYIO_CLKR: if (M64_HAS(INTEGRATED)) { - struct atyclk clk; + struct atyclk clk = { 0 }; union aty_pll *pll = &par->pll; u32 dsp_config = pll->ct.dsp_config; u32 dsp_on_off = pll->ct.dsp_on_off; @@ -3756,7 +3756,7 @@ static void atyfb_pci_remove(struct pci_dev *pdev) atyfb_remove(info); } -static struct pci_device_id atyfb_pci_tbl[] = { +static const struct pci_device_id atyfb_pci_tbl[] = { #ifdef CONFIG_FB_ATY_GX { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GX) }, { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64CX) }, diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c index 6b4c7872b375..1e2ec360f8c1 100644 --- a/drivers/video/fbdev/aty/radeon_base.c +++ b/drivers/video/fbdev/aty/radeon_base.c @@ -96,7 +96,7 @@ #define CHIP_DEF(id, family, flags) \ { PCI_VENDOR_ID_ATI, id, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (flags) | (CHIP_FAMILY_##family) } -static struct pci_device_id radeonfb_pci_table[] = { +static const struct pci_device_id radeonfb_pci_table[] = { /* Radeon Xpress 200m */ CHIP_DEF(PCI_CHIP_RS480_5955, RS480, CHIP_HAS_CRTC2 | CHIP_IS_IGP | CHIP_IS_MOBILITY), CHIP_DEF(PCI_CHIP_RS482_5975, RS480, CHIP_HAS_CRTC2 | CHIP_IS_IGP | CHIP_IS_MOBILITY), @@ -2241,7 +2241,7 @@ static ssize_t radeon_show_edid2(struct file *filp, struct kobject *kobj, return radeon_show_one_edid(buf, off, count, rinfo->mon2_EDID); } -static struct bin_attribute edid1_attr = { +static const struct bin_attribute edid1_attr = { .attr = { .name = "edid1", .mode = 0444, @@ -2250,7 +2250,7 @@ static struct bin_attribute edid1_attr = { .read = radeon_show_edid1, }; -static struct bin_attribute edid2_attr = { +static const struct bin_attribute edid2_attr = { .attr = { .name = "edid2", .mode = 0444, diff --git a/drivers/video/fbdev/bfin-lq035q1-fb.c b/drivers/video/fbdev/bfin-lq035q1-fb.c index b594a58ff21d..b459354ad940 100644 --- a/drivers/video/fbdev/bfin-lq035q1-fb.c +++ b/drivers/video/fbdev/bfin-lq035q1-fb.c @@ -841,7 +841,7 @@ static int bfin_lq035q1_resume(struct device *dev) return 0; } -static struct dev_pm_ops bfin_lq035q1_dev_pm_ops = { +static const struct dev_pm_ops bfin_lq035q1_dev_pm_ops = { .suspend = bfin_lq035q1_suspend, .resume = bfin_lq035q1_resume, }; diff --git a/drivers/video/fbdev/bw2.c b/drivers/video/fbdev/bw2.c index 8c5b281f0b29..7aa972072357 100644 --- a/drivers/video/fbdev/bw2.c +++ b/drivers/video/fbdev/bw2.c @@ -333,8 +333,8 @@ static int bw2_probe(struct platform_device *op) dev_set_drvdata(&op->dev, info); - printk(KERN_INFO "%s: bwtwo at %lx:%lx\n", - dp->full_name, par->which_io, 
info->fix.smem_start); + printk(KERN_INFO "%pOF: bwtwo at %lx:%lx\n", + dp, par->which_io, info->fix.smem_start); return 0; diff --git a/drivers/video/fbdev/cg14.c b/drivers/video/fbdev/cg14.c index 43e915eaf606..8de88b129b62 100644 --- a/drivers/video/fbdev/cg14.c +++ b/drivers/video/fbdev/cg14.c @@ -553,8 +553,8 @@ static int cg14_probe(struct platform_device *op) dev_set_drvdata(&op->dev, info); - printk(KERN_INFO "%s: cgfourteen at %lx:%lx, %dMB\n", - dp->full_name, + printk(KERN_INFO "%pOF: cgfourteen at %lx:%lx, %dMB\n", + dp, par->iospace, info->fix.smem_start, par->ramsize >> 20); diff --git a/drivers/video/fbdev/cg3.c b/drivers/video/fbdev/cg3.c index 716391f22e75..6c334260cf53 100644 --- a/drivers/video/fbdev/cg3.c +++ b/drivers/video/fbdev/cg3.c @@ -412,8 +412,8 @@ static int cg3_probe(struct platform_device *op) dev_set_drvdata(&op->dev, info); - printk(KERN_INFO "%s: cg3 at %lx:%lx\n", - dp->full_name, par->which_io, info->fix.smem_start); + printk(KERN_INFO "%pOF: cg3 at %lx:%lx\n", + dp, par->which_io, info->fix.smem_start); return 0; diff --git a/drivers/video/fbdev/cg6.c b/drivers/video/fbdev/cg6.c index bdf901ed5291..0296c21acc78 100644 --- a/drivers/video/fbdev/cg6.c +++ b/drivers/video/fbdev/cg6.c @@ -810,8 +810,8 @@ static int cg6_probe(struct platform_device *op) dev_set_drvdata(&op->dev, info); - printk(KERN_INFO "%s: CGsix [%s] at %lx:%lx\n", - dp->full_name, info->fix.id, + printk(KERN_INFO "%pOF: CGsix [%s] at %lx:%lx\n", + dp, info->fix.id, par->which_io, info->fix.smem_start); return 0; diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c index 59abdc6a97f6..f103665cad43 100644 --- a/drivers/video/fbdev/chipsfb.c +++ b/drivers/video/fbdev/chipsfb.c @@ -292,7 +292,7 @@ static void chips_hw_init(void) write_fr(chips_init_fr[i].addr, chips_init_fr[i].data); } -static struct fb_fix_screeninfo chipsfb_fix = { +static const struct fb_fix_screeninfo chipsfb_fix = { .id = "C&T 65550", .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_PSEUDOCOLOR, @@ -309,7 +309,7 @@ static struct fb_fix_screeninfo chipsfb_fix = { .smem_len = 0x100000, /* 1MB */ }; -static struct fb_var_screeninfo chipsfb_var = { +static const struct fb_var_screeninfo chipsfb_var = { .xres = 800, .yres = 600, .xres_virtual = 800, diff --git a/drivers/video/fbdev/cobalt_lcdfb.c b/drivers/video/fbdev/cobalt_lcdfb.c index 9da90bd242f4..0ef633e278a1 100644 --- a/drivers/video/fbdev/cobalt_lcdfb.c +++ b/drivers/video/fbdev/cobalt_lcdfb.c @@ -126,7 +126,7 @@ static void lcd_clear(struct fb_info *info) lcd_write_control(info, LCD_RESET); } -static struct fb_fix_screeninfo cobalt_lcdfb_fix = { +static const struct fb_fix_screeninfo cobalt_lcdfb_fix = { .id = "cobalt-lcd", .type = FB_TYPE_TEXT, .type_aux = FB_AUX_TEXT_MDA, diff --git a/drivers/video/fbdev/core/Makefile b/drivers/video/fbdev/core/Makefile index 9e3ddf225393..73493bbd7a15 100644 --- a/drivers/video/fbdev/core/Makefile +++ b/drivers/video/fbdev/core/Makefile @@ -4,6 +4,20 @@ obj-$(CONFIG_FB) += fb.o fb-y := fbmem.o fbmon.o fbcmap.o fbsysfs.o \ modedb.o fbcvt.o fb-$(CONFIG_FB_DEFERRED_IO) += fb_defio.o + +ifeq ($(CONFIG_FRAMEBUFFER_CONSOLE),y) +fb-y += fbcon.o bitblit.o softcursor.o +ifeq ($(CONFIG_FB_TILEBLITTING),y) +fb-y += tileblit.o +endif +ifeq ($(CONFIG_FRAMEBUFFER_CONSOLE_ROTATION),y) +fb-y += fbcon_rotate.o fbcon_cw.o fbcon_ud.o \ + fbcon_ccw.o +endif +ifeq ($(CONFIG_DMI),y) +fb-y += fbcon_dmi_quirks.o +endif +endif fb-objs := $(fb-y) obj-$(CONFIG_FB_CFB_FILLRECT) += cfbfillrect.o diff --git 
a/drivers/video/console/bitblit.c b/drivers/video/fbdev/core/bitblit.c index dbfe4eecf12e..790900d646c0 100644 --- a/drivers/video/console/bitblit.c +++ b/drivers/video/fbdev/core/bitblit.c @@ -203,7 +203,7 @@ static void bit_putcs(struct vc_data *vc, struct fb_info *info, } static void bit_clear_margins(struct vc_data *vc, struct fb_info *info, - int bottom_only) + int color, int bottom_only) { unsigned int cw = vc->vc_font.width; unsigned int ch = vc->vc_font.height; @@ -213,7 +213,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info, unsigned int bs = info->var.yres - bh; struct fb_fillrect region; - region.color = 0; + region.color = color; region.rop = ROP_COPY; if (rw && !bottom_only) { @@ -416,7 +416,3 @@ void fbcon_set_bitops(struct fbcon_ops *ops) EXPORT_SYMBOL(fbcon_set_bitops); -MODULE_AUTHOR("Antonino Daplas <adaplas@pol.net>"); -MODULE_DESCRIPTION("Bit Blitting Operation"); -MODULE_LICENSE("GPL"); - diff --git a/drivers/video/console/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 12ded23f1aaf..04612f938bab 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -68,6 +68,7 @@ #include <linux/kd.h> #include <linux/slab.h> #include <linux/fb.h> +#include <linux/fbcon.h> #include <linux/vt_kern.h> #include <linux/selection.h> #include <linux/font.h> @@ -135,8 +136,9 @@ static char fontname[40]; static int info_idx = -1; /* console rotation */ -static int initial_rotation; +static int initial_rotation = -1; static int fbcon_has_sysfs; +static int margin_color; static const struct consw fb_con; @@ -491,6 +493,13 @@ static int __init fb_console_setup(char *this_opt) initial_rotation = 0; continue; } + + if (!strncmp(options, "margin:", 7)) { + options += 7; + if (*options) + margin_color = simple_strtoul(options, &options, 0); + continue; + } } return 1; } @@ -563,7 +572,7 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info, unsigned short *save = NULL, *r, *q; int logo_height; - if (info->flags & FBINFO_MODULE) { + if (info->fbops->owner) { logo_shown = FBCON_LOGO_DONTSHOW; return; } @@ -954,7 +963,10 @@ static const char *fbcon_startup(void) ops->cur_rotate = -1; ops->cur_blink_jiffies = HZ / 5; info->fbcon_par = ops; - p->con_rotate = initial_rotation; + if (initial_rotation != -1) + p->con_rotate = initial_rotation; + else + p->con_rotate = fbcon_platform_get_rotate(info); set_blitting_type(vc, info); if (info->fix.type != FB_TYPE_TEXT) { @@ -1091,7 +1103,10 @@ static void fbcon_init(struct vc_data *vc, int init) ops = info->fbcon_par; ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms); - p->con_rotate = initial_rotation; + if (initial_rotation != -1) + p->con_rotate = initial_rotation; + else + p->con_rotate = fbcon_platform_get_rotate(info); set_blitting_type(vc, info); cols = vc->vc_cols; @@ -1299,7 +1314,7 @@ static void fbcon_clear_margins(struct vc_data *vc, int bottom_only) struct fbcon_ops *ops = info->fbcon_par; if (!fbcon_is_inactive(vc, info)) - ops->clear_margins(vc, info, bottom_only); + ops->clear_margins(vc, info, margin_color, bottom_only); } static void fbcon_cursor(struct vc_data *vc, int mode) @@ -3606,7 +3621,7 @@ static void fbcon_exit(void) fbcon_has_exited = 1; } -static int __init fb_console_init(void) +void __init fb_console_init(void) { int i; @@ -3628,11 +3643,8 @@ static int __init fb_console_init(void) console_unlock(); fbcon_start(); - return 0; } -fs_initcall(fb_console_init); - #ifdef MODULE static void __exit fbcon_deinit_device(void) @@ -3647,7 +3659,7 @@ 
static void __exit fbcon_deinit_device(void) } } -static void __exit fb_console_exit(void) +void __exit fb_console_exit(void) { console_lock(); fb_unregister_client(&fbcon_event_notifier); @@ -3657,9 +3669,4 @@ static void __exit fb_console_exit(void) do_unregister_con_driver(&fb_con); console_unlock(); } - -module_exit(fb_console_exit); - #endif - -MODULE_LICENSE("GPL"); diff --git a/drivers/video/console/fbcon.h b/drivers/video/fbdev/core/fbcon.h index 7aaa4eabbba0..18f3ac144237 100644 --- a/drivers/video/console/fbcon.h +++ b/drivers/video/fbdev/core/fbcon.h @@ -60,7 +60,7 @@ struct fbcon_ops { const unsigned short *s, int count, int yy, int xx, int fg, int bg); void (*clear_margins)(struct vc_data *vc, struct fb_info *info, - int bottom_only); + int color, int bottom_only); void (*cursor)(struct vc_data *vc, struct fb_info *info, int mode, int softback_lines, int fg, int bg); int (*update_start)(struct fb_info *info); @@ -261,5 +261,10 @@ extern void fbcon_set_rotate(struct fbcon_ops *ops); #define fbcon_set_rotate(x) do {} while(0) #endif /* CONFIG_FRAMEBUFFER_CONSOLE_ROTATION */ -#endif /* _VIDEO_FBCON_H */ +#ifdef CONFIG_DMI +int fbcon_platform_get_rotate(struct fb_info *info); +#else +#define fbcon_platform_get_rotate(i) FB_ROTATE_UR +#endif /* CONFIG_DMI */ +#endif /* _VIDEO_FBCON_H */ diff --git a/drivers/video/console/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c index 5a3cbf6dff4d..37a8b0b22566 100644 --- a/drivers/video/console/fbcon_ccw.c +++ b/drivers/video/fbdev/core/fbcon_ccw.c @@ -189,7 +189,7 @@ static void ccw_putcs(struct vc_data *vc, struct fb_info *info, } static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info, - int bottom_only) + int color, int bottom_only) { unsigned int cw = vc->vc_font.width; unsigned int ch = vc->vc_font.height; @@ -198,7 +198,7 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info, unsigned int bs = vc->vc_rows*ch; struct fb_fillrect region; - region.color = 0; + region.color = color; region.rop = ROP_COPY; if (rw && !bottom_only) { @@ -418,7 +418,3 @@ void fbcon_rotate_ccw(struct fbcon_ops *ops) ops->update_start = ccw_update_start; } EXPORT_SYMBOL(fbcon_rotate_ccw); - -MODULE_AUTHOR("Antonino Daplas <adaplas@pol.net>"); -MODULE_DESCRIPTION("Console Rotation (270 degrees) Support"); -MODULE_LICENSE("GPL"); diff --git a/drivers/video/console/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c index e7ee44db4e98..1888f8c866e8 100644 --- a/drivers/video/console/fbcon_cw.c +++ b/drivers/video/fbdev/core/fbcon_cw.c @@ -172,7 +172,7 @@ static void cw_putcs(struct vc_data *vc, struct fb_info *info, } static void cw_clear_margins(struct vc_data *vc, struct fb_info *info, - int bottom_only) + int color, int bottom_only) { unsigned int cw = vc->vc_font.width; unsigned int ch = vc->vc_font.height; @@ -181,7 +181,7 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info, unsigned int rs = info->var.yres - rw; struct fb_fillrect region; - region.color = 0; + region.color = color; region.rop = ROP_COPY; if (rw && !bottom_only) { @@ -401,7 +401,3 @@ void fbcon_rotate_cw(struct fbcon_ops *ops) ops->update_start = cw_update_start; } EXPORT_SYMBOL(fbcon_rotate_cw); - -MODULE_AUTHOR("Antonino Daplas <adaplas@pol.net>"); -MODULE_DESCRIPTION("Console Rotation (90 degrees) Support"); -MODULE_LICENSE("GPL"); diff --git a/drivers/video/fbdev/core/fbcon_dmi_quirks.c b/drivers/video/fbdev/core/fbcon_dmi_quirks.c new file mode 100644 index 000000000000..6904e47d1e51 --- /dev/null +++ 
b/drivers/video/fbdev/core/fbcon_dmi_quirks.c @@ -0,0 +1,145 @@ +/* + * fbcon_dmi_quirks.c -- DMI based quirk detection for fbcon + * + * Copyright (C) 2017 Hans de Goede <hdegoede@redhat.com> + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive for + * more details. + */ + +#include <linux/dmi.h> +#include <linux/fb.h> +#include <linux/kernel.h> +#include "fbcon.h" + +/* + * Some x86 clamshell design devices use portrait tablet screens and a display + * engine which cannot rotate in hardware, so we need to rotate the fbcon to + * compensate. Unfortunately these (cheap) devices also typically have quite + * generic DMI data, so we match on a combination of DMI data, screen resolution + * and a list of known BIOS dates to avoid false positives. + */ + +struct fbcon_dmi_rotate_data { + int width; + int height; + const char * const *bios_dates; + int rotate; +}; + +static const struct fbcon_dmi_rotate_data rotate_data_asus_t100ha = { + .width = 800, + .height = 1280, + .rotate = FB_ROTATE_CCW, +}; + +static const struct fbcon_dmi_rotate_data rotate_data_gpd_pocket = { + .width = 1200, + .height = 1920, + .bios_dates = (const char * const []){ "05/26/2017", "06/28/2017", + "07/05/2017", "08/07/2017", NULL }, + .rotate = FB_ROTATE_CW, +}; + +static const struct fbcon_dmi_rotate_data rotate_data_gpd_win = { + .width = 720, + .height = 1280, + .bios_dates = (const char * const []){ + "10/25/2016", "11/18/2016", "12/23/2016", "12/26/2016", + "02/21/2017", "03/20/2017", "05/25/2017", NULL }, + .rotate = FB_ROTATE_CW, +}; + +static const struct fbcon_dmi_rotate_data rotate_data_itworks_tw891 = { + .width = 800, + .height = 1280, + .bios_dates = (const char * const []){ "10/16/2015", NULL }, + .rotate = FB_ROTATE_CW, +}; + +static const struct fbcon_dmi_rotate_data rotate_data_vios_lth17 = { + .width = 800, + .height = 1280, + .rotate = FB_ROTATE_CW, +}; + +static const struct dmi_system_id rotate_data[] = { + { /* Asus T100HA */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100HAN"), + }, + .driver_data = (void *)&rotate_data_asus_t100ha, + }, { /* + * GPD Pocket, note that the the DMI data is less generic then + * it seems, devices with a board-vendor of "AMI Corporation" + * are quite rare, as are devices which have both board- *and* + * product-id set to "Default String" + */ + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"), + DMI_EXACT_MATCH(DMI_BOARD_SERIAL, "Default string"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"), + }, + .driver_data = (void *)&rotate_data_gpd_pocket, + }, { /* GPD Win (same note on DMI match as GPD Pocket) */ + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"), + DMI_EXACT_MATCH(DMI_BOARD_SERIAL, "Default string"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"), + }, + .driver_data = (void *)&rotate_data_gpd_win, + }, { /* I.T.Works TW891 */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "TW891"), + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "TW891"), + }, + .driver_data = (void *)&rotate_data_itworks_tw891, + }, { /* VIOS LTH17 */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, 
"LTH17"), + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "VIOS"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "LTH17"), + }, + .driver_data = (void *)&rotate_data_vios_lth17, + }, + {} +}; + +int fbcon_platform_get_rotate(struct fb_info *info) +{ + const struct dmi_system_id *match; + const struct fbcon_dmi_rotate_data *data; + const char *bios_date; + int i; + + for (match = dmi_first_match(rotate_data); + match; + match = dmi_first_match(match + 1)) { + data = match->driver_data; + + if (data->width != info->var.xres || + data->height != info->var.yres) + continue; + + if (!data->bios_dates) + return data->rotate; + + bios_date = dmi_get_system_info(DMI_BIOS_DATE); + if (!bios_date) + continue; + + for (i = 0; data->bios_dates[i]; i++) { + if (!strcmp(data->bios_dates[i], bios_date)) + return data->rotate; + } + } + + return FB_ROTATE_UR; +} diff --git a/drivers/video/console/fbcon_rotate.c b/drivers/video/fbdev/core/fbcon_rotate.c index db6528f2d3f2..8a51e4d95cc5 100644 --- a/drivers/video/console/fbcon_rotate.c +++ b/drivers/video/fbdev/core/fbcon_rotate.c @@ -110,7 +110,3 @@ void fbcon_set_rotate(struct fbcon_ops *ops) } } EXPORT_SYMBOL(fbcon_set_rotate); - -MODULE_AUTHOR("Antonino Daplas <adaplas@pol.net>"); -MODULE_DESCRIPTION("Console Rotation Support"); -MODULE_LICENSE("GPL"); diff --git a/drivers/video/console/fbcon_rotate.h b/drivers/video/fbdev/core/fbcon_rotate.h index e233444cda66..e233444cda66 100644 --- a/drivers/video/console/fbcon_rotate.h +++ b/drivers/video/fbdev/core/fbcon_rotate.h diff --git a/drivers/video/console/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c index 19e3714abfe8..f98eee263597 100644 --- a/drivers/video/console/fbcon_ud.c +++ b/drivers/video/fbdev/core/fbcon_ud.c @@ -220,7 +220,7 @@ static void ud_putcs(struct vc_data *vc, struct fb_info *info, } static void ud_clear_margins(struct vc_data *vc, struct fb_info *info, - int bottom_only) + int color, int bottom_only) { unsigned int cw = vc->vc_font.width; unsigned int ch = vc->vc_font.height; @@ -228,7 +228,7 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info, unsigned int bh = info->var.yres - (vc->vc_rows*ch); struct fb_fillrect region; - region.color = 0; + region.color = color; region.rop = ROP_COPY; if (rw && !bottom_only) { @@ -446,7 +446,3 @@ void fbcon_rotate_ud(struct fbcon_ops *ops) ops->update_start = ud_update_start; } EXPORT_SYMBOL(fbcon_rotate_ud); - -MODULE_AUTHOR("Antonino Daplas <adaplas@pol.net>"); -MODULE_DESCRIPTION("Console Rotation (180 degrees) Support"); -MODULE_LICENSE("GPL"); diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index 25e862c487f6..f741ba8df01b 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -32,6 +32,7 @@ #include <linux/device.h> #include <linux/efi.h> #include <linux/fb.h> +#include <linux/fbcon.h> #include <linux/mem_encrypt.h> #include <asm/fb.h> @@ -316,7 +317,7 @@ static void fb_set_logo(struct fb_info *info, for (i = 0; i < logo->height; i++) { for (j = 0; j < logo->width; src++) { d = *src ^ xor; - for (k = 7; k >= 0; k--) { + for (k = 7; k >= 0 && j < logo->width; k--) { *dst++ = ((d >> k) & 1) ? 
fg : 0; j++; } @@ -463,7 +464,7 @@ static int fb_show_logo_line(struct fb_info *info, int rotate, /* Return if the frame buffer is not mapped or suspended */ if (logo == NULL || info->state != FBINFO_STATE_RUNNING || - info->flags & FBINFO_MODULE) + info->fbops->owner) return 0; image.depth = 8; @@ -601,7 +602,7 @@ int fb_prepare_logo(struct fb_info *info, int rotate) memset(&fb_logo, 0, sizeof(struct logo_data)); if (info->flags & FBINFO_MISC_TILEBLITTING || - info->flags & FBINFO_MODULE) + info->fbops->owner) return 0; if (info->fix.visual == FB_VISUAL_DIRECTCOLOR) { @@ -1892,6 +1893,9 @@ fbmem_init(void) fb_class = NULL; goto err_class; } + + fb_console_init(); + return 0; err_class: @@ -1906,6 +1910,8 @@ module_init(fbmem_init); static void __exit fbmem_exit(void) { + fb_console_exit(); + remove_proc_entry("fb", NULL); class_destroy(fb_class); unregister_chrdev(FB_MAJOR, "fb"); diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c index 41d7979d81c5..2b2d67328514 100644 --- a/drivers/video/fbdev/core/fbmon.c +++ b/drivers/video/fbdev/core/fbmon.c @@ -1479,8 +1479,8 @@ int of_get_fb_videomode(struct device_node *np, struct fb_videomode *fb, if (ret) return ret; - pr_debug("%s: got %dx%d display mode from %s\n", - of_node_full_name(np), vm.hactive, vm.vactive, np->name); + pr_debug("%pOF: got %dx%d display mode from %s\n", + np, vm.hactive, vm.vactive, np->name); dump_fb_videomode(fb); return 0; diff --git a/drivers/video/console/softcursor.c b/drivers/video/fbdev/core/softcursor.c index 46dd8f5d2e9e..fc93f254498e 100644 --- a/drivers/video/console/softcursor.c +++ b/drivers/video/fbdev/core/softcursor.c @@ -76,7 +76,3 @@ int soft_cursor(struct fb_info *info, struct fb_cursor *cursor) } EXPORT_SYMBOL(soft_cursor); - -MODULE_AUTHOR("James Simmons <jsimmons@users.sf.net>"); -MODULE_DESCRIPTION("Generic software cursor"); -MODULE_LICENSE("GPL"); diff --git a/drivers/video/console/tileblit.c b/drivers/video/fbdev/core/tileblit.c index 15e8e1a89c45..93390312957f 100644 --- a/drivers/video/console/tileblit.c +++ b/drivers/video/fbdev/core/tileblit.c @@ -74,7 +74,7 @@ static void tile_putcs(struct vc_data *vc, struct fb_info *info, } static void tile_clear_margins(struct vc_data *vc, struct fb_info *info, - int bottom_only) + int color, int bottom_only) { return; } @@ -152,8 +152,3 @@ void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info) } EXPORT_SYMBOL(fbcon_set_tileops); - -MODULE_AUTHOR("Antonino Daplas <adaplas@pol.net>"); -MODULE_DESCRIPTION("Tile Blitting Operation"); -MODULE_LICENSE("GPL"); - diff --git a/drivers/video/fbdev/cyber2000fb.c b/drivers/video/fbdev/cyber2000fb.c index 99acf538a8b8..9a5751cb4e16 100644 --- a/drivers/video/fbdev/cyber2000fb.c +++ b/drivers/video/fbdev/cyber2000fb.c @@ -1336,7 +1336,7 @@ static void cyber2000fb_i2c_unregister(struct cfb_info *cfb) * These parameters give * 640x480, hsync 31.5kHz, vsync 60Hz */ -static struct fb_videomode cyber2000fb_default_mode = { +static const struct fb_videomode cyber2000fb_default_mode = { .refresh = 60, .xres = 640, .yres = 480, diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c index c229b1a0d13b..a74096c53cb5 100644 --- a/drivers/video/fbdev/da8xx-fb.c +++ b/drivers/video/fbdev/da8xx-fb.c @@ -1341,7 +1341,7 @@ static int fb_probe(struct platform_device *device) { struct da8xx_lcdc_platform_data *fb_pdata = dev_get_platdata(&device->dev); - static struct resource *lcdc_regs; + struct resource *lcdc_regs; struct lcd_ctrl_config *lcd_cfg; struct fb_videomode 
*lcdc_info; struct fb_info *da8xx_fb_info; diff --git a/drivers/video/fbdev/dnfb.c b/drivers/video/fbdev/dnfb.c index 3526899da61b..7b1492d34e98 100644 --- a/drivers/video/fbdev/dnfb.c +++ b/drivers/video/fbdev/dnfb.c @@ -126,7 +126,7 @@ struct fb_var_screeninfo dnfb_var = { .vmode = FB_VMODE_NONINTERLACED, }; -static struct fb_fix_screeninfo dnfb_fix = { +static const struct fb_fix_screeninfo dnfb_fix = { .id = "Apollo Mono", .smem_start = (FRAME_BUFFER_START + IO_BASE), .smem_len = FRAME_BUFFER_LEN, diff --git a/drivers/video/fbdev/fb-puv3.c b/drivers/video/fbdev/fb-puv3.c index 88fa2e70a0bb..d9e816d53531 100644 --- a/drivers/video/fbdev/fb-puv3.c +++ b/drivers/video/fbdev/fb-puv3.c @@ -69,7 +69,7 @@ static const struct fb_videomode unifb_modes[] = { 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, }; -static struct fb_var_screeninfo unifb_default = { +static const struct fb_var_screeninfo unifb_default = { .xres = 640, .yres = 480, .xres_virtual = 640, diff --git a/drivers/video/fbdev/ffb.c b/drivers/video/fbdev/ffb.c index dda31e0a45af..6b1915872af1 100644 --- a/drivers/video/fbdev/ffb.c +++ b/drivers/video/fbdev/ffb.c @@ -997,9 +997,9 @@ static int ffb_probe(struct platform_device *op) dev_set_drvdata(&op->dev, info); - printk(KERN_INFO "%s: %s at %016lx, type %d, " + printk(KERN_INFO "%pOF: %s at %016lx, type %d, " "DAC pnum[%x] rev[%d] manuf_rev[%d]\n", - dp->full_name, + dp, ((par->flags & FFB_FLAG_AFB) ? "AFB" : "FFB"), par->physbase, par->board_type, dac_pnum, dac_rev, dac_mrev); diff --git a/drivers/video/fbdev/fm2fb.c b/drivers/video/fbdev/fm2fb.c index e69d47af9932..ac7a4ebfd390 100644 --- a/drivers/video/fbdev/fm2fb.c +++ b/drivers/video/fbdev/fm2fb.c @@ -213,7 +213,7 @@ static int fm2fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, static int fm2fb_probe(struct zorro_dev *z, const struct zorro_device_id *id); -static struct zorro_device_id fm2fb_devices[] = { +static const struct zorro_device_id fm2fb_devices[] = { { ZORRO_PROD_BSC_FRAMEMASTER_II }, { ZORRO_PROD_HELFRICH_RAINBOW_II }, { 0 } diff --git a/drivers/video/fbdev/geode/gxfb_core.c b/drivers/video/fbdev/geode/gxfb_core.c index ec9fc9ac23de..f4f76373b2a8 100644 --- a/drivers/video/fbdev/geode/gxfb_core.c +++ b/drivers/video/fbdev/geode/gxfb_core.c @@ -474,7 +474,7 @@ static void gxfb_remove(struct pci_dev *pdev) framebuffer_release(info); } -static struct pci_device_id gxfb_id_table[] = { +static const struct pci_device_id gxfb_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_GX_VIDEO) }, { 0, } }; diff --git a/drivers/video/fbdev/grvga.c b/drivers/video/fbdev/grvga.c index b471f92969b1..8fc8f46dadeb 100644 --- a/drivers/video/fbdev/grvga.c +++ b/drivers/video/fbdev/grvga.c @@ -70,7 +70,7 @@ static const struct fb_videomode grvga_modedb[] = { } }; -static struct fb_fix_screeninfo grvga_fix = { +static const struct fb_fix_screeninfo grvga_fix = { .id = "AG SVGACTRL", .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_PSEUDOCOLOR, diff --git a/drivers/video/fbdev/i810/i810_main.c b/drivers/video/fbdev/i810/i810_main.c index 2488baab7c89..d18f7b31932c 100644 --- a/drivers/video/fbdev/i810/i810_main.c +++ b/drivers/video/fbdev/i810/i810_main.c @@ -107,7 +107,7 @@ static const char * const i810_pci_list[] = { "Intel(R) 815 (Internal Graphics with AGP) Framebuffer Device" }; -static struct pci_device_id i810fb_pci_tbl[] = { +static const struct pci_device_id i810fb_pci_tbl[] = { { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82810_IG1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { PCI_VENDOR_ID_INTEL, 
PCI_DEVICE_ID_INTEL_82810_IG3, @@ -1542,7 +1542,7 @@ static int i810fb_cursor(struct fb_info *info, struct fb_cursor *cursor) return 0; } -static struct fb_ops i810fb_ops = { +static const struct fb_ops i810fb_ops = { .owner = THIS_MODULE, .fb_open = i810fb_open, .fb_release = i810fb_release, diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c index 4363c64d74e8..ecdcf358ad5e 100644 --- a/drivers/video/fbdev/imsttfb.c +++ b/drivers/video/fbdev/imsttfb.c @@ -1318,7 +1318,7 @@ imsttfb_ioctl(struct fb_info *info, u_int cmd, u_long arg) } } -static struct pci_device_id imsttfb_pci_tbl[] = { +static const struct pci_device_id imsttfb_pci_tbl[] = { { PCI_VENDOR_ID_IMS, PCI_DEVICE_ID_IMS_TT128, PCI_ANY_ID, PCI_ANY_ID, 0, 0, IBM }, { PCI_VENDOR_ID_IMS, PCI_DEVICE_ID_IMS_TT3D, diff --git a/drivers/video/fbdev/intelfb/intelfbdrv.c b/drivers/video/fbdev/intelfb/intelfbdrv.c index ffc391208b27..d7463a2a5d83 100644 --- a/drivers/video/fbdev/intelfb/intelfbdrv.c +++ b/drivers/video/fbdev/intelfb/intelfbdrv.c @@ -173,7 +173,7 @@ static int intelfb_set_fbinfo(struct intelfb_info *dinfo); #define INTELFB_CLASS_MASK 0 #endif -static struct pci_device_id intelfb_pci_table[] = { +static const struct pci_device_id intelfb_pci_table[] = { { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_830M, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_830M }, { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_845G, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_845G }, { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_85XGM, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_85XGM }, diff --git a/drivers/video/fbdev/kyro/fbdev.c b/drivers/video/fbdev/kyro/fbdev.c index f77478fb3d14..a7bd9f25911b 100644 --- a/drivers/video/fbdev/kyro/fbdev.c +++ b/drivers/video/fbdev/kyro/fbdev.c @@ -633,7 +633,7 @@ static int kyrofb_ioctl(struct fb_info *info, return 0; } -static struct pci_device_id kyrofb_pci_tbl[] = { +static const struct pci_device_id kyrofb_pci_tbl[] = { { PCI_VENDOR_ID_ST, PCI_DEVICE_ID_STG4000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { 0, } diff --git a/drivers/video/fbdev/leo.c b/drivers/video/fbdev/leo.c index 62e59dc90ee6..71862188f528 100644 --- a/drivers/video/fbdev/leo.c +++ b/drivers/video/fbdev/leo.c @@ -619,8 +619,8 @@ static int leo_probe(struct platform_device *op) dev_set_drvdata(&op->dev, info); - printk(KERN_INFO "%s: leo at %lx:%lx\n", - dp->full_name, + printk(KERN_INFO "%pOF: leo at %lx:%lx\n", + dp, par->which_io, info->fix.smem_start); return 0; diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c index f6a0b9af97a9..b9b284d79631 100644 --- a/drivers/video/fbdev/matrox/matroxfb_base.c +++ b/drivers/video/fbdev/matrox/matroxfb_base.c @@ -1198,7 +1198,7 @@ static int matroxfb_blank(int blank, struct fb_info *info) return 0; } -static struct fb_ops matroxfb_ops = { +static const struct fb_ops matroxfb_ops = { .owner = THIS_MODULE, .fb_open = matroxfb_open, .fb_release = matroxfb_release, @@ -1573,14 +1573,14 @@ static struct board { NULL}}; #ifndef MODULE -static struct fb_videomode defaultmode = { +static const struct fb_videomode defaultmode = { /* 640x480 @ 60Hz, 31.5 kHz */ NULL, 60, 640, 480, 39721, 40, 24, 32, 11, 96, 2, 0, FB_VMODE_NONINTERLACED }; -#endif /* !MODULE */ static int hotplug = 0; +#endif /* !MODULE */ static void setDefaultOutputs(struct matrox_fb_info *minfo) { @@ -1623,7 +1623,7 @@ static int initMatrox2(struct matrox_fb_info *minfo, struct board 
*b) unsigned int memsize; int err; - static struct pci_device_id intel_82437[] = { + static const struct pci_device_id intel_82437[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437) }, { }, }; @@ -1794,9 +1794,7 @@ static int initMatrox2(struct matrox_fb_info *minfo, struct board *b) minfo->fbops = matroxfb_ops; minfo->fbcon.fbops = &minfo->fbops; minfo->fbcon.pseudo_palette = minfo->cmap; - /* after __init time we are like module... no logo */ - minfo->fbcon.flags = hotplug ? FBINFO_FLAG_MODULE : FBINFO_FLAG_DEFAULT; - minfo->fbcon.flags |= FBINFO_PARTIAL_PAN_OK | /* Prefer panning for scroll under MC viewer/edit */ + minfo->fbcon.flags = FBINFO_PARTIAL_PAN_OK | /* Prefer panning for scroll under MC viewer/edit */ FBINFO_HWACCEL_COPYAREA | /* We have hw-assisted bmove */ FBINFO_HWACCEL_FILLRECT | /* And fillrect */ FBINFO_HWACCEL_IMAGEBLIT | /* And imageblit */ @@ -2116,7 +2114,7 @@ static void pci_remove_matrox(struct pci_dev* pdev) { matroxfb_remove(minfo, 1); } -static struct pci_device_id matroxfb_devices[] = { +static const struct pci_device_id matroxfb_devices[] = { #ifdef CONFIG_FB_MATROX_MILLENIUM {PCI_VENDOR_ID_MATROX, PCI_DEVICE_ID_MATROX_MIL, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, diff --git a/drivers/video/fbdev/maxinefb.c b/drivers/video/fbdev/maxinefb.c index cab7333208ea..5bb1b5c308a7 100644 --- a/drivers/video/fbdev/maxinefb.c +++ b/drivers/video/fbdev/maxinefb.c @@ -39,7 +39,7 @@ static struct fb_info fb_info; -static struct fb_var_screeninfo maxinefb_defined = { +static const struct fb_var_screeninfo maxinefb_defined = { .xres = 1024, .yres = 768, .xres_virtual = 1024, diff --git a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c index f9ec5c0484fa..cd372527c9e4 100644 --- a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c +++ b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c @@ -982,7 +982,7 @@ static inline int mb862xx_pci_gdc_init(struct mb862xxfb_par *par) #define CHIP_ID(id) \ { PCI_DEVICE(PCI_VENDOR_ID_FUJITSU_LIMITED, id) } -static struct pci_device_id mb862xx_pci_tbl[] = { +static const struct pci_device_id mb862xx_pci_tbl[] = { /* MB86295/MB86296 */ CHIP_ID(PCI_DEVICE_ID_FUJITSU_CORALP), CHIP_ID(PCI_DEVICE_ID_FUJITSU_CORALPA), diff --git a/drivers/video/fbdev/mbx/mbxfb.c b/drivers/video/fbdev/mbx/mbxfb.c index 698df9543e30..539b85da0897 100644 --- a/drivers/video/fbdev/mbx/mbxfb.c +++ b/drivers/video/fbdev/mbx/mbxfb.c @@ -79,7 +79,7 @@ struct mbxfb_info { }; -static struct fb_var_screeninfo mbxfb_default = { +static const struct fb_var_screeninfo mbxfb_default = { .xres = 640, .yres = 480, .xres_virtual = 640, @@ -102,7 +102,7 @@ static struct fb_var_screeninfo mbxfb_default = { .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, }; -static struct fb_fix_screeninfo mbxfb_fix = { +static const struct fb_fix_screeninfo mbxfb_fix = { .id = "MBX", .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_TRUECOLOR, diff --git a/drivers/video/fbdev/neofb.c b/drivers/video/fbdev/neofb.c index db023a97d1ea..5d3a444083f7 100644 --- a/drivers/video/fbdev/neofb.c +++ b/drivers/video/fbdev/neofb.c @@ -2138,7 +2138,7 @@ static void neofb_remove(struct pci_dev *dev) } } -static struct pci_device_id neofb_devices[] = { +static const struct pci_device_id neofb_devices[] = { {PCI_VENDOR_ID_NEOMAGIC, PCI_CHIP_NM2070, PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_NEOMAGIC_NM2070}, diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c index ce7dab7299fe..418a2d0d06a9 100644 --- a/drivers/video/fbdev/nvidia/nvidia.c +++ 
b/drivers/video/fbdev/nvidia/nvidia.c @@ -55,7 +55,7 @@ /* HW cursor parameters */ #define MAX_CURS 32 -static struct pci_device_id nvidiafb_pci_tbl[] = { +static const struct pci_device_id nvidiafb_pci_tbl[] = { {PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY << 16, 0xff0000, 0}, { 0, } diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c index 9be884b0c778..90d38de34479 100644 --- a/drivers/video/fbdev/offb.c +++ b/drivers/video/fbdev/offb.c @@ -383,7 +383,7 @@ static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_STATIC_PSEUDOCOLOR; } -static void __init offb_init_fb(const char *name, const char *full_name, +static void __init offb_init_fb(const char *name, int width, int height, int depth, int pitch, unsigned long address, int foreign_endian, struct device_node *dp) @@ -402,14 +402,13 @@ static void __init offb_init_fb(const char *name, const char *full_name, "Using unsupported %dx%d %s at %lx, depth=%d, pitch=%d\n", width, height, name, address, depth, pitch); if (depth != 8 && depth != 15 && depth != 16 && depth != 32) { - printk(KERN_ERR "%s: can't use depth = %d\n", full_name, - depth); + printk(KERN_ERR "%pOF: can't use depth = %d\n", dp, depth); release_mem_region(res_start, res_size); return; } info = framebuffer_alloc(sizeof(u32) * 16, NULL); - + if (info == 0) { release_mem_region(res_start, res_size); return; @@ -515,7 +514,7 @@ static void __init offb_init_fb(const char *name, const char *full_name, if (register_framebuffer(info) < 0) goto out_err; - fb_info(info, "Open Firmware frame buffer device on %s\n", full_name); + fb_info(info, "Open Firmware frame buffer device on %pOF\n", dp); return; out_err: @@ -644,7 +643,6 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node) if (strcmp(dp->name, "valkyrie") == 0) address += 0x1000; offb_init_fb(no_real_node ? "bootx" : dp->name, - no_real_node ? "display" : dp->full_name, width, height, depth, pitch, address, foreign_endian, no_real_node ? 
NULL : dp); } diff --git a/drivers/video/fbdev/omap/lcd_mipid.c b/drivers/video/fbdev/omap/lcd_mipid.c index df9e6ebcfad5..e3a85432f926 100644 --- a/drivers/video/fbdev/omap/lcd_mipid.c +++ b/drivers/video/fbdev/omap/lcd_mipid.c @@ -496,7 +496,7 @@ static void mipid_cleanup(struct lcd_panel *panel) mipid_esd_stop_check(md); } -static struct lcd_panel mipid_panel = { +static const struct lcd_panel mipid_panel = { .config = OMAP_LCDC_PANEL_TFT, .bpp = 16, diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c index f14691ce8d02..6cd759c01037 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c @@ -18,7 +18,7 @@ #include <video/omapfb_dss.h> -static struct omap_video_timings lb035q02_timings = { +static const struct omap_video_timings lb035q02_timings = { .x_res = 320, .y_res = 240, diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c index 468560a6daae..f2c2fef3db74 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c @@ -509,7 +509,7 @@ static struct attribute *bldev_attrs[] = { NULL, }; -static struct attribute_group bldev_attr_group = { +static const struct attribute_group bldev_attr_group = { .attrs = bldev_attrs, }; diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c index b529a8c2b652..57e9e146ff74 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c @@ -41,7 +41,7 @@ struct panel_drv_data { struct spi_device *spi_dev; }; -static struct omap_video_timings td028ttec1_panel_timings = { +static const struct omap_video_timings td028ttec1_panel_timings = { .x_res = 480, .y_res = 640, .pixelclock = 22153000, diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c index 51e628b85f4a..ea8c79a42b41 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c @@ -282,7 +282,7 @@ static struct attribute *tpo_td043_attrs[] = { NULL, }; -static struct attribute_group tpo_td043_attr_group = { +static const struct attribute_group tpo_td043_attr_group = { .attrs = tpo_td043_attrs, }; diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c index d356a252ab4a..f1eb8b0f8a2a 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c @@ -16,6 +16,7 @@ #include <linux/err.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/of_graph.h> #include <linux/seq_file.h> #include <video/omapfb_dss.h> @@ -128,7 +129,7 @@ static struct device_node *omapdss_of_get_remote_port(const struct device_node * { struct device_node *np; - np = of_parse_phandle(node, "remote-endpoint", 0); + np = of_graph_get_remote_endpoint(node); if (!np) return NULL; diff --git a/drivers/video/fbdev/p9100.c b/drivers/video/fbdev/p9100.c index 1f6ee76af878..64de5cda541d 100644 --- a/drivers/video/fbdev/p9100.c +++ b/drivers/video/fbdev/p9100.c @@ -304,8 +304,8 @@ static int 
p9100_probe(struct platform_device *op) dev_set_drvdata(&op->dev, info); - printk(KERN_INFO "%s: p9100 at %lx:%lx\n", - dp->full_name, + printk(KERN_INFO "%pOF: p9100 at %lx:%lx\n", + dp, par->which_io, info->fix.smem_start); return 0; diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c index 1a4070f719c2..bd6c2f5f6095 100644 --- a/drivers/video/fbdev/pm2fb.c +++ b/drivers/video/fbdev/pm2fb.c @@ -1732,7 +1732,7 @@ static void pm2fb_remove(struct pci_dev *pdev) framebuffer_release(info); } -static struct pci_device_id pm2fb_id_table[] = { +static const struct pci_device_id pm2fb_id_table[] = { { PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TVP4020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { PCI_VENDOR_ID_3DLABS, PCI_DEVICE_ID_3DLABS_PERMEDIA2, diff --git a/drivers/video/fbdev/pm3fb.c b/drivers/video/fbdev/pm3fb.c index 6ff5077a2e15..6130aa56a1e9 100644 --- a/drivers/video/fbdev/pm3fb.c +++ b/drivers/video/fbdev/pm3fb.c @@ -1479,7 +1479,7 @@ static void pm3fb_remove(struct pci_dev *dev) } } -static struct pci_device_id pm3fb_id_table[] = { +static const struct pci_device_id pm3fb_id_table[] = { { PCI_VENDOR_ID_3DLABS, 0x0a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { 0, } diff --git a/drivers/video/fbdev/pmag-aa-fb.c b/drivers/video/fbdev/pmag-aa-fb.c index 39922f072db4..ca7e9390d1e7 100644 --- a/drivers/video/fbdev/pmag-aa-fb.c +++ b/drivers/video/fbdev/pmag-aa-fb.c @@ -67,7 +67,7 @@ struct aafb_par { struct bt431_regs __iomem *bt431; }; -static struct fb_var_screeninfo aafb_defined = { +static const struct fb_var_screeninfo aafb_defined = { .xres = 1280, .yres = 1024, .xres_virtual = 2048, @@ -90,7 +90,7 @@ static struct fb_var_screeninfo aafb_defined = { .vmode = FB_VMODE_NONINTERLACED, }; -static struct fb_fix_screeninfo aafb_fix = { +static const struct fb_fix_screeninfo aafb_fix = { .id = "PMAG-AA", .smem_len = (2048 * 1024), .type = FB_TYPE_PACKED_PIXELS, diff --git a/drivers/video/fbdev/pmag-ba-fb.c b/drivers/video/fbdev/pmag-ba-fb.c index 1fd02f40708e..3b9249449ea6 100644 --- a/drivers/video/fbdev/pmag-ba-fb.c +++ b/drivers/video/fbdev/pmag-ba-fb.c @@ -43,7 +43,7 @@ struct pmagbafb_par { }; -static struct fb_var_screeninfo pmagbafb_defined = { +static const struct fb_var_screeninfo pmagbafb_defined = { .xres = 1024, .yres = 864, .xres_virtual = 1024, @@ -67,7 +67,7 @@ static struct fb_var_screeninfo pmagbafb_defined = { .vmode = FB_VMODE_NONINTERLACED, }; -static struct fb_fix_screeninfo pmagbafb_fix = { +static const struct fb_fix_screeninfo pmagbafb_fix = { .id = "PMAG-BA", .smem_len = (1024 * 1024), .type = FB_TYPE_PACKED_PIXELS, diff --git a/drivers/video/fbdev/pmagb-b-fb.c b/drivers/video/fbdev/pmagb-b-fb.c index 46e96c451506..e58df36233c4 100644 --- a/drivers/video/fbdev/pmagb-b-fb.c +++ b/drivers/video/fbdev/pmagb-b-fb.c @@ -44,7 +44,7 @@ struct pmagbbfb_par { }; -static struct fb_var_screeninfo pmagbbfb_defined = { +static const struct fb_var_screeninfo pmagbbfb_defined = { .bits_per_pixel = 8, .red.length = 8, .green.length = 8, @@ -57,7 +57,7 @@ static struct fb_var_screeninfo pmagbbfb_defined = { .vmode = FB_VMODE_NONINTERLACED, }; -static struct fb_fix_screeninfo pmagbbfb_fix = { +static const struct fb_fix_screeninfo pmagbbfb_fix = { .id = "PMAGB-BA", .smem_len = (2048 * 1024), .type = FB_TYPE_PACKED_PIXELS, diff --git a/drivers/video/fbdev/ps3fb.c b/drivers/video/fbdev/ps3fb.c index b269abd932aa..5ed2db39d823 100644 --- a/drivers/video/fbdev/ps3fb.c +++ b/drivers/video/fbdev/ps3fb.c @@ -952,7 +952,7 @@ static struct fb_ops ps3fb_ops = { .fb_compat_ioctl = ps3fb_ioctl }; 
-static struct fb_fix_screeninfo ps3fb_fix = { +static const struct fb_fix_screeninfo ps3fb_fix = { .id = DEVICE_NAME, .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_TRUECOLOR, diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c index a2564ab91e62..867c5218968f 100644 --- a/drivers/video/fbdev/pvr2fb.c +++ b/drivers/video/fbdev/pvr2fb.c @@ -154,7 +154,7 @@ static struct fb_fix_screeninfo pvr2_fix = { .accel = FB_ACCEL_NONE, }; -static struct fb_var_screeninfo pvr2_var = { +static const struct fb_var_screeninfo pvr2_var = { .xres = 640, .yres = 480, .xres_virtual = 640, @@ -966,7 +966,7 @@ static void pvr2fb_pci_remove(struct pci_dev *pdev) pci_release_regions(pdev); } -static struct pci_device_id pvr2fb_pci_tbl[] = { +static const struct pci_device_id pvr2fb_pci_tbl[] = { { PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_NEON250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { 0, }, diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c index 50bce45e7f3d..933619da1a94 100644 --- a/drivers/video/fbdev/pxa3xx-gcu.c +++ b/drivers/video/fbdev/pxa3xx-gcu.c @@ -626,8 +626,8 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev) /* request the IRQ */ irq = platform_get_irq(pdev, 0); if (irq < 0) { - dev_err(dev, "no IRQ defined\n"); - return -ENODEV; + dev_err(dev, "no IRQ defined: %d\n", irq); + return irq; } ret = devm_request_irq(dev, irq, pxa3xx_gcu_handle_irq, diff --git a/drivers/video/fbdev/q40fb.c b/drivers/video/fbdev/q40fb.c index 04ea330ccf5d..0b93aa964d43 100644 --- a/drivers/video/fbdev/q40fb.c +++ b/drivers/video/fbdev/q40fb.c @@ -36,7 +36,7 @@ static struct fb_fix_screeninfo q40fb_fix = { .accel = FB_ACCEL_NONE, }; -static struct fb_var_screeninfo q40fb_var = { +static const struct fb_var_screeninfo q40fb_var = { .xres = 1024, .yres = 512, .xres_virtual = 1024, diff --git a/drivers/video/fbdev/riva/fbdev.c b/drivers/video/fbdev/riva/fbdev.c index 2ef26ad99341..1ea78bb911fb 100644 --- a/drivers/video/fbdev/riva/fbdev.c +++ b/drivers/video/fbdev/riva/fbdev.c @@ -101,7 +101,7 @@ static int rivafb_blank(int blank, struct fb_info *info); * * ------------------------------------------------------------------------- */ -static struct pci_device_id rivafb_pci_tbl[] = { +static const struct pci_device_id rivafb_pci_tbl[] = { { PCI_VENDOR_ID_NVIDIA_SGS, PCI_DEVICE_ID_NVIDIA_SGS_RIVA128, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_TNT, diff --git a/drivers/video/fbdev/s3fb.c b/drivers/video/fbdev/s3fb.c index 13b109073c63..d63f23e26f7d 100644 --- a/drivers/video/fbdev/s3fb.c +++ b/drivers/video/fbdev/s3fb.c @@ -1483,7 +1483,7 @@ static int s3_pci_resume(struct pci_dev* dev) /* List of boards that we are trying to support */ -static struct pci_device_id s3_devices[] = { +static const struct pci_device_id s3_devices[] = { {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8810), .driver_data = CHIP_XXX_TRIO}, {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8811), .driver_data = CHIP_XXX_TRIO}, {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8812), .driver_data = CHIP_M65_AURORA64VP}, diff --git a/drivers/video/fbdev/savage/savagefb_driver.c b/drivers/video/fbdev/savage/savagefb_driver.c index c30a91c1137c..c20468362f11 100644 --- a/drivers/video/fbdev/savage/savagefb_driver.c +++ b/drivers/video/fbdev/savage/savagefb_driver.c @@ -2429,7 +2429,7 @@ static int savagefb_resume(struct pci_dev* dev) } -static struct pci_device_id savagefb_devices[] = { +static const struct pci_device_id savagefb_devices[] = { {PCI_VENDOR_ID_S3, PCI_CHIP_SUPSAV_MX128, PCI_ANY_ID, PCI_ANY_ID, 0, 
0, FB_ACCEL_SUPERSAVAGE}, diff --git a/drivers/video/fbdev/sis/init301.c b/drivers/video/fbdev/sis/init301.c index 20f7234e809e..1ec9c3e0e1d8 100644 --- a/drivers/video/fbdev/sis/init301.c +++ b/drivers/video/fbdev/sis/init301.c @@ -6848,8 +6848,6 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short if(SiS_Pr->SiS_VGAHDE >= 1280) { tempch = 20; tempbx &= ~0x20; - } else if(SiS_Pr->SiS_VGAHDE >= 1024) { - tempch = 25; } else { tempch = 25; /* OK */ } @@ -7964,14 +7962,9 @@ SiS_SetCHTVReg(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short } } } else { /* ---- PAL ---- */ - /* We don't play around with FSCI in PAL mode */ - if(resindex == 0x04) { - SiS_SetCH70xxANDOR(SiS_Pr,0x20,0x00,0xEF); /* loop filter off */ - SiS_SetCH70xxANDOR(SiS_Pr,0x21,0x01,0xFE); /* ACIV on */ - } else { - SiS_SetCH70xxANDOR(SiS_Pr,0x20,0x00,0xEF); /* loop filter off */ - SiS_SetCH70xxANDOR(SiS_Pr,0x21,0x01,0xFE); /* ACIV on */ - } + /* We don't play around with FSCI in PAL mode */ + SiS_SetCH70xxANDOR(SiS_Pr, 0x20, 0x00, 0xEF); /* loop filter off */ + SiS_SetCH70xxANDOR(SiS_Pr, 0x21, 0x01, 0xFE); /* ACIV on */ } #endif /* 300 */ @@ -9657,8 +9650,6 @@ SetDelayComp(struct SiS_Private *SiS_Pr, unsigned short ModeNo) delay = 0x0a; } else if(IS_SIS740) { delay = 0x00; - } else if(SiS_Pr->ChipType < SIS_330) { - delay = 0x0c; } else { delay = 0x0c; } diff --git a/drivers/video/fbdev/skeletonfb.c b/drivers/video/fbdev/skeletonfb.c index e219a0a22077..7f4e908330bf 100644 --- a/drivers/video/fbdev/skeletonfb.c +++ b/drivers/video/fbdev/skeletonfb.c @@ -84,7 +84,7 @@ struct xxx_par; * if we don't use modedb. If we do use modedb see xxxfb_init how to use it * to get a fb_var_screeninfo. Otherwise define a default var as well. */ -static struct fb_fix_screeninfo xxxfb_fix = { +static const struct fb_fix_screeninfo xxxfb_fix = { .id = "FB's name", .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_PSEUDOCOLOR, @@ -866,7 +866,7 @@ static int xxxfb_resume(struct pci_dev *dev) #define xxxfb_resume NULL #endif /* CONFIG_PM */ -static struct pci_device_id xxxfb_id_table[] = { +static const struct pci_device_id xxxfb_id_table[] = { { PCI_VENDOR_ID_XXX, PCI_DEVICE_ID_XXX, PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY << 16, PCI_CLASS_MASK, 0 }, diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c index 67e314fdd947..076dd2711630 100644 --- a/drivers/video/fbdev/sm501fb.c +++ b/drivers/video/fbdev/sm501fb.c @@ -46,7 +46,7 @@ static char *fb_mode = "640x480-16@60"; static unsigned long default_bpp = 16; -static struct fb_videomode sm501_default_mode = { +static const struct fb_videomode sm501_default_mode = { .refresh = 60, .xres = 640, .yres = 480, diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c index 73cb4ffff3c5..502d0de2feec 100644 --- a/drivers/video/fbdev/sm712fb.c +++ b/drivers/video/fbdev/sm712fb.c @@ -33,8 +33,8 @@ #include "sm712.h" /* -* Private structure -*/ + * Private structure + */ struct smtcfb_info { struct pci_dev *pdev; struct fb_info *fb; @@ -785,7 +785,7 @@ static void __init sm7xx_vga_setup(char *options) smtc_scr_info.lfb_height = 0; smtc_scr_info.lfb_depth = 0; - pr_debug("sm7xx_vga_setup = %s\n", options); + pr_debug("%s = %s\n", __func__, options); for (i = 0; i < ARRAY_SIZE(vesa_mode_table); i++) { if (strstr(options, vesa_mode_table[i].index)) { @@ -798,8 +798,8 @@ static void __init sm7xx_vga_setup(char *options) } } -static void sm712_setpalette(int regno, unsigned red, unsigned green, - unsigned blue, struct 
fb_info *info) +static void sm712_setpalette(int regno, unsigned int red, unsigned int green, + unsigned int blue, struct fb_info *info) { /* set bit 5:4 = 01 (write LCD RAM only) */ smtc_seqw(0x66, (smtc_seqr(0x66) & 0xC3) | 0x10); @@ -896,8 +896,9 @@ static int smtc_blank(int blank_mode, struct fb_info *info) return 0; } -static int smtc_setcolreg(unsigned regno, unsigned red, unsigned green, - unsigned blue, unsigned trans, struct fb_info *info) +static int smtc_setcolreg(unsigned int regno, unsigned int red, + unsigned int green, unsigned int blue, + unsigned int trans, struct fb_info *info) { struct smtcfb_info *sfb; u32 val; @@ -1477,7 +1478,7 @@ static int smtcfb_pci_probe(struct pci_dev *pdev, } /* can support 32 bpp */ - if (15 == sfb->fb->var.bits_per_pixel) + if (sfb->fb->var.bits_per_pixel == 15) sfb->fb->var.bits_per_pixel = 16; sfb->fb->var.xres_virtual = sfb->fb->var.xres; diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c index 449fceaf79d5..2275e80b5776 100644 --- a/drivers/video/fbdev/smscufx.c +++ b/drivers/video/fbdev/smscufx.c @@ -122,7 +122,7 @@ static const u32 smscufx_info_flags = FBINFO_DEFAULT | FBINFO_READS_FAST | FBINFO_VIRTFB | FBINFO_HWACCEL_IMAGEBLIT | FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA | FBINFO_MISC_ALWAYS_SETPAR; -static struct usb_device_id id_table[] = { +static const struct usb_device_id id_table[] = { {USB_DEVICE(0x0424, 0x9d00),}, {USB_DEVICE(0x0424, 0x9d01),}, {}, diff --git a/drivers/video/fbdev/sunxvr1000.c b/drivers/video/fbdev/sunxvr1000.c index fb37f6e05391..8fe37c0ef2f5 100644 --- a/drivers/video/fbdev/sunxvr1000.c +++ b/drivers/video/fbdev/sunxvr1000.c @@ -33,8 +33,8 @@ static int gfb_get_props(struct gfb_info *gp) gp->depth = of_getintprop_default(gp->of_node, "depth", 32); if (!gp->width || !gp->height) { - printk(KERN_ERR "gfb: Critical properties missing for %s\n", - gp->of_node->full_name); + printk(KERN_ERR "gfb: Critical properties missing for %pOF\n", + gp->of_node); return -EINVAL; } @@ -151,12 +151,12 @@ static int gfb_probe(struct platform_device *op) if (err) goto err_unmap_fb; - printk("gfb: Found device at %s\n", dp->full_name); + printk("gfb: Found device at %pOF\n", dp); err = register_framebuffer(info); if (err < 0) { - printk(KERN_ERR "gfb: Could not register framebuffer %s\n", - dp->full_name); + printk(KERN_ERR "gfb: Could not register framebuffer %pOF\n", + dp); goto err_unmap_fb; } diff --git a/drivers/video/fbdev/sunxvr2500.c b/drivers/video/fbdev/sunxvr2500.c index 1a053292f2eb..544465ba1dc0 100644 --- a/drivers/video/fbdev/sunxvr2500.c +++ b/drivers/video/fbdev/sunxvr2500.c @@ -220,7 +220,7 @@ err_out: return err; } -static struct pci_device_id s3d_pci_table[] = { +static const struct pci_device_id s3d_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_3DLABS, 0x002c), }, { PCI_DEVICE(PCI_VENDOR_ID_3DLABS, 0x002d), }, { PCI_DEVICE(PCI_VENDOR_ID_3DLABS, 0x002e), }, diff --git a/drivers/video/fbdev/sunxvr500.c b/drivers/video/fbdev/sunxvr500.c index dc0d886e4e7e..bc595937df08 100644 --- a/drivers/video/fbdev/sunxvr500.c +++ b/drivers/video/fbdev/sunxvr500.c @@ -393,7 +393,7 @@ err_out: return err; } -static struct pci_device_id e3d_pci_table[] = { +static const struct pci_device_id e3d_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_3DLABS, 0x7a0), }, { PCI_DEVICE(0x1091, 0x7a0), }, { PCI_DEVICE(PCI_VENDOR_ID_3DLABS, 0x7a2), }, diff --git a/drivers/video/fbdev/tcx.c b/drivers/video/fbdev/tcx.c index 54ad08854c94..c98d8a569ccd 100644 --- a/drivers/video/fbdev/tcx.c +++ b/drivers/video/fbdev/tcx.c 
@@ -467,8 +467,8 @@ static int tcx_probe(struct platform_device *op) dev_set_drvdata(&op->dev, info); - printk(KERN_INFO "%s: TCX at %lx:%lx, %s\n", - dp->full_name, + printk(KERN_INFO "%pOF: TCX at %lx:%lx, %s\n", + dp, par->which_io, info->fix.smem_start, par->lowdepth ? "8-bit only" : "24-bit depth"); diff --git a/drivers/video/fbdev/tdfxfb.c b/drivers/video/fbdev/tdfxfb.c index d5fa313806fe..dec1fed9880e 100644 --- a/drivers/video/fbdev/tdfxfb.c +++ b/drivers/video/fbdev/tdfxfb.c @@ -120,7 +120,7 @@ static const struct fb_var_screeninfo tdfx_var = { static int tdfxfb_probe(struct pci_dev *pdev, const struct pci_device_id *id); static void tdfxfb_remove(struct pci_dev *pdev); -static struct pci_device_id tdfxfb_id_table[] = { +static const struct pci_device_id tdfxfb_id_table[] = { { PCI_VENDOR_ID_3DFX, PCI_DEVICE_ID_3DFX_BANSHEE, PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY << 16, 0xff0000, 0 }, diff --git a/drivers/video/fbdev/tridentfb.c b/drivers/video/fbdev/tridentfb.c index 8a5bbc13082e..284706184b1b 100644 --- a/drivers/video/fbdev/tridentfb.c +++ b/drivers/video/fbdev/tridentfb.c @@ -1737,7 +1737,7 @@ static void trident_pci_remove(struct pci_dev *dev) } /* List of boards that we are trying to support */ -static struct pci_device_id trident_devices[] = { +static const struct pci_device_id trident_devices[] = { {PCI_VENDOR_ID_TRIDENT, BLADE3D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_TRIDENT, CYBERBLADEi7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_TRIDENT, CYBERBLADEi7D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c index 05ef657235df..ef08a104fb42 100644 --- a/drivers/video/fbdev/udlfb.c +++ b/drivers/video/fbdev/udlfb.c @@ -54,7 +54,7 @@ static const u32 udlfb_info_flags = FBINFO_DEFAULT | FBINFO_READS_FAST | * which is compatible with all known USB 2.0 era graphics chips and firmware, * but allows DisplayLink to increment those for any future incompatible chips */ -static struct usb_device_id id_table[] = { +static const struct usb_device_id id_table[] = { {.idVendor = 0x17e9, .bInterfaceClass = 0xff, .bInterfaceSubClass = 0x00, @@ -1465,7 +1465,7 @@ static ssize_t metrics_reset_store(struct device *fbdev, return count; } -static struct bin_attribute edid_attr = { +static const struct bin_attribute edid_attr = { .attr.name = "edid", .attr.mode = 0666, .size = EDID_LENGTH, @@ -1655,7 +1655,6 @@ static int dlfb_usb_probe(struct usb_interface *interface, error: if (dev) { - kref_put(&dev->kref, dlfb_free); /* ref for framebuffer */ kref_put(&dev->kref, dlfb_free); /* last ref from kref_init */ /* dev has been deallocated. 
Do not dereference */ diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c index 6f8c0b9fc558..73676eb0244a 100644 --- a/drivers/video/fbdev/uvesafb.c +++ b/drivers/video/fbdev/uvesafb.c @@ -1666,7 +1666,7 @@ static struct attribute *uvesafb_dev_attrs[] = { NULL, }; -static struct attribute_group uvesafb_dev_attgrp = { +static const struct attribute_group uvesafb_dev_attgrp = { .name = NULL, .attrs = uvesafb_dev_attrs, }; diff --git a/drivers/video/fbdev/vermilion/vermilion.c b/drivers/video/fbdev/vermilion/vermilion.c index ce4c4729a5e8..6f8d444eb0e3 100644 --- a/drivers/video/fbdev/vermilion/vermilion.c +++ b/drivers/video/fbdev/vermilion/vermilion.c @@ -55,7 +55,7 @@ static struct list_head global_has_mode; static struct fb_ops vmlfb_ops; static struct vml_sys *subsys = NULL; static char *vml_default_mode = "1024x768@60"; -static struct fb_videomode defaultmode = { +static const struct fb_videomode defaultmode = { NULL, 60, 1024, 768, 12896, 144, 24, 29, 3, 136, 6, 0, FB_VMODE_NONINTERLACED }; @@ -1044,7 +1044,7 @@ static struct fb_ops vmlfb_ops = { .fb_setcolreg = vmlfb_setcolreg }; -static struct pci_device_id vml_ids[] = { +static const struct pci_device_id vml_ids[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, VML_DEVICE_VDC)}, {0} }; diff --git a/drivers/video/fbdev/via/via-core.c b/drivers/video/fbdev/via/via-core.c index 1d28e16888e9..77774d8abf94 100644 --- a/drivers/video/fbdev/via/via-core.c +++ b/drivers/video/fbdev/via/via-core.c @@ -724,7 +724,7 @@ static void via_pci_remove(struct pci_dev *pdev) } -static struct pci_device_id via_pci_table[] = { +static const struct pci_device_id via_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CLE266_DID), .driver_data = UNICHROME_CLE266 }, { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K400_DID), diff --git a/drivers/video/fbdev/vt8623fb.c b/drivers/video/fbdev/vt8623fb.c index dd0f18e42d3e..5cac871db3ee 100644 --- a/drivers/video/fbdev/vt8623fb.c +++ b/drivers/video/fbdev/vt8623fb.c @@ -81,7 +81,7 @@ static struct vga_regset vt8623_line_compare_regs[] = {{0x18, 0, 7}, {0x07, 4, static struct vga_regset vt8623_fetch_count_regs[] = {{0x1C, 0, 7}, {0x1D, 0, 1}, VGA_REGSET_END}; static struct vga_regset vt8623_start_address_regs[] = {{0x0d, 0, 7}, {0x0c, 0, 7}, {0x34, 0, 7}, {0x48, 0, 1}, VGA_REGSET_END}; -static struct svga_timing_regs vt8623_timing_regs = { +static const struct svga_timing_regs vt8623_timing_regs = { vt8623_h_total_regs, vt8623_h_display_regs, vt8623_h_blank_start_regs, vt8623_h_blank_end_regs, vt8623_h_sync_start_regs, vt8623_h_sync_end_regs, vt8623_v_total_regs, vt8623_v_display_regs, vt8623_v_blank_start_regs, @@ -888,7 +888,7 @@ fail: /* List of boards that we are trying to support */ -static struct pci_device_id vt8623_devices[] = { +static const struct pci_device_id vt8623_devices[] = { {PCI_DEVICE(PCI_VENDOR_ID_VIA, 0x3122)}, {0, 0, 0, 0, 0, 0, 0} }; diff --git a/drivers/video/fbdev/xilinxfb.c b/drivers/video/fbdev/xilinxfb.c index 17dc119c7a98..8628829b470d 100644 --- a/drivers/video/fbdev/xilinxfb.c +++ b/drivers/video/fbdev/xilinxfb.c @@ -41,7 +41,6 @@ #define DRIVER_NAME "xilinxfb" - /* * Xilinx calls it "TFT LCD Controller" though it can also be used for * the VGA port on the Xilinx ML40x board. This is a hardware display @@ -92,15 +91,16 @@ struct xilinxfb_platform_data { u32 xvirt, yvirt; /* resolution of memory buffer */ /* Physical address of framebuffer memory; If non-zero, driver - * will use provided memory address instead of allocating one from - * the consistent pool. 
*/ + * will use provided memory address instead of allocating one from + * the consistent pool. + */ u32 fb_phys; }; /* * Default xilinxfb configuration */ -static struct xilinxfb_platform_data xilinx_fb_default_pdata = { +static const struct xilinxfb_platform_data xilinx_fb_default_pdata = { .xres = 640, .yres = 480, .xvirt = 1024, @@ -110,14 +110,14 @@ static struct xilinxfb_platform_data xilinx_fb_default_pdata = { /* * Here are the default fb_fix_screeninfo and fb_var_screeninfo structures */ -static struct fb_fix_screeninfo xilinx_fb_fix = { +static const struct fb_fix_screeninfo xilinx_fb_fix = { .id = "Xilinx", .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_TRUECOLOR, .accel = FB_ACCEL_NONE }; -static struct fb_var_screeninfo xilinx_fb_var = { +static const struct fb_var_screeninfo xilinx_fb_var = { .bits_per_pixel = BITS_PER_PIXEL, .red = { RED_SHIFT, 8, 0 }, @@ -128,18 +128,18 @@ static struct fb_var_screeninfo xilinx_fb_var = { .activate = FB_ACTIVATE_NOW }; - #define BUS_ACCESS_FLAG 0x1 /* 1 = BUS, 0 = DCR */ #define LITTLE_ENDIAN_ACCESS 0x2 /* LITTLE ENDIAN IO functions */ struct xilinxfb_drvdata { - struct fb_info info; /* FB driver info record */ phys_addr_t regs_phys; /* phys. address of the control - registers */ + * registers + */ void __iomem *regs; /* virt. address of the control - registers */ + * registers + */ #ifdef CONFIG_PPC_DCR dcr_host_t dcr_host; unsigned int dcr_len; @@ -148,7 +148,7 @@ struct xilinxfb_drvdata { dma_addr_t fb_phys; /* phys. address of the frame buffer */ int fb_alloced; /* Flag, was the fb memory alloced? */ - u8 flags; /* features of the driver */ + u8 flags; /* features of the driver */ u32 reg_ctrl_default; @@ -165,7 +165,7 @@ struct xilinxfb_drvdata { * which bus its connected and call the appropriate write API. */ static void xilinx_fb_out32(struct xilinxfb_drvdata *drvdata, u32 offset, - u32 val) + u32 val) { if (drvdata->flags & BUS_ACCESS_FLAG) { if (drvdata->flags & LITTLE_ENDIAN_ACCESS) @@ -195,8 +195,8 @@ static u32 xilinx_fb_in32(struct xilinxfb_drvdata *drvdata, u32 offset) } static int -xilinx_fb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, - unsigned transp, struct fb_info *fbi) +xilinx_fb_setcolreg(unsigned int regno, unsigned int red, unsigned int green, + unsigned int blue, unsigned int transp, struct fb_info *fbi) { u32 *palette = fbi->pseudo_palette; @@ -205,9 +205,11 @@ xilinx_fb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, if (fbi->var.grayscale) { /* Convert color to grayscale. 
- * grayscale = 0.30*R + 0.59*G + 0.11*B */ - red = green = blue = - (red * 77 + green * 151 + blue * 28 + 127) >> 8; + * grayscale = 0.30*R + 0.59*G + 0.11*B + */ + blue = (red * 77 + green * 151 + blue * 28 + 127) >> 8; + green = blue; + red = green; } /* fbi->fix.visual is always FB_VISUAL_TRUECOLOR */ @@ -241,13 +243,11 @@ xilinx_fb_blank(int blank_mode, struct fb_info *fbi) xilinx_fb_out32(drvdata, REG_CTRL, 0); default: break; - } return 0; /* success */ } -static struct fb_ops xilinxfb_ops = -{ +static struct fb_ops xilinxfb_ops = { .owner = THIS_MODULE, .fb_setcolreg = xilinx_fb_setcolreg, .fb_blank = xilinx_fb_blank, @@ -286,7 +286,8 @@ static int xilinxfb_assign(struct platform_device *pdev, } else { drvdata->fb_alloced = 1; drvdata->fb_virt = dma_alloc_coherent(dev, PAGE_ALIGN(fbsize), - &drvdata->fb_phys, GFP_KERNEL); + &drvdata->fb_phys, + GFP_KERNEL); } if (!drvdata->fb_virt) { @@ -300,7 +301,7 @@ static int xilinxfb_assign(struct platform_device *pdev, /* Tell the hardware where the frame buffer is */ xilinx_fb_out32(drvdata, REG_FB_ADDR, drvdata->fb_phys); rc = xilinx_fb_in32(drvdata, REG_FB_ADDR); - /* Endianess detection */ + /* Endianness detection */ if (rc != drvdata->fb_phys) { drvdata->flags |= LITTLE_ENDIAN_ACCESS; xilinx_fb_out32(drvdata, REG_FB_ADDR, drvdata->fb_phys); @@ -310,8 +311,7 @@ static int xilinxfb_assign(struct platform_device *pdev, drvdata->reg_ctrl_default = REG_CTRL_ENABLE; if (pdata->rotate_screen) drvdata->reg_ctrl_default |= REG_CTRL_ROTATE; - xilinx_fb_out32(drvdata, REG_CTRL, - drvdata->reg_ctrl_default); + xilinx_fb_out32(drvdata, REG_CTRL, drvdata->reg_ctrl_default); /* Fill struct fb_info */ drvdata->info.device = dev; @@ -364,7 +364,7 @@ err_regfb: err_cmap: if (drvdata->fb_alloced) dma_free_coherent(dev, PAGE_ALIGN(fbsize), drvdata->fb_virt, - drvdata->fb_phys); + drvdata->fb_phys); else iounmap(drvdata->fb_virt); @@ -435,12 +435,12 @@ static int xilinxfb_of_probe(struct platform_device *pdev) * Fill the resource structure if its direct BUS interface * otherwise fill the dcr_host structure. 
*/ - if (tft_access) { + if (tft_access) drvdata->flags |= BUS_ACCESS_FLAG; - } #ifdef CONFIG_PPC_DCR else { int start; + start = dcr_resource_start(pdev->dev.of_node, 0); drvdata->dcr_len = dcr_resource_len(pdev->dev.of_node, 0); drvdata->dcr_host = dcr_map(pdev->dev.of_node, start, drvdata->dcr_len); @@ -452,19 +452,19 @@ static int xilinxfb_of_probe(struct platform_device *pdev) #endif prop = of_get_property(pdev->dev.of_node, "phys-size", &size); - if ((prop) && (size >= sizeof(u32)*2)) { + if ((prop) && (size >= sizeof(u32) * 2)) { pdata.screen_width_mm = prop[0]; pdata.screen_height_mm = prop[1]; } prop = of_get_property(pdev->dev.of_node, "resolution", &size); - if ((prop) && (size >= sizeof(u32)*2)) { + if ((prop) && (size >= sizeof(u32) * 2)) { pdata.xres = prop[0]; pdata.yres = prop[1]; } prop = of_get_property(pdev->dev.of_node, "virtual-resolution", &size); - if ((prop) && (size >= sizeof(u32)*2)) { + if ((prop) && (size >= sizeof(u32) * 2)) { pdata.xvirt = prop[0]; pdata.yvirt = prop[1]; } @@ -482,7 +482,7 @@ static int xilinxfb_of_remove(struct platform_device *op) } /* Match table for of_platform binding */ -static struct of_device_id xilinxfb_of_match[] = { +static const struct of_device_id xilinxfb_of_match[] = { { .compatible = "xlnx,xps-tft-1.00.a", }, { .compatible = "xlnx,xps-tft-2.00.a", }, { .compatible = "xlnx,xps-tft-2.01.a", }, diff --git a/drivers/video/of_display_timing.c b/drivers/video/of_display_timing.c index 32b0a7543433..8ce0a99bf17c 100644 --- a/drivers/video/of_display_timing.c +++ b/drivers/video/of_display_timing.c @@ -31,8 +31,7 @@ static int parse_timing_property(const struct device_node *np, const char *name, prop = of_find_property(np, name, &length); if (!prop) { - pr_err("%s: could not find property %s\n", - of_node_full_name(np), name); + pr_err("%pOF: could not find property %s\n", np, name); return -EINVAL; } @@ -44,8 +43,7 @@ static int parse_timing_property(const struct device_node *np, const char *name, } else if (cells == 3) { ret = of_property_read_u32_array(np, name, &result->min, cells); } else { - pr_err("%s: illegal timing specification in %s\n", - of_node_full_name(np), name); + pr_err("%pOF: illegal timing specification in %s\n", np, name); return -EINVAL; } @@ -105,8 +103,7 @@ static int of_parse_display_timing(const struct device_node *np, dt->flags |= DISPLAY_FLAGS_DOUBLECLK; if (ret) { - pr_err("%s: error reading timing properties\n", - of_node_full_name(np)); + pr_err("%pOF: error reading timing properties\n", np); return -EINVAL; } @@ -129,8 +126,7 @@ int of_get_display_timing(const struct device_node *np, const char *name, timing_np = of_get_child_by_name(np, name); if (!timing_np) { - pr_err("%s: could not find node '%s'\n", - of_node_full_name(np), name); + pr_err("%pOF: could not find node '%s'\n", np, name); return -ENOENT; } @@ -154,15 +150,13 @@ struct display_timings *of_get_display_timings(const struct device_node *np) timings_np = of_get_child_by_name(np, "display-timings"); if (!timings_np) { - pr_err("%s: could not find display-timings node\n", - of_node_full_name(np)); + pr_err("%pOF: could not find display-timings node\n", np); return NULL; } disp = kzalloc(sizeof(*disp), GFP_KERNEL); if (!disp) { - pr_err("%s: could not allocate struct disp'\n", - of_node_full_name(np)); + pr_err("%pOF: could not allocate struct disp'\n", np); goto dispfail; } @@ -172,28 +166,25 @@ struct display_timings *of_get_display_timings(const struct device_node *np) entry = of_get_next_child(timings_np, NULL); /* if there is no child, it 
is useless to go on */ if (!entry) { - pr_err("%s: no timing specifications given\n", - of_node_full_name(np)); + pr_err("%pOF: no timing specifications given\n", np); goto entryfail; } - pr_debug("%s: using %s as default timing\n", - of_node_full_name(np), entry->name); + pr_debug("%pOF: using %s as default timing\n", np, entry->name); native_mode = entry; disp->num_timings = of_get_child_count(timings_np); if (disp->num_timings == 0) { /* should never happen, as entry was already found above */ - pr_err("%s: no timings specified\n", of_node_full_name(np)); + pr_err("%pOF: no timings specified\n", np); goto entryfail; } disp->timings = kzalloc(sizeof(struct display_timing *) * disp->num_timings, GFP_KERNEL); if (!disp->timings) { - pr_err("%s: could not allocate timings array\n", - of_node_full_name(np)); + pr_err("%pOF: could not allocate timings array\n", np); goto entryfail; } @@ -206,8 +197,8 @@ struct display_timings *of_get_display_timings(const struct device_node *np) dt = kzalloc(sizeof(*dt), GFP_KERNEL); if (!dt) { - pr_err("%s: could not allocate display_timing struct\n", - of_node_full_name(np)); + pr_err("%pOF: could not allocate display_timing struct\n", + np); goto timingfail; } @@ -217,8 +208,8 @@ struct display_timings *of_get_display_timings(const struct device_node *np) * to not encourage wrong devicetrees, fail in case of * an error */ - pr_err("%s: error in timing %d\n", - of_node_full_name(np), disp->num_timings + 1); + pr_err("%pOF: error in timing %d\n", + np, disp->num_timings + 1); kfree(dt); goto timingfail; } @@ -236,8 +227,8 @@ struct display_timings *of_get_display_timings(const struct device_node *np) */ of_node_put(native_mode); - pr_debug("%s: got %d timings. Using timing #%d as default\n", - of_node_full_name(np), disp->num_timings, + pr_debug("%pOF: got %d timings. 
Using timing #%d as default\n", + np, disp->num_timings, disp->native_mode + 1); return disp; diff --git a/drivers/video/of_videomode.c b/drivers/video/of_videomode.c index b5102aa6090d..9b5f9de88fec 100644 --- a/drivers/video/of_videomode.c +++ b/drivers/video/of_videomode.c @@ -36,7 +36,7 @@ int of_get_videomode(struct device_node *np, struct videomode *vm, disp = of_get_display_timings(np); if (!disp) { - pr_err("%s: no timings specified\n", of_node_full_name(np)); + pr_err("%pOF: no timings specified\n", np); return -EINVAL; } diff --git a/drivers/watchdog/asm9260_wdt.c b/drivers/watchdog/asm9260_wdt.c index 53da001f0838..7dd0da644a7f 100644 --- a/drivers/watchdog/asm9260_wdt.c +++ b/drivers/watchdog/asm9260_wdt.c @@ -82,7 +82,7 @@ static unsigned int asm9260_wdt_gettimeleft(struct watchdog_device *wdd) counter = ioread32(priv->iobase + HW_WDTV); - return DIV_ROUND_CLOSEST(counter, priv->wdt_freq); + return counter / priv->wdt_freq; } static int asm9260_wdt_updatetimeout(struct watchdog_device *wdd) @@ -296,7 +296,7 @@ static int asm9260_wdt_probe(struct platform_device *pdev) if (ret) return ret; - priv->rst = devm_reset_control_get(&pdev->dev, "wdt_rst"); + priv->rst = devm_reset_control_get_exclusive(&pdev->dev, "wdt_rst"); if (IS_ERR(priv->rst)) return PTR_ERR(priv->rst); diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c index 1c652582de40..79cc766cd30f 100644 --- a/drivers/watchdog/aspeed_wdt.c +++ b/drivers/watchdog/aspeed_wdt.c @@ -23,9 +23,21 @@ struct aspeed_wdt { u32 ctrl; }; +struct aspeed_wdt_config { + u32 ext_pulse_width_mask; +}; + +static const struct aspeed_wdt_config ast2400_config = { + .ext_pulse_width_mask = 0xff, +}; + +static const struct aspeed_wdt_config ast2500_config = { + .ext_pulse_width_mask = 0xfffff, +}; + static const struct of_device_id aspeed_wdt_of_table[] = { - { .compatible = "aspeed,ast2400-wdt" }, - { .compatible = "aspeed,ast2500-wdt" }, + { .compatible = "aspeed,ast2400-wdt", .data = &ast2400_config }, + { .compatible = "aspeed,ast2500-wdt", .data = &ast2500_config }, { }, }; MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table); @@ -36,12 +48,45 @@ MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table); #define WDT_CTRL 0x0C #define WDT_CTRL_RESET_MODE_SOC (0x00 << 5) #define WDT_CTRL_RESET_MODE_FULL_CHIP (0x01 << 5) +#define WDT_CTRL_RESET_MODE_ARM_CPU (0x10 << 5) #define WDT_CTRL_1MHZ_CLK BIT(4) #define WDT_CTRL_WDT_EXT BIT(3) #define WDT_CTRL_WDT_INTR BIT(2) #define WDT_CTRL_RESET_SYSTEM BIT(1) #define WDT_CTRL_ENABLE BIT(0) +/* + * WDT_RESET_WIDTH controls the characteristics of the external pulse (if + * enabled), specifically: + * + * * Pulse duration + * * Drive mode: push-pull vs open-drain + * * Polarity: Active high or active low + * + * Pulse duration configuration is available on both the AST2400 and AST2500, + * though the field changes between SoCs: + * + * AST2400: Bits 7:0 + * AST2500: Bits 19:0 + * + * This difference is captured in struct aspeed_wdt_config. + * + * The AST2500 exposes the drive mode and polarity options, but not in a + * regular fashion. For read purposes, bit 31 represents active high or low, + * and bit 30 represents push-pull or open-drain. With respect to write, magic + * values need to be written to the top byte to change the state of the drive + * mode and polarity bits. Any other value written to the top byte has no + * effect on the state of the drive mode or polarity bits. However, the pulse + * width value must be preserved (as desired) if written. 
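As a minimal sketch of the read-modify-write protocol that comment describes, using the WDT_RESET_WIDTH register and magic values defined just below (the helper name is illustrative only, and the iomem base pointer is assumed from this driver's struct aspeed_wdt):

static void aspeed_wdt_example_ext_signal(struct aspeed_wdt *wdt, u32 width_mask)
{
	u32 reg = readl(wdt->base + WDT_RESET_WIDTH);

	/* Keep only the pulse-width field; drive mode and polarity cannot be
	 * written directly and are selected via the magic top-byte values. */
	reg &= width_mask;

	/* First write: request push-pull (WDT_OPEN_DRAIN_MAGIC for open-drain) */
	writel(reg | WDT_PUSH_PULL_MAGIC, wdt->base + WDT_RESET_WIDTH);

	/* Second write: request active-high (WDT_ACTIVE_LOW_MAGIC for active-low) */
	writel(reg | WDT_ACTIVE_HIGH_MAGIC, wdt->base + WDT_RESET_WIDTH);
}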
+ */ +#define WDT_RESET_WIDTH 0x18 +#define WDT_RESET_WIDTH_ACTIVE_HIGH BIT(31) +#define WDT_ACTIVE_HIGH_MAGIC (0xA5 << 24) +#define WDT_ACTIVE_LOW_MAGIC (0x5A << 24) +#define WDT_RESET_WIDTH_PUSH_PULL BIT(30) +#define WDT_PUSH_PULL_MAGIC (0xA8 << 24) +#define WDT_OPEN_DRAIN_MAGIC (0x8A << 24) + #define WDT_RESTART_MAGIC 0x4755 /* 32 bits at 1MHz, in milliseconds */ @@ -138,8 +183,13 @@ static const struct watchdog_info aspeed_wdt_info = { static int aspeed_wdt_probe(struct platform_device *pdev) { + const struct aspeed_wdt_config *config; + const struct of_device_id *ofdid; struct aspeed_wdt *wdt; struct resource *res; + struct device_node *np; + const char *reset_type; + u32 duration; int ret; wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL); @@ -164,20 +214,88 @@ static int aspeed_wdt_probe(struct platform_device *pdev) wdt->wdd.timeout = WDT_DEFAULT_TIMEOUT; watchdog_init_timeout(&wdt->wdd, 0, &pdev->dev); + np = pdev->dev.of_node; + + ofdid = of_match_node(aspeed_wdt_of_table, np); + if (!ofdid) + return -EINVAL; + config = ofdid->data; + + wdt->ctrl = WDT_CTRL_1MHZ_CLK; + /* * Control reset on a per-device basis to ensure the - * host is not affected by a BMC reboot, so only reset - * the SOC and not the full chip + * host is not affected by a BMC reboot */ - wdt->ctrl = WDT_CTRL_RESET_MODE_SOC | - WDT_CTRL_1MHZ_CLK | - WDT_CTRL_RESET_SYSTEM; + ret = of_property_read_string(np, "aspeed,reset-type", &reset_type); + if (ret) { + wdt->ctrl |= WDT_CTRL_RESET_MODE_SOC | WDT_CTRL_RESET_SYSTEM; + } else { + if (!strcmp(reset_type, "cpu")) + wdt->ctrl |= WDT_CTRL_RESET_MODE_ARM_CPU; + else if (!strcmp(reset_type, "soc")) + wdt->ctrl |= WDT_CTRL_RESET_MODE_SOC; + else if (!strcmp(reset_type, "system")) + wdt->ctrl |= WDT_CTRL_RESET_SYSTEM; + else if (strcmp(reset_type, "none")) + return -EINVAL; + } + if (of_property_read_bool(np, "aspeed,external-signal")) + wdt->ctrl |= WDT_CTRL_WDT_EXT; + + writel(wdt->ctrl, wdt->base + WDT_CTRL); if (readl(wdt->base + WDT_CTRL) & WDT_CTRL_ENABLE) { aspeed_wdt_start(&wdt->wdd); set_bit(WDOG_HW_RUNNING, &wdt->wdd.status); } + if (of_device_is_compatible(np, "aspeed,ast2500-wdt")) { + u32 reg = readl(wdt->base + WDT_RESET_WIDTH); + + reg &= config->ext_pulse_width_mask; + if (of_property_read_bool(np, "aspeed,ext-push-pull")) + reg |= WDT_PUSH_PULL_MAGIC; + else + reg |= WDT_OPEN_DRAIN_MAGIC; + + writel(reg, wdt->base + WDT_RESET_WIDTH); + + reg &= config->ext_pulse_width_mask; + if (of_property_read_bool(np, "aspeed,ext-active-high")) + reg |= WDT_ACTIVE_HIGH_MAGIC; + else + reg |= WDT_ACTIVE_LOW_MAGIC; + + writel(reg, wdt->base + WDT_RESET_WIDTH); + } + + if (!of_property_read_u32(np, "aspeed,ext-pulse-duration", &duration)) { + u32 max_duration = config->ext_pulse_width_mask + 1; + + if (duration == 0 || duration > max_duration) { + dev_err(&pdev->dev, "Invalid pulse duration: %uus\n", + duration); + duration = max(1U, min(max_duration, duration)); + dev_info(&pdev->dev, "Pulse duration set to %uus\n", + duration); + } + + /* + * The watchdog is always configured with a 1MHz source, so + * there is no need to scale the microsecond value. However we + * need to offset it - from the datasheet: + * + * "This register decides the asserting duration of wdt_ext and + * wdt_rstarm signal. The default value is 0xFF. It means the + * default asserting duration of wdt_ext and wdt_rstarm is + * 256us." + * + * This implies a value of 0 gives a 1us pulse. 
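A worked instance of the clamp-and-offset arithmetic in this block, using an illustrative out-of-range request (the device-tree value 300 is made up for the example):

/* aspeed,ext-pulse-duration = <300> on an AST2400, whose width mask is 0xff:
 * max_duration = 0xff + 1 = 256us, so 300 is clamped to 256, and 256 - 1
 * = 0xff is written, i.e. the hardware default of a 256us pulse. A request
 * of 1us writes 0, the shortest pulse the datasheet quote above allows. */
u32 max_duration = 0xff + 1;                      /* ext_pulse_width_mask + 1 */
u32 duration = max(1U, min(max_duration, 300U));  /* clamped to 256 */
u32 regval = duration - 1;                        /* 0xff, written to WDT_RESET_WIDTH */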
+ */ + writel(duration - 1, wdt->base + WDT_RESET_WIDTH); + } + ret = devm_watchdog_register_device(&pdev->dev, &wdt->wdd); if (ret) { dev_err(&pdev->dev, "failed to register\n"); diff --git a/drivers/watchdog/bcm7038_wdt.c b/drivers/watchdog/bcm7038_wdt.c index c1b8e534fb55..f88f546e8050 100644 --- a/drivers/watchdog/bcm7038_wdt.c +++ b/drivers/watchdog/bcm7038_wdt.c @@ -136,7 +136,9 @@ static int bcm7038_wdt_probe(struct platform_device *pdev) wdt->clk = devm_clk_get(dev, NULL); /* If unable to get clock, use default frequency */ if (!IS_ERR(wdt->clk)) { - clk_prepare_enable(wdt->clk); + err = clk_prepare_enable(wdt->clk); + if (err) + return err; wdt->rate = clk_get_rate(wdt->clk); /* Prevent divide-by-zero exception */ if (!wdt->rate) diff --git a/drivers/watchdog/cadence_wdt.c b/drivers/watchdog/cadence_wdt.c index 05c000081e9d..064cf7b6c1c5 100644 --- a/drivers/watchdog/cadence_wdt.c +++ b/drivers/watchdog/cadence_wdt.c @@ -52,12 +52,12 @@ static int wdt_timeout; static int nowayout = WATCHDOG_NOWAYOUT; -module_param(wdt_timeout, int, 0); +module_param(wdt_timeout, int, 0644); MODULE_PARM_DESC(wdt_timeout, "Watchdog time in seconds. (default=" __MODULE_STRING(CDNS_WDT_DEFAULT_TIMEOUT) ")"); -module_param(nowayout, int, 0); +module_param(nowayout, int, 0644); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); @@ -368,7 +368,7 @@ static int cdns_wdt_probe(struct platform_device *pdev) } platform_set_drvdata(pdev, wdt); - dev_dbg(&pdev->dev, "Xilinx Watchdog Timer at %p with timeout %ds%s\n", + dev_info(&pdev->dev, "Xilinx Watchdog Timer at %p with timeout %ds%s\n", wdt->regs, cdns_wdt_device->timeout, nowayout ? ", nowayout" : ""); diff --git a/drivers/watchdog/coh901327_wdt.c b/drivers/watchdog/coh901327_wdt.c index 38dd60f0cfcc..4410337f4f7f 100644 --- a/drivers/watchdog/coh901327_wdt.c +++ b/drivers/watchdog/coh901327_wdt.c @@ -218,7 +218,7 @@ static const struct watchdog_info coh901327_ident = { .identity = DRV_NAME, }; -static struct watchdog_ops coh901327_ops = { +static const struct watchdog_ops coh901327_ops = { .owner = THIS_MODULE, .start = coh901327_start, .stop = coh901327_stop, diff --git a/drivers/watchdog/da9063_wdt.c b/drivers/watchdog/da9063_wdt.c index 4691c5509129..2a20fc163ed0 100644 --- a/drivers/watchdog/da9063_wdt.c +++ b/drivers/watchdog/da9063_wdt.c @@ -36,11 +36,6 @@ static const unsigned int wdt_timeout[] = { 0, 2, 4, 8, 16, 32, 65, 131 }; #define DA9063_WDG_TIMEOUT wdt_timeout[3] #define DA9063_RESET_PROTECTION_MS 256 -struct da9063_watchdog { - struct da9063 *da9063; - struct watchdog_device wdtdev; -}; - static unsigned int da9063_wdt_timeout_to_sel(unsigned int secs) { unsigned int i; @@ -61,14 +56,14 @@ static int _da9063_wdt_set_timeout(struct da9063 *da9063, unsigned int regval) static int da9063_wdt_start(struct watchdog_device *wdd) { - struct da9063_watchdog *wdt = watchdog_get_drvdata(wdd); + struct da9063 *da9063 = watchdog_get_drvdata(wdd); unsigned int selector; int ret; - selector = da9063_wdt_timeout_to_sel(wdt->wdtdev.timeout); - ret = _da9063_wdt_set_timeout(wdt->da9063, selector); + selector = da9063_wdt_timeout_to_sel(wdd->timeout); + ret = _da9063_wdt_set_timeout(da9063, selector); if (ret) - dev_err(wdt->da9063->dev, "Watchdog failed to start (err = %d)\n", + dev_err(da9063->dev, "Watchdog failed to start (err = %d)\n", ret); return ret; @@ -76,13 +71,13 @@ static int da9063_wdt_start(struct watchdog_device *wdd) static int da9063_wdt_stop(struct watchdog_device *wdd) { 
- struct da9063_watchdog *wdt = watchdog_get_drvdata(wdd); + struct da9063 *da9063 = watchdog_get_drvdata(wdd); int ret; - ret = regmap_update_bits(wdt->da9063->regmap, DA9063_REG_CONTROL_D, + ret = regmap_update_bits(da9063->regmap, DA9063_REG_CONTROL_D, DA9063_TWDSCALE_MASK, DA9063_TWDSCALE_DISABLE); if (ret) - dev_alert(wdt->da9063->dev, "Watchdog failed to stop (err = %d)\n", + dev_alert(da9063->dev, "Watchdog failed to stop (err = %d)\n", ret); return ret; @@ -90,13 +85,13 @@ static int da9063_wdt_stop(struct watchdog_device *wdd) static int da9063_wdt_ping(struct watchdog_device *wdd) { - struct da9063_watchdog *wdt = watchdog_get_drvdata(wdd); + struct da9063 *da9063 = watchdog_get_drvdata(wdd); int ret; - ret = regmap_write(wdt->da9063->regmap, DA9063_REG_CONTROL_F, + ret = regmap_write(da9063->regmap, DA9063_REG_CONTROL_F, DA9063_WATCHDOG); if (ret) - dev_alert(wdt->da9063->dev, "Failed to ping the watchdog (err = %d)\n", + dev_alert(da9063->dev, "Failed to ping the watchdog (err = %d)\n", ret); return ret; @@ -105,14 +100,14 @@ static int da9063_wdt_ping(struct watchdog_device *wdd) static int da9063_wdt_set_timeout(struct watchdog_device *wdd, unsigned int timeout) { - struct da9063_watchdog *wdt = watchdog_get_drvdata(wdd); + struct da9063 *da9063 = watchdog_get_drvdata(wdd); unsigned int selector; int ret; selector = da9063_wdt_timeout_to_sel(timeout); - ret = _da9063_wdt_set_timeout(wdt->da9063, selector); + ret = _da9063_wdt_set_timeout(da9063, selector); if (ret) - dev_err(wdt->da9063->dev, "Failed to set watchdog timeout (err = %d)\n", + dev_err(da9063->dev, "Failed to set watchdog timeout (err = %d)\n", ret); else wdd->timeout = wdt_timeout[selector]; @@ -123,13 +118,13 @@ static int da9063_wdt_set_timeout(struct watchdog_device *wdd, static int da9063_wdt_restart(struct watchdog_device *wdd, unsigned long action, void *data) { - struct da9063_watchdog *wdt = watchdog_get_drvdata(wdd); + struct da9063 *da9063 = watchdog_get_drvdata(wdd); int ret; - ret = regmap_write(wdt->da9063->regmap, DA9063_REG_CONTROL_F, + ret = regmap_write(da9063->regmap, DA9063_REG_CONTROL_F, DA9063_SHUTDOWN); if (ret) - dev_alert(wdt->da9063->dev, "Failed to shutdown (err = %d)\n", + dev_alert(da9063->dev, "Failed to shutdown (err = %d)\n", ret); return ret; @@ -152,7 +147,7 @@ static const struct watchdog_ops da9063_watchdog_ops = { static int da9063_wdt_probe(struct platform_device *pdev) { struct da9063 *da9063; - struct da9063_watchdog *wdt; + struct watchdog_device *wdd; if (!pdev->dev.parent) return -EINVAL; @@ -161,27 +156,25 @@ static int da9063_wdt_probe(struct platform_device *pdev) if (!da9063) return -EINVAL; - wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL); - if (!wdt) + wdd = devm_kzalloc(&pdev->dev, sizeof(*wdd), GFP_KERNEL); + if (!wdd) return -ENOMEM; - wdt->da9063 = da9063; - - wdt->wdtdev.info = &da9063_watchdog_info; - wdt->wdtdev.ops = &da9063_watchdog_ops; - wdt->wdtdev.min_timeout = DA9063_WDT_MIN_TIMEOUT; - wdt->wdtdev.max_timeout = DA9063_WDT_MAX_TIMEOUT; - wdt->wdtdev.min_hw_heartbeat_ms = DA9063_RESET_PROTECTION_MS; - wdt->wdtdev.timeout = DA9063_WDG_TIMEOUT; - wdt->wdtdev.parent = &pdev->dev; + wdd->info = &da9063_watchdog_info; + wdd->ops = &da9063_watchdog_ops; + wdd->min_timeout = DA9063_WDT_MIN_TIMEOUT; + wdd->max_timeout = DA9063_WDT_MAX_TIMEOUT; + wdd->min_hw_heartbeat_ms = DA9063_RESET_PROTECTION_MS; + wdd->timeout = DA9063_WDG_TIMEOUT; + wdd->parent = &pdev->dev; - wdt->wdtdev.status = WATCHDOG_NOWAYOUT_INIT_STATUS; + wdd->status = 
WATCHDOG_NOWAYOUT_INIT_STATUS; - watchdog_set_restart_priority(&wdt->wdtdev, 128); + watchdog_set_restart_priority(wdd, 128); - watchdog_set_drvdata(&wdt->wdtdev, wdt); + watchdog_set_drvdata(wdd, da9063); - return devm_watchdog_register_device(&pdev->dev, &wdt->wdtdev); + return devm_watchdog_register_device(&pdev->dev, wdd); } static struct platform_driver da9063_wdt_driver = { diff --git a/drivers/watchdog/diag288_wdt.c b/drivers/watchdog/diag288_wdt.c index 6f591084bb7a..806a04a676b7 100644 --- a/drivers/watchdog/diag288_wdt.c +++ b/drivers/watchdog/diag288_wdt.c @@ -213,7 +213,7 @@ static const struct watchdog_ops wdt_ops = { .set_timeout = wdt_set_timeout, }; -static struct watchdog_info wdt_info = { +static const struct watchdog_info wdt_info = { .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, .firmware_version = 0, .identity = "z Watchdog", diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c index c4f65873bfa4..347f0389b089 100644 --- a/drivers/watchdog/iTCO_wdt.c +++ b/drivers/watchdog/iTCO_wdt.c @@ -306,15 +306,16 @@ static int iTCO_wdt_ping(struct watchdog_device *wd_dev) iTCO_vendor_pre_keepalive(p->smi_res, wd_dev->timeout); - /* Reset the timeout status bit so that the timer - * needs to count down twice again before rebooting */ - outw(0x0008, TCO1_STS(p)); /* write 1 to clear bit */ - /* Reload the timer by writing to the TCO Timer Counter register */ - if (p->iTCO_version >= 2) + if (p->iTCO_version >= 2) { outw(0x01, TCO_RLD(p)); - else if (p->iTCO_version == 1) + } else if (p->iTCO_version == 1) { + /* Reset the timeout status bit so that the timer + * needs to count down twice again before rebooting */ + outw(0x0008, TCO1_STS(p)); /* write 1 to clear bit */ + outb(0x01, TCO_RLD(p)); + } spin_unlock(&p->io_lock); return 0; @@ -327,8 +328,11 @@ static int iTCO_wdt_set_timeout(struct watchdog_device *wd_dev, unsigned int t) unsigned char val8; unsigned int tmrval; - /* The timer counts down twice before rebooting */ - tmrval = seconds_to_ticks(p, t) / 2; + tmrval = seconds_to_ticks(p, t); + + /* For TCO v1 the timer counts down twice before rebooting */ + if (p->iTCO_version == 1) + tmrval /= 2; /* from the specs: */ /* "Values of 0h-3h are ignored and should not be attempted" */ @@ -381,8 +385,6 @@ static unsigned int iTCO_wdt_get_timeleft(struct watchdog_device *wd_dev) spin_lock(&p->io_lock); val16 = inw(TCO_RLD(p)); val16 &= 0x3ff; - if (!(inw(TCO1_STS(p)) & 0x0008)) - val16 += (inw(TCOv2_TMR(p)) & 0x3ff); spin_unlock(&p->io_lock); time_left = ticks_to_seconds(p, val16); diff --git a/drivers/watchdog/it87_wdt.c b/drivers/watchdog/it87_wdt.c index dd1e7eaef50f..e96faea24925 100644 --- a/drivers/watchdog/it87_wdt.c +++ b/drivers/watchdog/it87_wdt.c @@ -253,7 +253,7 @@ static const struct watchdog_info ident = { .identity = WATCHDOG_NAME, }; -static struct watchdog_ops wdt_ops = { +static const struct watchdog_ops wdt_ops = { .owner = THIS_MODULE, .start = wdt_start, .stop = wdt_stop, diff --git a/drivers/watchdog/lantiq_wdt.c b/drivers/watchdog/lantiq_wdt.c index e0823677d8c1..7f43cefa0eae 100644 --- a/drivers/watchdog/lantiq_wdt.c +++ b/drivers/watchdog/lantiq_wdt.c @@ -4,6 +4,7 @@ * by the Free Software Foundation. 
* * Copyright (C) 2010 John Crispin <john@phrozen.org> + * Copyright (C) 2017 Hauke Mehrtens <hauke@hauke-m.de> * Based on EP93xx wdt driver */ @@ -17,9 +18,20 @@ #include <linux/uaccess.h> #include <linux/clk.h> #include <linux/io.h> +#include <linux/regmap.h> +#include <linux/mfd/syscon.h> #include <lantiq_soc.h> +#define LTQ_XRX_RCU_RST_STAT 0x0014 +#define LTQ_XRX_RCU_RST_STAT_WDT BIT(31) + +/* CPU0 Reset Source Register */ +#define LTQ_FALCON_SYS1_CPU0RS 0x0060 +/* reset cause mask */ +#define LTQ_FALCON_SYS1_CPU0RS_MASK 0x0007 +#define LTQ_FALCON_SYS1_CPU0RS_WDT 0x02 + /* * Section 3.4 of the datasheet * The password sequence protects the WDT control register from unintended @@ -186,16 +198,70 @@ static struct miscdevice ltq_wdt_miscdev = { .fops = <q_wdt_fops, }; +typedef int (*ltq_wdt_bootstatus_set)(struct platform_device *pdev); + +static int ltq_wdt_bootstatus_xrx(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct regmap *rcu_regmap; + u32 val; + int err; + + rcu_regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "regmap"); + if (IS_ERR(rcu_regmap)) + return PTR_ERR(rcu_regmap); + + err = regmap_read(rcu_regmap, LTQ_XRX_RCU_RST_STAT, &val); + if (err) + return err; + + if (val & LTQ_XRX_RCU_RST_STAT_WDT) + ltq_wdt_bootstatus = WDIOF_CARDRESET; + + return 0; +} + +static int ltq_wdt_bootstatus_falcon(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct regmap *rcu_regmap; + u32 val; + int err; + + rcu_regmap = syscon_regmap_lookup_by_phandle(dev->of_node, + "lantiq,rcu"); + if (IS_ERR(rcu_regmap)) + return PTR_ERR(rcu_regmap); + + err = regmap_read(rcu_regmap, LTQ_FALCON_SYS1_CPU0RS, &val); + if (err) + return err; + + if ((val & LTQ_FALCON_SYS1_CPU0RS_MASK) == LTQ_FALCON_SYS1_CPU0RS_WDT) + ltq_wdt_bootstatus = WDIOF_CARDRESET; + + return 0; +} + static int ltq_wdt_probe(struct platform_device *pdev) { struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); struct clk *clk; + ltq_wdt_bootstatus_set ltq_wdt_bootstatus_set; + int ret; ltq_wdt_membase = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(ltq_wdt_membase)) return PTR_ERR(ltq_wdt_membase); + ltq_wdt_bootstatus_set = of_device_get_match_data(&pdev->dev); + if (ltq_wdt_bootstatus_set) { + ret = ltq_wdt_bootstatus_set(pdev); + if (ret) + return ret; + } + /* we do not need to enable the clock as it is always running */ clk = clk_get_io(); if (IS_ERR(clk)) { @@ -205,10 +271,6 @@ ltq_wdt_probe(struct platform_device *pdev) ltq_io_region_clk_rate = clk_get_rate(clk); clk_put(clk); - /* find out if the watchdog caused the last reboot */ - if (ltq_reset_cause() == LTQ_RST_CAUSE_WDTRST) - ltq_wdt_bootstatus = WDIOF_CARDRESET; - dev_info(&pdev->dev, "Init done\n"); return misc_register(<q_wdt_miscdev); } @@ -222,7 +284,9 @@ ltq_wdt_remove(struct platform_device *pdev) } static const struct of_device_id ltq_wdt_match[] = { - { .compatible = "lantiq,wdt" }, + { .compatible = "lantiq,wdt", .data = NULL}, + { .compatible = "lantiq,xrx100-wdt", .data = ltq_wdt_bootstatus_xrx }, + { .compatible = "lantiq,falcon-wdt", .data = ltq_wdt_bootstatus_falcon }, {}, }; MODULE_DEVICE_TABLE(of, ltq_wdt_match); diff --git a/drivers/watchdog/max77620_wdt.c b/drivers/watchdog/max77620_wdt.c index 68c41fa2be27..2c9f53eaff4f 100644 --- a/drivers/watchdog/max77620_wdt.c +++ b/drivers/watchdog/max77620_wdt.c @@ -201,7 +201,7 @@ static int max77620_wdt_remove(struct platform_device *pdev) return 0; } -static struct platform_device_id max77620_wdt_devtype[] = { +static const struct 
platform_device_id max77620_wdt_devtype[] = { { .name = "max77620-watchdog", }, { }, }; diff --git a/drivers/watchdog/mei_wdt.c b/drivers/watchdog/mei_wdt.c index b29c6fde7473..ea60b29494fb 100644 --- a/drivers/watchdog/mei_wdt.c +++ b/drivers/watchdog/mei_wdt.c @@ -670,7 +670,7 @@ static int mei_wdt_remove(struct mei_cl_device *cldev) #define MEI_UUID_WD UUID_LE(0x05B79A6F, 0x4628, 0x4D7F, \ 0x89, 0x9D, 0xA9, 0x15, 0x14, 0xCB, 0x32, 0xAB) -static struct mei_cl_device_id mei_wdt_tbl[] = { +static const struct mei_cl_device_id mei_wdt_tbl[] = { { .uuid = MEI_UUID_WD, .version = MEI_CL_VERSION_ANY }, /* required last entry */ { } diff --git a/drivers/watchdog/meson_wdt.c b/drivers/watchdog/meson_wdt.c index 491b9bf13d84..304274c67735 100644 --- a/drivers/watchdog/meson_wdt.c +++ b/drivers/watchdog/meson_wdt.c @@ -155,7 +155,9 @@ static const struct watchdog_ops meson_wdt_ops = { static const struct of_device_id meson_wdt_dt_ids[] = { { .compatible = "amlogic,meson6-wdt", .data = &meson6_wdt_data }, + { .compatible = "amlogic,meson8-wdt", .data = &meson6_wdt_data }, { .compatible = "amlogic,meson8b-wdt", .data = &meson8b_wdt_data }, + { .compatible = "amlogic,meson8m2-wdt", .data = &meson8b_wdt_data }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, meson_wdt_dt_ids); diff --git a/drivers/watchdog/mt7621_wdt.c b/drivers/watchdog/mt7621_wdt.c index 48a06067075d..db38f8017218 100644 --- a/drivers/watchdog/mt7621_wdt.c +++ b/drivers/watchdog/mt7621_wdt.c @@ -105,7 +105,7 @@ static int mt7621_wdt_bootcause(void) return 0; } -static struct watchdog_info mt7621_wdt_info = { +static const struct watchdog_info mt7621_wdt_info = { .identity = "Mediatek Watchdog", .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, }; @@ -135,7 +135,7 @@ static int mt7621_wdt_probe(struct platform_device *pdev) if (IS_ERR(mt7621_wdt_base)) return PTR_ERR(mt7621_wdt_base); - mt7621_wdt_reset = devm_reset_control_get(&pdev->dev, NULL); + mt7621_wdt_reset = devm_reset_control_get_exclusive(&pdev->dev, NULL); if (!IS_ERR(mt7621_wdt_reset)) reset_control_deassert(mt7621_wdt_reset); diff --git a/drivers/watchdog/octeon-wdt-main.c b/drivers/watchdog/octeon-wdt-main.c index b5cdceb36cff..0ec419a3f7ed 100644 --- a/drivers/watchdog/octeon-wdt-main.c +++ b/drivers/watchdog/octeon-wdt-main.c @@ -1,7 +1,7 @@ /* * Octeon Watchdog driver * - * Copyright (C) 2007, 2008, 2009, 2010 Cavium Networks + * Copyright (C) 2007-2017 Cavium, Inc. * * Converted to use WATCHDOG_CORE by Aaro Koskinen <aaro.koskinen@iki.fi>. * @@ -59,20 +59,23 @@ #include <linux/interrupt.h> #include <linux/watchdog.h> #include <linux/cpumask.h> -#include <linux/bitops.h> -#include <linux/kernel.h> #include <linux/module.h> -#include <linux/string.h> #include <linux/delay.h> #include <linux/cpu.h> -#include <linux/smp.h> -#include <linux/fs.h> #include <linux/irq.h> #include <asm/mipsregs.h> #include <asm/uasm.h> #include <asm/octeon/octeon.h> +#include <asm/octeon/cvmx-boot-vector.h> +#include <asm/octeon/cvmx-ciu2-defs.h> +#include <asm/octeon/cvmx-rst-defs.h> + +/* Watchdog interrupt major block number (8 MSBs of intsn) */ +#define WD_BLOCK_NUMBER 0x01 + +static int divisor; /* The count needed to achieve timeout_sec. 
*/ static unsigned int timeout_cnt; @@ -84,7 +87,7 @@ static unsigned int max_timeout_sec; static unsigned int timeout_sec; /* Set to non-zero when userspace countdown mode active */ -static int do_coundown; +static bool do_countdown; static unsigned int countdown_reset; static unsigned int per_cpu_countdown[NR_CPUS]; @@ -92,152 +95,38 @@ static cpumask_t irq_enabled_cpus; #define WD_TIMO 60 /* Default heartbeat = 60 seconds */ +#define CVMX_GSERX_SCRATCH(offset) (CVMX_ADD_IO_SEG(0x0001180090000020ull) + ((offset) & 15) * 0x1000000ull) + static int heartbeat = WD_TIMO; -module_param(heartbeat, int, S_IRUGO); +module_param(heartbeat, int, 0444); MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. (0 < heartbeat, default=" __MODULE_STRING(WD_TIMO) ")"); static bool nowayout = WATCHDOG_NOWAYOUT; -module_param(nowayout, bool, S_IRUGO); +module_param(nowayout, bool, 0444); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); -static u32 nmi_stage1_insns[64] __initdata; -/* We need one branch and therefore one relocation per target label. */ -static struct uasm_label labels[5] __initdata; -static struct uasm_reloc relocs[5] __initdata; - -enum lable_id { - label_enter_bootloader = 1 -}; +static int disable; +module_param(disable, int, 0444); +MODULE_PARM_DESC(disable, + "Disable the watchdog entirely (default=0)"); -/* Some CP0 registers */ -#define K0 26 -#define C0_CVMMEMCTL 11, 7 -#define C0_STATUS 12, 0 -#define C0_EBASE 15, 1 -#define C0_DESAVE 31, 0 +static struct cvmx_boot_vector_element *octeon_wdt_bootvector; void octeon_wdt_nmi_stage2(void); -static void __init octeon_wdt_build_stage1(void) -{ - int i; - int len; - u32 *p = nmi_stage1_insns; -#ifdef CONFIG_HOTPLUG_CPU - struct uasm_label *l = labels; - struct uasm_reloc *r = relocs; -#endif - - /* - * For the next few instructions running the debugger may - * cause corruption of k0 in the saved registers. Since we're - * about to crash, nobody probably cares. - * - * Save K0 into the debug scratch register - */ - uasm_i_dmtc0(&p, K0, C0_DESAVE); - - uasm_i_mfc0(&p, K0, C0_STATUS); -#ifdef CONFIG_HOTPLUG_CPU - if (octeon_bootloader_entry_addr) - uasm_il_bbit0(&p, &r, K0, ilog2(ST0_NMI), - label_enter_bootloader); -#endif - /* Force 64-bit addressing enabled */ - uasm_i_ori(&p, K0, K0, ST0_UX | ST0_SX | ST0_KX); - uasm_i_mtc0(&p, K0, C0_STATUS); - -#ifdef CONFIG_HOTPLUG_CPU - if (octeon_bootloader_entry_addr) { - uasm_i_mfc0(&p, K0, C0_EBASE); - /* Coreid number in K0 */ - uasm_i_andi(&p, K0, K0, 0xf); - /* 8 * coreid in bits 16-31 */ - uasm_i_dsll_safe(&p, K0, K0, 3 + 16); - uasm_i_ori(&p, K0, K0, 0x8001); - uasm_i_dsll_safe(&p, K0, K0, 16); - uasm_i_ori(&p, K0, K0, 0x0700); - uasm_i_drotr_safe(&p, K0, K0, 32); - /* - * Should result in: 0x8001,0700,0000,8*coreid which is - * CVMX_CIU_WDOGX(coreid) - 0x0500 - * - * Now ld K0, CVMX_CIU_WDOGX(coreid) - */ - uasm_i_ld(&p, K0, 0x500, K0); - /* - * If bit one set handle the NMI as a watchdog event. - * otherwise transfer control to bootloader. - */ - uasm_il_bbit0(&p, &r, K0, 1, label_enter_bootloader); - uasm_i_nop(&p); - } -#endif - - /* Clear Dcache so cvmseg works right. 
*/ - uasm_i_cache(&p, 1, 0, 0); - - /* Use K0 to do a read/modify/write of CVMMEMCTL */ - uasm_i_dmfc0(&p, K0, C0_CVMMEMCTL); - /* Clear out the size of CVMSEG */ - uasm_i_dins(&p, K0, 0, 0, 6); - /* Set CVMSEG to its largest value */ - uasm_i_ori(&p, K0, K0, 0x1c0 | 54); - /* Store the CVMMEMCTL value */ - uasm_i_dmtc0(&p, K0, C0_CVMMEMCTL); - - /* Load the address of the second stage handler */ - UASM_i_LA(&p, K0, (long)octeon_wdt_nmi_stage2); - uasm_i_jr(&p, K0); - uasm_i_dmfc0(&p, K0, C0_DESAVE); - -#ifdef CONFIG_HOTPLUG_CPU - if (octeon_bootloader_entry_addr) { - uasm_build_label(&l, p, label_enter_bootloader); - /* Jump to the bootloader and restore K0 */ - UASM_i_LA(&p, K0, (long)octeon_bootloader_entry_addr); - uasm_i_jr(&p, K0); - uasm_i_dmfc0(&p, K0, C0_DESAVE); - } -#endif - uasm_resolve_relocs(relocs, labels); - - len = (int)(p - nmi_stage1_insns); - pr_debug("Synthesized NMI stage 1 handler (%d instructions)\n", len); - - pr_debug("\t.set push\n"); - pr_debug("\t.set noreorder\n"); - for (i = 0; i < len; i++) - pr_debug("\t.word 0x%08x\n", nmi_stage1_insns[i]); - pr_debug("\t.set pop\n"); - - if (len > 32) - panic("NMI stage 1 handler exceeds 32 instructions, was %d\n", - len); -} - static int cpu2core(int cpu) { #ifdef CONFIG_SMP - return cpu_logical_map(cpu); + return cpu_logical_map(cpu) & 0x3f; #else return cvmx_get_core_num(); #endif } -static int core2cpu(int coreid) -{ -#ifdef CONFIG_SMP - return cpu_number_map(coreid); -#else - return 0; -#endif -} - /** * Poke the watchdog when an interrupt is received * @@ -248,13 +137,14 @@ static int core2cpu(int coreid) */ static irqreturn_t octeon_wdt_poke_irq(int cpl, void *dev_id) { - unsigned int core = cvmx_get_core_num(); - int cpu = core2cpu(core); + int cpu = raw_smp_processor_id(); + unsigned int core = cpu2core(cpu); + int node = cpu_to_node(cpu); - if (do_coundown) { + if (do_countdown) { if (per_cpu_countdown[cpu] > 0) { /* We're alive, poke the watchdog */ - cvmx_write_csr(CVMX_CIU_PP_POKEX(core), 1); + cvmx_write_csr_node(node, CVMX_CIU_PP_POKEX(core), 1); per_cpu_countdown[cpu]--; } else { /* Bad news, you are about to reboot. */ @@ -263,7 +153,7 @@ static irqreturn_t octeon_wdt_poke_irq(int cpl, void *dev_id) } } else { /* Not open, just ping away... */ - cvmx_write_csr(CVMX_CIU_PP_POKEX(core), 1); + cvmx_write_csr_node(node, CVMX_CIU_PP_POKEX(core), 1); } return IRQ_HANDLED; } @@ -338,10 +228,10 @@ void octeon_wdt_nmi_stage3(u64 reg[32]) u64 cp0_epc = read_c0_epc(); /* Delay so output from all cores output is not jumbled together. */ - __delay(100000000ull * coreid); + udelay(85000 * coreid); octeon_wdt_write_string("\r\n*** NMI Watchdog interrupt on Core 0x"); - octeon_wdt_write_hex(coreid, 1); + octeon_wdt_write_hex(coreid, 2); octeon_wdt_write_string(" ***\r\n"); for (i = 0; i < 32; i++) { octeon_wdt_write_string("\t"); @@ -364,33 +254,98 @@ void octeon_wdt_nmi_stage3(u64 reg[32]) octeon_wdt_write_hex(cp0_cause, 16); octeon_wdt_write_string("\r\n"); - octeon_wdt_write_string("\tsum0\t0x"); - octeon_wdt_write_hex(cvmx_read_csr(CVMX_CIU_INTX_SUM0(coreid * 2)), 16); - octeon_wdt_write_string("\ten0\t0x"); - octeon_wdt_write_hex(cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)), 16); - octeon_wdt_write_string("\r\n"); + /* The CIU register is different for each Octeon model. 
*/ + if (OCTEON_IS_MODEL(OCTEON_CN68XX)) { + octeon_wdt_write_string("\tsrc_wd\t0x"); + octeon_wdt_write_hex(cvmx_read_csr(CVMX_CIU2_SRC_PPX_IP2_WDOG(coreid)), 16); + octeon_wdt_write_string("\ten_wd\t0x"); + octeon_wdt_write_hex(cvmx_read_csr(CVMX_CIU2_EN_PPX_IP2_WDOG(coreid)), 16); + octeon_wdt_write_string("\r\n"); + octeon_wdt_write_string("\tsrc_rml\t0x"); + octeon_wdt_write_hex(cvmx_read_csr(CVMX_CIU2_SRC_PPX_IP2_RML(coreid)), 16); + octeon_wdt_write_string("\ten_rml\t0x"); + octeon_wdt_write_hex(cvmx_read_csr(CVMX_CIU2_EN_PPX_IP2_RML(coreid)), 16); + octeon_wdt_write_string("\r\n"); + octeon_wdt_write_string("\tsum\t0x"); + octeon_wdt_write_hex(cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(coreid)), 16); + octeon_wdt_write_string("\r\n"); + } else if (!octeon_has_feature(OCTEON_FEATURE_CIU3)) { + octeon_wdt_write_string("\tsum0\t0x"); + octeon_wdt_write_hex(cvmx_read_csr(CVMX_CIU_INTX_SUM0(coreid * 2)), 16); + octeon_wdt_write_string("\ten0\t0x"); + octeon_wdt_write_hex(cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)), 16); + octeon_wdt_write_string("\r\n"); + } octeon_wdt_write_string("*** Chip soft reset soon ***\r\n"); + + /* + * G-30204: We must trigger a soft reset before watchdog + * does an incomplete job of doing it. + */ + if (OCTEON_IS_OCTEON3() && !OCTEON_IS_MODEL(OCTEON_CN70XX)) { + u64 scr; + unsigned int node = cvmx_get_node_num(); + unsigned int lcore = cvmx_get_local_core_num(); + union cvmx_ciu_wdogx ciu_wdog; + + /* + * Wait for other cores to print out information, but + * not too long. Do the soft reset before watchdog + * can trigger it. + */ + do { + ciu_wdog.u64 = cvmx_read_csr_node(node, CVMX_CIU_WDOGX(lcore)); + } while (ciu_wdog.s.cnt > 0x10000); + + scr = cvmx_read_csr_node(0, CVMX_GSERX_SCRATCH(0)); + scr |= 1 << 11; /* Indicate watchdog in bit 11 */ + cvmx_write_csr_node(0, CVMX_GSERX_SCRATCH(0), scr); + cvmx_write_csr_node(0, CVMX_RST_SOFT_RST, 1); + } +} + +static int octeon_wdt_cpu_to_irq(int cpu) +{ + unsigned int coreid; + int node; + int irq; + + coreid = cpu2core(cpu); + node = cpu_to_node(cpu); + + if (octeon_has_feature(OCTEON_FEATURE_CIU3)) { + struct irq_domain *domain; + int hwirq; + + domain = octeon_irq_get_block_domain(node, + WD_BLOCK_NUMBER); + hwirq = WD_BLOCK_NUMBER << 12 | 0x200 | coreid; + irq = irq_find_mapping(domain, hwirq); + } else { + irq = OCTEON_IRQ_WDOG0 + coreid; + } + return irq; } static int octeon_wdt_cpu_pre_down(unsigned int cpu) { unsigned int core; - unsigned int irq; + int node; union cvmx_ciu_wdogx ciu_wdog; core = cpu2core(cpu); - irq = OCTEON_IRQ_WDOG0 + core; + node = cpu_to_node(cpu); /* Poke the watchdog to clear out its state */ - cvmx_write_csr(CVMX_CIU_PP_POKEX(core), 1); + cvmx_write_csr_node(node, CVMX_CIU_PP_POKEX(core), 1); /* Disable the hardware. */ ciu_wdog.u64 = 0; - cvmx_write_csr(CVMX_CIU_WDOGX(core), ciu_wdog.u64); + cvmx_write_csr_node(node, CVMX_CIU_WDOGX(core), ciu_wdog.u64); - free_irq(irq, octeon_wdt_poke_irq); + free_irq(octeon_wdt_cpu_to_irq(cpu), octeon_wdt_poke_irq); return 0; } @@ -399,31 +354,56 @@ static int octeon_wdt_cpu_online(unsigned int cpu) unsigned int core; unsigned int irq; union cvmx_ciu_wdogx ciu_wdog; + int node; + struct irq_domain *domain; + int hwirq; core = cpu2core(cpu); + node = cpu_to_node(cpu); + + octeon_wdt_bootvector[core].target_ptr = (u64)octeon_wdt_nmi_stage2; /* Disable it before doing anything with the interrupts. 
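For reference, a worked expansion of the CIU3 interrupt-source encoding used by octeon_wdt_cpu_to_irq() in this hunk (the core number is illustrative):

/* Core 5, watchdog major block 0x01:
 *   hwirq = WD_BLOCK_NUMBER << 12 | 0x200 | 5 = 0x1000 | 0x200 | 0x5 = 0x1205
 * irq_find_mapping()/irq_create_mapping() then translate this hardware
 * number into a Linux virq within the per-node CIU3 block domain. */
int hwirq = WD_BLOCK_NUMBER << 12 | 0x200 | 5;	/* == 0x1205 */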
*/ ciu_wdog.u64 = 0; - cvmx_write_csr(CVMX_CIU_WDOGX(core), ciu_wdog.u64); + cvmx_write_csr_node(node, CVMX_CIU_WDOGX(core), ciu_wdog.u64); per_cpu_countdown[cpu] = countdown_reset; - irq = OCTEON_IRQ_WDOG0 + core; + if (octeon_has_feature(OCTEON_FEATURE_CIU3)) { + /* Must get the domain for the watchdog block */ + domain = octeon_irq_get_block_domain(node, WD_BLOCK_NUMBER); + + /* Get a irq for the wd intsn (hardware interrupt) */ + hwirq = WD_BLOCK_NUMBER << 12 | 0x200 | core; + irq = irq_create_mapping(domain, hwirq); + irqd_set_trigger_type(irq_get_irq_data(irq), + IRQ_TYPE_EDGE_RISING); + } else + irq = OCTEON_IRQ_WDOG0 + core; if (request_irq(irq, octeon_wdt_poke_irq, IRQF_NO_THREAD, "octeon_wdt", octeon_wdt_poke_irq)) panic("octeon_wdt: Couldn't obtain irq %d", irq); + /* Must set the irq affinity here */ + if (octeon_has_feature(OCTEON_FEATURE_CIU3)) { + cpumask_t mask; + + cpumask_clear(&mask); + cpumask_set_cpu(cpu, &mask); + irq_set_affinity(irq, &mask); + } + cpumask_set_cpu(cpu, &irq_enabled_cpus); /* Poke the watchdog to clear out its state */ - cvmx_write_csr(CVMX_CIU_PP_POKEX(core), 1); + cvmx_write_csr_node(node, CVMX_CIU_PP_POKEX(core), 1); /* Finally enable the watchdog now that all handlers are installed */ ciu_wdog.u64 = 0; ciu_wdog.s.len = timeout_cnt; ciu_wdog.s.mode = 3; /* 3 = Interrupt + NMI + Soft-Reset */ - cvmx_write_csr(CVMX_CIU_WDOGX(core), ciu_wdog.u64); + cvmx_write_csr_node(node, CVMX_CIU_WDOGX(core), ciu_wdog.u64); return 0; } @@ -432,17 +412,20 @@ static int octeon_wdt_ping(struct watchdog_device __always_unused *wdog) { int cpu; int coreid; + int node; + + if (disable) + return 0; for_each_online_cpu(cpu) { coreid = cpu2core(cpu); - cvmx_write_csr(CVMX_CIU_PP_POKEX(coreid), 1); + node = cpu_to_node(cpu); + cvmx_write_csr_node(node, CVMX_CIU_PP_POKEX(coreid), 1); per_cpu_countdown[cpu] = countdown_reset; - if ((countdown_reset || !do_coundown) && + if ((countdown_reset || !do_countdown) && !cpumask_test_cpu(cpu, &irq_enabled_cpus)) { /* We have to enable the irq */ - int irq = OCTEON_IRQ_WDOG0 + coreid; - - enable_irq(irq); + enable_irq(octeon_wdt_cpu_to_irq(cpu)); cpumask_set_cpu(cpu, &irq_enabled_cpus); } } @@ -472,7 +455,7 @@ static void octeon_wdt_calc_parameters(int t) countdown_reset = periods > 2 ? periods - 2 : 0; heartbeat = t; - timeout_cnt = ((octeon_get_io_clock_rate() >> 8) * timeout_sec) >> 8; + timeout_cnt = ((octeon_get_io_clock_rate() / divisor) * timeout_sec) >> 8; } static int octeon_wdt_set_timeout(struct watchdog_device *wdog, @@ -481,20 +464,25 @@ static int octeon_wdt_set_timeout(struct watchdog_device *wdog, int cpu; int coreid; union cvmx_ciu_wdogx ciu_wdog; + int node; if (t <= 0) return -1; octeon_wdt_calc_parameters(t); + if (disable) + return 0; + for_each_online_cpu(cpu) { coreid = cpu2core(cpu); - cvmx_write_csr(CVMX_CIU_PP_POKEX(coreid), 1); + node = cpu_to_node(cpu); + cvmx_write_csr_node(node, CVMX_CIU_PP_POKEX(coreid), 1); ciu_wdog.u64 = 0; ciu_wdog.s.len = timeout_cnt; ciu_wdog.s.mode = 3; /* 3 = Interrupt + NMI + Soft-Reset */ - cvmx_write_csr(CVMX_CIU_WDOGX(coreid), ciu_wdog.u64); - cvmx_write_csr(CVMX_CIU_PP_POKEX(coreid), 1); + cvmx_write_csr_node(node, CVMX_CIU_WDOGX(coreid), ciu_wdog.u64); + cvmx_write_csr_node(node, CVMX_CIU_PP_POKEX(coreid), 1); } octeon_wdt_ping(wdog); /* Get the irqs back on. 
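A worked instance of the timeout arithmetic above, assuming an illustrative 800 MHz I/O clock and the default divisor of 0x100 (the clock rate is an assumption for the example only):

/* (800000000 / 0x100) * 5 = 15625000, and 15625000 >> 8 = 61035, which fits
 * the 16-bit LEN field. Six seconds would give 73242 > 65535, which is why
 * octeon_wdt_init() further down walks max_timeout_sec down from 6 until
 * the computed count fits. */
timeout_cnt = ((800000000 / 0x100) * 5) >> 8;	/* == 61035 */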
*/ return 0; @@ -503,13 +491,13 @@ static int octeon_wdt_set_timeout(struct watchdog_device *wdog, static int octeon_wdt_start(struct watchdog_device *wdog) { octeon_wdt_ping(wdog); - do_coundown = 1; + do_countdown = 1; return 0; } static int octeon_wdt_stop(struct watchdog_device *wdog) { - do_coundown = 0; + do_countdown = 0; octeon_wdt_ping(wdog); return 0; } @@ -540,14 +528,25 @@ static enum cpuhp_state octeon_wdt_online; */ static int __init octeon_wdt_init(void) { - int i; int ret; - u64 *ptr; + + octeon_wdt_bootvector = cvmx_boot_vector_get(); + if (!octeon_wdt_bootvector) { + pr_err("Error: Cannot allocate boot vector.\n"); + return -ENOMEM; + } + + if (OCTEON_IS_MODEL(OCTEON_CN68XX)) + divisor = 0x200; + else if (OCTEON_IS_MODEL(OCTEON_CN78XX)) + divisor = 0x400; + else + divisor = 0x100; /* * Watchdog time expiration length = The 16 bits of LEN * represent the most significant bits of a 24 bit decrementer - * that decrements every 256 cycles. + * that decrements every divisor cycle. * * Try for a timeout of 5 sec, if that fails a smaller number * of even seconds, @@ -555,8 +554,7 @@ static int __init octeon_wdt_init(void) max_timeout_sec = 6; do { max_timeout_sec--; - timeout_cnt = ((octeon_get_io_clock_rate() >> 8) * - max_timeout_sec) >> 8; + timeout_cnt = ((octeon_get_io_clock_rate() / divisor) * max_timeout_sec) >> 8; } while (timeout_cnt > 65535); BUG_ON(timeout_cnt == 0); @@ -576,16 +574,10 @@ static int __init octeon_wdt_init(void) return ret; } - /* Build the NMI handler ... */ - octeon_wdt_build_stage1(); - - /* ... and install it. */ - ptr = (u64 *) nmi_stage1_insns; - for (i = 0; i < 16; i++) { - cvmx_write_csr(CVMX_MIO_BOOT_LOC_ADR, i * 8); - cvmx_write_csr(CVMX_MIO_BOOT_LOC_DAT, ptr[i]); + if (disable) { + pr_notice("disabled\n"); + return 0; } - cvmx_write_csr(CVMX_MIO_BOOT_LOC_CFGX(0), 0x81fc0000); cpumask_clear(&irq_enabled_cpus); @@ -607,6 +599,10 @@ err: static void __exit octeon_wdt_cleanup(void) { watchdog_unregister_device(&octeon_wdt); + + if (disable) + return; + cpuhp_remove_state(octeon_wdt_online); /* @@ -617,7 +613,7 @@ static void __exit octeon_wdt_cleanup(void) } MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>"); -MODULE_DESCRIPTION("Cavium Networks Octeon Watchdog driver."); +MODULE_AUTHOR("Cavium Inc. <support@cavium.com>"); +MODULE_DESCRIPTION("Cavium Inc. OCTEON Watchdog driver."); module_init(octeon_wdt_init); module_exit(octeon_wdt_cleanup); diff --git a/drivers/watchdog/octeon-wdt-nmi.S b/drivers/watchdog/octeon-wdt-nmi.S index 8a900a5e3233..97f6eb7b5a8e 100644 --- a/drivers/watchdog/octeon-wdt-nmi.S +++ b/drivers/watchdog/octeon-wdt-nmi.S @@ -3,20 +3,40 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 2007 Cavium Networks + * Copyright (C) 2007-2017 Cavium, Inc. */ #include <asm/asm.h> #include <asm/regdef.h> -#define SAVE_REG(r) sd $r, -32768+6912-(32-r)*8($0) +#define CVMSEG_BASE -32768 +#define CVMSEG_SIZE 6912 +#define SAVE_REG(r) sd $r, CVMSEG_BASE + CVMSEG_SIZE - ((32 - r) * 8)($0) NESTED(octeon_wdt_nmi_stage2, 0, sp) .set push .set noreorder .set noat - /* Save all registers to the top CVMSEG. This shouldn't + /* Clear Dcache so cvmseg works right. 
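A worked expansion of the new SAVE_REG() addressing in octeon-wdt-nmi.S, showing where the save area and the stack land inside CVMSEG (values are the negative offsets used with the $0 base):

SAVE_REG(31) stores at -32768 + 6912 - (32 - 31) * 8 = -25864($0)   /* highest slot */
SAVE_REG(0)  stores at -32768 + 6912 - (32 -  0) * 8 = -26112($0)   /* lowest slot  */
sp is set to           -32768 + 6912 - (32 * 8)      = -26112,  the $0 slot, so the
reg[32] array handed to octeon_wdt_nmi_stage3() maps reg[i] directly onto register $i.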
*/ + cache 1,0($0) + /* Use K0 to do a read/modify/write of CVMMEMCTL */ + dmfc0 k0, $11, 7 + /* Clear out the size of CVMSEG */ + dins k0, $0, 0, 6 + /* Set CVMSEG to its largest value */ + ori k0, k0, 0x1c0 | 54 + /* Store the CVMMEMCTL value */ + dmtc0 k0, $11, 7 + /* + * Restore K0 from the debug scratch register, it was saved in + * the boot-vector code. + */ + dmfc0 k0, $31 + + /* + * Save all registers to the top CVMSEG. This shouldn't * corrupt any state used by the kernel. Also all registers - * should have the value right before the NMI. */ + * should have the value right before the NMI. + */ SAVE_REG(0) SAVE_REG(1) SAVE_REG(2) @@ -49,16 +69,22 @@ SAVE_REG(29) SAVE_REG(30) SAVE_REG(31) + /* Write zero to all CVMSEG locations per Core-15169 */ + dli a0, CVMSEG_SIZE - (33 * 8) +1: sd zero, CVMSEG_BASE(a0) + daddiu a0, a0, -8 + bgez a0, 1b + nop /* Set the stack to begin right below the registers */ - li sp, -32768+6912-32*8 + dli sp, CVMSEG_BASE + CVMSEG_SIZE - (32 * 8) /* Load the address of the third stage handler */ - dla a0, octeon_wdt_nmi_stage3 + dla $25, octeon_wdt_nmi_stage3 /* Call the third stage handler */ - jal a0 + jal $25 /* a0 is the address of the saved registers */ move a0, sp /* Loop forvever if we get here. */ -1: b 1b +2: b 2b nop .set pop END(octeon_wdt_nmi_stage2) diff --git a/drivers/watchdog/of_xilinx_wdt.c b/drivers/watchdog/of_xilinx_wdt.c index fae7fe929ea3..1cf286945b7a 100644 --- a/drivers/watchdog/of_xilinx_wdt.c +++ b/drivers/watchdog/of_xilinx_wdt.c @@ -51,9 +51,16 @@ struct xwdt_device { static int xilinx_wdt_start(struct watchdog_device *wdd) { + int ret; u32 control_status_reg; struct xwdt_device *xdev = watchdog_get_drvdata(wdd); + ret = clk_enable(xdev->clk); + if (ret) { + dev_err(wdd->parent, "Failed to enable clock\n"); + return ret; + } + spin_lock(&xdev->spinlock); /* Clean previous status and enable the watchdog timer */ @@ -85,6 +92,9 @@ static int xilinx_wdt_stop(struct watchdog_device *wdd) iowrite32(0, xdev->base + XWT_TWCSR1_OFFSET); spin_unlock(&xdev->spinlock); + + clk_disable(xdev->clk); + pr_info("Stopped!\n"); return 0; @@ -167,11 +177,6 @@ static int xwdt_probe(struct platform_device *pdev) if (IS_ERR(xdev->base)) return PTR_ERR(xdev->base); - rc = of_property_read_u32(pdev->dev.of_node, "clock-frequency", &pfreq); - if (rc) - dev_warn(&pdev->dev, - "The watchdog clock frequency cannot be obtained\n"); - rc = of_property_read_u32(pdev->dev.of_node, "xlnx,wdt-interval", &xdev->wdt_interval); if (rc) @@ -186,6 +191,26 @@ static int xwdt_probe(struct platform_device *pdev) watchdog_set_nowayout(xilinx_wdt_wdd, enable_once); + xdev->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(xdev->clk)) { + if (PTR_ERR(xdev->clk) != -ENOENT) + return PTR_ERR(xdev->clk); + + /* + * Clock framework support is optional, continue on + * anyways if we don't find a matching clock. 
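A note on why the fallback can simply leave xdev->clk as NULL: the clock API accepts a NULL clk as a valid "no clock" handle, so clk_prepare_enable(), clk_enable() and clk_disable() become no-ops and none of the later calls need to special-case the missing clock. A condensed sketch of the prepare/enable pairing this hunk establishes (function names are from this driver; this is a summary, not the full probe path):

/*
 * probe:             clk_prepare_enable(xdev->clk);  -- prepared + enabled for setup
 *                    ...watchdog registered...
 *                    clk_disable(xdev->clk);         -- stays prepared, gated until start
 * xilinx_wdt_start:  clk_enable(xdev->clk);          -- lightweight enable per start
 * xilinx_wdt_stop:   clk_disable(xdev->clk);         -- balances the enable in start
 */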
+ */ + xdev->clk = NULL; + + rc = of_property_read_u32(pdev->dev.of_node, "clock-frequency", + &pfreq); + if (rc) + dev_warn(&pdev->dev, + "The watchdog clock freq cannot be obtained\n"); + } else { + pfreq = clk_get_rate(xdev->clk); + } + /* * Twice of the 2^wdt_interval / freq because the first wdt overflow is * ignored (interrupt), reset is only generated at second wdt overflow @@ -197,14 +222,6 @@ static int xwdt_probe(struct platform_device *pdev) spin_lock_init(&xdev->spinlock); watchdog_set_drvdata(xilinx_wdt_wdd, xdev); - xdev->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(xdev->clk)) { - if (PTR_ERR(xdev->clk) == -ENOENT) - xdev->clk = NULL; - else - return PTR_ERR(xdev->clk); - } - rc = clk_prepare_enable(xdev->clk); if (rc) { dev_err(&pdev->dev, "unable to enable clock\n"); @@ -223,6 +240,8 @@ static int xwdt_probe(struct platform_device *pdev) goto err_clk_disable; } + clk_disable(xdev->clk); + dev_info(&pdev->dev, "Xilinx Watchdog Timer at %p with timeout %ds\n", xdev->base, xilinx_wdt_wdd->timeout); @@ -245,6 +264,43 @@ static int xwdt_remove(struct platform_device *pdev) return 0; } +/** + * xwdt_suspend - Suspend the device. + * + * @dev: handle to the device structure. + * Return: 0 always. + */ +static int __maybe_unused xwdt_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct xwdt_device *xdev = platform_get_drvdata(pdev); + + if (watchdog_active(&xdev->xilinx_wdt_wdd)) + xilinx_wdt_stop(&xdev->xilinx_wdt_wdd); + + return 0; +} + +/** + * xwdt_resume - Resume the device. + * + * @dev: handle to the device structure. + * Return: 0 on success, errno otherwise. + */ +static int __maybe_unused xwdt_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct xwdt_device *xdev = platform_get_drvdata(pdev); + int ret = 0; + + if (watchdog_active(&xdev->xilinx_wdt_wdd)) + ret = xilinx_wdt_start(&xdev->xilinx_wdt_wdd); + + return ret; +} + +static SIMPLE_DEV_PM_OPS(xwdt_pm_ops, xwdt_suspend, xwdt_resume); + /* Match table for of_platform binding */ static const struct of_device_id xwdt_of_match[] = { { .compatible = "xlnx,xps-timebase-wdt-1.00.a", }, @@ -259,6 +315,7 @@ static struct platform_driver xwdt_driver = { .driver = { .name = WATCHDOG_NAME, .of_match_table = xwdt_of_match, + .pm = &xwdt_pm_ops, }, }; diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c index 5615f4013924..b9e376c8e2e3 100644 --- a/drivers/watchdog/pcwd_usb.c +++ b/drivers/watchdog/pcwd_usb.c @@ -74,7 +74,7 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" #define USB_PCWD_PRODUCT_ID 0x1140 /* table of devices that work with this driver */ -static struct usb_device_id usb_pcwd_table[] = { +static const struct usb_device_id usb_pcwd_table[] = { { USB_DEVICE(USB_PCWD_VENDOR_ID, USB_PCWD_PRODUCT_ID) }, { } /* Terminating entry */ }; diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c index 4f47b5e90956..780971318810 100644 --- a/drivers/watchdog/qcom-wdt.c +++ b/drivers/watchdog/qcom-wdt.c @@ -162,6 +162,8 @@ static int qcom_wdt_probe(struct platform_device *pdev) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENOMEM; /* We use CPU0's DGT for the watchdog */ if (of_property_read_u32(np, "cpu-offset", &percpu_offset)) diff --git a/drivers/watchdog/renesas_wdt.c b/drivers/watchdog/renesas_wdt.c index cf61c92f7ecd..831ef83f6de1 100644 --- a/drivers/watchdog/renesas_wdt.c +++ b/drivers/watchdog/renesas_wdt.c @@ 
-1,8 +1,8 @@ /* * Watchdog driver for Renesas WDT watchdog * - * Copyright (C) 2015-16 Wolfram Sang, Sang Engineering <wsa@sang-engineering.com> - * Copyright (C) 2015-16 Renesas Electronics Corporation + * Copyright (C) 2015-17 Wolfram Sang, Sang Engineering <wsa@sang-engineering.com> + * Copyright (C) 2015-17 Renesas Electronics Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by @@ -23,10 +23,22 @@ #define RWTCSRA_WOVF BIT(4) #define RWTCSRA_WRFLG BIT(5) #define RWTCSRA_TME BIT(7) +#define RWTCSRB 8 #define RWDT_DEFAULT_TIMEOUT 60U -static const unsigned int clk_divs[] = { 1, 4, 16, 32, 64, 128, 1024 }; +/* + * In probe, clk_rate is checked to be not more than 16 bit * biggest clock + * divider (12 bits). d is only a factor to fully utilize the WDT counter and + * will not exceed its 16 bits. Thus, no overflow, we stay below 32 bits. + */ +#define MUL_BY_CLKS_PER_SEC(p, d) \ + DIV_ROUND_UP((d) * (p)->clk_rate, clk_divs[(p)->cks]) + +/* d is 16 bit, clk_divs 12 bit -> no 32 bit overflow */ +#define DIV_BY_CLKS_PER_SEC(p, d) ((d) * clk_divs[(p)->cks] / (p)->clk_rate) + +static const unsigned int clk_divs[] = { 1, 4, 16, 32, 64, 128, 1024, 4096 }; static bool nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, 0); @@ -36,8 +48,7 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" struct rwdt_priv { void __iomem *base; struct watchdog_device wdev; - struct clk *clk; - unsigned int clks_per_sec; + unsigned long clk_rate; u8 cks; }; @@ -55,7 +66,7 @@ static int rwdt_init_timeout(struct watchdog_device *wdev) { struct rwdt_priv *priv = watchdog_get_drvdata(wdev); - rwdt_write(priv, 65536 - wdev->timeout * priv->clks_per_sec, RWTCNT); + rwdt_write(priv, 65536 - MUL_BY_CLKS_PER_SEC(priv, wdev->timeout), RWTCNT); return 0; } @@ -64,8 +75,9 @@ static int rwdt_start(struct watchdog_device *wdev) { struct rwdt_priv *priv = watchdog_get_drvdata(wdev); - clk_prepare_enable(priv->clk); + pm_runtime_get_sync(wdev->parent); + rwdt_write(priv, 0, RWTCSRB); rwdt_write(priv, priv->cks, RWTCSRA); rwdt_init_timeout(wdev); @@ -82,7 +94,7 @@ static int rwdt_stop(struct watchdog_device *wdev) struct rwdt_priv *priv = watchdog_get_drvdata(wdev); rwdt_write(priv, priv->cks, RWTCSRA); - clk_disable_unprepare(priv->clk); + pm_runtime_put(wdev->parent); return 0; } @@ -92,7 +104,7 @@ static unsigned int rwdt_get_timeleft(struct watchdog_device *wdev) struct rwdt_priv *priv = watchdog_get_drvdata(wdev); u16 val = readw_relaxed(priv->base + RWTCNT); - return DIV_ROUND_CLOSEST(65536 - val, priv->clks_per_sec); + return DIV_BY_CLKS_PER_SEC(priv, 65536 - val); } static const struct watchdog_info rwdt_ident = { @@ -112,8 +124,8 @@ static int rwdt_probe(struct platform_device *pdev) { struct rwdt_priv *priv; struct resource *res; - unsigned long rate; - unsigned int clks_per_sec; + struct clk *clk; + unsigned long clks_per_sec; int ret, i; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); @@ -125,36 +137,40 @@ static int rwdt_probe(struct platform_device *pdev) if (IS_ERR(priv->base)) return PTR_ERR(priv->base); - priv->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(priv->clk)) - return PTR_ERR(priv->clk); + clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(clk)) + return PTR_ERR(clk); + + pm_runtime_enable(&pdev->dev); - rate = clk_get_rate(priv->clk); - if (!rate) - return -ENOENT; + pm_runtime_get_sync(&pdev->dev); + priv->clk_rate = clk_get_rate(clk); + 
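A worked check of the no-overflow claim in the MUL_BY_CLKS_PER_SEC()/DIV_BY_CLKS_PER_SEC() comment, assuming an illustrative 32768 Hz clock (the rate is an assumption for the example only):

/* The selection loop below settles on the 4096 divider: 32768 / 4096 = 8
 * counts per second, which is non-zero and below 65536. That makes
 * max_timeout = DIV_BY_CLKS_PER_SEC(priv, 65536) = 65536 * 4096 / 32768 = 8192 s.
 * Even at that maximum, MUL_BY_CLKS_PER_SEC() multiplies 8192 * 32768 = 2^28,
 * the same worst case as the comment's "16 bit * 12 bit" bound (65536 * 4096),
 * so the intermediate product stays well inside 32 bits. */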
pm_runtime_put(&pdev->dev); + + if (!priv->clk_rate) { + ret = -ENOENT; + goto out_pm_disable; + } for (i = ARRAY_SIZE(clk_divs) - 1; i >= 0; i--) { - clks_per_sec = DIV_ROUND_UP(rate, clk_divs[i]); - if (clks_per_sec) { - priv->clks_per_sec = clks_per_sec; + clks_per_sec = priv->clk_rate / clk_divs[i]; + if (clks_per_sec && clks_per_sec < 65536) { priv->cks = i; break; } } - if (!clks_per_sec) { + if (i < 0) { dev_err(&pdev->dev, "Can't find suitable clock divider\n"); - return -ERANGE; + ret = -ERANGE; + goto out_pm_disable; } - pm_runtime_enable(&pdev->dev); - pm_runtime_get_sync(&pdev->dev); - priv->wdev.info = &rwdt_ident, priv->wdev.ops = &rwdt_ops, priv->wdev.parent = &pdev->dev; priv->wdev.min_timeout = 1; - priv->wdev.max_timeout = 65536 / clks_per_sec; + priv->wdev.max_timeout = DIV_BY_CLKS_PER_SEC(priv, 65536); priv->wdev.timeout = min(priv->wdev.max_timeout, RWDT_DEFAULT_TIMEOUT); platform_set_drvdata(pdev, priv); @@ -167,13 +183,14 @@ static int rwdt_probe(struct platform_device *pdev) dev_warn(&pdev->dev, "Specified timeout value invalid, using default\n"); ret = watchdog_register_device(&priv->wdev); - if (ret < 0) { - pm_runtime_put(&pdev->dev); - pm_runtime_disable(&pdev->dev); - return ret; - } + if (ret < 0) + goto out_pm_disable; return 0; + + out_pm_disable: + pm_runtime_disable(&pdev->dev); + return ret; } static int rwdt_remove(struct platform_device *pdev) @@ -181,7 +198,6 @@ static int rwdt_remove(struct platform_device *pdev) struct rwdt_priv *priv = platform_get_drvdata(pdev); watchdog_unregister_device(&priv->wdev); - pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); return 0; diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c index 05524baf7dcc..98967f0a7d10 100644 --- a/drivers/watchdog/rt2880_wdt.c +++ b/drivers/watchdog/rt2880_wdt.c @@ -119,7 +119,7 @@ static int rt288x_wdt_bootcause(void) return 0; } -static struct watchdog_info rt288x_wdt_info = { +static const struct watchdog_info rt288x_wdt_info = { .identity = "Ralink Watchdog", .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, }; @@ -152,7 +152,7 @@ static int rt288x_wdt_probe(struct platform_device *pdev) if (IS_ERR(rt288x_wdt_clk)) return PTR_ERR(rt288x_wdt_clk); - rt288x_wdt_reset = devm_reset_control_get(&pdev->dev, NULL); + rt288x_wdt_reset = devm_reset_control_get_exclusive(&pdev->dev, NULL); if (!IS_ERR(rt288x_wdt_reset)) reset_control_deassert(rt288x_wdt_reset); diff --git a/drivers/watchdog/sc1200wdt.c b/drivers/watchdog/sc1200wdt.c index b34d3d5ba632..8e4e2fc13f87 100644 --- a/drivers/watchdog/sc1200wdt.c +++ b/drivers/watchdog/sc1200wdt.c @@ -342,7 +342,7 @@ static int __init sc1200wdt_probe(void) #if defined CONFIG_PNP -static struct pnp_device_id scl200wdt_pnp_devices[] = { +static const struct pnp_device_id scl200wdt_pnp_devices[] = { /* National Semiconductor PC87307/PC97307 watchdog component */ {.id = "NSC0800", .driver_data = 0}, {.id = ""}, diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c index e7a715e82021..03805bc5d67a 100644 --- a/drivers/watchdog/sp805_wdt.c +++ b/drivers/watchdog/sp805_wdt.c @@ -281,7 +281,7 @@ static int __maybe_unused sp805_wdt_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(sp805_wdt_dev_pm_ops, sp805_wdt_suspend, sp805_wdt_resume); -static struct amba_id sp805_wdt_ids[] = { +static const struct amba_id sp805_wdt_ids[] = { { .id = 0x00141805, .mask = 0x00ffffff, diff --git a/drivers/watchdog/stm32_iwdg.c b/drivers/watchdog/stm32_iwdg.c index 6c501b7dba29..be64a8699de3 100644 --- 
a/drivers/watchdog/stm32_iwdg.c +++ b/drivers/watchdog/stm32_iwdg.c @@ -140,7 +140,7 @@ static const struct watchdog_info stm32_iwdg_info = { .identity = "STM32 Independent Watchdog", }; -static struct watchdog_ops stm32_iwdg_ops = { +static const struct watchdog_ops stm32_iwdg_ops = { .owner = THIS_MODULE, .start = stm32_iwdg_start, .ping = stm32_iwdg_ping, diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c index 17c25daebcce..811e43c39ec4 100644 --- a/drivers/watchdog/ts72xx_wdt.c +++ b/drivers/watchdog/ts72xx_wdt.c @@ -112,7 +112,7 @@ static const struct watchdog_info ts72xx_wdt_ident = { .identity = "TS-72XX WDT", }; -static struct watchdog_ops ts72xx_wdt_ops = { +static const struct watchdog_ops ts72xx_wdt_ops = { .owner = THIS_MODULE, .start = ts72xx_wdt_start, .stop = ts72xx_wdt_stop, diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c index d9ba0496713c..7817836bff55 100644 --- a/drivers/watchdog/w83627hf_wdt.c +++ b/drivers/watchdog/w83627hf_wdt.c @@ -429,7 +429,7 @@ static int __init wdt_init(void) { int ret; int chip; - const char * const chip_name[] = { + static const char * const chip_name[] = { "W83627HF", "W83627S", "W83697HF", diff --git a/drivers/watchdog/ziirave_wdt.c b/drivers/watchdog/ziirave_wdt.c index b4e0cea5a64e..d3594aa3a374 100644 --- a/drivers/watchdog/ziirave_wdt.c +++ b/drivers/watchdog/ziirave_wdt.c @@ -737,7 +737,7 @@ static int ziirave_wdt_remove(struct i2c_client *client) return 0; } -static struct i2c_device_id ziirave_wdt_id[] = { +static const struct i2c_device_id ziirave_wdt_id[] = { { "rave-wdt", 0 }, { } }; diff --git a/drivers/watchdog/zx2967_wdt.c b/drivers/watchdog/zx2967_wdt.c index 69ec5855584b..9261f7c77f6d 100644 --- a/drivers/watchdog/zx2967_wdt.c +++ b/drivers/watchdog/zx2967_wdt.c @@ -229,7 +229,7 @@ static int zx2967_wdt_probe(struct platform_device *pdev) } clk_set_rate(wdt->clock, ZX2967_WDT_CLK_FREQ); - rstc = devm_reset_control_get(dev, NULL); + rstc = devm_reset_control_get_exclusive(dev, NULL); if (IS_ERR(rstc)) { dev_err(dev, "failed to get rstc"); ret = PTR_ERR(rstc); diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c index 1bf55a32a4b3..3fa40c723e8e 100644 --- a/drivers/xen/gntalloc.c +++ b/drivers/xen/gntalloc.c @@ -294,7 +294,7 @@ static long gntalloc_ioctl_alloc(struct gntalloc_file_private_data *priv, goto out; } - gref_ids = kcalloc(op.count, sizeof(gref_ids[0]), GFP_TEMPORARY); + gref_ids = kcalloc(op.count, sizeof(gref_ids[0]), GFP_KERNEL); if (!gref_ids) { rc = -ENOMEM; goto out; diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c index 5fbfd9cfb6d6..5b3d57fc82d3 100644 --- a/drivers/xen/xen-pciback/conf_space_header.c +++ b/drivers/xen/xen-pciback/conf_space_header.c @@ -169,6 +169,9 @@ static int rom_write(struct pci_dev *dev, int offset, u32 value, void *data) static int bar_write(struct pci_dev *dev, int offset, u32 value, void *data) { struct pci_bar_info *bar = data; + unsigned int pos = (offset - PCI_BASE_ADDRESS_0) / 4; + const struct resource *res = dev->resource; + u32 mask; if (unlikely(!bar)) { pr_warn(DRV_NAME ": driver data not found for %s\n", @@ -179,7 +182,13 @@ static int bar_write(struct pci_dev *dev, int offset, u32 value, void *data) /* A write to obtain the length must happen as a 32-bit write. 
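To make the sizing-write detection added below concrete, here is how the three mask cases behave for typical size-probe values written by a guest:

0xfffffff0 | 0x0f == ~0U   /* 32-bit memory BAR: mask = ~PCI_BASE_ADDRESS_MEM_MASK, low 4 bits are flags */
0xfffffffc | 0x03 == ~0U   /* I/O BAR: mask = ~PCI_BASE_ADDRESS_IO_MASK, low 2 bits are flags            */
     value | 0x00 == ~0U   /* upper half of a 64-bit BAR: only an exact 0xffffffff qualifies              */

So a guest that writes the architecturally correct all-size-bits pattern, rather than literally ~0, is still recognized as probing the BAR length.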
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 82a8866758ee..a1c17000129b 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -519,64 +519,6 @@ static int __xenbus_map_ring(struct xenbus_device *dev,
         return err;
 }
 
-static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
-                                     grant_ref_t *gnt_refs,
-                                     unsigned int nr_grefs,
-                                     void **vaddr)
-{
-        struct xenbus_map_node *node;
-        struct vm_struct *area;
-        pte_t *ptes[XENBUS_MAX_RING_GRANTS];
-        phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
-        int err = GNTST_okay;
-        int i;
-        bool leaked;
-
-        *vaddr = NULL;
-
-        if (nr_grefs > XENBUS_MAX_RING_GRANTS)
-                return -EINVAL;
-
-        node = kzalloc(sizeof(*node), GFP_KERNEL);
-        if (!node)
-                return -ENOMEM;
-
-        area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, ptes);
-        if (!area) {
-                kfree(node);
-                return -ENOMEM;
-        }
-
-        for (i = 0; i < nr_grefs; i++)
-                phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr;
-
-        err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
-                                phys_addrs,
-                                GNTMAP_host_map | GNTMAP_contains_pte,
-                                &leaked);
-        if (err)
-                goto failed;
-
-        node->nr_handles = nr_grefs;
-        node->pv.area = area;
-
-        spin_lock(&xenbus_valloc_lock);
-        list_add(&node->next, &xenbus_valloc_pages);
-        spin_unlock(&xenbus_valloc_lock);
-
-        *vaddr = area->addr;
-        return 0;
-
-failed:
-        if (!leaked)
-                free_vm_area(area);
-        else
-                pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);
-
-        kfree(node);
-        return err;
-}
-
 struct map_ring_valloc_hvm
 {
         unsigned int idx;
@@ -725,6 +667,65 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
 }
 EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
 
+#ifdef CONFIG_XEN_PV
+static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
+                                     grant_ref_t *gnt_refs,
+                                     unsigned int nr_grefs,
+                                     void **vaddr)
+{
+        struct xenbus_map_node *node;
+        struct vm_struct *area;
+        pte_t *ptes[XENBUS_MAX_RING_GRANTS];
+        phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
+        int err = GNTST_okay;
+        int i;
+        bool leaked;
+
+        *vaddr = NULL;
+
+        if (nr_grefs > XENBUS_MAX_RING_GRANTS)
+                return -EINVAL;
+
+        node = kzalloc(sizeof(*node), GFP_KERNEL);
+        if (!node)
+                return -ENOMEM;
+
+        area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, ptes);
+        if (!area) {
+                kfree(node);
+                return -ENOMEM;
+        }
+
+        for (i = 0; i < nr_grefs; i++)
+                phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr;
+
+        err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
+                                phys_addrs,
+                                GNTMAP_host_map | GNTMAP_contains_pte,
+                                &leaked);
+        if (err)
+                goto failed;
+
+        node->nr_handles = nr_grefs;
+        node->pv.area = area;
+
+        spin_lock(&xenbus_valloc_lock);
+        list_add(&node->next, &xenbus_valloc_pages);
+        spin_unlock(&xenbus_valloc_lock);
+
+        *vaddr = area->addr;
+        return 0;
+
+failed:
+        if (!leaked)
+                free_vm_area(area);
+        else
+                pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);
+
+        kfree(node);
+        return err;
+}
+
 static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
 {
         struct xenbus_map_node *node;
@@ -788,6 +789,12 @@ static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
         return err;
 }
 
+static const struct xenbus_ring_ops ring_ops_pv = {
+        .map = xenbus_map_ring_valloc_pv,
+        .unmap = xenbus_unmap_ring_vfree_pv,
+};
+#endif
+
 struct unmap_ring_vfree_hvm
 {
         unsigned int idx;
@@ -916,11 +923,6 @@ enum xenbus_state xenbus_read_driver_state(const char *path)
 }
 EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
 
-static const struct xenbus_ring_ops ring_ops_pv = {
-        .map = xenbus_map_ring_valloc_pv,
-        .unmap = xenbus_unmap_ring_vfree_pv,
-};
-
 static const struct xenbus_ring_ops ring_ops_hvm = {
         .map = xenbus_map_ring_valloc_hvm,
         .unmap = xenbus_unmap_ring_vfree_hvm,
@@ -928,8 +930,10 @@ static const struct xenbus_ring_ops ring_ops_hvm = {
 
 void __init xenbus_ring_ops_init(void)
 {
+#ifdef CONFIG_XEN_PV
         if (!xen_feature(XENFEAT_auto_translated_physmap))
                 ring_ops = &ring_ops_pv;
         else
+#endif
                 ring_ops = &ring_ops_hvm;
 }
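The xenbus_client.c hunks above compile the PV mapping path only when CONFIG_XEN_PV is set while keeping the runtime choice between PV and HVM ring ops. Below is a simplified stand-alone sketch of that compile-time-plus-runtime dispatch; the local struct ring_ops and have_auto_translated_physmap() are assumptions standing in for the kernel's xenbus_ring_ops and xen_feature(XENFEAT_auto_translated_physmap).

/*
 * Sketch of the dispatch pattern in xenbus_ring_ops_init() above.
 * Build with -DCONFIG_XEN_PV to include the PV branch.
 */
#include <stdbool.h>
#include <stdio.h>

struct ring_ops {
        const char *name;
};

#ifdef CONFIG_XEN_PV
static const struct ring_ops ring_ops_pv = { .name = "pv" };
#endif
static const struct ring_ops ring_ops_hvm = { .name = "hvm" };

static const struct ring_ops *ring_ops;

/* Stand-in for xen_feature(XENFEAT_auto_translated_physmap). */
static bool have_auto_translated_physmap(void)
{
        return true;    /* pretend we run as an HVM/PVH guest */
}

static void ring_ops_init(void)
{
        /* The PV branch only exists when the PV code is compiled in;
         * otherwise the runtime check is skipped entirely. */
#ifdef CONFIG_XEN_PV
        if (!have_auto_translated_physmap())
                ring_ops = &ring_ops_pv;
        else
#endif
                ring_ops = &ring_ops_hvm;
}

int main(void)
{
        ring_ops_init();
        printf("using %s ring ops\n", ring_ops->name);
        return 0;
}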