Diffstat (limited to 'drivers')
129 files changed, 5575 insertions, 1109 deletions
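Two of the regmap changes in this diff add new driver-facing interfaces. The first is regmap_test_bits(), which reads a register and reports whether all of the requested bits are set. The sketch below is only an illustration of how a caller might use it; the check_device_ready() wrapper, the 0x10 status register and the BIT(0) ready mask are hypothetical and not taken from the patch:

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/regmap.h>

/*
 * Poll-style check built on the new helper: a negative errno is
 * propagated when the underlying regmap_read() fails, 0 means at least
 * one tested bit is clear, 1 means all tested bits are set.
 */
static int check_device_ready(struct regmap *map)
{
	int ret;

	ret = regmap_test_bits(map, 0x10, BIT(0));
	if (ret < 0)
		return ret;		/* read failed */

	return ret ? 0 : -EBUSY;	/* 1 == all tested bits set */
}

The second is the regmap-i2c path for 16-bit registers with 8-bit values over SMBus I2C block transfers. As a rough sketch under the assumption of an SMBus-only adapter (example_config and example_probe() are illustrative names, not part of the patch), a config like the following would now be matched to the new regmap_i2c_smbus_i2c_block_reg16 bus by regmap_get_i2c_bus():

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>

static const struct regmap_config example_config = {
	.reg_bits = 16,	/* with val_bits == 8, selects the new reg16 path */
	.val_bits = 8,
};

static int example_probe(struct i2c_client *client)
{
	struct regmap *map;

	map = devm_regmap_init_i2c(client, &example_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	return 0;
}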
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c index e72843fe41df..089e5dc7144a 100644 --- a/drivers/base/regmap/regmap-debugfs.c +++ b/drivers/base/regmap/regmap-debugfs.c @@ -227,6 +227,9 @@ static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from, if (*ppos < 0 || !count) return -EINVAL; + if (count > (PAGE_SIZE << (MAX_ORDER - 1))) + count = PAGE_SIZE << (MAX_ORDER - 1); + buf = kmalloc(count, GFP_KERNEL); if (!buf) return -ENOMEM; @@ -371,6 +374,9 @@ static ssize_t regmap_reg_ranges_read_file(struct file *file, if (*ppos < 0 || !count) return -EINVAL; + if (count > (PAGE_SIZE << (MAX_ORDER - 1))) + count = PAGE_SIZE << (MAX_ORDER - 1); + buf = kmalloc(count, GFP_KERNEL); if (!buf) return -ENOMEM; diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c index 008f8da69d97..62b95a9212ae 100644 --- a/drivers/base/regmap/regmap-i2c.c +++ b/drivers/base/regmap/regmap-i2c.c @@ -246,6 +246,63 @@ static const struct regmap_bus regmap_i2c_smbus_i2c_block = { .max_raw_write = I2C_SMBUS_BLOCK_MAX, }; +static int regmap_i2c_smbus_i2c_write_reg16(void *context, const void *data, + size_t count) +{ + struct device *dev = context; + struct i2c_client *i2c = to_i2c_client(dev); + + if (count < 2) + return -EINVAL; + + count--; + return i2c_smbus_write_i2c_block_data(i2c, ((u8 *)data)[0], count, + (u8 *)data + 1); +} + +static int regmap_i2c_smbus_i2c_read_reg16(void *context, const void *reg, + size_t reg_size, void *val, + size_t val_size) +{ + struct device *dev = context; + struct i2c_client *i2c = to_i2c_client(dev); + int ret, count, len = val_size; + + if (reg_size != 2) + return -EINVAL; + + ret = i2c_smbus_write_byte_data(i2c, ((u16 *)reg)[0] & 0xff, + ((u16 *)reg)[0] >> 8); + if (ret < 0) + return ret; + + count = 0; + do { + /* Current Address Read */ + ret = i2c_smbus_read_byte(i2c); + if (ret < 0) + break; + + *((u8 *)val++) = ret; + count++; + len--; + } while (len > 0); + + if (count == val_size) + return 0; + else if (ret < 0) + return ret; + else + return -EIO; +} + +static const struct regmap_bus regmap_i2c_smbus_i2c_block_reg16 = { + .write = regmap_i2c_smbus_i2c_write_reg16, + .read = regmap_i2c_smbus_i2c_read_reg16, + .max_raw_read = I2C_SMBUS_BLOCK_MAX, + .max_raw_write = I2C_SMBUS_BLOCK_MAX, +}; + static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c, const struct regmap_config *config) { @@ -255,6 +312,10 @@ static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c, i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) return ®map_i2c_smbus_i2c_block; + else if (config->val_bits == 8 && config->reg_bits == 16 && + i2c_check_functionality(i2c->adapter, + I2C_FUNC_SMBUS_I2C_BLOCK)) + return ®map_i2c_smbus_i2c_block_reg16; else if (config->val_bits == 16 && config->reg_bits == 8 && i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_WORD_DATA)) diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index 3d64c9331a82..4340e1d268b6 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -541,8 +541,9 @@ static const struct irq_domain_ops regmap_domain_ops = { }; /** - * regmap_add_irq_chip() - Use standard regmap IRQ controller handling + * regmap_add_irq_chip_np() - Use standard regmap IRQ controller handling * + * @np: The device_node where the IRQ domain should be added to. * @map: The regmap for the device. * @irq: The IRQ the device uses to signal interrupts. 
* @irq_flags: The IRQF_ flags to use for the primary interrupt. @@ -556,9 +557,10 @@ static const struct irq_domain_ops regmap_domain_ops = { * register cache. The chip driver is responsible for restoring the * register values used by the IRQ controller over suspend and resume. */ -int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, - int irq_base, const struct regmap_irq_chip *chip, - struct regmap_irq_chip_data **data) +int regmap_add_irq_chip_np(struct device_node *np, struct regmap *map, int irq, + int irq_flags, int irq_base, + const struct regmap_irq_chip *chip, + struct regmap_irq_chip_data **data) { struct regmap_irq_chip_data *d; int i; @@ -769,12 +771,10 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, } if (irq_base) - d->domain = irq_domain_add_legacy(map->dev->of_node, - chip->num_irqs, irq_base, 0, - ®map_domain_ops, d); + d->domain = irq_domain_add_legacy(np, chip->num_irqs, irq_base, + 0, ®map_domain_ops, d); else - d->domain = irq_domain_add_linear(map->dev->of_node, - chip->num_irqs, + d->domain = irq_domain_add_linear(np, chip->num_irqs, ®map_domain_ops, d); if (!d->domain) { dev_err(map->dev, "Failed to create IRQ domain\n"); @@ -808,6 +808,30 @@ err_alloc: kfree(d); return ret; } +EXPORT_SYMBOL_GPL(regmap_add_irq_chip_np); + +/** + * regmap_add_irq_chip() - Use standard regmap IRQ controller handling + * + * @map: The regmap for the device. + * @irq: The IRQ the device uses to signal interrupts. + * @irq_flags: The IRQF_ flags to use for the primary interrupt. + * @irq_base: Allocate at specific IRQ number if irq_base > 0. + * @chip: Configuration for the interrupt controller. + * @data: Runtime data structure for the controller, allocated on success. + * + * Returns 0 on success or an errno on failure. + * + * This is the same as regmap_add_irq_chip_np, except that the device + * node of the regmap is used. + */ +int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, + int irq_base, const struct regmap_irq_chip *chip, + struct regmap_irq_chip_data **data) +{ + return regmap_add_irq_chip_np(map->dev->of_node, map, irq, irq_flags, + irq_base, chip, data); +} EXPORT_SYMBOL_GPL(regmap_add_irq_chip); /** @@ -875,9 +899,10 @@ static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data) } /** - * devm_regmap_add_irq_chip() - Resource manager regmap_add_irq_chip() + * devm_regmap_add_irq_chip_np() - Resource manager regmap_add_irq_chip_np() * * @dev: The device pointer on which irq_chip belongs to. + * @np: The device_node where the IRQ domain should be added to. * @map: The regmap for the device. * @irq: The IRQ the device uses to signal interrupts * @irq_flags: The IRQF_ flags to use for the primary interrupt. @@ -890,10 +915,11 @@ static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data) * The ®map_irq_chip_data will be automatically released when the device is * unbound. 
*/ -int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq, - int irq_flags, int irq_base, - const struct regmap_irq_chip *chip, - struct regmap_irq_chip_data **data) +int devm_regmap_add_irq_chip_np(struct device *dev, struct device_node *np, + struct regmap *map, int irq, int irq_flags, + int irq_base, + const struct regmap_irq_chip *chip, + struct regmap_irq_chip_data **data) { struct regmap_irq_chip_data **ptr, *d; int ret; @@ -903,8 +929,8 @@ int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq, if (!ptr) return -ENOMEM; - ret = regmap_add_irq_chip(map, irq, irq_flags, irq_base, - chip, &d); + ret = regmap_add_irq_chip_np(np, map, irq, irq_flags, irq_base, + chip, &d); if (ret < 0) { devres_free(ptr); return ret; @@ -915,6 +941,32 @@ int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq, *data = d; return 0; } +EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip_np); + +/** + * devm_regmap_add_irq_chip() - Resource manager regmap_add_irq_chip() + * + * @dev: The device pointer on which irq_chip belongs to. + * @map: The regmap for the device. + * @irq: The IRQ the device uses to signal interrupts + * @irq_flags: The IRQF_ flags to use for the primary interrupt. + * @irq_base: Allocate at specific IRQ number if irq_base > 0. + * @chip: Configuration for the interrupt controller. + * @data: Runtime data structure for the controller, allocated on success + * + * Returns 0 on success or an errno on failure. + * + * The ®map_irq_chip_data will be automatically released when the device is + * unbound. + */ +int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq, + int irq_flags, int irq_base, + const struct regmap_irq_chip *chip, + struct regmap_irq_chip_data **data) +{ + return devm_regmap_add_irq_chip_np(dev, map->dev->of_node, map, irq, + irq_flags, irq_base, chip, data); +} EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip); /** diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 59f911e57719..c472f624382d 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -827,6 +827,7 @@ struct regmap *__regmap_init(struct device *dev, } else if (!bus->read || !bus->write) { map->reg_read = _regmap_bus_reg_read; map->reg_write = _regmap_bus_reg_write; + map->reg_update_bits = bus->reg_update_bits; map->defer_caching = false; goto skip_format_initialization; @@ -2936,6 +2937,28 @@ int regmap_update_bits_base(struct regmap *map, unsigned int reg, } EXPORT_SYMBOL_GPL(regmap_update_bits_base); +/** + * regmap_test_bits() - Check if all specified bits are set in a register. + * + * @map: Register map to operate on + * @reg: Register to read from + * @bits: Bits to test + * + * Returns -1 if the underlying regmap_read() fails, 0 if at least one of the + * tested bits is not set and 1 if all tested bits are set. 
+ */ +int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits) +{ + unsigned int val, ret; + + ret = regmap_read(map, reg, &val); + if (ret) + return ret; + + return (val & bits) == bits; +} +EXPORT_SYMBOL_GPL(regmap_test_bits); + void regmap_async_complete_cb(struct regmap_async *async, int ret) { struct regmap *map = async->map; diff --git a/drivers/char/tpm/eventlog/tpm2.c b/drivers/char/tpm/eventlog/tpm2.c index e741b1157525..37a05800980c 100644 --- a/drivers/char/tpm/eventlog/tpm2.c +++ b/drivers/char/tpm/eventlog/tpm2.c @@ -51,8 +51,7 @@ static void *tpm2_bios_measurements_start(struct seq_file *m, loff_t *pos) int i; event_header = addr; - size = sizeof(struct tcg_pcr_event) - sizeof(event_header->event) - + event_header->event_size; + size = struct_size(event_header, event, event_header->event_size); if (*pos == 0) { if (addr + size < limit) { @@ -98,8 +97,8 @@ static void *tpm2_bios_measurements_next(struct seq_file *m, void *v, event_header = log->bios_event_log; if (v == SEQ_START_TOKEN) { - event_size = sizeof(struct tcg_pcr_event) - - sizeof(event_header->event) + event_header->event_size; + event_size = struct_size(event_header, event, + event_header->event_size); marker = event_header; } else { event = v; @@ -136,9 +135,8 @@ static int tpm2_binary_bios_measurements_show(struct seq_file *m, void *v) size_t size; if (v == SEQ_START_TOKEN) { - size = sizeof(struct tcg_pcr_event) - - sizeof(event_header->event) + event_header->event_size; - + size = struct_size(event_header, event, + event_header->event_size); temp_ptr = event_header; if (size > 0) diff --git a/drivers/char/tpm/tpm_ftpm_tee.c b/drivers/char/tpm/tpm_ftpm_tee.c index 22bf553ccf9d..2491a2cb54a2 100644 --- a/drivers/char/tpm/tpm_ftpm_tee.c +++ b/drivers/char/tpm/tpm_ftpm_tee.c @@ -241,7 +241,7 @@ static int ftpm_tee_probe(struct platform_device *pdev) /* Open a session with fTPM TA */ memset(&sess_arg, 0, sizeof(sess_arg)); - memcpy(sess_arg.uuid, ftpm_ta_uuid.b, TEE_IOCTL_UUID_LEN); + export_uuid(sess_arg.uuid, &ftpm_ta_uuid); sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC; sess_arg.num_params = 0; diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig index 11ec6f466467..abb121f8de52 100644 --- a/drivers/clk/qcom/Kconfig +++ b/drivers/clk/qcom/Kconfig @@ -377,6 +377,7 @@ config SM_GCC_8150 config SM_GCC_8250 tristate "SM8250 Global Clock Controller" + select QCOM_GDSC help Support for the global clock controller on SM8250 devices. 
Say Y if you want to use peripheral devices such as UART, diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c index ef98fdc51755..732bc7c937e6 100644 --- a/drivers/clk/qcom/gcc-sm8150.c +++ b/drivers/clk/qcom/gcc-sm8150.c @@ -76,8 +76,7 @@ static struct clk_alpha_pll_postdiv gpll0_out_even = { .clkr.hw.init = &(struct clk_init_data){ .name = "gpll0_out_even", .parent_data = &(const struct clk_parent_data){ - .fw_name = "bi_tcxo", - .name = "bi_tcxo", + .hw = &gpll0.clkr.hw, }, .num_parents = 1, .ops = &clk_trion_pll_postdiv_ops, diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c index dccef3a2908b..e1401d9cc756 100644 --- a/drivers/crypto/chelsio/chtls/chtls_io.c +++ b/drivers/crypto/chelsio/chtls/chtls_io.c @@ -682,7 +682,7 @@ int chtls_push_frames(struct chtls_sock *csk, int comp) make_tx_data_wr(sk, skb, immdlen, len, credits_needed, completion); tp->snd_nxt += len; - tp->lsndtime = tcp_time_stamp(tp); + tp->lsndtime = tcp_jiffies32; if (completion) ULP_SKB_CB(skb)->flags &= ~ULPCB_FLAG_NEED_HDR; } else { diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c index baee8c3f06ad..cf3687a7925f 100644 --- a/drivers/gpio/gpio-bcm-kona.c +++ b/drivers/gpio/gpio-bcm-kona.c @@ -625,7 +625,7 @@ static int bcm_kona_gpio_probe(struct platform_device *pdev) kona_gpio->reg_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(kona_gpio->reg_base)) { - ret = -ENXIO; + ret = PTR_ERR(kona_gpio->reg_base); goto err_irq_domain; } diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c index da1ef0b1c291..b1accfba017d 100644 --- a/drivers/gpio/gpio-exar.c +++ b/drivers/gpio/gpio-exar.c @@ -148,8 +148,10 @@ static int gpio_exar_probe(struct platform_device *pdev) mutex_init(&exar_gpio->lock); index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL); - if (index < 0) - goto err_destroy; + if (index < 0) { + ret = index; + goto err_mutex_destroy; + } sprintf(exar_gpio->name, "exar_gpio%d", index); exar_gpio->gpio_chip.label = exar_gpio->name; @@ -176,6 +178,7 @@ static int gpio_exar_probe(struct platform_device *pdev) err_destroy: ida_simple_remove(&ida_index, index); +err_mutex_destroy: mutex_destroy(&exar_gpio->lock); return ret; } diff --git a/drivers/gpio/gpio-mlxbf2.c b/drivers/gpio/gpio-mlxbf2.c index 7b7085050219..da570e63589d 100644 --- a/drivers/gpio/gpio-mlxbf2.c +++ b/drivers/gpio/gpio-mlxbf2.c @@ -127,8 +127,8 @@ static int mlxbf2_gpio_lock_acquire(struct mlxbf2_gpio_context *gs) { u32 arm_gpio_lock_val; - spin_lock(&gs->gc.bgpio_lock); mutex_lock(yu_arm_gpio_lock_param.lock); + spin_lock(&gs->gc.bgpio_lock); arm_gpio_lock_val = readl(yu_arm_gpio_lock_param.io); @@ -136,8 +136,8 @@ static int mlxbf2_gpio_lock_acquire(struct mlxbf2_gpio_context *gs) * When lock active bit[31] is set, ModeX is write enabled */ if (YU_LOCK_ACTIVE_BIT(arm_gpio_lock_val)) { - mutex_unlock(yu_arm_gpio_lock_param.lock); spin_unlock(&gs->gc.bgpio_lock); + mutex_unlock(yu_arm_gpio_lock_param.lock); return -EINVAL; } @@ -152,8 +152,8 @@ static int mlxbf2_gpio_lock_acquire(struct mlxbf2_gpio_context *gs) static void mlxbf2_gpio_lock_release(struct mlxbf2_gpio_context *gs) { writel(YU_ARM_GPIO_LOCK_RELEASE, yu_arm_gpio_lock_param.io); - mutex_unlock(yu_arm_gpio_lock_param.lock); spin_unlock(&gs->gc.bgpio_lock); + mutex_unlock(yu_arm_gpio_lock_param.lock); } /* diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index 3c9f4fb3d5a2..bd65114eb170 100644 --- a/drivers/gpio/gpio-mvebu.c +++ b/drivers/gpio/gpio-mvebu.c @@ 
-782,6 +782,15 @@ static int mvebu_pwm_probe(struct platform_device *pdev, "marvell,armada-370-gpio")) return 0; + /* + * There are only two sets of PWM configuration registers for + * all the GPIO lines on those SoCs which this driver reserves + * for the first two GPIO chips. So if the resource is missing + * we can't treat it as an error. + */ + if (!platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwm")) + return 0; + if (IS_ERR(mvchip->clk)) return PTR_ERR(mvchip->clk); @@ -804,12 +813,6 @@ static int mvebu_pwm_probe(struct platform_device *pdev, mvchip->mvpwm = mvpwm; mvpwm->mvchip = mvchip; - /* - * There are only two sets of PWM configuration registers for - * all the GPIO lines on those SoCs which this driver reserves - * for the first two GPIO chips. So if the resource is missing - * we can't treat it as an error. - */ mvpwm->membase = devm_platform_ioremap_resource_byname(pdev, "pwm"); if (IS_ERR(mvpwm->membase)) return PTR_ERR(mvpwm->membase); diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c index 1361270ecf8c..0cb6600b8eee 100644 --- a/drivers/gpio/gpio-pxa.c +++ b/drivers/gpio/gpio-pxa.c @@ -660,8 +660,8 @@ static int pxa_gpio_probe(struct platform_device *pdev) pchip->irq1 = irq1; gpio_reg_base = devm_platform_ioremap_resource(pdev, 0); - if (!gpio_reg_base) - return -EINVAL; + if (IS_ERR(gpio_reg_base)) + return PTR_ERR(gpio_reg_base); clk = clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) { diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 182136d98b97..c14f0784274a 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -729,6 +729,10 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip) if (ret) goto out_free_descs; } + + atomic_notifier_call_chain(&desc->gdev->notifier, + GPIOLINE_CHANGED_REQUESTED, desc); + dev_dbg(&gdev->dev, "registered chardev handle for line %d\n", offset); } @@ -1083,6 +1087,9 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) if (ret) goto out_free_desc; + atomic_notifier_call_chain(&desc->gdev->notifier, + GPIOLINE_CHANGED_REQUESTED, desc); + le->irq = gpiod_to_irq(desc); if (le->irq <= 0) { ret = -ENODEV; @@ -2998,8 +3005,6 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label) } done: spin_unlock_irqrestore(&gpio_lock, flags); - atomic_notifier_call_chain(&desc->gdev->notifier, - GPIOLINE_CHANGED_REQUESTED, desc); return ret; } @@ -4215,7 +4220,9 @@ int gpiochip_lock_as_irq(struct gpio_chip *gc, unsigned int offset) } } - if (test_bit(FLAG_IS_OUT, &desc->flags)) { + /* To be valid for IRQ the line needs to be input or open drain */ + if (test_bit(FLAG_IS_OUT, &desc->flags) && + !test_bit(FLAG_OPEN_DRAIN, &desc->flags)) { chip_err(gc, "%s: tried to flag a GPIO set as output for IRQ\n", __func__); @@ -4278,7 +4285,12 @@ void gpiochip_enable_irq(struct gpio_chip *gc, unsigned int offset) if (!IS_ERR(desc) && !WARN_ON(!test_bit(FLAG_USED_AS_IRQ, &desc->flags))) { - WARN_ON(test_bit(FLAG_IS_OUT, &desc->flags)); + /* + * We must not be output when using IRQ UNLESS we are + * open drain. 
+ */ + WARN_ON(test_bit(FLAG_IS_OUT, &desc->flags) && + !test_bit(FLAG_OPEN_DRAIN, &desc->flags)); set_bit(FLAG_IRQ_IS_ENABLED, &desc->flags); } } @@ -4961,6 +4973,9 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev, return ERR_PTR(ret); } + atomic_notifier_call_chain(&desc->gdev->notifier, + GPIOLINE_CHANGED_REQUESTED, desc); + return desc; } EXPORT_SYMBOL_GPL(gpiod_get_index); @@ -5026,6 +5041,9 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, return ERR_PTR(ret); } + atomic_notifier_call_chain(&desc->gdev->notifier, + GPIOLINE_CHANGED_REQUESTED, desc); + return desc; } EXPORT_SYMBOL_GPL(fwnode_get_named_gpiod); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 4a3049841086..c24cad3c64ed 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -1050,7 +1050,7 @@ void kfd_dec_compute_active(struct kfd_dev *dev); /* Check with device cgroup if @kfd device is accessible */ static inline int kfd_devcgroup_check_permission(struct kfd_dev *kfd) { -#if defined(CONFIG_CGROUP_DEVICE) +#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF) struct drm_device *ddev = kfd->ddev; return devcgroup_check_permission(DEVCG_DEV_CHAR, ddev->driver->major, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 28e651b173ab..7fc15b82fe48 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -7880,13 +7880,6 @@ static int dm_update_plane_state(struct dc *dc, return -EINVAL; } - if (new_plane_state->crtc_x <= -new_acrtc->max_cursor_width || - new_plane_state->crtc_y <= -new_acrtc->max_cursor_height) { - DRM_DEBUG_ATOMIC("Bad cursor position %d, %d\n", - new_plane_state->crtc_x, new_plane_state->crtc_y); - return -EINVAL; - } - return 0; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 82fc3d5b3b2a..416afb99529d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1684,6 +1684,8 @@ static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx) return; /* Stall out until the cursor update completes. 
*/ + if (vupdate_end < vupdate_start) + vupdate_end += stream->timing.v_total; us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line; udelay(us_to_vupdate + us_vupdate); } diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c index 1754c0547069..548cc25ea4ab 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm.c @@ -328,8 +328,8 @@ static int ingenic_drm_crtc_atomic_check(struct drm_crtc *crtc, if (!drm_atomic_crtc_needs_modeset(state)) return 0; - if (state->mode.hdisplay > priv->soc_info->max_height || - state->mode.vdisplay > priv->soc_info->max_width) + if (state->mode.hdisplay > priv->soc_info->max_width || + state->mode.vdisplay > priv->soc_info->max_height) return -EINVAL; rate = clk_round_rate(priv->pix_clk, @@ -474,7 +474,7 @@ static int ingenic_drm_encoder_atomic_check(struct drm_encoder *encoder, static irqreturn_t ingenic_drm_irq_handler(int irq, void *arg) { - struct ingenic_drm *priv = arg; + struct ingenic_drm *priv = drm_device_get_priv(arg); unsigned int state; regmap_read(priv->map, JZ_REG_LCD_STATE, &state); diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 4c62f900bf7e..288ae9f63588 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -324,6 +324,16 @@ config SENSORS_FAM15H_POWER This driver can also be built as a module. If so, the module will be called fam15h_power. +config SENSORS_AMD_ENERGY + tristate "AMD RAPL MSR based Energy driver" + depends on X86 + help + If you say yes here you get support for core and package energy + sensors, based on RAPL MSR for AMD family 17h and above CPUs. + + This driver can also be built as a module. If so, the module + will be called as amd_energy. + config SENSORS_APPLESMC tristate "Apple SMC (Motion sensor, light sensor, keyboard backlight)" depends on INPUT && X86 @@ -404,6 +414,31 @@ config SENSORS_ATXP1 This driver can also be built as a module. If so, the module will be called atxp1. +config SENSORS_BT1_PVT + tristate "Baikal-T1 Process, Voltage, Temperature sensor driver" + depends on MIPS_BAIKAL_T1 || COMPILE_TEST + help + If you say yes here you get support for Baikal-T1 PVT sensor + embedded into the SoC. + + This driver can also be built as a module. If so, the module will be + called bt1-pvt. + +config SENSORS_BT1_PVT_ALARMS + bool "Enable Baikal-T1 PVT sensor alarms" + depends on SENSORS_BT1_PVT + help + Baikal-T1 PVT IP-block provides threshold registers for each + supported sensor. But the corresponding interrupts might be + generated by the thresholds comparator only in synchronization with + a data conversion. Additionally there is only one sensor data can + be converted at a time. All of these makes the interface impossible + to be used for the hwmon alarms implementation without periodic + switch between the PVT sensors. By default the data conversion is + performed on demand from the user-space. If this config is enabled + the data conversion will be periodically performed and the data will be + saved in the internal driver cache. + config SENSORS_DRIVETEMP tristate "Hard disk drives with temperature sensors" depends on SCSI && ATA @@ -523,6 +558,15 @@ config SENSORS_F75375S This driver can also be built as a module. If so, the module will be called f75375s. +config SENSORS_GSC + tristate "Gateworks System Controller ADC" + depends on MFD_GATEWORKS_GSC + help + Support for the Gateworks System Controller A/D converters. 
+ + To compile this driver as a module, choose M here: + the module will be called gsc-hwmon. + config SENSORS_MC13783_ADC tristate "Freescale MC13783/MC13892 ADC" depends on MFD_MC13XXX @@ -1198,10 +1242,11 @@ config SENSORS_LM90 help If you say yes here you get support for National Semiconductor LM90, LM86, LM89 and LM99, Analog Devices ADM1032, ADT7461, and ADT7461A, - Maxim MAX6646, MAX6647, MAX6648, MAX6649, MAX6657, MAX6658, MAX6659, - MAX6680, MAX6681, MAX6692, MAX6695, MAX6696, ON Semiconductor NCT1008, - Winbond/Nuvoton W83L771W/G/AWG/ASG, Philips SA56004, GMT G781, and - Texas Instruments TMP451 sensor chips. + Maxim MAX6646, MAX6647, MAX6648, MAX6649, MAX6654, MAX6657, MAX6658, + MAX6659, MAX6680, MAX6681, MAX6692, MAX6695, MAX6696, + ON Semiconductor NCT1008, Winbond/Nuvoton W83L771W/G/AWG/ASG, + Philips SA56004, GMT G781, and Texas Instruments TMP451 + sensor chips. This driver can also be built as a module. If so, the module will be called lm90. @@ -1340,10 +1385,12 @@ config SENSORS_NCT7802 config SENSORS_NCT7904 tristate "Nuvoton NCT7904" - depends on I2C + depends on I2C && WATCHDOG + select WATCHDOG_CORE help If you say yes here you get support for the Nuvoton NCT7904 - hardware monitoring chip, including manual fan speed control. + hardware monitoring chip, including manual fan speed control + and support for the integrated watchdog. This driver can also be built as a module. If so, the module will be called nct7904. diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index b0b9c8e57176..3e32c21f5efe 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -45,6 +45,7 @@ obj-$(CONFIG_SENSORS_ADT7411) += adt7411.o obj-$(CONFIG_SENSORS_ADT7462) += adt7462.o obj-$(CONFIG_SENSORS_ADT7470) += adt7470.o obj-$(CONFIG_SENSORS_ADT7475) += adt7475.o +obj-$(CONFIG_SENSORS_AMD_ENERGY) += amd_energy.o obj-$(CONFIG_SENSORS_APPLESMC) += applesmc.o obj-$(CONFIG_SENSORS_ARM_SCMI) += scmi-hwmon.o obj-$(CONFIG_SENSORS_ARM_SCPI) += scpi-hwmon.o @@ -53,6 +54,7 @@ obj-$(CONFIG_SENSORS_ASC7621) += asc7621.o obj-$(CONFIG_SENSORS_ASPEED) += aspeed-pwm-tacho.o obj-$(CONFIG_SENSORS_ATXP1) += atxp1.o obj-$(CONFIG_SENSORS_AXI_FAN_CONTROL) += axi-fan-control.o +obj-$(CONFIG_SENSORS_BT1_PVT) += bt1-pvt.o obj-$(CONFIG_SENSORS_CORETEMP) += coretemp.o obj-$(CONFIG_SENSORS_DA9052_ADC)+= da9052-hwmon.o obj-$(CONFIG_SENSORS_DA9055)+= da9055-hwmon.o @@ -74,6 +76,7 @@ obj-$(CONFIG_SENSORS_G760A) += g760a.o obj-$(CONFIG_SENSORS_G762) += g762.o obj-$(CONFIG_SENSORS_GL518SM) += gl518sm.o obj-$(CONFIG_SENSORS_GL520SM) += gl520sm.o +obj-$(CONFIG_SENSORS_GSC) += gsc-hwmon.o obj-$(CONFIG_SENSORS_GPIO_FAN) += gpio-fan.o obj-$(CONFIG_SENSORS_HIH6130) += hih6130.o obj-$(CONFIG_SENSORS_ULTRA45) += ultra45_env.o diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c index c7010b91bc13..5a839cc2ed1c 100644 --- a/drivers/hwmon/adt7411.c +++ b/drivers/hwmon/adt7411.c @@ -716,7 +716,6 @@ static struct i2c_driver adt7411_driver = { module_i2c_driver(adt7411_driver); -MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de> and " - "Wolfram Sang <w.sang@pengutronix.de>"); +MODULE_AUTHOR("Sascha Hauer, Wolfram Sang <kernel@pengutronix.de>"); MODULE_DESCRIPTION("ADT7411 driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/hwmon/amd_energy.c b/drivers/hwmon/amd_energy.c new file mode 100644 index 000000000000..e95b7426106e --- /dev/null +++ b/drivers/hwmon/amd_energy.c @@ -0,0 +1,408 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/* + * Copyright (C) 2020 Advanced Micro Devices, Inc. 
+ */ +#include <asm/cpu_device_id.h> + +#include <linux/bits.h> +#include <linux/cpu.h> +#include <linux/cpumask.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/hwmon.h> +#include <linux/kernel.h> +#include <linux/kthread.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/processor.h> +#include <linux/platform_device.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/topology.h> +#include <linux/types.h> + +#define DRVNAME "amd_energy" + +#define ENERGY_PWR_UNIT_MSR 0xC0010299 +#define ENERGY_CORE_MSR 0xC001029A +#define ENERGY_PKG_MSR 0xC001029B + +#define AMD_ENERGY_UNIT_MASK 0x01F00 +#define AMD_ENERGY_MASK 0xFFFFFFFF + +struct sensor_accumulator { + u64 energy_ctr; + u64 prev_value; + char label[10]; +}; + +struct amd_energy_data { + struct hwmon_channel_info energy_info; + const struct hwmon_channel_info *info[2]; + struct hwmon_chip_info chip; + struct task_struct *wrap_accumulate; + /* Lock around the accumulator */ + struct mutex lock; + /* An accumulator for each core and socket */ + struct sensor_accumulator *accums; + /* Energy Status Units */ + u64 energy_units; + int nr_cpus; + int nr_socks; + int core_id; +}; + +static int amd_energy_read_labels(struct device *dev, + enum hwmon_sensor_types type, + u32 attr, int channel, + const char **str) +{ + struct amd_energy_data *data = dev_get_drvdata(dev); + + *str = data->accums[channel].label; + return 0; +} + +static void get_energy_units(struct amd_energy_data *data) +{ + u64 rapl_units; + + rdmsrl_safe(ENERGY_PWR_UNIT_MSR, &rapl_units); + data->energy_units = (rapl_units & AMD_ENERGY_UNIT_MASK) >> 8; +} + +static void accumulate_socket_delta(struct amd_energy_data *data, + int sock, int cpu) +{ + struct sensor_accumulator *s_accum; + u64 input; + + mutex_lock(&data->lock); + rdmsrl_safe_on_cpu(cpu, ENERGY_PKG_MSR, &input); + input &= AMD_ENERGY_MASK; + + s_accum = &data->accums[data->nr_cpus + sock]; + if (input >= s_accum->prev_value) + s_accum->energy_ctr += + input - s_accum->prev_value; + else + s_accum->energy_ctr += UINT_MAX - + s_accum->prev_value + input; + + s_accum->prev_value = input; + mutex_unlock(&data->lock); +} + +static void accumulate_core_delta(struct amd_energy_data *data) +{ + struct sensor_accumulator *c_accum; + u64 input; + int cpu; + + mutex_lock(&data->lock); + if (data->core_id >= data->nr_cpus) + data->core_id = 0; + + cpu = data->core_id; + + if (!cpu_online(cpu)) + goto out; + + rdmsrl_safe_on_cpu(cpu, ENERGY_CORE_MSR, &input); + input &= AMD_ENERGY_MASK; + + c_accum = &data->accums[cpu]; + + if (input >= c_accum->prev_value) + c_accum->energy_ctr += + input - c_accum->prev_value; + else + c_accum->energy_ctr += UINT_MAX - + c_accum->prev_value + input; + + c_accum->prev_value = input; + +out: + data->core_id++; + mutex_unlock(&data->lock); +} + +static void read_accumulate(struct amd_energy_data *data) +{ + int sock; + + for (sock = 0; sock < data->nr_socks; sock++) { + int cpu; + + cpu = cpumask_first_and(cpu_online_mask, + cpumask_of_node(sock)); + + accumulate_socket_delta(data, sock, cpu); + } + + accumulate_core_delta(data); +} + +static void amd_add_delta(struct amd_energy_data *data, int ch, + int cpu, long *val, bool is_core) +{ + struct sensor_accumulator *s_accum, *c_accum; + u64 input; + + mutex_lock(&data->lock); + if (!is_core) { + rdmsrl_safe_on_cpu(cpu, ENERGY_PKG_MSR, &input); + input &= AMD_ENERGY_MASK; + + s_accum = &data->accums[ch]; + if (input >= s_accum->prev_value) + input += 
s_accum->energy_ctr - + s_accum->prev_value; + else + input += UINT_MAX - s_accum->prev_value + + s_accum->energy_ctr; + } else { + rdmsrl_safe_on_cpu(cpu, ENERGY_CORE_MSR, &input); + input &= AMD_ENERGY_MASK; + + c_accum = &data->accums[ch]; + if (input >= c_accum->prev_value) + input += c_accum->energy_ctr - + c_accum->prev_value; + else + input += UINT_MAX - c_accum->prev_value + + c_accum->energy_ctr; + } + + /* Energy consumed = (1/(2^ESU) * RAW * 1000000UL) μJoules */ + *val = div64_ul(input * 1000000UL, BIT(data->energy_units)); + + mutex_unlock(&data->lock); +} + +static int amd_energy_read(struct device *dev, + enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct amd_energy_data *data = dev_get_drvdata(dev); + int cpu; + + if (channel >= data->nr_cpus) { + cpu = cpumask_first_and(cpu_online_mask, + cpumask_of_node + (channel - data->nr_cpus)); + amd_add_delta(data, channel, cpu, val, false); + } else { + cpu = channel; + if (!cpu_online(cpu)) + return -ENODEV; + + amd_add_delta(data, channel, cpu, val, true); + } + + return 0; +} + +static umode_t amd_energy_is_visible(const void *_data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + return 0444; +} + +static int energy_accumulator(void *p) +{ + struct amd_energy_data *data = (struct amd_energy_data *)p; + + while (!kthread_should_stop()) { + /* + * Ignoring the conditions such as + * cpu being offline or rdmsr failure + */ + read_accumulate(data); + + set_current_state(TASK_INTERRUPTIBLE); + if (kthread_should_stop()) + break; + + /* + * On a 240W system, with default resolution the + * Socket Energy status register may wrap around in + * 2^32*15.3 e-6/240 = 273.8041 secs (~4.5 mins) + * + * let us accumulate for every 100secs + */ + schedule_timeout(msecs_to_jiffies(100000)); + } + return 0; +} + +static const struct hwmon_ops amd_energy_ops = { + .is_visible = amd_energy_is_visible, + .read = amd_energy_read, + .read_string = amd_energy_read_labels, +}; + +static int amd_create_sensor(struct device *dev, + struct amd_energy_data *data, + u8 type, u32 config) +{ + struct hwmon_channel_info *info = &data->energy_info; + struct sensor_accumulator *accums; + int i, num_siblings, cpus, sockets; + u32 *s_config; + + /* Identify the number of siblings per core */ + num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1; + + sockets = num_possible_nodes(); + + /* + * Energy counter register is accessed at core level. + * Hence, filterout the siblings. 
+ */ + cpus = num_present_cpus() / num_siblings; + + s_config = devm_kcalloc(dev, cpus + sockets, + sizeof(u32), GFP_KERNEL); + if (!s_config) + return -ENOMEM; + + accums = devm_kcalloc(dev, cpus + sockets, + sizeof(struct sensor_accumulator), + GFP_KERNEL); + if (!accums) + return -ENOMEM; + + info->type = type; + info->config = s_config; + + data->nr_cpus = cpus; + data->nr_socks = sockets; + data->accums = accums; + + for (i = 0; i < cpus + sockets; i++) { + s_config[i] = config; + if (i < cpus) + scnprintf(accums[i].label, 10, + "Ecore%03u", i); + else + scnprintf(accums[i].label, 10, + "Esocket%u", (i - cpus)); + } + + return 0; +} + +static int amd_energy_probe(struct platform_device *pdev) +{ + struct device *hwmon_dev; + struct amd_energy_data *data; + struct device *dev = &pdev->dev; + + data = devm_kzalloc(dev, + sizeof(struct amd_energy_data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->chip.ops = &amd_energy_ops; + data->chip.info = data->info; + + dev_set_drvdata(dev, data); + /* Populate per-core energy reporting */ + data->info[0] = &data->energy_info; + amd_create_sensor(dev, data, hwmon_energy, + HWMON_E_INPUT | HWMON_E_LABEL); + + mutex_init(&data->lock); + get_energy_units(data); + + hwmon_dev = devm_hwmon_device_register_with_info(dev, DRVNAME, + data, + &data->chip, + NULL); + if (IS_ERR(hwmon_dev)) + return PTR_ERR(hwmon_dev); + + data->wrap_accumulate = kthread_run(energy_accumulator, data, + "%s", dev_name(hwmon_dev)); + if (IS_ERR(data->wrap_accumulate)) + return PTR_ERR(data->wrap_accumulate); + + return PTR_ERR_OR_ZERO(data->wrap_accumulate); +} + +static int amd_energy_remove(struct platform_device *pdev) +{ + struct amd_energy_data *data = dev_get_drvdata(&pdev->dev); + + if (data && data->wrap_accumulate) + kthread_stop(data->wrap_accumulate); + + return 0; +} + +static const struct platform_device_id amd_energy_ids[] = { + { .name = DRVNAME, }, + {} +}; +MODULE_DEVICE_TABLE(platform, amd_energy_ids); + +static struct platform_driver amd_energy_driver = { + .probe = amd_energy_probe, + .remove = amd_energy_remove, + .id_table = amd_energy_ids, + .driver = { + .name = DRVNAME, + }, +}; + +static struct platform_device *amd_energy_platdev; + +static const struct x86_cpu_id cpu_ids[] __initconst = { + X86_MATCH_VENDOR_FAM(AMD, 0x17, NULL), + {} +}; +MODULE_DEVICE_TABLE(x86cpu, cpu_ids); + +static int __init amd_energy_init(void) +{ + int ret; + + if (!x86_match_cpu(cpu_ids)) + return -ENODEV; + + ret = platform_driver_register(&amd_energy_driver); + if (ret) + return ret; + + amd_energy_platdev = platform_device_alloc(DRVNAME, 0); + if (!amd_energy_platdev) { + platform_driver_unregister(&amd_energy_driver); + return -ENOMEM; + } + + ret = platform_device_add(amd_energy_platdev); + if (ret) { + platform_device_put(amd_energy_platdev); + platform_driver_unregister(&amd_energy_driver); + return ret; + } + + return ret; +} + +static void __exit amd_energy_exit(void) +{ + platform_device_unregister(amd_energy_platdev); + platform_driver_unregister(&amd_energy_driver); +} + +module_init(amd_energy_init); +module_exit(amd_energy_exit); + +MODULE_DESCRIPTION("Driver for AMD Energy reporting from RAPL MSR via HWMON interface"); +MODULE_AUTHOR("Naveen Krishna Chatradhi <nchatrad@amd.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c index ec93b8d673f5..316618409315 100644 --- a/drivers/hwmon/applesmc.c +++ b/drivers/hwmon/applesmc.c @@ -156,14 +156,19 @@ static struct workqueue_struct *applesmc_led_wq; */ static int 
wait_read(void) { + unsigned long end = jiffies + (APPLESMC_MAX_WAIT * HZ) / USEC_PER_SEC; u8 status; int us; + for (us = APPLESMC_MIN_WAIT; us < APPLESMC_MAX_WAIT; us <<= 1) { - udelay(us); + usleep_range(us, us * 16); status = inb(APPLESMC_CMD_PORT); /* read: wait for smc to settle */ if (status & 0x01) return 0; + /* timeout: give up */ + if (time_after(jiffies, end)) + break; } pr_warn("wait_read() fail: 0x%02x\n", status); @@ -178,10 +183,11 @@ static int send_byte(u8 cmd, u16 port) { u8 status; int us; + unsigned long end = jiffies + (APPLESMC_MAX_WAIT * HZ) / USEC_PER_SEC; outb(cmd, port); for (us = APPLESMC_MIN_WAIT; us < APPLESMC_MAX_WAIT; us <<= 1) { - udelay(us); + usleep_range(us, us * 16); status = inb(APPLESMC_CMD_PORT); /* write: wait for smc to settle */ if (status & 0x02) @@ -190,7 +196,7 @@ static int send_byte(u8 cmd, u16 port) if (status & 0x04) return 0; /* timeout: give up */ - if (us << 1 == APPLESMC_MAX_WAIT) + if (time_after(jiffies, end)) break; /* busy: long wait and resend */ udelay(APPLESMC_RETRY_WAIT); diff --git a/drivers/hwmon/bt1-pvt.c b/drivers/hwmon/bt1-pvt.c new file mode 100644 index 000000000000..1a9772fb1f73 --- /dev/null +++ b/drivers/hwmon/bt1-pvt.c @@ -0,0 +1,1146 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC + * + * Authors: + * Maxim Kaurkin <maxim.kaurkin@baikalelectronics.ru> + * Serge Semin <Sergey.Semin@baikalelectronics.ru> + * + * Baikal-T1 Process, Voltage, Temperature sensor driver + */ + +#include <linux/bitfield.h> +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/completion.h> +#include <linux/device.h> +#include <linux/hwmon-sysfs.h> +#include <linux/hwmon.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/ktime.h> +#include <linux/limits.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/seqlock.h> +#include <linux/sysfs.h> +#include <linux/types.h> + +#include "bt1-pvt.h" + +/* + * For the sake of the code simplification we created the sensors info table + * with the sensor names, activation modes, threshold registers base address + * and the thresholds bit fields. + */ +static const struct pvt_sensor_info pvt_info[] = { + PVT_SENSOR_INFO(0, "CPU Core Temperature", hwmon_temp, TEMP, TTHRES), + PVT_SENSOR_INFO(0, "CPU Core Voltage", hwmon_in, VOLT, VTHRES), + PVT_SENSOR_INFO(1, "CPU Core Low-Vt", hwmon_in, LVT, LTHRES), + PVT_SENSOR_INFO(2, "CPU Core High-Vt", hwmon_in, HVT, HTHRES), + PVT_SENSOR_INFO(3, "CPU Core Standard-Vt", hwmon_in, SVT, STHRES), +}; + +/* + * The original translation formulae of the temperature (in degrees of Celsius) + * to PVT data and vice-versa are following: + * N = 1.8322e-8*(T^4) + 2.343e-5*(T^3) + 8.7018e-3*(T^2) + 3.9269*(T^1) + + * 1.7204e2, + * T = -1.6743e-11*(N^4) + 8.1542e-8*(N^3) + -1.8201e-4*(N^2) + + * 3.1020e-1*(N^1) - 4.838e1, + * where T = [-48.380, 147.438]C and N = [0, 1023]. + * They must be accordingly altered to be suitable for the integer arithmetics. + * The technique is called 'factor redistribution', which just makes sure the + * multiplications and divisions are made so to have a result of the operations + * within the integer numbers limit. In addition we need to translate the + * formulae to accept millidegrees of Celsius. 
Here what they look like after + * the alterations: + * N = (18322e-20*(T^4) + 2343e-13*(T^3) + 87018e-9*(T^2) + 39269e-3*T + + * 17204e2) / 1e4, + * T = -16743e-12*(D^4) + 81542e-9*(D^3) - 182010e-6*(D^2) + 310200e-3*D - + * 48380, + * where T = [-48380, 147438] mC and N = [0, 1023]. + */ +static const struct pvt_poly poly_temp_to_N = { + .total_divider = 10000, + .terms = { + {4, 18322, 10000, 10000}, + {3, 2343, 10000, 10}, + {2, 87018, 10000, 10}, + {1, 39269, 1000, 1}, + {0, 1720400, 1, 1} + } +}; + +static const struct pvt_poly poly_N_to_temp = { + .total_divider = 1, + .terms = { + {4, -16743, 1000, 1}, + {3, 81542, 1000, 1}, + {2, -182010, 1000, 1}, + {1, 310200, 1000, 1}, + {0, -48380, 1, 1} + } +}; + +/* + * Similar alterations are performed for the voltage conversion equations. + * The original formulae are: + * N = 1.8658e3*V - 1.1572e3, + * V = (N + 1.1572e3) / 1.8658e3, + * where V = [0.620, 1.168] V and N = [0, 1023]. + * After the optimization they looks as follows: + * N = (18658e-3*V - 11572) / 10, + * V = N * 10^5 / 18658 + 11572 * 10^4 / 18658. + */ +static const struct pvt_poly poly_volt_to_N = { + .total_divider = 10, + .terms = { + {1, 18658, 1000, 1}, + {0, -11572, 1, 1} + } +}; + +static const struct pvt_poly poly_N_to_volt = { + .total_divider = 10, + .terms = { + {1, 100000, 18658, 1}, + {0, 115720000, 1, 18658} + } +}; + +/* + * Here is the polynomial calculation function, which performs the + * redistributed terms calculations. It's pretty straightforward. We walk + * over each degree term up to the free one, and perform the redistributed + * multiplication of the term coefficient, its divider (as for the rationale + * fraction representation), data power and the rational fraction divider + * leftover. Then all of this is collected in a total sum variable, which + * value is normalized by the total divider before being returned. + */ +static long pvt_calc_poly(const struct pvt_poly *poly, long data) +{ + const struct pvt_poly_term *term = poly->terms; + long tmp, ret = 0; + int deg; + + do { + tmp = term->coef; + for (deg = 0; deg < term->deg; ++deg) + tmp = mult_frac(tmp, data, term->divider); + ret += tmp / term->divider_leftover; + } while ((term++)->deg); + + return ret / poly->total_divider; +} + +static inline u32 pvt_update(void __iomem *reg, u32 mask, u32 data) +{ + u32 old; + + old = readl_relaxed(reg); + writel((old & ~mask) | (data & mask), reg); + + return old & mask; +} + +/* + * Baikal-T1 PVT mode can be updated only when the controller is disabled. + * So first we disable it, then set the new mode together with the controller + * getting back enabled. The same concerns the temperature trim and + * measurements timeout. If it is necessary the interface mutex is supposed + * to be locked at the time the operations are performed. 
+ */ +static inline void pvt_set_mode(struct pvt_hwmon *pvt, u32 mode) +{ + u32 old; + + mode = FIELD_PREP(PVT_CTRL_MODE_MASK, mode); + + old = pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, 0); + pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_MODE_MASK | PVT_CTRL_EN, + mode | old); +} + +static inline u32 pvt_calc_trim(long temp) +{ + temp = clamp_val(temp, 0, PVT_TRIM_TEMP); + + return DIV_ROUND_UP(temp, PVT_TRIM_STEP); +} + +static inline void pvt_set_trim(struct pvt_hwmon *pvt, u32 trim) +{ + u32 old; + + trim = FIELD_PREP(PVT_CTRL_TRIM_MASK, trim); + + old = pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, 0); + pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_TRIM_MASK | PVT_CTRL_EN, + trim | old); +} + +static inline void pvt_set_tout(struct pvt_hwmon *pvt, u32 tout) +{ + u32 old; + + old = pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, 0); + writel(tout, pvt->regs + PVT_TTIMEOUT); + pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, old); +} + +/* + * This driver can optionally provide the hwmon alarms for each sensor the PVT + * controller supports. The alarms functionality is made compile-time + * configurable due to the hardware interface implementation peculiarity + * described further in this comment. So in case if alarms are unnecessary in + * your system design it's recommended to have them disabled to prevent the PVT + * IRQs being periodically raised to get the data cache/alarms status up to + * date. + * + * Baikal-T1 PVT embedded controller is based on the Analog Bits PVT sensor, + * but is equipped with a dedicated control wrapper. It exposes the PVT + * sub-block registers space via the APB3 bus. In addition the wrapper provides + * a common interrupt vector of the sensors conversion completion events and + * threshold value alarms. Alas the wrapper interface hasn't been fully thought + * through. There is only one sensor can be activated at a time, for which the + * thresholds comparator is enabled right after the data conversion is + * completed. Due to this if alarms need to be implemented for all available + * sensors we can't just set the thresholds and enable the interrupts. We need + * to enable the sensors one after another and let the controller to detect + * the alarms by itself at each conversion. This also makes pointless to handle + * the alarms interrupts, since in occasion they happen synchronously with + * data conversion completion. The best driver design would be to have the + * completion interrupts enabled only and keep the converted value in the + * driver data cache. This solution is implemented if hwmon alarms are enabled + * in this driver. In case if the alarms are disabled, the conversion is + * performed on demand at the time a sensors input file is read. + */ + +#if defined(CONFIG_SENSORS_BT1_PVT_ALARMS) + +#define pvt_hard_isr NULL + +static irqreturn_t pvt_soft_isr(int irq, void *data) +{ + const struct pvt_sensor_info *info; + struct pvt_hwmon *pvt = data; + struct pvt_cache *cache; + u32 val, thres_sts, old; + + /* + * DVALID bit will be cleared by reading the data. We need to save the + * status before the next conversion happens. Threshold events will be + * handled a bit later. + */ + thres_sts = readl(pvt->regs + PVT_RAW_INTR_STAT); + + /* + * Then lets recharge the PVT interface with the next sampling mode. + * Lock the interface mutex to serialize trim, timeouts and alarm + * thresholds settings. + */ + cache = &pvt->cache[pvt->sensor]; + info = &pvt_info[pvt->sensor]; + pvt->sensor = (pvt->sensor == PVT_SENSOR_LAST) ? 
+ PVT_SENSOR_FIRST : (pvt->sensor + 1); + + /* + * For some reason we have to mask the interrupt before changing the + * mode, otherwise sometimes the temperature mode doesn't get + * activated even though the actual mode in the ctrl register + * corresponds to one. Then we read the data. By doing so we also + * recharge the data conversion. After this the mode corresponding + * to the next sensor in the row is set. Finally we enable the + * interrupts back. + */ + mutex_lock(&pvt->iface_mtx); + + old = pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_DVALID, + PVT_INTR_DVALID); + + val = readl(pvt->regs + PVT_DATA); + + pvt_set_mode(pvt, pvt_info[pvt->sensor].mode); + + pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_DVALID, old); + + mutex_unlock(&pvt->iface_mtx); + + /* + * We can now update the data cache with data just retrieved from the + * sensor. Lock write-seqlock to make sure the reader has a coherent + * data. + */ + write_seqlock(&cache->data_seqlock); + + cache->data = FIELD_GET(PVT_DATA_DATA_MASK, val); + + write_sequnlock(&cache->data_seqlock); + + /* + * While PVT core is doing the next mode data conversion, we'll check + * whether the alarms were triggered for the current sensor. Note that + * according to the documentation only one threshold IRQ status can be + * set at a time, that's why if-else statement is utilized. + */ + if ((thres_sts & info->thres_sts_lo) ^ cache->thres_sts_lo) { + WRITE_ONCE(cache->thres_sts_lo, thres_sts & info->thres_sts_lo); + hwmon_notify_event(pvt->hwmon, info->type, info->attr_min_alarm, + info->channel); + } else if ((thres_sts & info->thres_sts_hi) ^ cache->thres_sts_hi) { + WRITE_ONCE(cache->thres_sts_hi, thres_sts & info->thres_sts_hi); + hwmon_notify_event(pvt->hwmon, info->type, info->attr_max_alarm, + info->channel); + } + + return IRQ_HANDLED; +} + +inline umode_t pvt_limit_is_visible(enum pvt_sensor_type type) +{ + return 0644; +} + +inline umode_t pvt_alarm_is_visible(enum pvt_sensor_type type) +{ + return 0444; +} + +static int pvt_read_data(struct pvt_hwmon *pvt, enum pvt_sensor_type type, + long *val) +{ + struct pvt_cache *cache = &pvt->cache[type]; + unsigned int seq; + u32 data; + + do { + seq = read_seqbegin(&cache->data_seqlock); + data = cache->data; + } while (read_seqretry(&cache->data_seqlock, seq)); + + if (type == PVT_TEMP) + *val = pvt_calc_poly(&poly_N_to_temp, data); + else + *val = pvt_calc_poly(&poly_N_to_volt, data); + + return 0; +} + +static int pvt_read_limit(struct pvt_hwmon *pvt, enum pvt_sensor_type type, + bool is_low, long *val) +{ + u32 data; + + /* No need in serialization, since it is just read from MMIO. */ + data = readl(pvt->regs + pvt_info[type].thres_base); + + if (is_low) + data = FIELD_GET(PVT_THRES_LO_MASK, data); + else + data = FIELD_GET(PVT_THRES_HI_MASK, data); + + if (type == PVT_TEMP) + *val = pvt_calc_poly(&poly_N_to_temp, data); + else + *val = pvt_calc_poly(&poly_N_to_volt, data); + + return 0; +} + +static int pvt_write_limit(struct pvt_hwmon *pvt, enum pvt_sensor_type type, + bool is_low, long val) +{ + u32 data, limit, mask; + int ret; + + if (type == PVT_TEMP) { + val = clamp(val, PVT_TEMP_MIN, PVT_TEMP_MAX); + data = pvt_calc_poly(&poly_temp_to_N, val); + } else { + val = clamp(val, PVT_VOLT_MIN, PVT_VOLT_MAX); + data = pvt_calc_poly(&poly_volt_to_N, val); + } + + /* Serialize limit update, since a part of the register is changed. */ + ret = mutex_lock_interruptible(&pvt->iface_mtx); + if (ret) + return ret; + + /* Make sure the upper and lower ranges don't intersect. 
*/ + limit = readl(pvt->regs + pvt_info[type].thres_base); + if (is_low) { + limit = FIELD_GET(PVT_THRES_HI_MASK, limit); + data = clamp_val(data, PVT_DATA_MIN, limit); + data = FIELD_PREP(PVT_THRES_LO_MASK, data); + mask = PVT_THRES_LO_MASK; + } else { + limit = FIELD_GET(PVT_THRES_LO_MASK, limit); + data = clamp_val(data, limit, PVT_DATA_MAX); + data = FIELD_PREP(PVT_THRES_HI_MASK, data); + mask = PVT_THRES_HI_MASK; + } + + pvt_update(pvt->regs + pvt_info[type].thres_base, mask, data); + + mutex_unlock(&pvt->iface_mtx); + + return 0; +} + +static int pvt_read_alarm(struct pvt_hwmon *pvt, enum pvt_sensor_type type, + bool is_low, long *val) +{ + if (is_low) + *val = !!READ_ONCE(pvt->cache[type].thres_sts_lo); + else + *val = !!READ_ONCE(pvt->cache[type].thres_sts_hi); + + return 0; +} + +static const struct hwmon_channel_info *pvt_channel_info[] = { + HWMON_CHANNEL_INFO(chip, + HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL), + HWMON_CHANNEL_INFO(temp, + HWMON_T_INPUT | HWMON_T_TYPE | HWMON_T_LABEL | + HWMON_T_MIN | HWMON_T_MIN_ALARM | + HWMON_T_MAX | HWMON_T_MAX_ALARM | + HWMON_T_OFFSET), + HWMON_CHANNEL_INFO(in, + HWMON_I_INPUT | HWMON_I_LABEL | + HWMON_I_MIN | HWMON_I_MIN_ALARM | + HWMON_I_MAX | HWMON_I_MAX_ALARM, + HWMON_I_INPUT | HWMON_I_LABEL | + HWMON_I_MIN | HWMON_I_MIN_ALARM | + HWMON_I_MAX | HWMON_I_MAX_ALARM, + HWMON_I_INPUT | HWMON_I_LABEL | + HWMON_I_MIN | HWMON_I_MIN_ALARM | + HWMON_I_MAX | HWMON_I_MAX_ALARM, + HWMON_I_INPUT | HWMON_I_LABEL | + HWMON_I_MIN | HWMON_I_MIN_ALARM | + HWMON_I_MAX | HWMON_I_MAX_ALARM), + NULL +}; + +#else /* !CONFIG_SENSORS_BT1_PVT_ALARMS */ + +static irqreturn_t pvt_hard_isr(int irq, void *data) +{ + struct pvt_hwmon *pvt = data; + struct pvt_cache *cache; + u32 val; + + /* + * Mask the DVALID interrupt so after exiting from the handler a + * repeated conversion wouldn't happen. + */ + pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_DVALID, + PVT_INTR_DVALID); + + /* + * Nothing special for alarm-less driver. Just read the data, update + * the cache and notify a waiter of this event. + */ + val = readl(pvt->regs + PVT_DATA); + if (!(val & PVT_DATA_VALID)) { + dev_err(pvt->dev, "Got IRQ when data isn't valid\n"); + return IRQ_HANDLED; + } + + cache = &pvt->cache[pvt->sensor]; + + WRITE_ONCE(cache->data, FIELD_GET(PVT_DATA_DATA_MASK, val)); + + complete(&cache->conversion); + + return IRQ_HANDLED; +} + +#define pvt_soft_isr NULL + +inline umode_t pvt_limit_is_visible(enum pvt_sensor_type type) +{ + return 0; +} + +inline umode_t pvt_alarm_is_visible(enum pvt_sensor_type type) +{ + return 0; +} + +static int pvt_read_data(struct pvt_hwmon *pvt, enum pvt_sensor_type type, + long *val) +{ + struct pvt_cache *cache = &pvt->cache[type]; + u32 data; + int ret; + + /* + * Lock PVT conversion interface until data cache is updated. The + * data read procedure is following: set the requested PVT sensor + * mode, enable IRQ and conversion, wait until conversion is finished, + * then disable conversion and IRQ, and read the cached data. + */ + ret = mutex_lock_interruptible(&pvt->iface_mtx); + if (ret) + return ret; + + pvt->sensor = type; + pvt_set_mode(pvt, pvt_info[type].mode); + + /* + * Unmask the DVALID interrupt and enable the sensors conversions. + * Do the reverse procedure when conversion is done. 
+ */ + pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_DVALID, 0); + pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, PVT_CTRL_EN); + + wait_for_completion(&cache->conversion); + + pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, 0); + pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_DVALID, + PVT_INTR_DVALID); + + data = READ_ONCE(cache->data); + + mutex_unlock(&pvt->iface_mtx); + + if (type == PVT_TEMP) + *val = pvt_calc_poly(&poly_N_to_temp, data); + else + *val = pvt_calc_poly(&poly_N_to_volt, data); + + return 0; +} + +static int pvt_read_limit(struct pvt_hwmon *pvt, enum pvt_sensor_type type, + bool is_low, long *val) +{ + return -EOPNOTSUPP; +} + +static int pvt_write_limit(struct pvt_hwmon *pvt, enum pvt_sensor_type type, + bool is_low, long val) +{ + return -EOPNOTSUPP; +} + +static int pvt_read_alarm(struct pvt_hwmon *pvt, enum pvt_sensor_type type, + bool is_low, long *val) +{ + return -EOPNOTSUPP; +} + +static const struct hwmon_channel_info *pvt_channel_info[] = { + HWMON_CHANNEL_INFO(chip, + HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL), + HWMON_CHANNEL_INFO(temp, + HWMON_T_INPUT | HWMON_T_TYPE | HWMON_T_LABEL | + HWMON_T_OFFSET), + HWMON_CHANNEL_INFO(in, + HWMON_I_INPUT | HWMON_I_LABEL, + HWMON_I_INPUT | HWMON_I_LABEL, + HWMON_I_INPUT | HWMON_I_LABEL, + HWMON_I_INPUT | HWMON_I_LABEL), + NULL +}; + +#endif /* !CONFIG_SENSORS_BT1_PVT_ALARMS */ + +static inline bool pvt_hwmon_channel_is_valid(enum hwmon_sensor_types type, + int ch) +{ + switch (type) { + case hwmon_temp: + if (ch < 0 || ch >= PVT_TEMP_CHS) + return false; + break; + case hwmon_in: + if (ch < 0 || ch >= PVT_VOLT_CHS) + return false; + break; + default: + break; + } + + /* The rest of the types are independent from the channel number. */ + return true; +} + +static umode_t pvt_hwmon_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int ch) +{ + if (!pvt_hwmon_channel_is_valid(type, ch)) + return 0; + + switch (type) { + case hwmon_chip: + switch (attr) { + case hwmon_chip_update_interval: + return 0644; + } + break; + case hwmon_temp: + switch (attr) { + case hwmon_temp_input: + case hwmon_temp_type: + case hwmon_temp_label: + return 0444; + case hwmon_temp_min: + case hwmon_temp_max: + return pvt_limit_is_visible(ch); + case hwmon_temp_min_alarm: + case hwmon_temp_max_alarm: + return pvt_alarm_is_visible(ch); + case hwmon_temp_offset: + return 0644; + } + break; + case hwmon_in: + switch (attr) { + case hwmon_in_input: + case hwmon_in_label: + return 0444; + case hwmon_in_min: + case hwmon_in_max: + return pvt_limit_is_visible(PVT_VOLT + ch); + case hwmon_in_min_alarm: + case hwmon_in_max_alarm: + return pvt_alarm_is_visible(PVT_VOLT + ch); + } + break; + default: + break; + } + + return 0; +} + +static int pvt_read_trim(struct pvt_hwmon *pvt, long *val) +{ + u32 data; + + data = readl(pvt->regs + PVT_CTRL); + *val = FIELD_GET(PVT_CTRL_TRIM_MASK, data) * PVT_TRIM_STEP; + + return 0; +} + +static int pvt_write_trim(struct pvt_hwmon *pvt, long val) +{ + u32 trim; + int ret; + + /* + * Serialize trim update, since a part of the register is changed and + * the controller is supposed to be disabled during this operation. 
+	 */
+	ret = mutex_lock_interruptible(&pvt->iface_mtx);
+	if (ret)
+		return ret;
+
+	trim = pvt_calc_trim(val);
+	pvt_set_trim(pvt, trim);
+
+	mutex_unlock(&pvt->iface_mtx);
+
+	return 0;
+}
+
+static int pvt_read_timeout(struct pvt_hwmon *pvt, long *val)
+{
+	unsigned long rate;
+	ktime_t kt;
+	u32 data;
+
+	rate = clk_get_rate(pvt->clks[PVT_CLOCK_REF].clk);
+	if (!rate)
+		return -ENODEV;
+
+	/*
+	 * Don't bother with mutex here, since we just read data from MMIO.
+	 * We also have to scale the ticks timeout up to compensate for the
+	 * ms-ns-data translations.
+	 */
+	data = readl(pvt->regs + PVT_TTIMEOUT) + 1;
+
+	/*
+	 * Calculate the ref-clock based delay (Ttotal) between two
+	 * consecutive data samples of the same sensor. First get the delay
+	 * introduced by the internal ref-clock timer (Tref ticks of the
+	 * reference clock running at Fclk). Then add the constant timeout
+	 * caused by each conversion latency (Tmin). The basic formula for
+	 * each conversion is as follows:
+	 *   Ttotal = Tref / Fclk + Tmin
+	 * Note that if alarms are enabled the sensors are polled one after
+	 * another, so for the delay to apply to each sensor the requested
+	 * value must be evenly redistributed.
+	 */
+#if defined(CONFIG_SENSORS_BT1_PVT_ALARMS)
+	kt = ktime_set(PVT_SENSORS_NUM * (u64)data, 0);
+	kt = ktime_divns(kt, rate);
+	kt = ktime_add_ns(kt, PVT_SENSORS_NUM * PVT_TOUT_MIN);
+#else
+	kt = ktime_set(data, 0);
+	kt = ktime_divns(kt, rate);
+	kt = ktime_add_ns(kt, PVT_TOUT_MIN);
+#endif
+
+	/* Return the result in msec as hwmon sysfs interface requires. */
+	*val = ktime_to_ms(kt);
+
+	return 0;
+}
+
+static int pvt_write_timeout(struct pvt_hwmon *pvt, long val)
+{
+	unsigned long rate;
+	ktime_t kt;
+	u32 data;
+	int ret;
+
+	rate = clk_get_rate(pvt->clks[PVT_CLOCK_REF].clk);
+	if (!rate)
+		return -ENODEV;
+
+	/*
+	 * If alarms are enabled, the requested timeout must be divided
+	 * between all available sensors to have the requested delay
+	 * applicable to each individual sensor.
+	 */
+	kt = ms_to_ktime(val);
+#if defined(CONFIG_SENSORS_BT1_PVT_ALARMS)
+	kt = ktime_divns(kt, PVT_SENSORS_NUM);
+#endif
+
+	/*
+	 * Subtract a constant lag, which always persists due to the limited
+	 * PVT sampling rate. Make sure the timeout is not negative.
+	 */
+	kt = ktime_sub_ns(kt, PVT_TOUT_MIN);
+	if (ktime_to_ns(kt) < 0)
+		kt = ktime_set(0, 0);
+
+	/*
+	 * Finally recalculate the timeout in terms of the reference clock
+	 * period.
+	 */
+	data = ktime_divns(kt * rate, NSEC_PER_SEC);
+
+	/*
+	 * Update the measurements delay, but lock the interface first, since
+	 * we have to disable PVT in order to have the new delay actually
+	 * updated.
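+	 *
+	 * A rough worked example (hypothetical clock rate): with a 25 MHz
+	 * reference clock and alarms enabled, a requested interval of 50 ms
+	 * is split into 10 ms per sensor; subtracting the ~0.33 ms conversion
+	 * lag (PVT_TOUT_MIN) leaves ~9.67 ms, which translates to roughly
+	 * 241700 reference clock ticks written to PVT_TTIMEOUT. Reading the
+	 * interval back via pvt_read_timeout() then yields approximately
+	 * 50 ms again.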
+ */ + ret = mutex_lock_interruptible(&pvt->iface_mtx); + if (ret) + return ret; + + pvt_set_tout(pvt, data); + + mutex_unlock(&pvt->iface_mtx); + + return 0; +} + +static int pvt_hwmon_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int ch, long *val) +{ + struct pvt_hwmon *pvt = dev_get_drvdata(dev); + + if (!pvt_hwmon_channel_is_valid(type, ch)) + return -EINVAL; + + switch (type) { + case hwmon_chip: + switch (attr) { + case hwmon_chip_update_interval: + return pvt_read_timeout(pvt, val); + } + break; + case hwmon_temp: + switch (attr) { + case hwmon_temp_input: + return pvt_read_data(pvt, ch, val); + case hwmon_temp_type: + *val = 1; + return 0; + case hwmon_temp_min: + return pvt_read_limit(pvt, ch, true, val); + case hwmon_temp_max: + return pvt_read_limit(pvt, ch, false, val); + case hwmon_temp_min_alarm: + return pvt_read_alarm(pvt, ch, true, val); + case hwmon_temp_max_alarm: + return pvt_read_alarm(pvt, ch, false, val); + case hwmon_temp_offset: + return pvt_read_trim(pvt, val); + } + break; + case hwmon_in: + switch (attr) { + case hwmon_in_input: + return pvt_read_data(pvt, PVT_VOLT + ch, val); + case hwmon_in_min: + return pvt_read_limit(pvt, PVT_VOLT + ch, true, val); + case hwmon_in_max: + return pvt_read_limit(pvt, PVT_VOLT + ch, false, val); + case hwmon_in_min_alarm: + return pvt_read_alarm(pvt, PVT_VOLT + ch, true, val); + case hwmon_in_max_alarm: + return pvt_read_alarm(pvt, PVT_VOLT + ch, false, val); + } + break; + default: + break; + } + + return -EOPNOTSUPP; +} + +static int pvt_hwmon_read_string(struct device *dev, + enum hwmon_sensor_types type, + u32 attr, int ch, const char **str) +{ + if (!pvt_hwmon_channel_is_valid(type, ch)) + return -EINVAL; + + switch (type) { + case hwmon_temp: + switch (attr) { + case hwmon_temp_label: + *str = pvt_info[ch].label; + return 0; + } + break; + case hwmon_in: + switch (attr) { + case hwmon_in_label: + *str = pvt_info[PVT_VOLT + ch].label; + return 0; + } + break; + default: + break; + } + + return -EOPNOTSUPP; +} + +static int pvt_hwmon_write(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int ch, long val) +{ + struct pvt_hwmon *pvt = dev_get_drvdata(dev); + + if (!pvt_hwmon_channel_is_valid(type, ch)) + return -EINVAL; + + switch (type) { + case hwmon_chip: + switch (attr) { + case hwmon_chip_update_interval: + return pvt_write_timeout(pvt, val); + } + break; + case hwmon_temp: + switch (attr) { + case hwmon_temp_min: + return pvt_write_limit(pvt, ch, true, val); + case hwmon_temp_max: + return pvt_write_limit(pvt, ch, false, val); + case hwmon_temp_offset: + return pvt_write_trim(pvt, val); + } + break; + case hwmon_in: + switch (attr) { + case hwmon_in_min: + return pvt_write_limit(pvt, PVT_VOLT + ch, true, val); + case hwmon_in_max: + return pvt_write_limit(pvt, PVT_VOLT + ch, false, val); + } + break; + default: + break; + } + + return -EOPNOTSUPP; +} + +static const struct hwmon_ops pvt_hwmon_ops = { + .is_visible = pvt_hwmon_is_visible, + .read = pvt_hwmon_read, + .read_string = pvt_hwmon_read_string, + .write = pvt_hwmon_write +}; + +static const struct hwmon_chip_info pvt_hwmon_info = { + .ops = &pvt_hwmon_ops, + .info = pvt_channel_info +}; + +static void pvt_clear_data(void *data) +{ + struct pvt_hwmon *pvt = data; +#if !defined(CONFIG_SENSORS_BT1_PVT_ALARMS) + int idx; + + for (idx = 0; idx < PVT_SENSORS_NUM; ++idx) + complete_all(&pvt->cache[idx].conversion); +#endif + + mutex_destroy(&pvt->iface_mtx); +} + +static struct pvt_hwmon *pvt_create_data(struct platform_device *pdev) 
+{ + struct device *dev = &pdev->dev; + struct pvt_hwmon *pvt; + int ret, idx; + + pvt = devm_kzalloc(dev, sizeof(*pvt), GFP_KERNEL); + if (!pvt) + return ERR_PTR(-ENOMEM); + + ret = devm_add_action(dev, pvt_clear_data, pvt); + if (ret) { + dev_err(dev, "Can't add PVT data clear action\n"); + return ERR_PTR(ret); + } + + pvt->dev = dev; + pvt->sensor = PVT_SENSOR_FIRST; + mutex_init(&pvt->iface_mtx); + +#if defined(CONFIG_SENSORS_BT1_PVT_ALARMS) + for (idx = 0; idx < PVT_SENSORS_NUM; ++idx) + seqlock_init(&pvt->cache[idx].data_seqlock); +#else + for (idx = 0; idx < PVT_SENSORS_NUM; ++idx) + init_completion(&pvt->cache[idx].conversion); +#endif + + return pvt; +} + +static int pvt_request_regs(struct pvt_hwmon *pvt) +{ + struct platform_device *pdev = to_platform_device(pvt->dev); + struct resource *res; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(pvt->dev, "Couldn't find PVT memresource\n"); + return -EINVAL; + } + + pvt->regs = devm_ioremap_resource(pvt->dev, res); + if (IS_ERR(pvt->regs)) { + dev_err(pvt->dev, "Couldn't map PVT registers\n"); + return PTR_ERR(pvt->regs); + } + + return 0; +} + +static void pvt_disable_clks(void *data) +{ + struct pvt_hwmon *pvt = data; + + clk_bulk_disable_unprepare(PVT_CLOCK_NUM, pvt->clks); +} + +static int pvt_request_clks(struct pvt_hwmon *pvt) +{ + int ret; + + pvt->clks[PVT_CLOCK_APB].id = "pclk"; + pvt->clks[PVT_CLOCK_REF].id = "ref"; + + ret = devm_clk_bulk_get(pvt->dev, PVT_CLOCK_NUM, pvt->clks); + if (ret) { + dev_err(pvt->dev, "Couldn't get PVT clocks descriptors\n"); + return ret; + } + + ret = clk_bulk_prepare_enable(PVT_CLOCK_NUM, pvt->clks); + if (ret) { + dev_err(pvt->dev, "Couldn't enable the PVT clocks\n"); + return ret; + } + + ret = devm_add_action_or_reset(pvt->dev, pvt_disable_clks, pvt); + if (ret) { + dev_err(pvt->dev, "Can't add PVT clocks disable action\n"); + return ret; + } + + return 0; +} + +static void pvt_init_iface(struct pvt_hwmon *pvt) +{ + u32 trim, temp; + + /* + * Make sure all interrupts and controller are disabled so not to + * accidentally have ISR executed before the driver data is fully + * initialized. Clear the IRQ status as well. + */ + pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_ALL, PVT_INTR_ALL); + pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, 0); + readl(pvt->regs + PVT_CLR_INTR); + readl(pvt->regs + PVT_DATA); + + /* Setup default sensor mode, timeout and temperature trim. 
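+	 *
+	 * The default trim may be overridden from the device tree via the
+	 * property read below, e.g. (illustrative value only):
+	 *
+	 *	baikal,pvt-temp-offset-millicelsius = <3000>;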
*/ + pvt_set_mode(pvt, pvt_info[pvt->sensor].mode); + pvt_set_tout(pvt, PVT_TOUT_DEF); + + trim = PVT_TRIM_DEF; + if (!of_property_read_u32(pvt->dev->of_node, + "baikal,pvt-temp-offset-millicelsius", &temp)) + trim = pvt_calc_trim(temp); + + pvt_set_trim(pvt, trim); +} + +static int pvt_request_irq(struct pvt_hwmon *pvt) +{ + struct platform_device *pdev = to_platform_device(pvt->dev); + int ret; + + pvt->irq = platform_get_irq(pdev, 0); + if (pvt->irq < 0) + return pvt->irq; + + ret = devm_request_threaded_irq(pvt->dev, pvt->irq, + pvt_hard_isr, pvt_soft_isr, +#if defined(CONFIG_SENSORS_BT1_PVT_ALARMS) + IRQF_SHARED | IRQF_TRIGGER_HIGH | + IRQF_ONESHOT, +#else + IRQF_SHARED | IRQF_TRIGGER_HIGH, +#endif + "pvt", pvt); + if (ret) { + dev_err(pvt->dev, "Couldn't request PVT IRQ\n"); + return ret; + } + + return 0; +} + +static int pvt_create_hwmon(struct pvt_hwmon *pvt) +{ + pvt->hwmon = devm_hwmon_device_register_with_info(pvt->dev, "pvt", pvt, + &pvt_hwmon_info, NULL); + if (IS_ERR(pvt->hwmon)) { + dev_err(pvt->dev, "Couldn't create hwmon device\n"); + return PTR_ERR(pvt->hwmon); + } + + return 0; +} + +#if defined(CONFIG_SENSORS_BT1_PVT_ALARMS) + +static void pvt_disable_iface(void *data) +{ + struct pvt_hwmon *pvt = data; + + mutex_lock(&pvt->iface_mtx); + pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, 0); + pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_DVALID, + PVT_INTR_DVALID); + mutex_unlock(&pvt->iface_mtx); +} + +static int pvt_enable_iface(struct pvt_hwmon *pvt) +{ + int ret; + + ret = devm_add_action(pvt->dev, pvt_disable_iface, pvt); + if (ret) { + dev_err(pvt->dev, "Can't add PVT disable interface action\n"); + return ret; + } + + /* + * Enable sensors data conversion and IRQ. We need to lock the + * interface mutex since hwmon has just been created and the + * corresponding sysfs files are accessible from user-space, + * which theoretically may cause races. 
+ */ + mutex_lock(&pvt->iface_mtx); + pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_DVALID, 0); + pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, PVT_CTRL_EN); + mutex_unlock(&pvt->iface_mtx); + + return 0; +} + +#else /* !CONFIG_SENSORS_BT1_PVT_ALARMS */ + +static int pvt_enable_iface(struct pvt_hwmon *pvt) +{ + return 0; +} + +#endif /* !CONFIG_SENSORS_BT1_PVT_ALARMS */ + +static int pvt_probe(struct platform_device *pdev) +{ + struct pvt_hwmon *pvt; + int ret; + + pvt = pvt_create_data(pdev); + if (IS_ERR(pvt)) + return PTR_ERR(pvt); + + ret = pvt_request_regs(pvt); + if (ret) + return ret; + + ret = pvt_request_clks(pvt); + if (ret) + return ret; + + pvt_init_iface(pvt); + + ret = pvt_request_irq(pvt); + if (ret) + return ret; + + ret = pvt_create_hwmon(pvt); + if (ret) + return ret; + + ret = pvt_enable_iface(pvt); + if (ret) + return ret; + + return 0; +} + +static const struct of_device_id pvt_of_match[] = { + { .compatible = "baikal,bt1-pvt" }, + { } +}; +MODULE_DEVICE_TABLE(of, pvt_of_match); + +static struct platform_driver pvt_driver = { + .probe = pvt_probe, + .driver = { + .name = "bt1-pvt", + .of_match_table = pvt_of_match + } +}; +module_platform_driver(pvt_driver); + +MODULE_AUTHOR("Maxim Kaurkin <maxim.kaurkin@baikalelectronics.ru>"); +MODULE_DESCRIPTION("Baikal-T1 PVT driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/hwmon/bt1-pvt.h b/drivers/hwmon/bt1-pvt.h new file mode 100644 index 000000000000..5eac73e94885 --- /dev/null +++ b/drivers/hwmon/bt1-pvt.h @@ -0,0 +1,244 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC + * + * Baikal-T1 Process, Voltage, Temperature sensor driver + */ +#ifndef __HWMON_BT1_PVT_H__ +#define __HWMON_BT1_PVT_H__ + +#include <linux/completion.h> +#include <linux/hwmon.h> +#include <linux/kernel.h> +#include <linux/mutex.h> +#include <linux/seqlock.h> + +/* Baikal-T1 PVT registers and their bitfields */ +#define PVT_CTRL 0x00 +#define PVT_CTRL_EN BIT(0) +#define PVT_CTRL_MODE_FLD 1 +#define PVT_CTRL_MODE_MASK GENMASK(3, PVT_CTRL_MODE_FLD) +#define PVT_CTRL_MODE_TEMP 0x0 +#define PVT_CTRL_MODE_VOLT 0x1 +#define PVT_CTRL_MODE_LVT 0x2 +#define PVT_CTRL_MODE_HVT 0x4 +#define PVT_CTRL_MODE_SVT 0x6 +#define PVT_CTRL_TRIM_FLD 4 +#define PVT_CTRL_TRIM_MASK GENMASK(8, PVT_CTRL_TRIM_FLD) +#define PVT_DATA 0x04 +#define PVT_DATA_VALID BIT(10) +#define PVT_DATA_DATA_FLD 0 +#define PVT_DATA_DATA_MASK GENMASK(9, PVT_DATA_DATA_FLD) +#define PVT_TTHRES 0x08 +#define PVT_VTHRES 0x0C +#define PVT_LTHRES 0x10 +#define PVT_HTHRES 0x14 +#define PVT_STHRES 0x18 +#define PVT_THRES_LO_FLD 0 +#define PVT_THRES_LO_MASK GENMASK(9, PVT_THRES_LO_FLD) +#define PVT_THRES_HI_FLD 10 +#define PVT_THRES_HI_MASK GENMASK(19, PVT_THRES_HI_FLD) +#define PVT_TTIMEOUT 0x1C +#define PVT_INTR_STAT 0x20 +#define PVT_INTR_MASK 0x24 +#define PVT_RAW_INTR_STAT 0x28 +#define PVT_INTR_DVALID BIT(0) +#define PVT_INTR_TTHRES_LO BIT(1) +#define PVT_INTR_TTHRES_HI BIT(2) +#define PVT_INTR_VTHRES_LO BIT(3) +#define PVT_INTR_VTHRES_HI BIT(4) +#define PVT_INTR_LTHRES_LO BIT(5) +#define PVT_INTR_LTHRES_HI BIT(6) +#define PVT_INTR_HTHRES_LO BIT(7) +#define PVT_INTR_HTHRES_HI BIT(8) +#define PVT_INTR_STHRES_LO BIT(9) +#define PVT_INTR_STHRES_HI BIT(10) +#define PVT_INTR_ALL GENMASK(10, 0) +#define PVT_CLR_INTR 0x2C + +/* + * PVT sensors-related limits and default values + * @PVT_TEMP_MIN: Minimal temperature in millidegrees of Celsius. + * @PVT_TEMP_MAX: Maximal temperature in millidegrees of Celsius. 
+ * @PVT_TEMP_CHS: Number of temperature hwmon channels. + * @PVT_VOLT_MIN: Minimal voltage in mV. + * @PVT_VOLT_MAX: Maximal voltage in mV. + * @PVT_VOLT_CHS: Number of voltage hwmon channels. + * @PVT_DATA_MIN: Minimal PVT raw data value. + * @PVT_DATA_MAX: Maximal PVT raw data value. + * @PVT_TRIM_MIN: Minimal temperature sensor trim value. + * @PVT_TRIM_MAX: Maximal temperature sensor trim value. + * @PVT_TRIM_DEF: Default temperature sensor trim value (set a proper value + * when one is determined for Baikal-T1 SoC). + * @PVT_TRIM_TEMP: Maximum temperature encoded by the trim factor. + * @PVT_TRIM_STEP: Temperature stride corresponding to the trim value. + * @PVT_TOUT_MIN: Minimal timeout between samples in nanoseconds. + * @PVT_TOUT_DEF: Default data measurements timeout. In case if alarms are + * activated the PVT IRQ is enabled to be raised after each + * conversion in order to have the thresholds checked and the + * converted value cached. Too frequent conversions may cause + * the system CPU overload. Lets set the 50ms delay between + * them by default to prevent this. + */ +#define PVT_TEMP_MIN -48380L +#define PVT_TEMP_MAX 147438L +#define PVT_TEMP_CHS 1 +#define PVT_VOLT_MIN 620L +#define PVT_VOLT_MAX 1168L +#define PVT_VOLT_CHS 4 +#define PVT_DATA_MIN 0 +#define PVT_DATA_MAX (PVT_DATA_DATA_MASK >> PVT_DATA_DATA_FLD) +#define PVT_TRIM_MIN 0 +#define PVT_TRIM_MAX (PVT_CTRL_TRIM_MASK >> PVT_CTRL_TRIM_FLD) +#define PVT_TRIM_TEMP 7130 +#define PVT_TRIM_STEP (PVT_TRIM_TEMP / PVT_TRIM_MAX) +#define PVT_TRIM_DEF 0 +#define PVT_TOUT_MIN (NSEC_PER_SEC / 3000) +#if defined(CONFIG_SENSORS_BT1_PVT_ALARMS) +# define PVT_TOUT_DEF 60000 +#else +# define PVT_TOUT_DEF 0 +#endif + +/* + * enum pvt_sensor_type - Baikal-T1 PVT sensor types (correspond to each PVT + * sampling mode) + * @PVT_SENSOR*: helpers to traverse the sensors in loops. + * @PVT_TEMP: PVT Temperature sensor. + * @PVT_VOLT: PVT Voltage sensor. + * @PVT_LVT: PVT Low-Voltage threshold sensor. + * @PVT_HVT: PVT High-Voltage threshold sensor. + * @PVT_SVT: PVT Standard-Voltage threshold sensor. + */ +enum pvt_sensor_type { + PVT_SENSOR_FIRST, + PVT_TEMP = PVT_SENSOR_FIRST, + PVT_VOLT, + PVT_LVT, + PVT_HVT, + PVT_SVT, + PVT_SENSOR_LAST = PVT_SVT, + PVT_SENSORS_NUM +}; + +/* + * enum pvt_clock_type - Baikal-T1 PVT clocks. + * @PVT_CLOCK_APB: APB clock. + * @PVT_CLOCK_REF: PVT reference clock. + */ +enum pvt_clock_type { + PVT_CLOCK_APB, + PVT_CLOCK_REF, + PVT_CLOCK_NUM +}; + +/* + * struct pvt_sensor_info - Baikal-T1 PVT sensor informational structure + * @channel: Sensor channel ID. + * @label: hwmon sensor label. + * @mode: PVT mode corresponding to the channel. + * @thres_base: upper and lower threshold values of the sensor. + * @thres_sts_lo: low threshold status bitfield. + * @thres_sts_hi: high threshold status bitfield. + * @type: Sensor type. + * @attr_min_alarm: Min alarm attribute ID. + * @attr_min_alarm: Max alarm attribute ID. 
+ */ +struct pvt_sensor_info { + int channel; + const char *label; + u32 mode; + unsigned long thres_base; + u32 thres_sts_lo; + u32 thres_sts_hi; + enum hwmon_sensor_types type; + u32 attr_min_alarm; + u32 attr_max_alarm; +}; + +#define PVT_SENSOR_INFO(_ch, _label, _type, _mode, _thres) \ + { \ + .channel = _ch, \ + .label = _label, \ + .mode = PVT_CTRL_MODE_ ##_mode, \ + .thres_base = PVT_ ##_thres, \ + .thres_sts_lo = PVT_INTR_ ##_thres## _LO, \ + .thres_sts_hi = PVT_INTR_ ##_thres## _HI, \ + .type = _type, \ + .attr_min_alarm = _type## _min, \ + .attr_max_alarm = _type## _max, \ + } + +/* + * struct pvt_cache - PVT sensors data cache + * @data: data cache in raw format. + * @thres_sts_lo: low threshold status saved on the previous data conversion. + * @thres_sts_hi: high threshold status saved on the previous data conversion. + * @data_seqlock: cached data seq-lock. + * @conversion: data conversion completion. + */ +struct pvt_cache { + u32 data; +#if defined(CONFIG_SENSORS_BT1_PVT_ALARMS) + seqlock_t data_seqlock; + u32 thres_sts_lo; + u32 thres_sts_hi; +#else + struct completion conversion; +#endif +}; + +/* + * struct pvt_hwmon - Baikal-T1 PVT private data + * @dev: device structure of the PVT platform device. + * @hwmon: hwmon device structure. + * @regs: pointer to the Baikal-T1 PVT registers region. + * @irq: PVT events IRQ number. + * @clks: Array of the PVT clocks descriptor (APB/ref clocks). + * @ref_clk: Pointer to the reference clocks descriptor. + * @iface_mtx: Generic interface mutex (used to lock the alarm registers + * when the alarms enabled, or the data conversion interface + * if alarms are disabled). + * @sensor: current PVT sensor the data conversion is being performed for. + * @cache: data cache descriptor. + */ +struct pvt_hwmon { + struct device *dev; + struct device *hwmon; + + void __iomem *regs; + int irq; + + struct clk_bulk_data clks[PVT_CLOCK_NUM]; + + struct mutex iface_mtx; + enum pvt_sensor_type sensor; + struct pvt_cache cache[PVT_SENSORS_NUM]; +}; + +/* + * struct pvt_poly_term - a term descriptor of the PVT data translation + * polynomial + * @deg: degree of the term. + * @coef: multiplication factor of the term. + * @divider: distributed divider per each degree. + * @divider_leftover: divider leftover, which couldn't be redistributed. + */ +struct pvt_poly_term { + unsigned int deg; + long coef; + long divider; + long divider_leftover; +}; + +/* + * struct pvt_poly - PVT data translation polynomial descriptor + * @total_divider: total data divider. + * @terms: polynomial terms up to a free one. 
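+ *
+ * Purely for illustration (the evaluation helper, pvt_calc_poly(), is
+ * defined in the driver source): a hypothetical term
+ * { .deg = 1, .coef = 200, .divider = 3, .divider_leftover = 1 }
+ * contributes roughly data * 200 / 3 to the sum, and the accumulated sum
+ * is finally scaled down by @total_divider.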
+ */ +struct pvt_poly { + long total_divider; + struct pvt_poly_term terms[]; +}; + +#endif /* __HWMON_BT1_PVT_H__ */ diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c index ab719d372b0d..16be012a95ed 100644 --- a/drivers/hwmon/dell-smm-hwmon.c +++ b/drivers/hwmon/dell-smm-hwmon.c @@ -1073,13 +1073,6 @@ static const struct dmi_system_id i8k_dmi_table[] __initconst = { }, }, { - .ident = "Dell XPS421", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "XPS L421X"), - }, - }, - { .ident = "Dell Studio", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), @@ -1088,14 +1081,6 @@ static const struct dmi_system_id i8k_dmi_table[] __initconst = { .driver_data = (void *)&i8k_config_data[DELL_STUDIO], }, { - .ident = "Dell XPS 13", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "XPS13"), - }, - .driver_data = (void *)&i8k_config_data[DELL_XPS], - }, - { .ident = "Dell XPS M140", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), @@ -1104,17 +1089,10 @@ static const struct dmi_system_id i8k_dmi_table[] __initconst = { .driver_data = (void *)&i8k_config_data[DELL_XPS], }, { - .ident = "Dell XPS 15 9560", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "XPS 15 9560"), - }, - }, - { - .ident = "Dell XPS 15 9570", + .ident = "Dell XPS", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "XPS 15 9570"), + DMI_MATCH(DMI_PRODUCT_NAME, "XPS"), }, }, { } diff --git a/drivers/hwmon/gsc-hwmon.c b/drivers/hwmon/gsc-hwmon.c new file mode 100644 index 000000000000..2137bc65829d --- /dev/null +++ b/drivers/hwmon/gsc-hwmon.c @@ -0,0 +1,390 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for Gateworks System Controller Hardware Monitor module + * + * Copyright (C) 2020 Gateworks Corporation + */ +#include <linux/hwmon.h> +#include <linux/hwmon-sysfs.h> +#include <linux/mfd/gsc.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/slab.h> + +#include <linux/platform_data/gsc_hwmon.h> + +#define GSC_HWMON_MAX_TEMP_CH 16 +#define GSC_HWMON_MAX_IN_CH 16 + +#define GSC_HWMON_RESOLUTION 12 +#define GSC_HWMON_VREF 2500 + +struct gsc_hwmon_data { + struct gsc_dev *gsc; + struct gsc_hwmon_platform_data *pdata; + struct regmap *regmap; + const struct gsc_hwmon_channel *temp_ch[GSC_HWMON_MAX_TEMP_CH]; + const struct gsc_hwmon_channel *in_ch[GSC_HWMON_MAX_IN_CH]; + u32 temp_config[GSC_HWMON_MAX_TEMP_CH + 1]; + u32 in_config[GSC_HWMON_MAX_IN_CH + 1]; + struct hwmon_channel_info temp_info; + struct hwmon_channel_info in_info; + const struct hwmon_channel_info *info[3]; + struct hwmon_chip_info chip; +}; + +static struct regmap_bus gsc_hwmon_regmap_bus = { + .reg_read = gsc_read, + .reg_write = gsc_write, +}; + +static const struct regmap_config gsc_hwmon_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .cache_type = REGCACHE_NONE, +}; + +static ssize_t pwm_auto_point_temp_show(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + struct gsc_hwmon_data *hwmon = dev_get_drvdata(dev); + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + u8 reg = hwmon->pdata->fan_base + (2 * attr->index); + u8 regs[2]; + int ret; + + ret = regmap_bulk_read(hwmon->regmap, reg, regs, 2); + if (ret) + return ret; + + ret = regs[0] | regs[1] << 8; + return sprintf(buf, "%d\n", ret * 10); +} + +static ssize_t pwm_auto_point_temp_store(struct device 
*dev, + struct device_attribute *devattr, + const char *buf, size_t count) +{ + struct gsc_hwmon_data *hwmon = dev_get_drvdata(dev); + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + u8 reg = hwmon->pdata->fan_base + (2 * attr->index); + u8 regs[2]; + long temp; + int err; + + if (kstrtol(buf, 10, &temp)) + return -EINVAL; + + temp = clamp_val(temp, 0, 10000); + temp = DIV_ROUND_CLOSEST(temp, 10); + + regs[0] = temp & 0xff; + regs[1] = (temp >> 8) & 0xff; + err = regmap_bulk_write(hwmon->regmap, reg, regs, 2); + if (err) + return err; + + return count; +} + +static ssize_t pwm_auto_point_pwm_show(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + + return sprintf(buf, "%d\n", 255 * (50 + (attr->index * 10)) / 100); +} + +static SENSOR_DEVICE_ATTR_RO(pwm1_auto_point1_pwm, pwm_auto_point_pwm, 0); +static SENSOR_DEVICE_ATTR_RW(pwm1_auto_point1_temp, pwm_auto_point_temp, 0); + +static SENSOR_DEVICE_ATTR_RO(pwm1_auto_point2_pwm, pwm_auto_point_pwm, 1); +static SENSOR_DEVICE_ATTR_RW(pwm1_auto_point2_temp, pwm_auto_point_temp, 1); + +static SENSOR_DEVICE_ATTR_RO(pwm1_auto_point3_pwm, pwm_auto_point_pwm, 2); +static SENSOR_DEVICE_ATTR_RW(pwm1_auto_point3_temp, pwm_auto_point_temp, 2); + +static SENSOR_DEVICE_ATTR_RO(pwm1_auto_point4_pwm, pwm_auto_point_pwm, 3); +static SENSOR_DEVICE_ATTR_RW(pwm1_auto_point4_temp, pwm_auto_point_temp, 3); + +static SENSOR_DEVICE_ATTR_RO(pwm1_auto_point5_pwm, pwm_auto_point_pwm, 4); +static SENSOR_DEVICE_ATTR_RW(pwm1_auto_point5_temp, pwm_auto_point_temp, 4); + +static SENSOR_DEVICE_ATTR_RO(pwm1_auto_point6_pwm, pwm_auto_point_pwm, 5); +static SENSOR_DEVICE_ATTR_RW(pwm1_auto_point6_temp, pwm_auto_point_temp, 5); + +static struct attribute *gsc_hwmon_attributes[] = { + &sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point1_temp.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point2_temp.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point3_pwm.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point3_temp.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point4_pwm.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point4_temp.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point5_pwm.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point5_temp.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point6_pwm.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point6_temp.dev_attr.attr, + NULL +}; + +static const struct attribute_group gsc_hwmon_group = { + .attrs = gsc_hwmon_attributes, +}; +__ATTRIBUTE_GROUPS(gsc_hwmon); + +static int +gsc_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, + int channel, long *val) +{ + struct gsc_hwmon_data *hwmon = dev_get_drvdata(dev); + const struct gsc_hwmon_channel *ch; + int sz, ret; + long tmp; + u8 buf[3]; + + switch (type) { + case hwmon_in: + ch = hwmon->in_ch[channel]; + break; + case hwmon_temp: + ch = hwmon->temp_ch[channel]; + break; + default: + return -EOPNOTSUPP; + } + + sz = (ch->mode == mode_voltage) ? 
3 : 2; + ret = regmap_bulk_read(hwmon->regmap, ch->reg, buf, sz); + if (ret) + return ret; + + tmp = 0; + while (sz-- > 0) + tmp |= (buf[sz] << (8 * sz)); + + switch (ch->mode) { + case mode_temperature: + if (tmp > 0x8000) + tmp -= 0xffff; + break; + case mode_voltage_raw: + tmp = clamp_val(tmp, 0, BIT(GSC_HWMON_RESOLUTION)); + /* scale based on ref voltage and ADC resolution */ + tmp *= GSC_HWMON_VREF; + tmp >>= GSC_HWMON_RESOLUTION; + /* scale based on optional voltage divider */ + if (ch->vdiv[0] && ch->vdiv[1]) { + tmp *= (ch->vdiv[0] + ch->vdiv[1]); + tmp /= ch->vdiv[1]; + } + /* adjust by uV offset */ + tmp += ch->mvoffset; + break; + case mode_voltage: + /* no adjustment needed */ + break; + } + + *val = tmp; + + return 0; +} + +static int +gsc_hwmon_read_string(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, const char **buf) +{ + struct gsc_hwmon_data *hwmon = dev_get_drvdata(dev); + + switch (type) { + case hwmon_in: + *buf = hwmon->in_ch[channel]->name; + break; + case hwmon_temp: + *buf = hwmon->temp_ch[channel]->name; + break; + default: + return -ENOTSUPP; + } + + return 0; +} + +static umode_t +gsc_hwmon_is_visible(const void *_data, enum hwmon_sensor_types type, u32 attr, + int ch) +{ + return 0444; +} + +static const struct hwmon_ops gsc_hwmon_ops = { + .is_visible = gsc_hwmon_is_visible, + .read = gsc_hwmon_read, + .read_string = gsc_hwmon_read_string, +}; + +static struct gsc_hwmon_platform_data * +gsc_hwmon_get_devtree_pdata(struct device *dev) +{ + struct gsc_hwmon_platform_data *pdata; + struct gsc_hwmon_channel *ch; + struct fwnode_handle *child; + struct device_node *fan; + int nchannels; + + nchannels = device_get_child_node_count(dev); + if (nchannels == 0) + return ERR_PTR(-ENODEV); + + pdata = devm_kzalloc(dev, + sizeof(*pdata) + nchannels * sizeof(*ch), + GFP_KERNEL); + if (!pdata) + return ERR_PTR(-ENOMEM); + ch = (struct gsc_hwmon_channel *)(pdata + 1); + pdata->channels = ch; + pdata->nchannels = nchannels; + + /* fan controller base address */ + fan = of_find_compatible_node(dev->parent->of_node, NULL, "gw,gsc-fan"); + if (fan && of_property_read_u32(fan, "reg", &pdata->fan_base)) { + dev_err(dev, "fan node without base\n"); + return ERR_PTR(-EINVAL); + } + + /* allocate structures for channels and count instances of each type */ + device_for_each_child_node(dev, child) { + if (fwnode_property_read_string(child, "label", &ch->name)) { + dev_err(dev, "channel without label\n"); + fwnode_handle_put(child); + return ERR_PTR(-EINVAL); + } + if (fwnode_property_read_u32(child, "reg", &ch->reg)) { + dev_err(dev, "channel without reg\n"); + fwnode_handle_put(child); + return ERR_PTR(-EINVAL); + } + if (fwnode_property_read_u32(child, "gw,mode", &ch->mode)) { + dev_err(dev, "channel without mode\n"); + fwnode_handle_put(child); + return ERR_PTR(-EINVAL); + } + if (ch->mode > mode_max) { + dev_err(dev, "invalid channel mode\n"); + fwnode_handle_put(child); + return ERR_PTR(-EINVAL); + } + + if (!fwnode_property_read_u32(child, + "gw,voltage-offset-microvolt", + &ch->mvoffset)) + ch->mvoffset /= 1000; + fwnode_property_read_u32_array(child, + "gw,voltage-divider-ohms", + ch->vdiv, ARRAY_SIZE(ch->vdiv)); + ch++; + } + + return pdata; +} + +static int gsc_hwmon_probe(struct platform_device *pdev) +{ + struct gsc_dev *gsc = dev_get_drvdata(pdev->dev.parent); + struct device *dev = &pdev->dev; + struct device *hwmon_dev; + struct gsc_hwmon_platform_data *pdata = dev_get_platdata(dev); + struct gsc_hwmon_data *hwmon; + const struct 
attribute_group **groups; + int i, i_in, i_temp; + + if (!pdata) { + pdata = gsc_hwmon_get_devtree_pdata(dev); + if (IS_ERR(pdata)) + return PTR_ERR(pdata); + } + + hwmon = devm_kzalloc(dev, sizeof(*hwmon), GFP_KERNEL); + if (!hwmon) + return -ENOMEM; + hwmon->gsc = gsc; + hwmon->pdata = pdata; + + hwmon->regmap = devm_regmap_init(dev, &gsc_hwmon_regmap_bus, + gsc->i2c_hwmon, + &gsc_hwmon_regmap_config); + if (IS_ERR(hwmon->regmap)) + return PTR_ERR(hwmon->regmap); + + for (i = 0, i_in = 0, i_temp = 0; i < hwmon->pdata->nchannels; i++) { + const struct gsc_hwmon_channel *ch = &pdata->channels[i]; + + switch (ch->mode) { + case mode_temperature: + if (i_temp == GSC_HWMON_MAX_TEMP_CH) { + dev_err(gsc->dev, "too many temp channels\n"); + return -EINVAL; + } + hwmon->temp_ch[i_temp] = ch; + hwmon->temp_config[i_temp] = HWMON_T_INPUT | + HWMON_T_LABEL; + i_temp++; + break; + case mode_voltage: + case mode_voltage_raw: + if (i_in == GSC_HWMON_MAX_IN_CH) { + dev_err(gsc->dev, "too many input channels\n"); + return -EINVAL; + } + hwmon->in_ch[i_in] = ch; + hwmon->in_config[i_in] = + HWMON_I_INPUT | HWMON_I_LABEL; + i_in++; + break; + default: + dev_err(gsc->dev, "invalid mode: %d\n", ch->mode); + return -EINVAL; + } + } + + /* setup config structures */ + hwmon->chip.ops = &gsc_hwmon_ops; + hwmon->chip.info = hwmon->info; + hwmon->info[0] = &hwmon->temp_info; + hwmon->info[1] = &hwmon->in_info; + hwmon->temp_info.type = hwmon_temp; + hwmon->temp_info.config = hwmon->temp_config; + hwmon->in_info.type = hwmon_in; + hwmon->in_info.config = hwmon->in_config; + + groups = pdata->fan_base ? gsc_hwmon_groups : NULL; + hwmon_dev = devm_hwmon_device_register_with_info(dev, + KBUILD_MODNAME, hwmon, + &hwmon->chip, groups); + return PTR_ERR_OR_ZERO(hwmon_dev); +} + +static const struct of_device_id gsc_hwmon_of_match[] = { + { .compatible = "gw,gsc-adc", }, + {} +}; + +static struct platform_driver gsc_hwmon_driver = { + .driver = { + .name = "gsc-hwmon", + .of_match_table = gsc_hwmon_of_match, + }, + .probe = gsc_hwmon_probe, +}; + +module_platform_driver(gsc_hwmon_driver); + +MODULE_AUTHOR("Tim Harvey <tharvey@gateworks.com>"); +MODULE_DESCRIPTION("GSC hardware monitor driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c index 6a30fb453f7a..3f596a5328da 100644 --- a/drivers/hwmon/hwmon.c +++ b/drivers/hwmon/hwmon.c @@ -15,6 +15,7 @@ #include <linux/gfp.h> #include <linux/hwmon.h> #include <linux/idr.h> +#include <linux/list.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/slab.h> @@ -31,7 +32,7 @@ struct hwmon_device { const char *name; struct device dev; const struct hwmon_chip_info *chip; - + struct list_head tzdata; struct attribute_group group; const struct attribute_group **groups; }; @@ -55,12 +56,12 @@ struct hwmon_device_attribute { /* * Thermal zone information - * In addition to the reference to the hwmon device, - * also provides the sensor index. 
*/ struct hwmon_thermal_data { + struct list_head node; /* hwmon tzdata list entry */ struct device *dev; /* Reference to hwmon device */ int index; /* sensor index */ + struct thermal_zone_device *tzd;/* thermal zone device */ }; static ssize_t @@ -156,10 +157,17 @@ static const struct thermal_zone_of_device_ops hwmon_thermal_ops = { .get_temp = hwmon_thermal_get_temp, }; +static void hwmon_thermal_remove_sensor(void *data) +{ + list_del(data); +} + static int hwmon_thermal_add_sensor(struct device *dev, int index) { + struct hwmon_device *hwdev = to_hwmon_device(dev); struct hwmon_thermal_data *tdata; struct thermal_zone_device *tzd; + int err; tdata = devm_kzalloc(dev, sizeof(*tdata), GFP_KERNEL); if (!tdata) @@ -177,13 +185,68 @@ static int hwmon_thermal_add_sensor(struct device *dev, int index) if (IS_ERR(tzd) && (PTR_ERR(tzd) != -ENODEV)) return PTR_ERR(tzd); + err = devm_add_action(dev, hwmon_thermal_remove_sensor, &tdata->node); + if (err) + return err; + + tdata->tzd = tzd; + list_add(&tdata->node, &hwdev->tzdata); + return 0; } + +static int hwmon_thermal_register_sensors(struct device *dev) +{ + struct hwmon_device *hwdev = to_hwmon_device(dev); + const struct hwmon_chip_info *chip = hwdev->chip; + const struct hwmon_channel_info **info = chip->info; + void *drvdata = dev_get_drvdata(dev); + int i; + + for (i = 1; info[i]; i++) { + int j; + + if (info[i]->type != hwmon_temp) + continue; + + for (j = 0; info[i]->config[j]; j++) { + int err; + + if (!(info[i]->config[j] & HWMON_T_INPUT) || + !chip->ops->is_visible(drvdata, hwmon_temp, + hwmon_temp_input, j)) + continue; + + err = hwmon_thermal_add_sensor(dev, j); + if (err) + return err; + } + } + + return 0; +} + +static void hwmon_thermal_notify(struct device *dev, int index) +{ + struct hwmon_device *hwdev = to_hwmon_device(dev); + struct hwmon_thermal_data *tzdata; + + list_for_each_entry(tzdata, &hwdev->tzdata, node) { + if (tzdata->index == index) { + thermal_zone_device_update(tzdata->tzd, + THERMAL_EVENT_UNSPECIFIED); + } + } +} + #else -static int hwmon_thermal_add_sensor(struct device *dev, int index) +static int hwmon_thermal_register_sensors(struct device *dev) { return 0; } + +static void hwmon_thermal_notify(struct device *dev, int index) { } + #endif /* IS_REACHABLE(CONFIG_THERMAL) && ... 
*/ static int hwmon_attr_base(enum hwmon_sensor_types type) @@ -511,6 +574,35 @@ static const int __templates_size[] = { [hwmon_intrusion] = ARRAY_SIZE(hwmon_intrusion_attr_templates), }; +int hwmon_notify_event(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel) +{ + char sattr[MAX_SYSFS_ATTR_NAME_LENGTH]; + const char * const *templates; + const char *template; + int base; + + if (type >= ARRAY_SIZE(__templates)) + return -EINVAL; + if (attr >= __templates_size[type]) + return -EINVAL; + + templates = __templates[type]; + template = templates[attr]; + + base = hwmon_attr_base(type); + + scnprintf(sattr, MAX_SYSFS_ATTR_NAME_LENGTH, template, base + channel); + sysfs_notify(&dev->kobj, NULL, sattr); + kobject_uevent(&dev->kobj, KOBJ_CHANGE); + + if (type == hwmon_temp) + hwmon_thermal_notify(dev, channel); + + return 0; +} +EXPORT_SYMBOL_GPL(hwmon_notify_event); + static int hwmon_num_channel_attrs(const struct hwmon_channel_info *info) { int i, n; @@ -596,7 +688,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata, { struct hwmon_device *hwdev; struct device *hdev; - int i, j, err, id; + int i, err, id; /* Complain about invalid characters in hwmon name attribute */ if (name && (!strlen(name) || strpbrk(name, "-* \t\n"))) @@ -661,33 +753,19 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata, if (err) goto free_hwmon; + INIT_LIST_HEAD(&hwdev->tzdata); + if (dev && dev->of_node && chip && chip->ops->read && chip->info[0]->type == hwmon_chip && (chip->info[0]->config[0] & HWMON_C_REGISTER_TZ)) { - const struct hwmon_channel_info **info = chip->info; - - for (i = 1; info[i]; i++) { - if (info[i]->type != hwmon_temp) - continue; - - for (j = 0; info[i]->config[j]; j++) { - if (!chip->ops->is_visible(drvdata, hwmon_temp, - hwmon_temp_input, j)) - continue; - if (info[i]->config[j] & HWMON_T_INPUT) { - err = hwmon_thermal_add_sensor(hdev, j); - if (err) { - device_unregister(hdev); - /* - * Don't worry about hwdev; - * hwmon_dev_release(), called - * from device_unregister(), - * will free it. - */ - goto ida_remove; - } - } - } + err = hwmon_thermal_register_sensors(hdev); + if (err) { + device_unregister(hdev); + /* + * Don't worry about hwdev; hwmon_dev_release(), called + * from device_unregister(), will free it. 
+ */ + goto ida_remove; } } diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c index e9e78c0b7212..55d474ec7c35 100644 --- a/drivers/hwmon/ina2xx.c +++ b/drivers/hwmon/ina2xx.c @@ -74,6 +74,17 @@ #define INA226_READ_AVG(reg) (((reg) & INA226_AVG_RD_MASK) >> 9) #define INA226_SHIFT_AVG(val) ((val) << 9) +/* bit number of alert functions in Mask/Enable Register */ +#define INA226_SHUNT_OVER_VOLTAGE_BIT 15 +#define INA226_SHUNT_UNDER_VOLTAGE_BIT 14 +#define INA226_BUS_OVER_VOLTAGE_BIT 13 +#define INA226_BUS_UNDER_VOLTAGE_BIT 12 +#define INA226_POWER_OVER_LIMIT_BIT 11 + +/* bit mask for alert config bits of Mask/Enable Register */ +#define INA226_ALERT_CONFIG_MASK 0xFC00 +#define INA226_ALERT_FUNCTION_FLAG BIT(4) + /* common attrs, ina226 attrs and NULL */ #define INA2XX_MAX_ATTRIBUTE_GROUPS 3 @@ -303,6 +314,145 @@ static ssize_t ina2xx_value_show(struct device *dev, ina2xx_get_value(data, attr->index, regval)); } +static int ina226_reg_to_alert(struct ina2xx_data *data, u8 bit, u16 regval) +{ + int reg; + + switch (bit) { + case INA226_SHUNT_OVER_VOLTAGE_BIT: + case INA226_SHUNT_UNDER_VOLTAGE_BIT: + reg = INA2XX_SHUNT_VOLTAGE; + break; + case INA226_BUS_OVER_VOLTAGE_BIT: + case INA226_BUS_UNDER_VOLTAGE_BIT: + reg = INA2XX_BUS_VOLTAGE; + break; + case INA226_POWER_OVER_LIMIT_BIT: + reg = INA2XX_POWER; + break; + default: + /* programmer goofed */ + WARN_ON_ONCE(1); + return 0; + } + + return ina2xx_get_value(data, reg, regval); +} + +/* + * Turns alert limit values into register values. + * Opposite of the formula in ina2xx_get_value(). + */ +static s16 ina226_alert_to_reg(struct ina2xx_data *data, u8 bit, int val) +{ + switch (bit) { + case INA226_SHUNT_OVER_VOLTAGE_BIT: + case INA226_SHUNT_UNDER_VOLTAGE_BIT: + val *= data->config->shunt_div; + return clamp_val(val, SHRT_MIN, SHRT_MAX); + case INA226_BUS_OVER_VOLTAGE_BIT: + case INA226_BUS_UNDER_VOLTAGE_BIT: + val = (val * 1000) << data->config->bus_voltage_shift; + val = DIV_ROUND_CLOSEST(val, data->config->bus_voltage_lsb); + return clamp_val(val, 0, SHRT_MAX); + case INA226_POWER_OVER_LIMIT_BIT: + val = DIV_ROUND_CLOSEST(val, data->power_lsb_uW); + return clamp_val(val, 0, USHRT_MAX); + default: + /* programmer goofed */ + WARN_ON_ONCE(1); + return 0; + } +} + +static ssize_t ina226_alert_show(struct device *dev, + struct device_attribute *da, char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct ina2xx_data *data = dev_get_drvdata(dev); + int regval; + int val = 0; + int ret; + + mutex_lock(&data->config_lock); + ret = regmap_read(data->regmap, INA226_MASK_ENABLE, ®val); + if (ret) + goto abort; + + if (regval & BIT(attr->index)) { + ret = regmap_read(data->regmap, INA226_ALERT_LIMIT, ®val); + if (ret) + goto abort; + val = ina226_reg_to_alert(data, attr->index, regval); + } + + ret = snprintf(buf, PAGE_SIZE, "%d\n", val); +abort: + mutex_unlock(&data->config_lock); + return ret; +} + +static ssize_t ina226_alert_store(struct device *dev, + struct device_attribute *da, + const char *buf, size_t count) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct ina2xx_data *data = dev_get_drvdata(dev); + unsigned long val; + int ret; + + ret = kstrtoul(buf, 10, &val); + if (ret < 0) + return ret; + + /* + * Clear all alerts first to avoid accidentally triggering ALERT pin + * due to register write sequence. Then, only enable the alert + * if the value is non-zero. 
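+	 *
+	 * Usage sketch (illustrative values): writing e.g. 32000 to in1_crit
+	 * arms the bus over-voltage alert at 32.000 V, whereas writing 0
+	 * leaves all alert configuration bits cleared, i.e. disables the
+	 * alert function.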
+ */ + mutex_lock(&data->config_lock); + ret = regmap_update_bits(data->regmap, INA226_MASK_ENABLE, + INA226_ALERT_CONFIG_MASK, 0); + if (ret < 0) + goto abort; + + ret = regmap_write(data->regmap, INA226_ALERT_LIMIT, + ina226_alert_to_reg(data, attr->index, val)); + if (ret < 0) + goto abort; + + if (val != 0) { + ret = regmap_update_bits(data->regmap, INA226_MASK_ENABLE, + INA226_ALERT_CONFIG_MASK, + BIT(attr->index)); + if (ret < 0) + goto abort; + } + + ret = count; +abort: + mutex_unlock(&data->config_lock); + return ret; +} + +static ssize_t ina226_alarm_show(struct device *dev, + struct device_attribute *da, char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct ina2xx_data *data = dev_get_drvdata(dev); + int regval; + int alarm = 0; + int ret; + + ret = regmap_read(data->regmap, INA226_MASK_ENABLE, ®val); + if (ret) + return ret; + + alarm = (regval & BIT(attr->index)) && + (regval & INA226_ALERT_FUNCTION_FLAG); + return snprintf(buf, PAGE_SIZE, "%d\n", alarm); +} + /* * In order to keep calibration register value fixed, the product * of current_lsb and shunt_resistor should also be fixed and equal @@ -392,15 +542,38 @@ static ssize_t ina226_interval_show(struct device *dev, /* shunt voltage */ static SENSOR_DEVICE_ATTR_RO(in0_input, ina2xx_value, INA2XX_SHUNT_VOLTAGE); +/* shunt voltage over/under voltage alert setting and alarm */ +static SENSOR_DEVICE_ATTR_RW(in0_crit, ina226_alert, + INA226_SHUNT_OVER_VOLTAGE_BIT); +static SENSOR_DEVICE_ATTR_RW(in0_lcrit, ina226_alert, + INA226_SHUNT_UNDER_VOLTAGE_BIT); +static SENSOR_DEVICE_ATTR_RO(in0_crit_alarm, ina226_alarm, + INA226_SHUNT_OVER_VOLTAGE_BIT); +static SENSOR_DEVICE_ATTR_RO(in0_lcrit_alarm, ina226_alarm, + INA226_SHUNT_UNDER_VOLTAGE_BIT); /* bus voltage */ static SENSOR_DEVICE_ATTR_RO(in1_input, ina2xx_value, INA2XX_BUS_VOLTAGE); +/* bus voltage over/under voltage alert setting and alarm */ +static SENSOR_DEVICE_ATTR_RW(in1_crit, ina226_alert, + INA226_BUS_OVER_VOLTAGE_BIT); +static SENSOR_DEVICE_ATTR_RW(in1_lcrit, ina226_alert, + INA226_BUS_UNDER_VOLTAGE_BIT); +static SENSOR_DEVICE_ATTR_RO(in1_crit_alarm, ina226_alarm, + INA226_BUS_OVER_VOLTAGE_BIT); +static SENSOR_DEVICE_ATTR_RO(in1_lcrit_alarm, ina226_alarm, + INA226_BUS_UNDER_VOLTAGE_BIT); /* calculated current */ static SENSOR_DEVICE_ATTR_RO(curr1_input, ina2xx_value, INA2XX_CURRENT); /* calculated power */ static SENSOR_DEVICE_ATTR_RO(power1_input, ina2xx_value, INA2XX_POWER); +/* over-limit power alert setting and alarm */ +static SENSOR_DEVICE_ATTR_RW(power1_crit, ina226_alert, + INA226_POWER_OVER_LIMIT_BIT); +static SENSOR_DEVICE_ATTR_RO(power1_crit_alarm, ina226_alarm, + INA226_POWER_OVER_LIMIT_BIT); /* shunt resistance */ static SENSOR_DEVICE_ATTR_RW(shunt_resistor, ina2xx_shunt, INA2XX_CALIBRATION); @@ -423,6 +596,16 @@ static const struct attribute_group ina2xx_group = { }; static struct attribute *ina226_attrs[] = { + &sensor_dev_attr_in0_crit.dev_attr.attr, + &sensor_dev_attr_in0_lcrit.dev_attr.attr, + &sensor_dev_attr_in0_crit_alarm.dev_attr.attr, + &sensor_dev_attr_in0_lcrit_alarm.dev_attr.attr, + &sensor_dev_attr_in1_crit.dev_attr.attr, + &sensor_dev_attr_in1_lcrit.dev_attr.attr, + &sensor_dev_attr_in1_crit_alarm.dev_attr.attr, + &sensor_dev_attr_in1_lcrit_alarm.dev_attr.attr, + &sensor_dev_attr_power1_crit.dev_attr.attr, + &sensor_dev_attr_power1_crit_alarm.dev_attr.attr, &sensor_dev_attr_update_interval.dev_attr.attr, NULL, }; diff --git a/drivers/hwmon/lm70.c b/drivers/hwmon/lm70.c index 4122e59f0bb4..ae2b84263a44 100644 
--- a/drivers/hwmon/lm70.c +++ b/drivers/hwmon/lm70.c @@ -25,7 +25,7 @@ #include <linux/spi/spi.h> #include <linux/slab.h> #include <linux/of_device.h> - +#include <linux/acpi.h> #define DRVNAME "lm70" @@ -148,18 +148,50 @@ static const struct of_device_id lm70_of_ids[] = { MODULE_DEVICE_TABLE(of, lm70_of_ids); #endif +#ifdef CONFIG_ACPI +static const struct acpi_device_id lm70_acpi_ids[] = { + { + .id = "LM000070", + .driver_data = LM70_CHIP_LM70, + }, + { + .id = "TMP00121", + .driver_data = LM70_CHIP_TMP121, + }, + { + .id = "LM000071", + .driver_data = LM70_CHIP_LM71, + }, + { + .id = "LM000074", + .driver_data = LM70_CHIP_LM74, + }, + {}, +}; +MODULE_DEVICE_TABLE(acpi, lm70_acpi_ids); +#endif + static int lm70_probe(struct spi_device *spi) { - const struct of_device_id *match; + const struct of_device_id *of_match; struct device *hwmon_dev; struct lm70 *p_lm70; int chip; - match = of_match_device(lm70_of_ids, &spi->dev); - if (match) - chip = (int)(uintptr_t)match->data; - else - chip = spi_get_device_id(spi)->driver_data; + of_match = of_match_device(lm70_of_ids, &spi->dev); + if (of_match) + chip = (int)(uintptr_t)of_match->data; + else { +#ifdef CONFIG_ACPI + const struct acpi_device_id *acpi_match; + + acpi_match = acpi_match_device(lm70_acpi_ids, &spi->dev); + if (acpi_match) + chip = (int)(uintptr_t)acpi_match->driver_data; + else +#endif + chip = spi_get_device_id(spi)->driver_data; + } /* signaling is SPI_MODE_0 */ if (spi->mode & (SPI_CPOL | SPI_CPHA)) @@ -195,6 +227,7 @@ static struct spi_driver lm70_driver = { .driver = { .name = "lm70", .of_match_table = of_match_ptr(lm70_of_ids), + .acpi_match_table = ACPI_PTR(lm70_acpi_ids), }, .id_table = lm70_ids, .probe = lm70_probe, diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c index 5e6392294c03..ba0be48aeadd 100644 --- a/drivers/hwmon/lm75.c +++ b/drivers/hwmon/lm75.c @@ -797,8 +797,10 @@ static int lm75_detect(struct i2c_client *new_client, /* First check for LM75A */ if (i2c_smbus_read_byte_data(new_client, 7) == LM75A_ID) { - /* LM75A returns 0xff on unused registers so - just to be sure we check for that too. */ + /* + * LM75A returns 0xff on unused registers so + * just to be sure we check for that too. + */ if (i2c_smbus_read_byte_data(new_client, 4) != 0xff || i2c_smbus_read_byte_data(new_client, 5) != 0xff || i2c_smbus_read_byte_data(new_client, 6) != 0xff) @@ -849,6 +851,7 @@ static int lm75_suspend(struct device *dev) { int status; struct i2c_client *client = to_i2c_client(dev); + status = i2c_smbus_read_byte_data(client, LM75_REG_CONF); if (status < 0) { dev_dbg(&client->dev, "Can't read config? %d\n", status); @@ -863,6 +866,7 @@ static int lm75_resume(struct device *dev) { int status; struct i2c_client *client = to_i2c_client(dev); + status = i2c_smbus_read_byte_data(client, LM75_REG_CONF); if (status < 0) { dev_dbg(&client->dev, "Can't read config? %d\n", status); diff --git a/drivers/hwmon/lm75.h b/drivers/hwmon/lm75.h index b614e6328566..a398171162a8 100644 --- a/drivers/hwmon/lm75.h +++ b/drivers/hwmon/lm75.h @@ -1,17 +1,15 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ /* - lm75.h - Part of lm_sensors, Linux kernel modules for hardware - monitoring - Copyright (c) 2003 Mark M. Hoffman <mhoffman@lightlink.com> - -*/ + * lm75.h - Part of lm_sensors, Linux kernel modules for hardware monitoring + * Copyright (c) 2003 Mark M. 
Hoffman <mhoffman@lightlink.com> + */ /* - This file contains common code for encoding/decoding LM75 type - temperature readings, which are emulated by many of the chips - we support. As the user is unlikely to load more than one driver - which contains this code, we don't worry about the wasted space. -*/ + * This file contains common code for encoding/decoding LM75 type + * temperature readings, which are emulated by many of the chips + * we support. As the user is unlikely to load more than one driver + * which contains this code, we don't worry about the wasted space. + */ #include <linux/kernel.h> @@ -20,18 +18,23 @@ #define LM75_TEMP_MAX 125000 #define LM75_SHUTDOWN 0x01 -/* TEMP: 0.001C/bit (-55C to +125C) - REG: (0.5C/bit, two's complement) << 7 */ +/* + * TEMP: 0.001C/bit (-55C to +125C) + * REG: (0.5C/bit, two's complement) << 7 + */ static inline u16 LM75_TEMP_TO_REG(long temp) { int ntemp = clamp_val(temp, LM75_TEMP_MIN, LM75_TEMP_MAX); + ntemp += (ntemp < 0 ? -250 : 250); return (u16)((ntemp / 500) << 7); } static inline int LM75_TEMP_FROM_REG(u16 reg) { - /* use integer division instead of equivalent right shift to - guarantee arithmetic shift and preserve the sign */ + /* + * use integer division instead of equivalent right shift to + * guarantee arithmetic shift and preserve the sign + */ return ((s16)reg / 128) * 500; } diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c index 9b3c9f390ef8..7bdc664af55b 100644 --- a/drivers/hwmon/lm90.c +++ b/drivers/hwmon/lm90.c @@ -35,6 +35,14 @@ * explicitly as max6659, or if its address is not 0x4c. * These chips lack the remote temperature offset feature. * + * This driver also supports the MAX6654 chip made by Maxim. This chip can + * be at 9 different addresses, similar to MAX6680/MAX6681. The MAX6654 is + * otherwise similar to MAX6657/MAX6658/MAX6659. Extended range is available + * by setting the configuration register accordingly, and is done during + * initialization. Extended precision is only available at conversion rates + * of 1 Hz and slower. Note that extended precision is not enabled by + * default, as this driver initializes all chips to 2 Hz by design. + * * This driver also supports the MAX6646, MAX6647, MAX6648, MAX6649 and * MAX6692 chips made by Maxim. These are again similar to the LM86, * but they use unsigned temperature values and can report temperatures @@ -94,8 +102,8 @@ * have address 0x4d. * MAX6647 has address 0x4e. * MAX6659 can have address 0x4c, 0x4d or 0x4e. - * MAX6680 and MAX6681 can have address 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, - * 0x4c, 0x4d or 0x4e. + * MAX6654, MAX6680, and MAX6681 can have address 0x18, 0x19, 0x1a, 0x29, + * 0x2a, 0x2b, 0x4c, 0x4d or 0x4e. * SA56004 can have address 0x48 through 0x4F. 
*/ @@ -104,7 +112,7 @@ static const unsigned short normal_i2c[] = { 0x4d, 0x4e, 0x4f, I2C_CLIENT_END }; enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680, - max6646, w83l771, max6696, sa56004, g781, tmp451 }; + max6646, w83l771, max6696, sa56004, g781, tmp451, max6654 }; /* * The LM90 registers @@ -145,7 +153,7 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680, #define LM90_REG_R_TCRIT_HYST 0x21 #define LM90_REG_W_TCRIT_HYST 0x21 -/* MAX6646/6647/6649/6657/6658/6659/6695/6696 registers */ +/* MAX6646/6647/6649/6654/6657/6658/6659/6695/6696 registers */ #define MAX6657_REG_R_LOCAL_TEMPL 0x11 #define MAX6696_REG_R_STATUS2 0x12 @@ -209,6 +217,7 @@ static const struct i2c_device_id lm90_id[] = { { "max6646", max6646 }, { "max6647", max6646 }, { "max6649", max6646 }, + { "max6654", max6654 }, { "max6657", max6657 }, { "max6658", max6657 }, { "max6659", max6659 }, @@ -270,6 +279,10 @@ static const struct of_device_id __maybe_unused lm90_of_match[] = { .data = (void *)max6646 }, { + .compatible = "dallas,max6654", + .data = (void *)max6654 + }, + { .compatible = "dallas,max6657", .data = (void *)max6657 }, @@ -367,6 +380,11 @@ static const struct lm90_params lm90_params[] = { .max_convrate = 6, .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL, }, + [max6654] = { + .alert_alarms = 0x7c, + .max_convrate = 7, + .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL, + }, [max6657] = { .flags = LM90_PAUSE_FOR_CONFIG, .alert_alarms = 0x7c, @@ -1557,6 +1575,16 @@ static int lm90_detect(struct i2c_client *client, && (config1 & 0x3f) == 0x00 && convrate <= 0x07) { name = "max6646"; + } else + /* + * The chip_id of the MAX6654 holds the revision of the chip. + * The lowest 3 bits of the config1 register are unused and + * should return zero when read. + */ + if (chip_id == 0x08 + && (config1 & 0x07) == 0x00 + && convrate <= 0x07) { + name = "max6654"; } } else if (address == 0x4C @@ -1661,6 +1689,15 @@ static int lm90_init_client(struct i2c_client *client, struct lm90_data *data) config |= 0x18; /* + * Put MAX6654 into extended range (0x20, extend minimum range from + * 0 degrees to -64 degrees). Note that extended resolution is not + * possible on the MAX6654 unless conversion rate is set to 1 Hz or + * slower, which is intentionally not done by default. 
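+	 *
+	 * A user who needs the extra precision can still lower the conversion
+	 * rate at run time, e.g. (illustrative):
+	 *
+	 *	echo 1000 > /sys/class/hwmon/hwmonN/update_interval
+	 *
+	 * which selects the 1 Hz conversion rate mentioned above.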
+ */ + if (data->kind == max6654) + config |= 0x20; + + /* * Select external channel 0 for max6695/96 */ if (data->kind == max6696) diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index 7efa6bfef060..e7e1ddc1d631 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c @@ -2047,7 +2047,7 @@ store_temp_beep(struct device *dev, struct device_attribute *attr, static umode_t nct6775_in_is_visible(struct kobject *kobj, struct attribute *attr, int index) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct nct6775_data *data = dev_get_drvdata(dev); int in = index / 5; /* voltage index */ @@ -2253,7 +2253,7 @@ store_fan_pulses(struct device *dev, struct device_attribute *attr, static umode_t nct6775_fan_is_visible(struct kobject *kobj, struct attribute *attr, int index) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct nct6775_data *data = dev_get_drvdata(dev); int fan = index / 6; /* fan index */ int nr = index % 6; /* attribute index */ @@ -2440,7 +2440,7 @@ store_temp_type(struct device *dev, struct device_attribute *attr, static umode_t nct6775_temp_is_visible(struct kobject *kobj, struct attribute *attr, int index) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct nct6775_data *data = dev_get_drvdata(dev); int temp = index / 10; /* temp index */ int nr = index % 10; /* attribute index */ @@ -3257,7 +3257,7 @@ store_auto_temp(struct device *dev, struct device_attribute *attr, static umode_t nct6775_pwm_is_visible(struct kobject *kobj, struct attribute *attr, int index) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct nct6775_data *data = dev_get_drvdata(dev); int pwm = index / 36; /* pwm index */ int nr = index % 36; /* attribute index */ @@ -3459,7 +3459,7 @@ static SENSOR_DEVICE_ATTR(beep_enable, S_IWUSR | S_IRUGO, show_beep, static umode_t nct6775_other_is_visible(struct kobject *kobj, struct attribute *attr, int index) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct nct6775_data *data = dev_get_drvdata(dev); if (index == 0 && !data->have_vid) diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c index 2e97e56c72c7..570df8eb5272 100644 --- a/drivers/hwmon/nct7802.c +++ b/drivers/hwmon/nct7802.c @@ -679,7 +679,7 @@ static struct attribute *nct7802_temp_attrs[] = { static umode_t nct7802_temp_is_visible(struct kobject *kobj, struct attribute *attr, int index) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct nct7802_data *data = dev_get_drvdata(dev); unsigned int reg; int err; @@ -778,7 +778,7 @@ static struct attribute *nct7802_in_attrs[] = { static umode_t nct7802_in_is_visible(struct kobject *kobj, struct attribute *attr, int index) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct nct7802_data *data = dev_get_drvdata(dev); unsigned int reg; int err; @@ -853,7 +853,7 @@ static struct attribute *nct7802_fan_attrs[] = { static umode_t nct7802_fan_is_visible(struct kobject *kobj, struct attribute *attr, int index) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct nct7802_data *data = dev_get_drvdata(dev); int fan = index / 4; /* 4 attributes per 
fan */ unsigned int reg; diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c index a7eb10d2a053..b0425694f702 100644 --- a/drivers/hwmon/nct7904.c +++ b/drivers/hwmon/nct7904.c @@ -8,6 +8,9 @@ * Copyright (c) 2019 Advantech * Author: Amy.Shih <amy.shih@advantech.com.tw> * + * Copyright (c) 2020 Advantech + * Author: Yuechao Zhao <yuechao.zhao@advantech.com.cn> + * * Supports the following chips: * * Chip #vin #fan #pwm #temp #dts chip ID @@ -20,6 +23,7 @@ #include <linux/i2c.h> #include <linux/mutex.h> #include <linux/hwmon.h> +#include <linux/watchdog.h> #define VENDOR_ID_REG 0x7A /* Any bank */ #define NUVOTON_ID 0x50 @@ -88,18 +92,42 @@ #define FANCTL1_FMR_REG 0x00 /* Bank 3; 1 reg per channel */ #define FANCTL1_OUT_REG 0x10 /* Bank 3; 1 reg per channel */ +#define WDT_LOCK_REG 0xE0 /* W/O Lock Watchdog Register */ +#define WDT_EN_REG 0xE1 /* R/O Watchdog Enable Register */ +#define WDT_STS_REG 0xE2 /* R/O Watchdog Status Register */ +#define WDT_TIMER_REG 0xE3 /* R/W Watchdog Timer Register */ +#define WDT_SOFT_EN 0x55 /* Enable soft watchdog timer */ +#define WDT_SOFT_DIS 0xAA /* Disable soft watchdog timer */ + #define VOLT_MONITOR_MODE 0x0 #define THERMAL_DIODE_MODE 0x1 #define THERMISTOR_MODE 0x3 #define ENABLE_TSI BIT(1) +#define WATCHDOG_TIMEOUT 1 /* 1 minute default timeout */ + +/*The timeout range is 1-255 minutes*/ +#define MIN_TIMEOUT (1 * 60) +#define MAX_TIMEOUT (255 * 60) + +static int timeout; +module_param(timeout, int, 0); +MODULE_PARM_DESC(timeout, "Watchdog timeout in minutes. 1 <= timeout <= 255, default=" + __MODULE_STRING(WATCHDOG_TIMEOUT) "."); + +static bool nowayout = WATCHDOG_NOWAYOUT; +module_param(nowayout, bool, 0); +MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" + __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); + static const unsigned short normal_i2c[] = { 0x2d, 0x2e, I2C_CLIENT_END }; struct nct7904_data { struct i2c_client *client; + struct watchdog_device wdt; struct mutex bank_lock; int bank_sel; u32 fanin_mask; @@ -892,6 +920,95 @@ static const struct hwmon_chip_info nct7904_chip_info = { .info = nct7904_info, }; +/* + * Watchdog Function + */ +static int nct7904_wdt_start(struct watchdog_device *wdt) +{ + struct nct7904_data *data = watchdog_get_drvdata(wdt); + + /* Enable soft watchdog timer */ + return nct7904_write_reg(data, BANK_0, WDT_LOCK_REG, WDT_SOFT_EN); +} + +static int nct7904_wdt_stop(struct watchdog_device *wdt) +{ + struct nct7904_data *data = watchdog_get_drvdata(wdt); + + return nct7904_write_reg(data, BANK_0, WDT_LOCK_REG, WDT_SOFT_DIS); +} + +static int nct7904_wdt_set_timeout(struct watchdog_device *wdt, + unsigned int timeout) +{ + struct nct7904_data *data = watchdog_get_drvdata(wdt); + /* + * The NCT7904 is very special in watchdog function. + * Its minimum unit is minutes. And wdt->timeout needs + * to match the actual timeout selected. So, this needs + * to be: wdt->timeout = timeout / 60 * 60. + * For example, if the user configures a timeout of + * 119 seconds, the actual timeout will be 60 seconds. + * So, wdt->timeout must then be set to 60 seconds. + */ + wdt->timeout = timeout / 60 * 60; + + return nct7904_write_reg(data, BANK_0, WDT_TIMER_REG, + wdt->timeout / 60); +} + +static int nct7904_wdt_ping(struct watchdog_device *wdt) +{ + /* + * Note: + * NCT7904 does not support refreshing WDT_TIMER_REG register when + * the watchdog is active. Please disable watchdog before feeding + * the watchdog and enable it again. 
+ */ + struct nct7904_data *data = watchdog_get_drvdata(wdt); + int ret; + + /* Disable soft watchdog timer */ + ret = nct7904_write_reg(data, BANK_0, WDT_LOCK_REG, WDT_SOFT_DIS); + if (ret < 0) + return ret; + + /* feed watchdog */ + ret = nct7904_write_reg(data, BANK_0, WDT_TIMER_REG, wdt->timeout / 60); + if (ret < 0) + return ret; + + /* Enable soft watchdog timer */ + return nct7904_write_reg(data, BANK_0, WDT_LOCK_REG, WDT_SOFT_EN); +} + +static unsigned int nct7904_wdt_get_timeleft(struct watchdog_device *wdt) +{ + struct nct7904_data *data = watchdog_get_drvdata(wdt); + int ret; + + ret = nct7904_read_reg(data, BANK_0, WDT_TIMER_REG); + if (ret < 0) + return 0; + + return ret * 60; +} + +static const struct watchdog_info nct7904_wdt_info = { + .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | + WDIOF_MAGICCLOSE, + .identity = "nct7904 watchdog", +}; + +static const struct watchdog_ops nct7904_wdt_ops = { + .owner = THIS_MODULE, + .start = nct7904_wdt_start, + .stop = nct7904_wdt_stop, + .ping = nct7904_wdt_ping, + .set_timeout = nct7904_wdt_set_timeout, + .get_timeleft = nct7904_wdt_get_timeleft, +}; + static int nct7904_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -1022,7 +1139,26 @@ static int nct7904_probe(struct i2c_client *client, hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, data, &nct7904_chip_info, NULL); - return PTR_ERR_OR_ZERO(hwmon_dev); + ret = PTR_ERR_OR_ZERO(hwmon_dev); + if (ret) + return ret; + + /* Watchdog initialization */ + data->wdt.ops = &nct7904_wdt_ops; + data->wdt.info = &nct7904_wdt_info; + + data->wdt.timeout = WATCHDOG_TIMEOUT * 60; /* Set default timeout */ + data->wdt.min_timeout = MIN_TIMEOUT; + data->wdt.max_timeout = MAX_TIMEOUT; + data->wdt.parent = &client->dev; + + watchdog_init_timeout(&data->wdt, timeout * 60, &client->dev); + watchdog_set_nowayout(&data->wdt, nowayout); + watchdog_set_drvdata(&data->wdt, data); + + watchdog_stop_on_unregister(&data->wdt); + + return devm_watchdog_register_device(dev, &data->wdt); } static const struct i2c_device_id nct7904_id[] = { diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig index de12a565006d..a337195b1c39 100644 --- a/drivers/hwmon/pmbus/Kconfig +++ b/drivers/hwmon/pmbus/Kconfig @@ -146,6 +146,15 @@ config SENSORS_MAX16064 This driver can also be built as a module. If so, the module will be called max16064. +config SENSORS_MAX16601 + tristate "Maxim MAX16601" + help + If you say yes here you get hardware monitoring support for Maxim + MAX16601. + + This driver can also be built as a module. If so, the module will + be called max16601. 
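As an aside on the nct7904 watchdog support added above: the driver advertises WDIOF_SETTIMEOUT, WDIOF_KEEPALIVEPING and WDIOF_MAGICCLOSE, so it is driven through the standard watchdog character-device interface. A minimal user-space sketch, assuming the device registers as /dev/watchdog0 (the node name depends on the system) and keeping the chip's one-minute granularity in mind:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/watchdog.h>

int main(void)
{
        int timeout = 150;      /* seconds; the chip counts whole minutes, so 150 becomes 120 */
        int fd = open("/dev/watchdog0", O_WRONLY);      /* device node name is an assumption */

        if (fd < 0) {
                perror("open /dev/watchdog0");
                return 1;
        }

        ioctl(fd, WDIOC_SETTIMEOUT, &timeout);  /* the core writes back the effective timeout */
        printf("effective timeout: %d seconds\n", timeout);

        ioctl(fd, WDIOC_KEEPALIVE, 0);          /* ping: the driver disables, reloads, re-enables */

        write(fd, "V", 1);                      /* magic close; ignored when nowayout=1 */
        close(fd);
        return 0;
}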
+ config SENSORS_MAX20730 tristate "Maxim MAX20730, MAX20734, MAX20743" help diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile index 5feb45806123..c4b15db996ad 100644 --- a/drivers/hwmon/pmbus/Makefile +++ b/drivers/hwmon/pmbus/Makefile @@ -17,6 +17,7 @@ obj-$(CONFIG_SENSORS_LM25066) += lm25066.o obj-$(CONFIG_SENSORS_LTC2978) += ltc2978.o obj-$(CONFIG_SENSORS_LTC3815) += ltc3815.o obj-$(CONFIG_SENSORS_MAX16064) += max16064.o +obj-$(CONFIG_SENSORS_MAX16601) += max16601.o obj-$(CONFIG_SENSORS_MAX20730) += max20730.o obj-$(CONFIG_SENSORS_MAX20751) += max20751.o obj-$(CONFIG_SENSORS_MAX31785) += max31785.o diff --git a/drivers/hwmon/pmbus/max16601.c b/drivers/hwmon/pmbus/max16601.c new file mode 100644 index 000000000000..51cdfaf9023c --- /dev/null +++ b/drivers/hwmon/pmbus/max16601.c @@ -0,0 +1,314 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Hardware monitoring driver for Maxim MAX16601 + * + * Implementation notes: + * + * This chip supports two rails, VCORE and VSA. Telemetry information for the + * two rails is reported in two subsequent I2C addresses. The driver + * instantiates a dummy I2C client at the second I2C address to report + * information for the VSA rail in a single instance of the driver. + * Telemetry for the VSA rail is reported to the PMBus core in PMBus page 2. + * + * The chip reports input current using two separate methods. The input current + * reported with the standard READ_IIN command is derived from the output + * current. The first method is reported to the PMBus core with PMBus page 0, + * the second method is reported with PMBus page 1. + * + * The chip supports reading per-phase temperatures and per-phase input/output + * currents for VCORE. Telemetry is reported in vendor specific registers. + * The driver translates the vendor specific register values to PMBus standard + * register values and reports per-phase information in PMBus page 0. + * + * Copyright 2019, 2020 Google LLC.
+ */ + +#include <linux/bits.h> +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> + +#include "pmbus.h" + +#define REG_SETPT_DVID 0xd1 +#define DAC_10MV_MODE BIT(4) +#define REG_IOUT_AVG_PK 0xee +#define REG_IIN_SENSOR 0xf1 +#define REG_TOTAL_INPUT_POWER 0xf2 +#define REG_PHASE_ID 0xf3 +#define CORE_RAIL_INDICATOR BIT(7) +#define REG_PHASE_REPORTING 0xf4 + +struct max16601_data { + struct pmbus_driver_info info; + struct i2c_client *vsa; + int iout_avg_pkg; +}; + +#define to_max16601_data(x) container_of(x, struct max16601_data, info) + +static int max16601_read_byte(struct i2c_client *client, int page, int reg) +{ + const struct pmbus_driver_info *info = pmbus_get_driver_info(client); + struct max16601_data *data = to_max16601_data(info); + + if (page > 0) { + if (page == 2) /* VSA */ + return i2c_smbus_read_byte_data(data->vsa, reg); + return -EOPNOTSUPP; + } + return -ENODATA; +} + +static int max16601_read_word(struct i2c_client *client, int page, int phase, + int reg) +{ + const struct pmbus_driver_info *info = pmbus_get_driver_info(client); + struct max16601_data *data = to_max16601_data(info); + u8 buf[I2C_SMBUS_BLOCK_MAX + 1]; + int ret; + + switch (page) { + case 0: /* VCORE */ + if (phase == 0xff) + return -ENODATA; + switch (reg) { + case PMBUS_READ_IIN: + case PMBUS_READ_IOUT: + case PMBUS_READ_TEMPERATURE_1: + ret = i2c_smbus_write_byte_data(client, REG_PHASE_ID, + phase); + if (ret) + return ret; + ret = i2c_smbus_read_block_data(client, + REG_PHASE_REPORTING, + buf); + if (ret < 0) + return ret; + if (ret < 6) + return -EIO; + switch (reg) { + case PMBUS_READ_TEMPERATURE_1: + return buf[1] << 8 | buf[0]; + case PMBUS_READ_IOUT: + return buf[3] << 8 | buf[2]; + case PMBUS_READ_IIN: + return buf[5] << 8 | buf[4]; + default: + break; + } + } + return -EOPNOTSUPP; + case 1: /* VCORE, read IIN/PIN from sensor element */ + switch (reg) { + case PMBUS_READ_IIN: + return i2c_smbus_read_word_data(client, REG_IIN_SENSOR); + case PMBUS_READ_PIN: + return i2c_smbus_read_word_data(client, + REG_TOTAL_INPUT_POWER); + default: + break; + } + return -EOPNOTSUPP; + case 2: /* VSA */ + switch (reg) { + case PMBUS_VIRT_READ_IOUT_MAX: + ret = i2c_smbus_read_word_data(data->vsa, + REG_IOUT_AVG_PK); + if (ret < 0) + return ret; + if (sign_extend32(ret, 10) > + sign_extend32(data->iout_avg_pkg, 10)) + data->iout_avg_pkg = ret; + return data->iout_avg_pkg; + case PMBUS_VIRT_RESET_IOUT_HISTORY: + return 0; + case PMBUS_IOUT_OC_FAULT_LIMIT: + case PMBUS_IOUT_OC_WARN_LIMIT: + case PMBUS_OT_FAULT_LIMIT: + case PMBUS_OT_WARN_LIMIT: + case PMBUS_READ_IIN: + case PMBUS_READ_IOUT: + case PMBUS_READ_TEMPERATURE_1: + case PMBUS_STATUS_WORD: + return i2c_smbus_read_word_data(data->vsa, reg); + default: + return -EOPNOTSUPP; + } + default: + return -EOPNOTSUPP; + } +} + +static int max16601_write_byte(struct i2c_client *client, int page, u8 reg) +{ + const struct pmbus_driver_info *info = pmbus_get_driver_info(client); + struct max16601_data *data = to_max16601_data(info); + + if (page == 2) { + if (reg == PMBUS_CLEAR_FAULTS) + return i2c_smbus_write_byte(data->vsa, reg); + return -EOPNOTSUPP; + } + return -ENODATA; +} + +static int max16601_write_word(struct i2c_client *client, int page, int reg, + u16 value) +{ + const struct pmbus_driver_info *info = pmbus_get_driver_info(client); + struct max16601_data *data = to_max16601_data(info); + + switch (page) { + case 0: /* VCORE */ + return -ENODATA; + case 1: /* VCORE IIN/PIN from sensor element */ + default: 
+ return -EOPNOTSUPP; + case 2: /* VSA */ + switch (reg) { + case PMBUS_VIRT_RESET_IOUT_HISTORY: + data->iout_avg_pkg = 0xfc00; + return 0; + case PMBUS_IOUT_OC_FAULT_LIMIT: + case PMBUS_IOUT_OC_WARN_LIMIT: + case PMBUS_OT_FAULT_LIMIT: + case PMBUS_OT_WARN_LIMIT: + return i2c_smbus_write_word_data(data->vsa, reg, value); + default: + return -EOPNOTSUPP; + } + } +} + +static int max16601_identify(struct i2c_client *client, + struct pmbus_driver_info *info) +{ + int reg; + + reg = i2c_smbus_read_byte_data(client, REG_SETPT_DVID); + if (reg < 0) + return reg; + if (reg & DAC_10MV_MODE) + info->vrm_version[0] = vr13; + else + info->vrm_version[0] = vr12; + + return 0; +} + +static struct pmbus_driver_info max16601_info = { + .pages = 3, + .format[PSC_VOLTAGE_IN] = linear, + .format[PSC_VOLTAGE_OUT] = vid, + .format[PSC_CURRENT_IN] = linear, + .format[PSC_CURRENT_OUT] = linear, + .format[PSC_TEMPERATURE] = linear, + .format[PSC_POWER] = linear, + .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_IIN | PMBUS_HAVE_PIN | + PMBUS_HAVE_STATUS_INPUT | + PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | + PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | + PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP | + PMBUS_HAVE_POUT | PMBUS_PAGE_VIRTUAL | PMBUS_PHASE_VIRTUAL, + .func[1] = PMBUS_HAVE_IIN | PMBUS_HAVE_PIN | PMBUS_PAGE_VIRTUAL, + .func[2] = PMBUS_HAVE_IIN | PMBUS_HAVE_STATUS_INPUT | + PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | + PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP | PMBUS_PAGE_VIRTUAL, + .phases[0] = 8, + .pfunc[0] = PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_TEMP, + .pfunc[1] = PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT, + .pfunc[2] = PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_TEMP, + .pfunc[3] = PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT, + .pfunc[4] = PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_TEMP, + .pfunc[5] = PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT, + .pfunc[6] = PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_TEMP, + .pfunc[7] = PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT, + .identify = max16601_identify, + .read_byte_data = max16601_read_byte, + .read_word_data = max16601_read_word, + .write_byte = max16601_write_byte, + .write_word_data = max16601_write_word, +}; + +static void max16601_remove(void *_data) +{ + struct max16601_data *data = _data; + + i2c_unregister_device(data->vsa); +} + +static int max16601_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + u8 buf[I2C_SMBUS_BLOCK_MAX + 1]; + struct max16601_data *data; + int ret; + + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_READ_BYTE_DATA | + I2C_FUNC_SMBUS_READ_BLOCK_DATA)) + return -ENODEV; + + ret = i2c_smbus_read_block_data(client, PMBUS_IC_DEVICE_ID, buf); + if (ret < 0) + return -ENODEV; + + /* PMBUS_IC_DEVICE_ID is expected to return "MAX16601y.xx" */ + if (ret < 11 || strncmp(buf, "MAX16601", 8)) { + buf[ret] = '\0'; + dev_err(dev, "Unsupported chip '%s'\n", buf); + return -ENODEV; + } + + ret = i2c_smbus_read_byte_data(client, REG_PHASE_ID); + if (ret < 0) + return ret; + if (!(ret & CORE_RAIL_INDICATOR)) { + dev_err(dev, + "Driver must be instantiated on CORE rail I2C address\n"); + return -ENODEV; + } + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->iout_avg_pkg = 0xfc00; + data->vsa = i2c_new_dummy_device(client->adapter, client->addr + 1); + if (IS_ERR(data->vsa)) { + dev_err(dev, "Failed to register VSA client\n"); + return PTR_ERR(data->vsa); + } + ret = devm_add_action_or_reset(dev, max16601_remove, data); + if (ret) + return ret; + + data->info = 
max16601_info; + + return pmbus_do_probe(client, id, &data->info); +} + +static const struct i2c_device_id max16601_id[] = { + {"max16601", 0}, + {} +}; + +MODULE_DEVICE_TABLE(i2c, max16601_id); + +static struct i2c_driver max16601_driver = { + .driver = { + .name = "max16601", + }, + .probe = max16601_probe, + .remove = pmbus_do_remove, + .id_table = max16601_id, +}; + +module_i2c_driver(max16601_driver); + +MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>"); +MODULE_DESCRIPTION("PMBus driver for Maxim MAX16601"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c index 8d321bf7d15b..a420877ba533 100644 --- a/drivers/hwmon/pmbus/pmbus_core.c +++ b/drivers/hwmon/pmbus/pmbus_core.c @@ -109,8 +109,8 @@ struct pmbus_data { bool has_status_word; /* device uses STATUS_WORD register */ int (*read_status)(struct i2c_client *client, int page); - u8 currpage; - u8 currphase; /* current phase, 0xff for all */ + s16 currpage; /* current page, -1 for unknown/unset */ + s16 currphase; /* current phase, 0xff for all, -1 for unknown/unset */ }; struct pmbus_debugfs_entry { @@ -2529,8 +2529,8 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id, if (pdata) data->flags = pdata->flags; data->info = info; - data->currpage = 0xff; - data->currphase = 0xfe; + data->currpage = -1; + data->currphase = -1; ret = pmbus_init_common(client, data, info); if (ret < 0) diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c index bf8e149d3191..e0a5e897e4b1 100644 --- a/drivers/infiniband/core/rdma_core.c +++ b/drivers/infiniband/core/rdma_core.c @@ -153,9 +153,9 @@ static int uverbs_destroy_uobject(struct ib_uobject *uobj, uobj->context = NULL; /* - * For DESTROY the usecnt is held write locked, the caller is expected - * to put it unlock and put the object when done with it. Only DESTROY - * can remove the IDR handle. + * For DESTROY the usecnt is not changed, the caller is expected to + * manage it via uobj_put_destroy(). Only DESTROY can remove the IDR + * handle. */ if (reason != RDMA_REMOVE_DESTROY) atomic_set(&uobj->usecnt, 0); @@ -187,7 +187,7 @@ static int uverbs_destroy_uobject(struct ib_uobject *uobj, /* * This calls uverbs_destroy_uobject() using the RDMA_REMOVE_DESTROY * sequence. It should only be used from command callbacks. On success the - * caller must pair this with rdma_lookup_put_uobject(LOOKUP_WRITE). This + * caller must pair this with uobj_put_destroy(). This * version requires the caller to have already obtained an * LOOKUP_DESTROY uobject kref. */ @@ -198,6 +198,13 @@ int uobj_destroy(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs) down_read(&ufile->hw_destroy_rwsem); + /* + * Once the uobject is destroyed by RDMA_REMOVE_DESTROY then it is left + * write locked as the callers put it back with UVERBS_LOOKUP_DESTROY. + * This is because any other concurrent thread can still see the object + * in the xarray due to RCU. Leaving it locked ensures nothing else will + * touch it. + */ ret = uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE); if (ret) goto out_unlock; @@ -216,7 +223,7 @@ out_unlock: /* * uobj_get_destroy destroys the HW object and returns a handle to the uobj * with a NULL object pointer. The caller must pair this with - * uverbs_put_destroy. + * uobj_put_destroy(). 
*/ struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj, u32 id, struct uverbs_attr_bundle *attrs) @@ -250,8 +257,7 @@ int __uobj_perform_destroy(const struct uverbs_api_object *obj, u32 id, uobj = __uobj_get_destroy(obj, id, attrs); if (IS_ERR(uobj)) return PTR_ERR(uobj); - - rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE); + uobj_put_destroy(uobj); return 0; } diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index a401931189b7..44683073be0c 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -1439,6 +1439,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, if (is_odp_mr(mr)) { to_ib_umem_odp(mr->umem)->private = mr; + init_waitqueue_head(&mr->q_deferred_work); atomic_set(&mr->num_deferred_work, 0); err = xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key), &mr->mmkey, diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c index 568b21eb6ea1..021df0654ba7 100644 --- a/drivers/infiniband/hw/qib/qib_sysfs.c +++ b/drivers/infiniband/hw/qib/qib_sysfs.c @@ -760,7 +760,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num, qib_dev_err(dd, "Skipping linkcontrol sysfs info, (err %d) port %u\n", ret, port_num); - goto bail; + goto bail_link; } kobject_uevent(&ppd->pport_kobj, KOBJ_ADD); @@ -770,7 +770,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num, qib_dev_err(dd, "Skipping sl2vl sysfs info, (err %d) port %u\n", ret, port_num); - goto bail_link; + goto bail_sl; } kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD); @@ -780,7 +780,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num, qib_dev_err(dd, "Skipping diag_counters sysfs info, (err %d) port %u\n", ret, port_num); - goto bail_sl; + goto bail_diagc; } kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD); @@ -793,7 +793,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num, qib_dev_err(dd, "Skipping Congestion Control sysfs info, (err %d) port %u\n", ret, port_num); - goto bail_diagc; + goto bail_cc; } kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD); @@ -854,6 +854,7 @@ void qib_verbs_unregister_sysfs(struct qib_devdata *dd) &cc_table_bin_attr); kobject_put(&ppd->pport_cc_kobj); } + kobject_put(&ppd->diagc_kobj); kobject_put(&ppd->sl2vl_kobj); kobject_put(&ppd->pport_kobj); } diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c index e580ae9cc55a..780fd2dfc07e 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c @@ -829,7 +829,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev, !(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { dev_err(&pdev->dev, "PCI BAR region not MMIO\n"); ret = -ENOMEM; - goto err_free_device; + goto err_disable_pdev; } ret = pci_request_regions(pdev, DRV_NAME); diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index e188a95984b5..9a3379c49541 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -377,8 +377,12 @@ struct ipoib_dev_priv { struct ipoib_rx_buf *rx_ring; struct ipoib_tx_buf *tx_ring; + /* cyclic ring variables for managing tx_ring, for UD only */ unsigned int tx_head; unsigned int tx_tail; + /* cyclic ring variables for counting overall outstanding send WRs */ + unsigned int global_tx_head; + unsigned int global_tx_tail; struct ib_sge tx_sge[MAX_SKB_FRAGS + 1]; struct ib_ud_wr tx_wr; struct ib_wc 
send_wc[MAX_SEND_CQE]; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index c59e00a0881f..9bf0fa30df28 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -756,7 +756,8 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_ return; } - if ((priv->tx_head - priv->tx_tail) == ipoib_sendq_size - 1) { + if ((priv->global_tx_head - priv->global_tx_tail) == + ipoib_sendq_size - 1) { ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n", tx->qp->qp_num); netif_stop_queue(dev); @@ -786,7 +787,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_ } else { netif_trans_update(dev); ++tx->tx_head; - ++priv->tx_head; + ++priv->global_tx_head; } } @@ -820,10 +821,11 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) netif_tx_lock(dev); ++tx->tx_tail; - ++priv->tx_tail; + ++priv->global_tx_tail; if (unlikely(netif_queue_stopped(dev) && - (priv->tx_head - priv->tx_tail) <= ipoib_sendq_size >> 1 && + ((priv->global_tx_head - priv->global_tx_tail) <= + ipoib_sendq_size >> 1) && test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))) netif_wake_queue(dev); @@ -1232,8 +1234,9 @@ timeout: dev_kfree_skb_any(tx_req->skb); netif_tx_lock_bh(p->dev); ++p->tx_tail; - ++priv->tx_tail; - if (unlikely(priv->tx_head - priv->tx_tail == ipoib_sendq_size >> 1) && + ++priv->global_tx_tail; + if (unlikely((priv->global_tx_head - priv->global_tx_tail) <= + ipoib_sendq_size >> 1) && netif_queue_stopped(p->dev) && test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) netif_wake_queue(p->dev); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index c332b4761816..da3c5315bbb5 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -407,9 +407,11 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) dev_kfree_skb_any(tx_req->skb); ++priv->tx_tail; + ++priv->global_tx_tail; if (unlikely(netif_queue_stopped(dev) && - ((priv->tx_head - priv->tx_tail) <= ipoib_sendq_size >> 1) && + ((priv->global_tx_head - priv->global_tx_tail) <= + ipoib_sendq_size >> 1) && test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))) netif_wake_queue(dev); @@ -634,7 +636,8 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb, else priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM; /* increase the tx_head after send success, but use it for queue state */ - if (priv->tx_head - priv->tx_tail == ipoib_sendq_size - 1) { + if ((priv->global_tx_head - priv->global_tx_tail) == + ipoib_sendq_size - 1) { ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n"); netif_stop_queue(dev); } @@ -662,6 +665,7 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb, rc = priv->tx_head; ++priv->tx_head; + ++priv->global_tx_head; } return rc; } @@ -807,6 +811,7 @@ int ipoib_ib_dev_stop_default(struct net_device *dev) ipoib_dma_unmap_tx(priv, tx_req); dev_kfree_skb_any(tx_req->skb); ++priv->tx_tail; + ++priv->global_tx_tail; } for (i = 0; i < ipoib_recvq_size; ++i) { diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 81b8227214f1..ceec24d45185 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -1184,9 +1184,11 @@ static void ipoib_timeout(struct net_device *dev, unsigned int txqueue) ipoib_warn(priv, "transmit timeout: latency %d msecs\n", jiffies_to_msecs(jiffies - 
dev_trans_start(dev))); - ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n", - netif_queue_stopped(dev), - priv->tx_head, priv->tx_tail); + ipoib_warn(priv, + "queue stopped %d, tx_head %u, tx_tail %u, global_tx_head %u, global_tx_tail %u\n", + netif_queue_stopped(dev), priv->tx_head, priv->tx_tail, + priv->global_tx_head, priv->global_tx_tail); + /* XXX reset QP, etc. */ } @@ -1701,7 +1703,7 @@ static int ipoib_dev_init_default(struct net_device *dev) goto out_rx_ring_cleanup; } - /* priv->tx_head, tx_tail & tx_outstanding are already 0 */ + /* priv->tx_head, tx_tail and global_tx_tail/head are already 0 */ if (ipoib_transport_dev_init(dev, priv->ca)) { pr_warn("%s: ipoib_transport_dev_init failed\n", diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index cb6e3a5f509c..0d57e51b8ba1 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c @@ -326,20 +326,6 @@ static int evdev_fasync(int fd, struct file *file, int on) return fasync_helper(fd, file, on, &client->fasync); } -static int evdev_flush(struct file *file, fl_owner_t id) -{ - struct evdev_client *client = file->private_data; - struct evdev *evdev = client->evdev; - - mutex_lock(&evdev->mutex); - - if (evdev->exist && !client->revoked) - input_flush_device(&evdev->handle, file); - - mutex_unlock(&evdev->mutex); - return 0; -} - static void evdev_free(struct device *dev) { struct evdev *evdev = container_of(dev, struct evdev, dev); @@ -453,6 +439,10 @@ static int evdev_release(struct inode *inode, struct file *file) unsigned int i; mutex_lock(&evdev->mutex); + + if (evdev->exist && !client->revoked) + input_flush_device(&evdev->handle, file); + evdev_ungrab(evdev, client); mutex_unlock(&evdev->mutex); @@ -1310,7 +1300,6 @@ static const struct file_operations evdev_fops = { .compat_ioctl = evdev_ioctl_compat, #endif .fasync = evdev_fasync, - .flush = evdev_flush, .llseek = no_llseek, }; diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index 6b40a1c68f9f..c77cdb3b62b5 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -459,6 +459,16 @@ static const u8 xboxone_fw2015_init[] = { }; /* + * This packet is required for Xbox One S (0x045e:0x02ea) + * and Xbox One Elite Series 2 (0x045e:0x0b00) pads to + * initialize the controller that was previously used in + * Bluetooth mode. + */ +static const u8 xboxone_s_init[] = { + 0x05, 0x20, 0x00, 0x0f, 0x06 +}; + +/* * This packet is required for the Titanfall 2 Xbox One pads * (0x0e6f:0x0165) to finish initialization and for Hori pads * (0x0f0d:0x0067) to make the analog sticks work. 
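The ipoib hunks a little above split ring accounting into per-connection counters (tx_head/tx_tail) and new global counters (global_tx_head/global_tx_tail) that gate the netdev queue. The stop and wake conditions reduce to wrap-safe unsigned arithmetic; the following is only a sketch of that arithmetic with hypothetical helper names, not driver code:

#include <linux/types.h>

/*
 * head and tail are free-running unsigned counters, so head - tail is
 * wrap-safe and equals the number of outstanding send WRs.
 */
static inline bool tx_ring_full(unsigned int head, unsigned int tail,
                                unsigned int sendq_size)
{
        return (head - tail) == sendq_size - 1;         /* one slot left: stop the queue */
}

static inline bool tx_ring_drained(unsigned int head, unsigned int tail,
                                   unsigned int sendq_size)
{
        return (head - tail) <= sendq_size >> 1;        /* half drained: safe to wake the queue */
}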
@@ -516,6 +526,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = { XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init), XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init), XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init), + XBOXONE_INIT_PKT(0x045e, 0x02ea, xboxone_s_init), + XBOXONE_INIT_PKT(0x045e, 0x0b00, xboxone_s_init), XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init1), XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init2), XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init), diff --git a/drivers/input/keyboard/applespi.c b/drivers/input/keyboard/applespi.c index d38398526965..14362ebab9a9 100644 --- a/drivers/input/keyboard/applespi.c +++ b/drivers/input/keyboard/applespi.c @@ -186,7 +186,7 @@ struct touchpad_protocol { u8 number_of_fingers; u8 clicked2; u8 unknown3[16]; - struct tp_finger fingers[0]; + struct tp_finger fingers[]; }; /** diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c index 2b71c5a51f90..fc1793ca2f17 100644 --- a/drivers/input/keyboard/cros_ec_keyb.c +++ b/drivers/input/keyboard/cros_ec_keyb.c @@ -347,18 +347,14 @@ static int cros_ec_keyb_info(struct cros_ec_device *ec_dev, params->info_type = info_type; params->event_type = event_type; - ret = cros_ec_cmd_xfer(ec_dev, msg); - if (ret < 0) { - dev_warn(ec_dev->dev, "Transfer error %d/%d: %d\n", - (int)info_type, (int)event_type, ret); - } else if (msg->result == EC_RES_INVALID_VERSION) { + ret = cros_ec_cmd_xfer_status(ec_dev, msg); + if (ret == -ENOTSUPP) { /* With older ECs we just return 0 for everything */ memset(result, 0, result_size); ret = 0; - } else if (msg->result != EC_RES_SUCCESS) { - dev_warn(ec_dev->dev, "Error getting info %d/%d: %d\n", - (int)info_type, (int)event_type, msg->result); - ret = -EPROTO; + } else if (ret < 0) { + dev_warn(ec_dev->dev, "Transfer error %d/%d: %d\n", + (int)info_type, (int)event_type, ret); } else if (ret != result_size) { dev_warn(ec_dev->dev, "Wrong size %d/%d: %d != %zu\n", (int)info_type, (int)event_type, diff --git a/drivers/input/keyboard/dlink-dir685-touchkeys.c b/drivers/input/keyboard/dlink-dir685-touchkeys.c index b0ead7199c40..a69dcc3bd30c 100644 --- a/drivers/input/keyboard/dlink-dir685-touchkeys.c +++ b/drivers/input/keyboard/dlink-dir685-touchkeys.c @@ -143,7 +143,7 @@ MODULE_DEVICE_TABLE(of, dir685_tk_of_match); static struct i2c_driver dir685_tk_i2c_driver = { .driver = { - .name = "dlin-dir685-touchkeys", + .name = "dlink-dir685-touchkeys", .of_match_table = of_match_ptr(dir685_tk_of_match), }, .probe = dir685_tk_probe, diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c index c8f87df93a50..9c6386b2af33 100644 --- a/drivers/input/misc/axp20x-pek.c +++ b/drivers/input/misc/axp20x-pek.c @@ -205,8 +205,11 @@ ATTRIBUTE_GROUPS(axp20x); static irqreturn_t axp20x_pek_irq(int irq, void *pwr) { - struct input_dev *idev = pwr; - struct axp20x_pek *axp20x_pek = input_get_drvdata(idev); + struct axp20x_pek *axp20x_pek = pwr; + struct input_dev *idev = axp20x_pek->input; + + if (!idev) + return IRQ_HANDLED; /* * The power-button is connected to ground so a falling edge (dbf) @@ -225,22 +228,9 @@ static irqreturn_t axp20x_pek_irq(int irq, void *pwr) static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek, struct platform_device *pdev) { - struct axp20x_dev *axp20x = axp20x_pek->axp20x; struct input_dev *idev; int error; - axp20x_pek->irq_dbr = platform_get_irq_byname(pdev, "PEK_DBR"); - if (axp20x_pek->irq_dbr < 0) - return axp20x_pek->irq_dbr; - 
axp20x_pek->irq_dbr = regmap_irq_get_virq(axp20x->regmap_irqc, - axp20x_pek->irq_dbr); - - axp20x_pek->irq_dbf = platform_get_irq_byname(pdev, "PEK_DBF"); - if (axp20x_pek->irq_dbf < 0) - return axp20x_pek->irq_dbf; - axp20x_pek->irq_dbf = regmap_irq_get_virq(axp20x->regmap_irqc, - axp20x_pek->irq_dbf); - axp20x_pek->input = devm_input_allocate_device(&pdev->dev); if (!axp20x_pek->input) return -ENOMEM; @@ -255,24 +245,6 @@ static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek, input_set_drvdata(idev, axp20x_pek); - error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbr, - axp20x_pek_irq, 0, - "axp20x-pek-dbr", idev); - if (error < 0) { - dev_err(&pdev->dev, "Failed to request dbr IRQ#%d: %d\n", - axp20x_pek->irq_dbr, error); - return error; - } - - error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbf, - axp20x_pek_irq, 0, - "axp20x-pek-dbf", idev); - if (error < 0) { - dev_err(&pdev->dev, "Failed to request dbf IRQ#%d: %d\n", - axp20x_pek->irq_dbf, error); - return error; - } - error = input_register_device(idev); if (error) { dev_err(&pdev->dev, "Can't register input device: %d\n", @@ -280,8 +252,6 @@ static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek, return error; } - device_init_wakeup(&pdev->dev, true); - return 0; } @@ -339,6 +309,18 @@ static int axp20x_pek_probe(struct platform_device *pdev) axp20x_pek->axp20x = dev_get_drvdata(pdev->dev.parent); + axp20x_pek->irq_dbr = platform_get_irq_byname(pdev, "PEK_DBR"); + if (axp20x_pek->irq_dbr < 0) + return axp20x_pek->irq_dbr; + axp20x_pek->irq_dbr = regmap_irq_get_virq( + axp20x_pek->axp20x->regmap_irqc, axp20x_pek->irq_dbr); + + axp20x_pek->irq_dbf = platform_get_irq_byname(pdev, "PEK_DBF"); + if (axp20x_pek->irq_dbf < 0) + return axp20x_pek->irq_dbf; + axp20x_pek->irq_dbf = regmap_irq_get_virq( + axp20x_pek->axp20x->regmap_irqc, axp20x_pek->irq_dbf); + if (axp20x_pek_should_register_input(axp20x_pek, pdev)) { error = axp20x_pek_probe_input_device(axp20x_pek, pdev); if (error) @@ -347,6 +329,26 @@ static int axp20x_pek_probe(struct platform_device *pdev) axp20x_pek->info = (struct axp20x_info *)match->driver_data; + error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbr, + axp20x_pek_irq, 0, + "axp20x-pek-dbr", axp20x_pek); + if (error < 0) { + dev_err(&pdev->dev, "Failed to request dbr IRQ#%d: %d\n", + axp20x_pek->irq_dbr, error); + return error; + } + + error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbf, + axp20x_pek_irq, 0, + "axp20x-pek-dbf", axp20x_pek); + if (error < 0) { + dev_err(&pdev->dev, "Failed to request dbf IRQ#%d: %d\n", + axp20x_pek->irq_dbf, error); + return error; + } + + device_init_wakeup(&pdev->dev, true); + platform_set_drvdata(pdev, axp20x_pek); return 0; diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 4d2036209b45..758dae8d6500 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -170,6 +170,7 @@ static const char * const smbus_pnp_ids[] = { "LEN005b", /* P50 */ "LEN005e", /* T560 */ "LEN006c", /* T470s */ + "LEN007a", /* T470s */ "LEN0071", /* T480 */ "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */ "LEN0073", /* X1 Carbon G5 (Elantech) */ diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c index 190b9974526b..258d5fe3d395 100644 --- a/drivers/input/rmi4/rmi_driver.c +++ b/drivers/input/rmi4/rmi_driver.c @@ -205,7 +205,7 @@ static irqreturn_t rmi_irq_fn(int irq, void *dev_id) if (count) { 
kfree(attn_data.data); - attn_data.data = NULL; + drvdata->attn_data.data = NULL; } if (!kfifo_is_empty(&drvdata->attn_fifo)) @@ -1210,7 +1210,8 @@ static int rmi_driver_probe(struct device *dev) if (data->input) { rmi_driver_set_input_name(rmi_dev, data->input); if (!rmi_dev->xport->input) { - if (input_register_device(data->input)) { + retval = input_register_device(data->input); + if (retval) { dev_err(dev, "%s: Failed to register input device.\n", __func__); goto err_destroy_functions; diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 08e919dbeb5d..7e048b557462 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -662,6 +662,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"), }, }, + { + /* Lenovo ThinkPad Twist S230u */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "33474HU"), + }, + }, { } }; diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c index 14c577c16b16..2289f9638116 100644 --- a/drivers/input/touchscreen/elants_i2c.c +++ b/drivers/input/touchscreen/elants_i2c.c @@ -19,6 +19,7 @@ */ +#include <linux/bits.h> #include <linux/module.h> #include <linux/input.h> #include <linux/interrupt.h> @@ -73,6 +74,7 @@ #define FW_POS_STATE 1 #define FW_POS_TOTAL 2 #define FW_POS_XY 3 +#define FW_POS_TOOL_TYPE 33 #define FW_POS_CHECKSUM 34 #define FW_POS_WIDTH 35 #define FW_POS_PRESSURE 45 @@ -842,6 +844,7 @@ static void elants_i2c_mt_event(struct elants_data *ts, u8 *buf) { struct input_dev *input = ts->input; unsigned int n_fingers; + unsigned int tool_type; u16 finger_state; int i; @@ -852,6 +855,10 @@ static void elants_i2c_mt_event(struct elants_data *ts, u8 *buf) dev_dbg(&ts->client->dev, "n_fingers: %u, state: %04x\n", n_fingers, finger_state); + /* Note: all fingers have the same tool type */ + tool_type = buf[FW_POS_TOOL_TYPE] & BIT(0) ? 
+ MT_TOOL_FINGER : MT_TOOL_PALM; + for (i = 0; i < MAX_CONTACT_NUM && n_fingers; i++) { if (finger_state & 1) { unsigned int x, y, p, w; @@ -867,7 +874,7 @@ static void elants_i2c_mt_event(struct elants_data *ts, u8 *buf) i, x, y, p, w); input_mt_slot(input, i); - input_mt_report_slot_state(input, MT_TOOL_FINGER, true); + input_mt_report_slot_state(input, tool_type, true); input_event(input, EV_ABS, ABS_MT_POSITION_X, x); input_event(input, EV_ABS, ABS_MT_POSITION_Y, y); input_event(input, EV_ABS, ABS_MT_PRESSURE, p); @@ -1307,6 +1314,8 @@ static int elants_i2c_probe(struct i2c_client *client, input_set_abs_params(ts->input, ABS_MT_POSITION_Y, 0, ts->y_max, 0, 0); input_set_abs_params(ts->input, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0); input_set_abs_params(ts->input, ABS_MT_PRESSURE, 0, 255, 0, 0); + input_set_abs_params(ts->input, ABS_MT_TOOL_TYPE, + 0, MT_TOOL_PALM, 0, 0); input_abs_set_res(ts->input, ABS_MT_POSITION_X, ts->x_res); input_abs_set_res(ts->input, ABS_MT_POSITION_Y, ts->y_res); input_abs_set_res(ts->input, ABS_MT_TOUCH_MAJOR, 1); diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c index 69c6d559eeb0..2ef1adaed9af 100644 --- a/drivers/input/touchscreen/mms114.c +++ b/drivers/input/touchscreen/mms114.c @@ -91,15 +91,15 @@ static int __mms114_read_reg(struct mms114_data *data, unsigned int reg, if (reg <= MMS114_MODE_CONTROL && reg + len > MMS114_MODE_CONTROL) BUG(); - /* Write register: use repeated start */ + /* Write register */ xfer[0].addr = client->addr; - xfer[0].flags = I2C_M_TEN | I2C_M_NOSTART; + xfer[0].flags = client->flags & I2C_M_TEN; xfer[0].len = 1; xfer[0].buf = &buf; /* Read data */ xfer[1].addr = client->addr; - xfer[1].flags = I2C_M_RD; + xfer[1].flags = (client->flags & I2C_M_TEN) | I2C_M_RD; xfer[1].len = len; xfer[1].buf = val; @@ -428,10 +428,8 @@ static int mms114_probe(struct i2c_client *client, const void *match_data; int error; - if (!i2c_check_functionality(client->adapter, - I2C_FUNC_PROTOCOL_MANGLING)) { - dev_err(&client->dev, - "Need i2c bus that supports protocol mangling\n"); + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + dev_err(&client->dev, "Not supported I2C adapter\n"); return -ENODEV; } diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c index 16d70201de4a..397cb1d3f481 100644 --- a/drivers/input/touchscreen/usbtouchscreen.c +++ b/drivers/input/touchscreen/usbtouchscreen.c @@ -182,6 +182,7 @@ static const struct usb_device_id usbtouch_devices[] = { #endif #ifdef CONFIG_TOUCHSCREEN_USB_IRTOUCH + {USB_DEVICE(0x255e, 0x0001), .driver_info = DEVTYPE_IRTOUCH}, {USB_DEVICE(0x595a, 0x0001), .driver_info = DEVTYPE_IRTOUCH}, {USB_DEVICE(0x6615, 0x0001), .driver_info = DEVTYPE_IRTOUCH}, {USB_DEVICE(0x6615, 0x0012), .driver_info = DEVTYPE_IRTOUCH_HIRES}, diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 1faa08c8bbb4..03d6a26687bc 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -510,7 +510,7 @@ struct iommu_group *iommu_group_alloc(void) NULL, "%d", group->id); if (ret) { ida_simple_remove(&iommu_group_ida, group->id); - kfree(group); + kobject_put(&group->kobj); return ERR_PTR(ret); } diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 0a59249198d3..05448e78e5b8 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -407,6 +407,21 @@ config MFD_EXYNOS_LPASS Select this option to enable support for Samsung Exynos Low Power Audio Subsystem. 
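The iommu_group_alloc() change above (and the bond_sysfs_slave fix later in this series) applies the same rule: once kobject_init_and_add() has run, the object holds a reference even on failure, so it must be released with kobject_put() so the ktype's ->release() frees the memory, never with a direct kfree(). A generic sketch of the pattern, using hypothetical foo/foo_ktype names:

#include <linux/err.h>
#include <linux/kobject.h>
#include <linux/slab.h>

struct foo {
        struct kobject kobj;
        int id;
};

static void foo_release(struct kobject *kobj)
{
        kfree(container_of(kobj, struct foo, kobj));
}

static struct kobj_type foo_ktype = {
        .release = foo_release,
};

static struct foo *foo_create(struct kobject *parent, int id)
{
        struct foo *foo;
        int ret;

        foo = kzalloc(sizeof(*foo), GFP_KERNEL);
        if (!foo)
                return ERR_PTR(-ENOMEM);
        foo->id = id;

        ret = kobject_init_and_add(&foo->kobj, &foo_ktype, parent, "foo%d", id);
        if (ret) {
                /* Drop the reference; foo_release() does the kfree(). */
                kobject_put(&foo->kobj);
                return ERR_PTR(ret);
        }
        return foo;
}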
+config MFD_GATEWORKS_GSC + tristate "Gateworks System Controller" + depends on (I2C && OF) + select MFD_CORE + select REGMAP_I2C + select REGMAP_IRQ + help + Enable support for the Gateworks System Controller (GSC) found + on Gateworks Single Board Computers supporting system functions + such as push-button monitor, multiple ADC's for voltage and + temperature monitoring, fan controller and watchdog monitor. + This driver provides common support for accessing the device. + Additional drivers must be enabled in order to use the + functionality of the device. + config MFD_MC13XXX tristate depends on (SPI_MASTER || I2C) diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index f935d10cbf0f..ed433aed7010 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -15,6 +15,7 @@ obj-$(CONFIG_MFD_BCM590XX) += bcm590xx.o obj-$(CONFIG_MFD_BD9571MWV) += bd9571mwv.o obj-$(CONFIG_MFD_CROS_EC_DEV) += cros_ec_dev.o obj-$(CONFIG_MFD_EXYNOS_LPASS) += exynos-lpass.o +obj-$(CONFIG_MFD_GATEWORKS_GSC) += gateworks-gsc.o obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o obj-$(CONFIG_HTC_I2CPLD) += htc-i2cpld.o diff --git a/drivers/mfd/gateworks-gsc.c b/drivers/mfd/gateworks-gsc.c new file mode 100644 index 000000000000..576da62fbb0c --- /dev/null +++ b/drivers/mfd/gateworks-gsc.c @@ -0,0 +1,277 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * The Gateworks System Controller (GSC) is a multi-function + * device designed for use in Gateworks Single Board Computers. + * The control interface is I2C, with an interrupt. The device supports + * system functions such as push-button monitoring, multiple ADC's for + * voltage and temperature monitoring, fan controller and watchdog monitor. + * + * Copyright (C) 2020 Gateworks Corporation + */ + +#include <linux/device.h> +#include <linux/i2c.h> +#include <linux/interrupt.h> +#include <linux/mfd/gsc.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +#include <asm/unaligned.h> + +/* + * The GSC suffers from an errata where occasionally during + * ADC cycles the chip can NAK I2C transactions. To ensure we have reliable + * register access we place retries around register access. 
+ */ +#define I2C_RETRIES 3 + +int gsc_write(void *context, unsigned int reg, unsigned int val) +{ + struct i2c_client *client = context; + int retry, ret; + + for (retry = 0; retry < I2C_RETRIES; retry++) { + ret = i2c_smbus_write_byte_data(client, reg, val); + /* + * -EAGAIN returned when the i2c host controller is busy + * -EIO returned when i2c device is busy + */ + if (ret != -EAGAIN && ret != -EIO) + break; + } + + return 0; +} +EXPORT_SYMBOL_GPL(gsc_write); + +int gsc_read(void *context, unsigned int reg, unsigned int *val) +{ + struct i2c_client *client = context; + int retry, ret; + + for (retry = 0; retry < I2C_RETRIES; retry++) { + ret = i2c_smbus_read_byte_data(client, reg); + /* + * -EAGAIN returned when the i2c host controller is busy + * -EIO returned when i2c device is busy + */ + if (ret != -EAGAIN && ret != -EIO) + break; + } + *val = ret & 0xff; + + return 0; +} +EXPORT_SYMBOL_GPL(gsc_read); + +/* + * gsc_powerdown - API to use GSC to power down board for a specific time + * + * secs - number of seconds to remain powered off + */ +static int gsc_powerdown(struct gsc_dev *gsc, unsigned long secs) +{ + int ret; + unsigned char regs[4]; + + dev_info(&gsc->i2c->dev, "GSC powerdown for %ld seconds\n", + secs); + + put_unaligned_le32(secs, regs); + ret = regmap_bulk_write(gsc->regmap, GSC_TIME_ADD, regs, 4); + if (ret) + return ret; + + ret = regmap_update_bits(gsc->regmap, GSC_CTRL_1, + BIT(GSC_CTRL_1_SLEEP_ADD), + BIT(GSC_CTRL_1_SLEEP_ADD)); + if (ret) + return ret; + + ret = regmap_update_bits(gsc->regmap, GSC_CTRL_1, + BIT(GSC_CTRL_1_SLEEP_ACTIVATE) | + BIT(GSC_CTRL_1_SLEEP_ENABLE), + BIT(GSC_CTRL_1_SLEEP_ACTIVATE) | + BIT(GSC_CTRL_1_SLEEP_ENABLE)); + + + return ret; +} + +static ssize_t gsc_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct gsc_dev *gsc = dev_get_drvdata(dev); + const char *name = attr->attr.name; + int rz = 0; + + if (strcasecmp(name, "fw_version") == 0) + rz = sprintf(buf, "%d\n", gsc->fwver); + else if (strcasecmp(name, "fw_crc") == 0) + rz = sprintf(buf, "0x%04x\n", gsc->fwcrc); + else + dev_err(dev, "invalid command: '%s'\n", name); + + return rz; +} + +static ssize_t gsc_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct gsc_dev *gsc = dev_get_drvdata(dev); + const char *name = attr->attr.name; + long value; + + if (strcasecmp(name, "powerdown") == 0) { + if (kstrtol(buf, 0, &value) == 0) + gsc_powerdown(gsc, value); + } else { + dev_err(dev, "invalid command: '%s\n", name); + } + + return count; +} + +static struct device_attribute attr_fwver = + __ATTR(fw_version, 0440, gsc_show, NULL); +static struct device_attribute attr_fwcrc = + __ATTR(fw_crc, 0440, gsc_show, NULL); +static struct device_attribute attr_pwrdown = + __ATTR(powerdown, 0220, NULL, gsc_store); + +static struct attribute *gsc_attrs[] = { + &attr_fwver.attr, + &attr_fwcrc.attr, + &attr_pwrdown.attr, + NULL, +}; + +static struct attribute_group attr_group = { + .attrs = gsc_attrs, +}; + +static const struct of_device_id gsc_of_match[] = { + { .compatible = "gw,gsc", }, + { } +}; +MODULE_DEVICE_TABLE(of, gsc_of_match); + +static struct regmap_bus gsc_regmap_bus = { + .reg_read = gsc_read, + .reg_write = gsc_write, +}; + +static const struct regmap_config gsc_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .cache_type = REGCACHE_NONE, + .max_register = GSC_WP, +}; + +static const struct regmap_irq gsc_irqs[] = { + REGMAP_IRQ_REG(GSC_IRQ_PB, 0, BIT(GSC_IRQ_PB)), + REGMAP_IRQ_REG(GSC_IRQ_KEY_ERASED, 0, 
BIT(GSC_IRQ_KEY_ERASED)), + REGMAP_IRQ_REG(GSC_IRQ_EEPROM_WP, 0, BIT(GSC_IRQ_EEPROM_WP)), + REGMAP_IRQ_REG(GSC_IRQ_RESV, 0, BIT(GSC_IRQ_RESV)), + REGMAP_IRQ_REG(GSC_IRQ_GPIO, 0, BIT(GSC_IRQ_GPIO)), + REGMAP_IRQ_REG(GSC_IRQ_TAMPER, 0, BIT(GSC_IRQ_TAMPER)), + REGMAP_IRQ_REG(GSC_IRQ_WDT_TIMEOUT, 0, BIT(GSC_IRQ_WDT_TIMEOUT)), + REGMAP_IRQ_REG(GSC_IRQ_SWITCH_HOLD, 0, BIT(GSC_IRQ_SWITCH_HOLD)), +}; + +static const struct regmap_irq_chip gsc_irq_chip = { + .name = "gateworks-gsc", + .irqs = gsc_irqs, + .num_irqs = ARRAY_SIZE(gsc_irqs), + .num_regs = 1, + .status_base = GSC_IRQ_STATUS, + .mask_base = GSC_IRQ_ENABLE, + .mask_invert = true, + .ack_base = GSC_IRQ_STATUS, + .ack_invert = true, +}; + +static int gsc_probe(struct i2c_client *client) +{ + struct device *dev = &client->dev; + struct gsc_dev *gsc; + struct regmap_irq_chip_data *irq_data; + int ret; + unsigned int reg; + + gsc = devm_kzalloc(dev, sizeof(*gsc), GFP_KERNEL); + if (!gsc) + return -ENOMEM; + + gsc->dev = &client->dev; + gsc->i2c = client; + i2c_set_clientdata(client, gsc); + + gsc->regmap = devm_regmap_init(dev, &gsc_regmap_bus, client, + &gsc_regmap_config); + if (IS_ERR(gsc->regmap)) + return PTR_ERR(gsc->regmap); + + if (regmap_read(gsc->regmap, GSC_FW_VER, ®)) + return -EIO; + gsc->fwver = reg; + + regmap_read(gsc->regmap, GSC_FW_CRC, ®); + gsc->fwcrc = reg; + regmap_read(gsc->regmap, GSC_FW_CRC + 1, ®); + gsc->fwcrc |= reg << 8; + + gsc->i2c_hwmon = devm_i2c_new_dummy_device(dev, client->adapter, + GSC_HWMON); + if (IS_ERR(gsc->i2c_hwmon)) { + dev_err(dev, "Failed to allocate I2C device for HWMON\n"); + return PTR_ERR(gsc->i2c_hwmon); + } + + ret = devm_regmap_add_irq_chip(dev, gsc->regmap, client->irq, + IRQF_ONESHOT | IRQF_SHARED | + IRQF_TRIGGER_FALLING, 0, + &gsc_irq_chip, &irq_data); + if (ret) + return ret; + + dev_info(dev, "Gateworks System Controller v%d: fw 0x%04x\n", + gsc->fwver, gsc->fwcrc); + + ret = sysfs_create_group(&dev->kobj, &attr_group); + if (ret) + dev_err(dev, "failed to create sysfs attrs\n"); + + ret = devm_of_platform_populate(dev); + if (ret) { + sysfs_remove_group(&dev->kobj, &attr_group); + return ret; + } + + return 0; +} + +static int gsc_remove(struct i2c_client *client) +{ + sysfs_remove_group(&client->dev.kobj, &attr_group); + + return 0; +} + +static struct i2c_driver gsc_driver = { + .driver = { + .name = "gateworks-gsc", + .of_match_table = gsc_of_match, + }, + .probe_new = gsc_probe, + .remove = gsc_remove, +}; +module_i2c_driver(gsc_driver); + +MODULE_AUTHOR("Tim Harvey <tharvey@gateworks.com>"); +MODULE_DESCRIPTION("I2C Core interface for GSC"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index c5367e2c8487..7896952de1ac 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -2484,8 +2484,8 @@ static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp) struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev, struct mmc_rpmb_data, chrdev); - put_device(&rpmb->dev); mmc_blk_put(rpmb->md); + put_device(&rpmb->dev); return 0; } diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 3f716466fcfd..e368f2dabf20 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -4000,9 +4000,6 @@ int sdhci_setup_host(struct sdhci_host *host) mmc_hostname(mmc), host->version); } - if (host->quirks & SDHCI_QUIRK_BROKEN_CQE) - mmc->caps2 &= ~MMC_CAP2_CQE; - if (host->quirks & SDHCI_QUIRK_FORCE_DMA) host->flags |= SDHCI_USE_SDMA; else if (!(host->caps & SDHCI_CAN_DO_SDMA)) @@ -4539,6 +4536,12 @@ 
int __sdhci_add_host(struct sdhci_host *host) struct mmc_host *mmc = host->mmc; int ret; + if ((mmc->caps2 & MMC_CAP2_CQE) && + (host->quirks & SDHCI_QUIRK_BROKEN_CQE)) { + mmc->caps2 &= ~MMC_CAP2_CQE; + mmc->cqe_ops = NULL; + } + host->complete_wq = alloc_workqueue("sdhci", flags, 0); if (!host->complete_wq) return -ENOMEM; diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c index 007481557191..9b8346638f69 100644 --- a/drivers/net/bonding/bond_sysfs_slave.c +++ b/drivers/net/bonding/bond_sysfs_slave.c @@ -149,8 +149,10 @@ int bond_sysfs_slave_add(struct slave *slave) err = kobject_init_and_add(&slave->kobj, &slave_ktype, &(slave->dev->dev.kobj), "bonding_slave"); - if (err) + if (err) { + kobject_put(&slave->kobj); return err; + } for (a = slave_attrs; *a; ++a) { err = sysfs_create_file(&slave->kobj, &((*a)->attr)); diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index e8aae64db1ca..e113269c220a 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -102,13 +102,17 @@ static void felix_vlan_add(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan) { struct ocelot *ocelot = ds->priv; + u16 flags = vlan->flags; u16 vid; int err; + if (dsa_is_cpu_port(ds, port)) + flags &= ~BRIDGE_VLAN_INFO_UNTAGGED; + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { err = ocelot_vlan_add(ocelot, port, vid, - vlan->flags & BRIDGE_VLAN_INFO_PVID, - vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED); + flags & BRIDGE_VLAN_INFO_PVID, + flags & BRIDGE_VLAN_INFO_UNTAGGED); if (err) { dev_err(ds->dev, "Failed to add VLAN %d to port %d: %d\n", vid, port, err); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index d1a83716d934..58e0d9a781e9 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4176,14 +4176,12 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, int i, intr_process, rc, tmo_count; struct input *req = msg; u32 *data = msg; - __le32 *resp_len; u8 *valid; u16 cp_ring_id, len = 0; struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN; struct hwrm_short_input short_input = {0}; u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER; - u8 *resp_addr = (u8 *)bp->hwrm_cmd_resp_addr; u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM; u16 dst = BNXT_HWRM_CHNL_CHIMP; @@ -4201,7 +4199,6 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, bar_offset = BNXT_GRCPF_REG_KONG_COMM; doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER; resp = bp->hwrm_cmd_kong_resp_addr; - resp_addr = (u8 *)bp->hwrm_cmd_kong_resp_addr; } memset(resp, 0, PAGE_SIZE); @@ -4270,7 +4267,6 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, tmo_count = HWRM_SHORT_TIMEOUT_COUNTER; timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER; tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT); - resp_len = (__le32 *)(resp_addr + HWRM_RESP_LEN_OFFSET); if (intr_process) { u16 seq_id = bp->hwrm_intr_seq_id; @@ -4298,9 +4294,8 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, le16_to_cpu(req->req_type)); return -EBUSY; } - len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> - HWRM_RESP_LEN_SFT; - valid = resp_addr + len - 1; + len = le16_to_cpu(resp->resp_len); + valid = ((u8 *)resp) + len - 1; } else { int j; @@ -4311,8 +4306,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt 
*bp, void *msg, u32 msg_len, */ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) return -EBUSY; - len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> - HWRM_RESP_LEN_SFT; + len = le16_to_cpu(resp->resp_len); if (len) break; /* on first few passes, just barely sleep */ @@ -4334,7 +4328,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, } /* Last byte of resp contains valid bit */ - valid = resp_addr + len - 1; + valid = ((u8 *)resp) + len - 1; for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) { /* make sure we read from updated DMA memory */ dma_rmb(); @@ -9310,7 +9304,7 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bnxt_free_skbs(bp); /* Save ring stats before shutdown */ - if (bp->bnapi) + if (bp->bnapi && irq_re_init) bnxt_get_ring_stats(bp, &bp->net_stats_prev); if (irq_re_init) { bnxt_free_irq(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index f6a3250ef1c5..3d39638521d6 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -656,11 +656,6 @@ struct nqe_cn { #define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout) #define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4) #define HWRM_COREDUMP_TIMEOUT ((HWRM_CMD_TIMEOUT) * 12) -#define HWRM_RESP_ERR_CODE_MASK 0xffff -#define HWRM_RESP_LEN_OFFSET 4 -#define HWRM_RESP_LEN_MASK 0xffff0000 -#define HWRM_RESP_LEN_SFT 16 -#define HWRM_RESP_VALID_MASK 0xff000000 #define BNXT_HWRM_REQ_MAX_SIZE 128 #define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \ BNXT_HWRM_REQ_MAX_SIZE) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 34046a6286e8..360f9a95c1d5 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -2012,11 +2012,12 @@ int bnxt_flash_package_from_file(struct net_device *dev, const char *filename, bnxt_hwrm_fw_set_time(bp); - if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE, - BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, - &index, &item_len, NULL) != 0) { + rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE, + BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, + &index, &item_len, NULL); + if (rc) { netdev_err(dev, "PKG update area not created in nvram\n"); - return -ENOBUFS; + return rc; } rc = request_firmware(&fw, filename, &dev->dev); diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 2cd1f8efdfa3..6bfa7575af94 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -2914,7 +2914,7 @@ static int dpaa_eth_probe(struct platform_device *pdev) } /* Do this here, so we can be verbose early */ - SET_NETDEV_DEV(net_dev, dev); + SET_NETDEV_DEV(net_dev, dev->parent); dev_set_drvdata(dev, net_dev); priv = netdev_priv(net_dev); diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 3de549c6c693..197dc5b2c090 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -4678,12 +4678,10 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc); break; } - dev_info(dev, "Partner protocol version is %d\n", - crq->version_exchange_rsp.version); - if (be16_to_cpu(crq->version_exchange_rsp.version) < - ibmvnic_version) - ibmvnic_version = + ibmvnic_version = be16_to_cpu(crq->version_exchange_rsp.version); + dev_info(dev, "Partner protocol version is 
%d\n", + ibmvnic_version); send_cap_queries(adapter); break; case QUERY_CAPABILITY_RSP: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 7d69a3061f17..fd375cbe586e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -80,7 +80,7 @@ config MLX5_ESWITCH config MLX5_TC_CT bool "MLX5 TC connection tracking offload support" - depends on MLX5_CORE_EN && NET_SWITCHDEV && NF_FLOW_TABLE && NET_ACT_CT && NET_TC_SKB_EXT + depends on MLX5_ESWITCH && NF_FLOW_TABLE && NET_ACT_CT && NET_TC_SKB_EXT default y help Say Y here if you want to support offloading connection tracking rules diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 59745402747b..0a5aada0f50f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -1068,10 +1068,12 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv); void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len, int num_channels); -void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, - u8 cq_period_mode); -void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, - u8 cq_period_mode); + +void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode); +void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode); +void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode); +void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode); + void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params); void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c index 2c4a670c8ffd..2a8950b3056f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c @@ -369,17 +369,19 @@ enum mlx5e_fec_supported_link_mode { *_policy = MLX5_GET(pplm_reg, _buf, fec_override_admin_##link); \ } while (0) -#define MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(buf, policy, write, link) \ - do { \ - u16 *__policy = &(policy); \ - bool _write = (write); \ - \ - if (_write && *__policy) \ - *__policy = find_first_bit((u_long *)__policy, \ - sizeof(u16) * BITS_PER_BYTE);\ - MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, *__policy, _write, link); \ - if (!_write && *__policy) \ - *__policy = 1 << *__policy; \ +#define MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(buf, policy, write, link) \ + do { \ + unsigned long policy_long; \ + u16 *__policy = &(policy); \ + bool _write = (write); \ + \ + policy_long = *__policy; \ + if (_write && *__policy) \ + *__policy = find_first_bit(&policy_long, \ + sizeof(policy_long) * BITS_PER_BYTE);\ + MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, *__policy, _write, link); \ + if (!_write && *__policy) \ + *__policy = 1 << *__policy; \ } while (0) /* get/set FEC admin field for a given speed */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 6d703ddee4e2..bc290ae80a53 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -527,8 +527,8 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, struct dim_cq_moder *rx_moder, *tx_moder; struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_channels 
new_channels = {}; + bool reset_rx, reset_tx; int err = 0; - bool reset; if (!MLX5_CAP_GEN(mdev, cq_moderation)) return -EOPNOTSUPP; @@ -566,15 +566,28 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, } /* we are opened */ - reset = (!!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled) || - (!!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled); + reset_rx = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled; + reset_tx = !!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled; - if (!reset) { + if (!reset_rx && !reset_tx) { mlx5e_set_priv_channels_coalesce(priv, coal); priv->channels.params = new_channels.params; goto out; } + if (reset_rx) { + u8 mode = MLX5E_GET_PFLAG(&new_channels.params, + MLX5E_PFLAG_RX_CQE_BASED_MODER); + + mlx5e_reset_rx_moderation(&new_channels.params, mode); + } + if (reset_tx) { + u8 mode = MLX5E_GET_PFLAG(&new_channels.params, + MLX5E_PFLAG_TX_CQE_BASED_MODER); + + mlx5e_reset_tx_moderation(&new_channels.params, mode); + } + err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); out: @@ -665,11 +678,12 @@ static const u32 pplm_fec_2_ethtool_linkmodes[] = { static int get_fec_supported_advertised(struct mlx5_core_dev *dev, struct ethtool_link_ksettings *link_ksettings) { - u_long active_fec = 0; + unsigned long active_fec_long; + u32 active_fec; u32 bitn; int err; - err = mlx5e_get_fec_mode(dev, (u32 *)&active_fec, NULL); + err = mlx5e_get_fec_mode(dev, &active_fec, NULL); if (err) return (err == -EOPNOTSUPP) ? 0 : err; @@ -682,10 +696,11 @@ static int get_fec_supported_advertised(struct mlx5_core_dev *dev, MLX5E_ADVERTISE_SUPPORTED_FEC(MLX5E_FEC_LLRS_272_257_1, ETHTOOL_LINK_MODE_FEC_LLRS_BIT); + active_fec_long = active_fec; /* active fec is a bit set, find out which bit is set and * advertise the corresponding ethtool bit */ - bitn = find_first_bit(&active_fec, sizeof(u32) * BITS_PER_BYTE); + bitn = find_first_bit(&active_fec_long, sizeof(active_fec_long) * BITS_PER_BYTE); if (bitn < ARRAY_SIZE(pplm_fec_2_ethtool_linkmodes)) __set_bit(pplm_fec_2_ethtool_linkmodes[bitn], link_ksettings->link_modes.advertising); @@ -1517,8 +1532,8 @@ static int mlx5e_get_fecparam(struct net_device *netdev, { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; - u16 fec_configured = 0; - u32 fec_active = 0; + u16 fec_configured; + u32 fec_active; int err; err = mlx5e_get_fec_mode(mdev, &fec_active, &fec_configured); @@ -1526,14 +1541,14 @@ static int mlx5e_get_fecparam(struct net_device *netdev, if (err) return err; - fecparam->active_fec = pplm2ethtool_fec((u_long)fec_active, - sizeof(u32) * BITS_PER_BYTE); + fecparam->active_fec = pplm2ethtool_fec((unsigned long)fec_active, + sizeof(unsigned long) * BITS_PER_BYTE); if (!fecparam->active_fec) return -EOPNOTSUPP; - fecparam->fec = pplm2ethtool_fec((u_long)fec_configured, - sizeof(u16) * BITS_PER_BYTE); + fecparam->fec = pplm2ethtool_fec((unsigned long)fec_configured, + sizeof(unsigned long) * BITS_PER_BYTE); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index c6b83042d431..bd8d0e096085 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4716,7 +4716,7 @@ static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode) DIM_CQ_PERIOD_MODE_START_FROM_EQE; } -void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) +void 
mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode) { if (params->tx_dim_enabled) { u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode); @@ -4725,13 +4725,9 @@ void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) } else { params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode); } - - MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER, - params->tx_cq_moderation.cq_period_mode == - MLX5_CQ_PERIOD_MODE_START_FROM_CQE); } -void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) +void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode) { if (params->rx_dim_enabled) { u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode); @@ -4740,7 +4736,19 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) } else { params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode); } +} + +void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) +{ + mlx5e_reset_tx_moderation(params, cq_period_mode); + MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER, + params->tx_cq_moderation.cq_period_mode == + MLX5_CQ_PERIOD_MODE_START_FROM_CQE); +} +void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) +{ + mlx5e_reset_rx_moderation(params, cq_period_mode); MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER, params->rx_cq_moderation.cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 5bcf95fcdd59..10f705761666 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -2068,7 +2068,7 @@ static int mlx5e_flower_parse_meta(struct net_device *filter_dev, flow_rule_match_meta(rule, &match); if (match.mask->ingress_ifindex != 0xFFFFFFFF) { NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask"); - return -EINVAL; + return -EOPNOTSUPP; } ingress_dev = __dev_get_by_index(dev_net(filter_dev), @@ -2076,13 +2076,13 @@ static int mlx5e_flower_parse_meta(struct net_device *filter_dev, if (!ingress_dev) { NL_SET_ERR_MSG_MOD(extack, "Can't find the ingress port to match on"); - return -EINVAL; + return -ENOENT; } if (ingress_dev != filter_dev) { NL_SET_ERR_MSG_MOD(extack, "Can't match on the ingress filter port"); - return -EINVAL; + return -EOPNOTSUPP; } return 0; @@ -3849,10 +3849,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) { NL_SET_ERR_MSG_MOD(extack, "devices are not on same switch HW, can't offload forwarding"); - netdev_warn(priv->netdev, - "devices %s %s not on same switch HW, can't offload forwarding\n", - priv->netdev->name, - out_dev->name); return -EOPNOTSUPP; } @@ -4614,7 +4610,7 @@ void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv, dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets; dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes; rpriv->prev_vf_vport_stats = cur_stats; - flow_stats_update(&ma->stats, dpkts, dbytes, jiffies, + flow_stats_update(&ma->stats, dbytes, dpkts, jiffies, FLOW_ACTION_HW_STATS_DELAYED); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index c1618b818f3a..17f818a54090 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1549,6 +1549,22 @@ static 
void shutdown(struct pci_dev *pdev) mlx5_pci_disable_device(dev); } +static int mlx5_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct mlx5_core_dev *dev = pci_get_drvdata(pdev); + + mlx5_unload_one(dev, false); + + return 0; +} + +static int mlx5_resume(struct pci_dev *pdev) +{ + struct mlx5_core_dev *dev = pci_get_drvdata(pdev); + + return mlx5_load_one(dev, false); +} + static const struct pci_device_id mlx5_core_pci_table[] = { { PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTIB) }, { PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF}, /* Connect-IB VF */ @@ -1592,6 +1608,8 @@ static struct pci_driver mlx5_core_driver = { .id_table = mlx5_core_pci_table, .probe = init_one, .remove = remove_one, + .suspend = mlx5_suspend, + .resume = mlx5_resume, .shutdown = shutdown, .err_handler = &mlx5_err_handler, .sriov_configure = mlx5_core_sriov_configure, diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index c694dbc239d0..6b60771ccb19 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -1440,7 +1440,8 @@ __nfp_flower_update_merge_stats(struct nfp_app *app, ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id); priv->stats[ctx_id].pkts += pkts; priv->stats[ctx_id].bytes += bytes; - max_t(u64, priv->stats[ctx_id].used, used); + priv->stats[ctx_id].used = max_t(u64, used, + priv->stats[ctx_id].used); } } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 2a533280b124..29b9c728a65e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -3651,7 +3651,7 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev) ahw->diag_cnt = 0; ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST); if (ret) - goto fail_diag_irq; + goto fail_mbx_args; if (adapter->flags & QLCNIC_MSIX_ENABLED) intrpt_id = ahw->intr_tbl[0].id; @@ -3681,6 +3681,8 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev) done: qlcnic_free_mbx_args(&cmd); + +fail_mbx_args: qlcnic_83xx_diag_free_res(netdev, drv_sds_rings); fail_diag_irq: diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 1f319c9cee46..7e9cbfd23530 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -630,7 +630,8 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; ptp_v2 = PTP_TCR_TSVER2ENA; snap_type_sel = PTP_TCR_SNAPTYPSEL_1; - ts_event_en = PTP_TCR_TSEVNTENA; + if (priv->synopsys_id != DWMAC_CORE_5_10) + ts_event_en = PTP_TCR_TSEVNTENA; ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; ptp_over_ethernet = PTP_TCR_TSIPENA; diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 4bb8552a00d3..4a2c7355be63 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1324,6 +1324,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit 
FN980 */ {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ diff --git a/drivers/nfc/st21nfca/dep.c b/drivers/nfc/st21nfca/dep.c index a1d69f9b2d4a..0b9ca6d20ffa 100644 --- a/drivers/nfc/st21nfca/dep.c +++ b/drivers/nfc/st21nfca/dep.c @@ -173,8 +173,10 @@ static int st21nfca_tm_send_atr_res(struct nfc_hci_dev *hdev, memcpy(atr_res->gbi, atr_req->gbi, gb_len); r = nfc_set_remote_general_bytes(hdev->ndev, atr_res->gbi, gb_len); - if (r < 0) + if (r < 0) { + kfree_skb(skb); return r; + } } info->dep_info.curr_nfc_dep_pni = 0; diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 3726dc780d15..cc46e250fcac 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1382,16 +1382,19 @@ static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown) /* * Called only on a device that has been disabled and after all other threads - * that can check this device's completion queues have synced. This is the - * last chance for the driver to see a natural completion before - * nvme_cancel_request() terminates all incomplete requests. + * that can check this device's completion queues have synced, except + * nvme_poll(). This is the last chance for the driver to see a natural + * completion before nvme_cancel_request() terminates all incomplete requests. */ static void nvme_reap_pending_cqes(struct nvme_dev *dev) { int i; - for (i = dev->ctrl.queue_count - 1; i > 0; i--) + for (i = dev->ctrl.queue_count - 1; i > 0; i--) { + spin_lock(&dev->queues[i].cq_poll_lock); nvme_process_cq(&dev->queues[i]); + spin_unlock(&dev->queues[i].cq_poll_lock); + } } static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues, diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c index db37144ae98c..87ee9f767b7a 100644 --- a/drivers/soc/mediatek/mtk-cmdq-helper.c +++ b/drivers/soc/mediatek/mtk-cmdq-helper.c @@ -351,7 +351,9 @@ int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb, spin_unlock_irqrestore(&client->lock, flags); } - mbox_send_message(client->chan, pkt); + err = mbox_send_message(client->chan, pkt); + if (err < 0) + return err; /* We can send next packet immediately, so just call txdone. */ mbox_client_txdone(client->chan, 0); diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 741b9140992a..8f1f8fca79e3 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -226,17 +226,20 @@ config SPI_DESIGNWARE help general driver for SPI controller core from DesignWare +if SPI_DESIGNWARE + +config SPI_DW_DMA + bool "DMA support for DW SPI controller" + config SPI_DW_PCI tristate "PCI interface driver for DW SPI core" - depends on SPI_DESIGNWARE && PCI - -config SPI_DW_MID_DMA - bool "DMA support for DW SPI controller on Intel MID platform" - depends on SPI_DW_PCI && DW_DMAC_PCI + depends on PCI config SPI_DW_MMIO tristate "Memory-mapped io interface driver for DW SPI core" - depends on SPI_DESIGNWARE + depends on HAS_IOMEM + +endif config SPI_DLN2 tristate "Diolan DLN-2 USB SPI adapter" @@ -844,6 +847,7 @@ config SPI_TXX9 config SPI_UNIPHIER tristate "Socionext UniPhier SPI Controller" depends on (ARCH_UNIPHIER || COMPILE_TEST) && OF + depends on HAS_IOMEM help This enables a driver for the Socionext UniPhier SoC SCSSI SPI controller. @@ -910,6 +914,12 @@ config SPI_ZYNQMP_GQSPI help Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC. +config SPI_AMD + tristate "AMD SPI controller" + depends on SPI_MASTER || COMPILE_TEST + help + Enables SPI controller driver for AMD SoC. 
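The nvme_reap_pending_cqes() hunk above serializes the final completion-queue sweep against nvme_poll() by taking each queue's cq_poll_lock. A minimal, compilable userspace sketch of that idea follows; the pthread mutex stands in for the kernel spinlock, and struct cq / process_cq() are illustrative stand-ins rather than the NVMe driver's real types:

/* build: cc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>

struct cq {
	pthread_mutex_t poll_lock;	/* plays the role of cq_poll_lock */
	unsigned int head;		/* consumer index into the CQ ring */
	unsigned int pending;		/* entries the "device" has posted */
};

/* Reap everything currently posted; caller must hold poll_lock. */
static void process_cq(struct cq *q)
{
	while (q->head < q->pending)
		q->head++;		/* complete one entry */
}

/* The polling path and the teardown path take the same lock, so the
 * teardown sweep can never observe a half-consumed queue. */
static void *poll_thread(void *arg)
{
	struct cq *q = arg;

	pthread_mutex_lock(&q->poll_lock);
	process_cq(q);
	pthread_mutex_unlock(&q->poll_lock);
	return NULL;
}

int main(void)
{
	struct cq q = { PTHREAD_MUTEX_INITIALIZER, 0, 8 };
	pthread_t t;

	pthread_create(&t, NULL, poll_thread, &q);

	/* teardown/reap path: same lock, same helper */
	pthread_mutex_lock(&q.poll_lock);
	process_cq(&q);
	pthread_mutex_unlock(&q.poll_lock);

	pthread_join(t, NULL);
	printf("reaped %u of %u entries\n", q.head, q.pending);
	return 0;
}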
+ # # Add new SPI master controllers in alphabetical order above this line # diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index 28f601327f8c..d2e41d3d464a 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -36,9 +36,10 @@ obj-$(CONFIG_SPI_COLDFIRE_QSPI) += spi-coldfire-qspi.o obj-$(CONFIG_SPI_DAVINCI) += spi-davinci.o obj-$(CONFIG_SPI_DLN2) += spi-dln2.o obj-$(CONFIG_SPI_DESIGNWARE) += spi-dw.o +spi-dw-y := spi-dw-core.o +spi-dw-$(CONFIG_SPI_DW_DMA) += spi-dw-dma.o obj-$(CONFIG_SPI_DW_MMIO) += spi-dw-mmio.o -obj-$(CONFIG_SPI_DW_PCI) += spi-dw-midpci.o -spi-dw-midpci-objs := spi-dw-pci.o spi-dw-mid.o +obj-$(CONFIG_SPI_DW_PCI) += spi-dw-pci.o obj-$(CONFIG_SPI_EFM32) += spi-efm32.o obj-$(CONFIG_SPI_EP93XX) += spi-ep93xx.o obj-$(CONFIG_SPI_FALCON) += spi-falcon.o @@ -127,6 +128,7 @@ obj-$(CONFIG_SPI_XLP) += spi-xlp.o obj-$(CONFIG_SPI_XTENSA_XTFPGA) += spi-xtensa-xtfpga.o obj-$(CONFIG_SPI_ZYNQ_QSPI) += spi-zynq-qspi.o obj-$(CONFIG_SPI_ZYNQMP_GQSPI) += spi-zynqmp-gqspi.o +obj-$(CONFIG_SPI_AMD) += spi-amd.o # SPI slave protocol handlers obj-$(CONFIG_SPI_SLAVE_TIME) += spi-slave-time.o diff --git a/drivers/spi/spi-amd.c b/drivers/spi/spi-amd.c new file mode 100644 index 000000000000..d0aacd4de1b9 --- /dev/null +++ b/drivers/spi/spi-amd.c @@ -0,0 +1,315 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +// +// AMD SPI controller driver +// +// Copyright (c) 2020, Advanced Micro Devices, Inc. +// +// Author: Sanjay R Mehta <sanju.mehta@amd.com> + +#include <linux/acpi.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/delay.h> +#include <linux/spi/spi.h> + +#define AMD_SPI_CTRL0_REG 0x00 +#define AMD_SPI_EXEC_CMD BIT(16) +#define AMD_SPI_FIFO_CLEAR BIT(20) +#define AMD_SPI_BUSY BIT(31) + +#define AMD_SPI_OPCODE_MASK 0xFF + +#define AMD_SPI_ALT_CS_REG 0x1D +#define AMD_SPI_ALT_CS_MASK 0x3 + +#define AMD_SPI_FIFO_BASE 0x80 +#define AMD_SPI_TX_COUNT_REG 0x48 +#define AMD_SPI_RX_COUNT_REG 0x4B +#define AMD_SPI_STATUS_REG 0x4C + +#define AMD_SPI_MEM_SIZE 200 + +/* M_CMD OP codes for SPI */ +#define AMD_SPI_XFER_TX 1 +#define AMD_SPI_XFER_RX 2 + +struct amd_spi { + void __iomem *io_remap_addr; + unsigned long io_base_addr; + u32 rom_addr; + u8 chip_select; +}; + +static inline u8 amd_spi_readreg8(struct spi_master *master, int idx) +{ + struct amd_spi *amd_spi = spi_master_get_devdata(master); + + return ioread8((u8 __iomem *)amd_spi->io_remap_addr + idx); +} + +static inline void amd_spi_writereg8(struct spi_master *master, int idx, + u8 val) +{ + struct amd_spi *amd_spi = spi_master_get_devdata(master); + + iowrite8(val, ((u8 __iomem *)amd_spi->io_remap_addr + idx)); +} + +static inline void amd_spi_setclear_reg8(struct spi_master *master, int idx, + u8 set, u8 clear) +{ + u8 tmp = amd_spi_readreg8(master, idx); + + tmp = (tmp & ~clear) | set; + amd_spi_writereg8(master, idx, tmp); +} + +static inline u32 amd_spi_readreg32(struct spi_master *master, int idx) +{ + struct amd_spi *amd_spi = spi_master_get_devdata(master); + + return ioread32((u8 __iomem *)amd_spi->io_remap_addr + idx); +} + +static inline void amd_spi_writereg32(struct spi_master *master, int idx, + u32 val) +{ + struct amd_spi *amd_spi = spi_master_get_devdata(master); + + iowrite32(val, ((u8 __iomem *)amd_spi->io_remap_addr + idx)); +} + +static inline void amd_spi_setclear_reg32(struct spi_master *master, int idx, + u32 set, u32 clear) +{ + u32 tmp = amd_spi_readreg32(master, idx); + + tmp = (tmp & ~clear) | set; + amd_spi_writereg32(master, idx, 
tmp); +} + +static void amd_spi_select_chip(struct spi_master *master) +{ + struct amd_spi *amd_spi = spi_master_get_devdata(master); + u8 chip_select = amd_spi->chip_select; + + amd_spi_setclear_reg8(master, AMD_SPI_ALT_CS_REG, chip_select, + AMD_SPI_ALT_CS_MASK); +} + +static void amd_spi_clear_fifo_ptr(struct spi_master *master) +{ + amd_spi_setclear_reg32(master, AMD_SPI_CTRL0_REG, AMD_SPI_FIFO_CLEAR, + AMD_SPI_FIFO_CLEAR); +} + +static void amd_spi_set_opcode(struct spi_master *master, u8 cmd_opcode) +{ + amd_spi_setclear_reg32(master, AMD_SPI_CTRL0_REG, cmd_opcode, + AMD_SPI_OPCODE_MASK); +} + +static inline void amd_spi_set_rx_count(struct spi_master *master, + u8 rx_count) +{ + amd_spi_setclear_reg8(master, AMD_SPI_RX_COUNT_REG, rx_count, 0xff); +} + +static inline void amd_spi_set_tx_count(struct spi_master *master, + u8 tx_count) +{ + amd_spi_setclear_reg8(master, AMD_SPI_TX_COUNT_REG, tx_count, 0xff); +} + +static inline int amd_spi_busy_wait(struct amd_spi *amd_spi) +{ + bool spi_busy; + int timeout = 100000; + + /* poll for SPI bus to become idle */ + spi_busy = (ioread32((u8 __iomem *)amd_spi->io_remap_addr + + AMD_SPI_CTRL0_REG) & AMD_SPI_BUSY) == AMD_SPI_BUSY; + while (spi_busy) { + usleep_range(10, 20); + if (timeout-- < 0) + return -ETIMEDOUT; + + spi_busy = (ioread32((u8 __iomem *)amd_spi->io_remap_addr + + AMD_SPI_CTRL0_REG) & AMD_SPI_BUSY) == AMD_SPI_BUSY; + } + + return 0; +} + +static void amd_spi_execute_opcode(struct spi_master *master) +{ + struct amd_spi *amd_spi = spi_master_get_devdata(master); + + /* Set ExecuteOpCode bit in the CTRL0 register */ + amd_spi_setclear_reg32(master, AMD_SPI_CTRL0_REG, AMD_SPI_EXEC_CMD, + AMD_SPI_EXEC_CMD); + + amd_spi_busy_wait(amd_spi); +} + +static int amd_spi_master_setup(struct spi_device *spi) +{ + struct spi_master *master = spi->master; + + amd_spi_clear_fifo_ptr(master); + + return 0; +} + +static inline int amd_spi_fifo_xfer(struct amd_spi *amd_spi, + struct spi_master *master, + struct spi_message *message) +{ + struct spi_transfer *xfer = NULL; + u8 cmd_opcode; + u8 *buf = NULL; + u32 m_cmd = 0; + u32 i = 0; + u32 tx_len = 0, rx_len = 0; + + list_for_each_entry(xfer, &message->transfers, + transfer_list) { + if (xfer->rx_buf) + m_cmd = AMD_SPI_XFER_RX; + if (xfer->tx_buf) + m_cmd = AMD_SPI_XFER_TX; + + if (m_cmd & AMD_SPI_XFER_TX) { + buf = (u8 *)xfer->tx_buf; + tx_len = xfer->len - 1; + cmd_opcode = *(u8 *)xfer->tx_buf; + buf++; + amd_spi_set_opcode(master, cmd_opcode); + + /* Write data into the FIFO. */ + for (i = 0; i < tx_len; i++) { + iowrite8(buf[i], + ((u8 __iomem *)amd_spi->io_remap_addr + + AMD_SPI_FIFO_BASE + i)); + } + + amd_spi_set_tx_count(master, tx_len); + amd_spi_clear_fifo_ptr(master); + /* Execute command */ + amd_spi_execute_opcode(master); + } + if (m_cmd & AMD_SPI_XFER_RX) { + /* + * Store no. 
of bytes to be received from + * FIFO + */ + rx_len = xfer->len; + buf = (u8 *)xfer->rx_buf; + amd_spi_set_rx_count(master, rx_len); + amd_spi_clear_fifo_ptr(master); + /* Execute command */ + amd_spi_execute_opcode(master); + /* Read data from FIFO to receive buffer */ + for (i = 0; i < rx_len; i++) + buf[i] = amd_spi_readreg8(master, + AMD_SPI_FIFO_BASE + + tx_len + i); + } + } + + /* Update statistics */ + message->actual_length = tx_len + rx_len + 1; + /* complete the transaction */ + message->status = 0; + spi_finalize_current_message(master); + + return 0; +} + +static int amd_spi_master_transfer(struct spi_master *master, + struct spi_message *msg) +{ + struct amd_spi *amd_spi = spi_master_get_devdata(master); + struct spi_device *spi = msg->spi; + + amd_spi->chip_select = spi->chip_select; + amd_spi_select_chip(master); + + /* + * Extract spi_transfers from the spi message and + * program the controller. + */ + amd_spi_fifo_xfer(amd_spi, master, msg); + + return 0; +} + +static int amd_spi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct spi_master *master; + struct amd_spi *amd_spi; + struct resource *res; + int err = 0; + + /* Allocate storage for spi_master and driver private data */ + master = spi_alloc_master(dev, sizeof(struct amd_spi)); + if (!master) { + dev_err(dev, "Error allocating SPI master\n"); + return -ENOMEM; + } + + amd_spi = spi_master_get_devdata(master); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + amd_spi->io_remap_addr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(amd_spi->io_remap_addr)) { + err = PTR_ERR(amd_spi->io_remap_addr); + dev_err(dev, "error %d ioremap of SPI registers failed\n", err); + goto err_free_master; + } + dev_dbg(dev, "io_remap_address: %p\n", amd_spi->io_remap_addr); + + /* Initialize the spi_master fields */ + master->bus_num = 0; + master->num_chipselect = 4; + master->mode_bits = 0; + master->flags = SPI_MASTER_HALF_DUPLEX; + master->setup = amd_spi_master_setup; + master->transfer_one_message = amd_spi_master_transfer; + + /* Register the controller with SPI framework */ + err = devm_spi_register_master(dev, master); + if (err) { + dev_err(dev, "error %d registering SPI controller\n", err); + goto err_free_master; + } + + return 0; + +err_free_master: + spi_master_put(master); + + return err; +} + +static const struct acpi_device_id spi_acpi_match[] = { + { "AMDI0061", 0 }, + {}, +}; +MODULE_DEVICE_TABLE(acpi, spi_acpi_match); + +static struct platform_driver amd_spi_driver = { + .driver = { + .name = "amd_spi", + .acpi_match_table = ACPI_PTR(spi_acpi_match), + }, + .probe = amd_spi_probe, +}; + +module_platform_driver(amd_spi_driver); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Sanjay Mehta <sanju.mehta@amd.com>"); +MODULE_DESCRIPTION("AMD SPI Master Controller Driver"); diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c index e450ee17787f..fcde419e480c 100644 --- a/drivers/spi/spi-armada-3700.c +++ b/drivers/spi/spi-armada-3700.c @@ -276,11 +276,11 @@ static int a3700_spi_fifo_flush(struct a3700_spi *a3700_spi) return -ETIMEDOUT; } -static int a3700_spi_init(struct a3700_spi *a3700_spi) +static void a3700_spi_init(struct a3700_spi *a3700_spi) { struct spi_master *master = a3700_spi->master; u32 val; - int i, ret = 0; + int i; /* Reset SPI unit */ val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); @@ -311,8 +311,6 @@ static int a3700_spi_init(struct a3700_spi *a3700_spi) /* Mask the interrupts and clear cause bits */ spireg_write(a3700_spi, 
A3700_SPI_INT_MASK_REG, 0); spireg_write(a3700_spi, A3700_SPI_INT_STAT_REG, ~0U); - - return ret; } static irqreturn_t a3700_spi_interrupt(int irq, void *dev_id) @@ -886,9 +884,7 @@ static int a3700_spi_probe(struct platform_device *pdev) master->min_speed_hz = DIV_ROUND_UP(clk_get_rate(spi->clk), A3700_SPI_MAX_PRESCALE); - ret = a3700_spi_init(spi); - if (ret) - goto error_clk; + a3700_spi_init(spi); ret = devm_request_irq(dev, spi->irq, a3700_spi_interrupt, 0, dev_name(dev), master); diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index 013458cabe3c..57ee8c3b7972 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c @@ -706,6 +706,7 @@ static void atmel_spi_next_xfer_pio(struct spi_master *master, static int atmel_spi_next_xfer_dma_submit(struct spi_master *master, struct spi_transfer *xfer, u32 *plen) + __must_hold(&as->lock) { struct atmel_spi *as = spi_master_get_devdata(master); struct dma_chan *rxchan = master->dma_rx; diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c index eb9b78a90dcf..af86e6d6e16b 100644 --- a/drivers/spi/spi-axi-spi-engine.c +++ b/drivers/spi/spi-axi-spi-engine.c @@ -489,22 +489,6 @@ static int spi_engine_probe(struct platform_device *pdev) spin_lock_init(&spi_engine->lock); - spi_engine->base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(spi_engine->base)) { - ret = PTR_ERR(spi_engine->base); - goto err_put_master; - } - - version = readl(spi_engine->base + SPI_ENGINE_REG_VERSION); - if (SPI_ENGINE_VERSION_MAJOR(version) != 1) { - dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%c\n", - SPI_ENGINE_VERSION_MAJOR(version), - SPI_ENGINE_VERSION_MINOR(version), - SPI_ENGINE_VERSION_PATCH(version)); - ret = -ENODEV; - goto err_put_master; - } - spi_engine->clk = devm_clk_get(&pdev->dev, "s_axi_aclk"); if (IS_ERR(spi_engine->clk)) { ret = PTR_ERR(spi_engine->clk); @@ -525,6 +509,22 @@ static int spi_engine_probe(struct platform_device *pdev) if (ret) goto err_clk_disable; + spi_engine->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(spi_engine->base)) { + ret = PTR_ERR(spi_engine->base); + goto err_ref_clk_disable; + } + + version = readl(spi_engine->base + SPI_ENGINE_REG_VERSION); + if (SPI_ENGINE_VERSION_MAJOR(version) != 1) { + dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%c\n", + SPI_ENGINE_VERSION_MAJOR(version), + SPI_ENGINE_VERSION_MINOR(version), + SPI_ENGINE_VERSION_PATCH(version)); + ret = -ENODEV; + goto err_ref_clk_disable; + } + writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_RESET); writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING); writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE); diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c index 23d295f36c80..681d09085175 100644 --- a/drivers/spi/spi-bcm-qspi.c +++ b/drivers/spi/spi-bcm-qspi.c @@ -91,6 +91,7 @@ #define MSPI_MSPI_STATUS 0x020 #define MSPI_CPTQP 0x024 #define MSPI_SPCR3 0x028 +#define MSPI_REV 0x02c #define MSPI_TXRAM 0x040 #define MSPI_RXRAM 0x0c0 #define MSPI_CDRAM 0x140 @@ -106,14 +107,22 @@ #define MSPI_SPCR2_SPE BIT(6) #define MSPI_SPCR2_CONT_AFTER_CMD BIT(7) +#define MSPI_SPCR3_FASTBR BIT(0) +#define MSPI_SPCR3_FASTDT BIT(1) +#define MSPI_SPCR3_SYSCLKSEL_MASK GENMASK(11, 10) +#define MSPI_SPCR3_SYSCLKSEL_27 (MSPI_SPCR3_SYSCLKSEL_MASK & \ + ~(BIT(10) | BIT(11))) +#define MSPI_SPCR3_SYSCLKSEL_108 (MSPI_SPCR3_SYSCLKSEL_MASK & \ + BIT(11)) + #define MSPI_MSPI_STATUS_SPIF BIT(0) #define INTR_BASE_BIT_SHIFT 0x02 #define INTR_COUNT 
0x07 #define NUM_CHIPSELECT 4 -#define QSPI_SPBR_MIN 8U #define QSPI_SPBR_MAX 255U +#define MSPI_BASE_FREQ 27000000UL #define OPCODE_DIOR 0xBB #define OPCODE_QIOR 0xEB @@ -217,6 +226,9 @@ struct bcm_qspi { struct bcm_qspi_dev_id *dev_ids; struct completion mspi_done; struct completion bspi_done; + u8 mspi_maj_rev; + u8 mspi_min_rev; + bool mspi_spcr3_sysclk; }; static inline bool has_bspi(struct bcm_qspi *qspi) @@ -224,6 +236,36 @@ static inline bool has_bspi(struct bcm_qspi *qspi) return qspi->bspi_mode; } +/* hardware supports spcr3 and fast baud-rate */ +static inline bool bcm_qspi_has_fastbr(struct bcm_qspi *qspi) +{ + if (!has_bspi(qspi) && + ((qspi->mspi_maj_rev >= 1) && + (qspi->mspi_min_rev >= 5))) + return true; + + return false; +} + +/* hardware supports sys clk 108Mhz */ +static inline bool bcm_qspi_has_sysclk_108(struct bcm_qspi *qspi) +{ + if (!has_bspi(qspi) && (qspi->mspi_spcr3_sysclk || + ((qspi->mspi_maj_rev >= 1) && + (qspi->mspi_min_rev >= 6)))) + return true; + + return false; +} + +static inline int bcm_qspi_spbr_min(struct bcm_qspi *qspi) +{ + if (bcm_qspi_has_fastbr(qspi)) + return 1; + else + return 8; +} + /* Read qspi controller register*/ static inline u32 bcm_qspi_read(struct bcm_qspi *qspi, enum base_type type, unsigned int offset) @@ -531,16 +573,39 @@ static void bcm_qspi_hw_set_parms(struct bcm_qspi *qspi, if (xp->speed_hz) spbr = qspi->base_clk / (2 * xp->speed_hz); - spcr = clamp_val(spbr, QSPI_SPBR_MIN, QSPI_SPBR_MAX); + spcr = clamp_val(spbr, bcm_qspi_spbr_min(qspi), QSPI_SPBR_MAX); bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_LSB, spcr); - spcr = MSPI_MASTER_BIT; + if (!qspi->mspi_maj_rev) + /* legacy controller */ + spcr = MSPI_MASTER_BIT; + else + spcr = 0; + /* for 16 bit the data should be zero */ if (xp->bits_per_word != 16) spcr |= xp->bits_per_word << 2; spcr |= xp->mode & 3; + bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_MSB, spcr); + if (bcm_qspi_has_fastbr(qspi)) { + spcr = 0; + + /* enable fastbr */ + spcr |= MSPI_SPCR3_FASTBR; + + if (bcm_qspi_has_sysclk_108(qspi)) { + /* SYSCLK_108 */ + spcr |= MSPI_SPCR3_SYSCLKSEL_108; + qspi->base_clk = MSPI_BASE_FREQ * 4; + /* Change spbr as we changed sysclk */ + bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_LSB, 4); + } + + bcm_qspi_write(qspi, MSPI, MSPI_SPCR3, spcr); + } + qspi->last_parms = *xp; } @@ -612,19 +677,15 @@ static int update_qspi_trans_byte_count(struct bcm_qspi *qspi, if (qt->trans->cs_change && (flags & TRANS_STATUS_BREAK_CS_CHANGE)) ret |= TRANS_STATUS_BREAK_CS_CHANGE; - if (ret) - goto done; - dev_dbg(&qspi->pdev->dev, "advance msg exit\n"); if (bcm_qspi_mspi_transfer_is_last(qspi, qt)) - ret = TRANS_STATUS_BREAK_EOM; + ret |= TRANS_STATUS_BREAK_EOM; else - ret = TRANS_STATUS_BREAK_NO_BYTES; + ret |= TRANS_STATUS_BREAK_NO_BYTES; qt->trans = NULL; } -done: dev_dbg(&qspi->pdev->dev, "trans %p len %d byte %d ret %x\n", qt->trans, qt->trans ? qt->trans->len : 0, qt->byte, ret); return ret; @@ -670,7 +731,7 @@ static void read_from_hw(struct bcm_qspi *qspi, int slots) if (buf) buf[tp.byte] = read_rxram_slot_u8(qspi, slot); dev_dbg(&qspi->pdev->dev, "RD %02x\n", - buf ? buf[tp.byte] : 0xff); + buf ? buf[tp.byte] : 0x0); } else { u16 *buf = tp.trans->rx_buf; @@ -678,7 +739,7 @@ static void read_from_hw(struct bcm_qspi *qspi, int slots) buf[tp.byte / 2] = read_rxram_slot_u16(qspi, slot); dev_dbg(&qspi->pdev->dev, "RD %04x\n", - buf ? buf[tp.byte] : 0xffff); + buf ? 
buf[tp.byte / 2] : 0x0); } update_qspi_trans_byte_count(qspi, &tp, @@ -733,13 +794,13 @@ static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi) while (!tstatus && slot < MSPI_NUM_CDRAM) { if (tp.trans->bits_per_word <= 8) { const u8 *buf = tp.trans->tx_buf; - u8 val = buf ? buf[tp.byte] : 0xff; + u8 val = buf ? buf[tp.byte] : 0x00; write_txram_slot_u8(qspi, slot, val); dev_dbg(&qspi->pdev->dev, "WR %02x\n", val); } else { const u16 *buf = tp.trans->tx_buf; - u16 val = buf ? buf[tp.byte / 2] : 0xffff; + u16 val = buf ? buf[tp.byte / 2] : 0x0000; write_txram_slot_u16(qspi, slot, val); dev_dbg(&qspi->pdev->dev, "WR %04x\n", val); @@ -771,7 +832,16 @@ static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi) bcm_qspi_write(qspi, MSPI, MSPI_NEWQP, 0); bcm_qspi_write(qspi, MSPI, MSPI_ENDQP, slot - 1); - if (tstatus & TRANS_STATUS_BREAK_DESELECT) { + /* + * case 1) EOM =1, cs_change =0: SSb inactive + * case 2) EOM =1, cs_change =1: SSb stay active + * case 3) EOM =0, cs_change =0: SSb stay active + * case 4) EOM =0, cs_change =1: SSb inactive + */ + if (((tstatus & TRANS_STATUS_BREAK_DESELECT) + == TRANS_STATUS_BREAK_CS_CHANGE) || + ((tstatus & TRANS_STATUS_BREAK_DESELECT) + == TRANS_STATUS_BREAK_EOM)) { mspi_cdram = read_cdram_slot(qspi, slot - 1) & ~MSPI_CDRAM_CONT_BIT; write_cdram_slot(qspi, slot - 1, mspi_cdram); @@ -1190,8 +1260,51 @@ static const struct spi_controller_mem_ops bcm_qspi_mem_ops = { .exec_op = bcm_qspi_exec_mem_op, }; +struct bcm_qspi_data { + bool has_mspi_rev; + bool has_spcr3_sysclk; +}; + +static const struct bcm_qspi_data bcm_qspi_no_rev_data = { + .has_mspi_rev = false, + .has_spcr3_sysclk = false, +}; + +static const struct bcm_qspi_data bcm_qspi_rev_data = { + .has_mspi_rev = true, + .has_spcr3_sysclk = false, +}; + +static const struct bcm_qspi_data bcm_qspi_spcr3_data = { + .has_mspi_rev = true, + .has_spcr3_sysclk = true, +}; + static const struct of_device_id bcm_qspi_of_match[] = { - { .compatible = "brcm,spi-bcm-qspi" }, + { + .compatible = "brcm,spi-bcm7425-qspi", + .data = &bcm_qspi_no_rev_data, + }, + { + .compatible = "brcm,spi-bcm7429-qspi", + .data = &bcm_qspi_no_rev_data, + }, + { + .compatible = "brcm,spi-bcm7435-qspi", + .data = &bcm_qspi_no_rev_data, + }, + { + .compatible = "brcm,spi-bcm-qspi", + .data = &bcm_qspi_rev_data, + }, + { + .compatible = "brcm,spi-bcm7216-qspi", + .data = &bcm_qspi_spcr3_data, + }, + { + .compatible = "brcm,spi-bcm7278-qspi", + .data = &bcm_qspi_spcr3_data, + }, {}, }; MODULE_DEVICE_TABLE(of, bcm_qspi_of_match); @@ -1199,12 +1312,15 @@ MODULE_DEVICE_TABLE(of, bcm_qspi_of_match); int bcm_qspi_probe(struct platform_device *pdev, struct bcm_qspi_soc_intc *soc_intc) { + const struct of_device_id *of_id = NULL; + const struct bcm_qspi_data *data; struct device *dev = &pdev->dev; struct bcm_qspi *qspi; struct spi_master *master; struct resource *res; int irq, ret = 0, num_ints = 0; u32 val; + u32 rev = 0; const char *name = NULL; int num_irqs = ARRAY_SIZE(qspi_irq_tab); @@ -1212,9 +1328,12 @@ int bcm_qspi_probe(struct platform_device *pdev, if (!dev->of_node) return -ENODEV; - if (!of_match_node(bcm_qspi_of_match, dev->of_node)) + of_id = of_match_node(bcm_qspi_of_match, dev->of_node); + if (!of_id) return -ENODEV; + data = of_id->data; + master = spi_alloc_master(dev, sizeof(struct bcm_qspi)); if (!master) { dev_err(dev, "error allocating spi_master\n"); @@ -1222,6 +1341,11 @@ int bcm_qspi_probe(struct platform_device *pdev, } qspi = spi_master_get_devdata(master); + + qspi->clk = 
devm_clk_get_optional(&pdev->dev, NULL); + if (IS_ERR(qspi->clk)) + return PTR_ERR(qspi->clk); + qspi->pdev = pdev; qspi->trans_pos.trans = NULL; qspi->trans_pos.byte = 0; @@ -1335,13 +1459,6 @@ int bcm_qspi_probe(struct platform_device *pdev, qspi->soc_intc = NULL; } - qspi->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(qspi->clk)) { - dev_warn(dev, "unable to get clock\n"); - ret = PTR_ERR(qspi->clk); - goto qspi_probe_err; - } - ret = clk_prepare_enable(qspi->clk); if (ret) { dev_err(dev, "failed to prepare clock\n"); @@ -1349,7 +1466,19 @@ int bcm_qspi_probe(struct platform_device *pdev, } qspi->base_clk = clk_get_rate(qspi->clk); - qspi->max_speed_hz = qspi->base_clk / (QSPI_SPBR_MIN * 2); + + if (data->has_mspi_rev) { + rev = bcm_qspi_read(qspi, MSPI, MSPI_REV); + /* some older revs do not have a MSPI_REV register */ + if ((rev & 0xff) == 0xff) + rev = 0; + } + + qspi->mspi_maj_rev = (rev >> 4) & 0xf; + qspi->mspi_min_rev = rev & 0xf; + qspi->mspi_spcr3_sysclk = data->has_spcr3_sysclk; + + qspi->max_speed_hz = qspi->base_clk / (bcm_qspi_spbr_min(qspi) * 2); bcm_qspi_hw_init(qspi); init_completion(&qspi->mspi_done); @@ -1406,7 +1535,7 @@ static int __maybe_unused bcm_qspi_suspend(struct device *dev) bcm_qspi_read(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL); spi_master_suspend(qspi->master); - clk_disable(qspi->clk); + clk_disable_unprepare(qspi->clk); bcm_qspi_hw_uninit(qspi); return 0; @@ -1424,7 +1553,7 @@ static int __maybe_unused bcm_qspi_resume(struct device *dev) qspi->soc_intc->bcm_qspi_int_set(qspi->soc_intc, MSPI_DONE, true); - ret = clk_enable(qspi->clk); + ret = clk_prepare_enable(qspi->clk); if (!ret) spi_master_resume(qspi->master); diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c index 11c235879bb7..237bd306c268 100644 --- a/drivers/spi/spi-bcm2835.c +++ b/drivers/spi/spi-bcm2835.c @@ -191,12 +191,12 @@ static void bcm2835_debugfs_remove(struct bcm2835_spi *bs) } #endif /* CONFIG_DEBUG_FS */ -static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned reg) +static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned int reg) { return readl(bs->regs + reg); } -static inline void bcm2835_wr(struct bcm2835_spi *bs, unsigned reg, u32 val) +static inline void bcm2835_wr(struct bcm2835_spi *bs, unsigned int reg, u32 val) { writel(val, bs->regs + reg); } @@ -940,6 +940,7 @@ static int bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev, if (dma_mapping_error(ctlr->dma_tx->device->dev, bs->fill_tx_addr)) { dev_err(dev, "cannot map zero page - not using DMA mode\n"); bs->fill_tx_addr = 0; + ret = -ENOMEM; goto err_release; } @@ -949,6 +950,7 @@ static int bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev, DMA_MEM_TO_DEV, 0); if (!bs->fill_tx_desc) { dev_err(dev, "cannot prepare fill_tx_desc - not using DMA mode\n"); + ret = -ENOMEM; goto err_release; } @@ -979,6 +981,7 @@ static int bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev, if (dma_mapping_error(ctlr->dma_rx->device->dev, bs->clear_rx_addr)) { dev_err(dev, "cannot map clear_rx_cs - not using DMA mode\n"); bs->clear_rx_addr = 0; + ret = -ENOMEM; goto err_release; } @@ -989,6 +992,7 @@ static int bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev, DMA_MEM_TO_DEV, 0); if (!bs->clear_rx_desc[i]) { dev_err(dev, "cannot prepare clear_rx_desc - not using DMA mode\n"); + ret = -ENOMEM; goto err_release; } @@ -1347,7 +1351,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev) goto out_dma_release; } - err = 
devm_spi_register_controller(&pdev->dev, ctlr); + err = spi_register_controller(ctlr); if (err) { dev_err(&pdev->dev, "could not register SPI controller: %d\n", err); @@ -1374,17 +1378,28 @@ static int bcm2835_spi_remove(struct platform_device *pdev) bcm2835_debugfs_remove(bs); + spi_unregister_controller(ctlr); + + bcm2835_dma_release(ctlr, bs); + /* Clear FIFOs, and disable the HW block */ bcm2835_wr(bs, BCM2835_SPI_CS, BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX); clk_disable_unprepare(bs->clk); - bcm2835_dma_release(ctlr, bs); - return 0; } +static void bcm2835_spi_shutdown(struct platform_device *pdev) +{ + int ret; + + ret = bcm2835_spi_remove(pdev); + if (ret) + dev_err(&pdev->dev, "failed to shutdown\n"); +} + static const struct of_device_id bcm2835_spi_match[] = { { .compatible = "brcm,bcm2835-spi", }, {} @@ -1398,6 +1413,7 @@ static struct platform_driver bcm2835_spi_driver = { }, .probe = bcm2835_spi_probe, .remove = bcm2835_spi_remove, + .shutdown = bcm2835_spi_shutdown, }; module_platform_driver(bcm2835_spi_driver); diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c index a2162ff56a12..c331efd6e86b 100644 --- a/drivers/spi/spi-bcm2835aux.c +++ b/drivers/spi/spi-bcm2835aux.c @@ -569,7 +569,7 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev) goto out_clk_disable; } - err = devm_spi_register_master(&pdev->dev, master); + err = spi_register_master(master); if (err) { dev_err(&pdev->dev, "could not register SPI master: %d\n", err); goto out_clk_disable; @@ -593,6 +593,8 @@ static int bcm2835aux_spi_remove(struct platform_device *pdev) bcm2835aux_debugfs_remove(bs); + spi_unregister_master(master); + bcm2835aux_spi_reset_hw(bs); /* disable the HW block by releasing the clock */ diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw-core.c index 31e3f866d11a..323c66c5db50 100644 --- a/drivers/spi/spi-dw.c +++ b/drivers/spi/spi-dw-core.c @@ -24,74 +24,34 @@ struct chip_data { u8 tmode; /* TR/TO/RO/EEPROM */ u8 type; /* SPI/SSP/MicroWire */ - u8 poll_mode; /* 1 means use poll mode */ - u16 clk_div; /* baud rate divider */ u32 speed_hz; /* baud rate */ - void (*cs_control)(u32 command); }; #ifdef CONFIG_DEBUG_FS -#define SPI_REGS_BUFSIZE 1024 -static ssize_t dw_spi_show_regs(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct dw_spi *dws = file->private_data; - char *buf; - u32 len = 0; - ssize_t ret; - - buf = kzalloc(SPI_REGS_BUFSIZE, GFP_KERNEL); - if (!buf) - return 0; - - len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, - "%s registers:\n", dev_name(&dws->master->dev)); - len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, - "=================================\n"); - len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, - "CTRL0: \t\t0x%08x\n", dw_readl(dws, DW_SPI_CTRL0)); - len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, - "CTRL1: \t\t0x%08x\n", dw_readl(dws, DW_SPI_CTRL1)); - len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, - "SSIENR: \t0x%08x\n", dw_readl(dws, DW_SPI_SSIENR)); - len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, - "SER: \t\t0x%08x\n", dw_readl(dws, DW_SPI_SER)); - len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, - "BAUDR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_BAUDR)); - len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, - "TXFTLR: \t0x%08x\n", dw_readl(dws, DW_SPI_TXFLTR)); - len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, - "RXFTLR: \t0x%08x\n", dw_readl(dws, DW_SPI_RXFLTR)); - len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, - "TXFLR: \t\t0x%08x\n", dw_readl(dws, 
DW_SPI_TXFLR)); - len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, - "RXFLR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_RXFLR)); - len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, - "SR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_SR)); - len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, - "IMR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_IMR)); - len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, - "ISR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_ISR)); - len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, - "DMACR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_DMACR)); - len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, - "DMATDLR: \t0x%08x\n", dw_readl(dws, DW_SPI_DMATDLR)); - len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, - "DMARDLR: \t0x%08x\n", dw_readl(dws, DW_SPI_DMARDLR)); - len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, - "=================================\n"); - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); - kfree(buf); - return ret; + +#define DW_SPI_DBGFS_REG(_name, _off) \ +{ \ + .name = _name, \ + .offset = _off, \ } -static const struct file_operations dw_spi_regs_ops = { - .owner = THIS_MODULE, - .open = simple_open, - .read = dw_spi_show_regs, - .llseek = default_llseek, +static const struct debugfs_reg32 dw_spi_dbgfs_regs[] = { + DW_SPI_DBGFS_REG("CTRLR0", DW_SPI_CTRLR0), + DW_SPI_DBGFS_REG("CTRLR1", DW_SPI_CTRLR1), + DW_SPI_DBGFS_REG("SSIENR", DW_SPI_SSIENR), + DW_SPI_DBGFS_REG("SER", DW_SPI_SER), + DW_SPI_DBGFS_REG("BAUDR", DW_SPI_BAUDR), + DW_SPI_DBGFS_REG("TXFTLR", DW_SPI_TXFTLR), + DW_SPI_DBGFS_REG("RXFTLR", DW_SPI_RXFTLR), + DW_SPI_DBGFS_REG("TXFLR", DW_SPI_TXFLR), + DW_SPI_DBGFS_REG("RXFLR", DW_SPI_RXFLR), + DW_SPI_DBGFS_REG("SR", DW_SPI_SR), + DW_SPI_DBGFS_REG("IMR", DW_SPI_IMR), + DW_SPI_DBGFS_REG("ISR", DW_SPI_ISR), + DW_SPI_DBGFS_REG("DMACR", DW_SPI_DMACR), + DW_SPI_DBGFS_REG("DMATDLR", DW_SPI_DMATDLR), + DW_SPI_DBGFS_REG("DMARDLR", DW_SPI_DMARDLR), }; static int dw_spi_debugfs_init(struct dw_spi *dws) @@ -103,8 +63,11 @@ static int dw_spi_debugfs_init(struct dw_spi *dws) if (!dws->debugfs) return -ENOMEM; - debugfs_create_file("registers", S_IFREG | S_IRUGO, - dws->debugfs, (void *)dws, &dw_spi_regs_ops); + dws->regset.regs = dw_spi_dbgfs_regs; + dws->regset.nregs = ARRAY_SIZE(dw_spi_dbgfs_regs); + dws->regset.base = dws->regs; + debugfs_create_regset32("registers", 0400, dws->debugfs, &dws->regset); + return 0; } @@ -127,13 +90,16 @@ static inline void dw_spi_debugfs_remove(struct dw_spi *dws) void dw_spi_set_cs(struct spi_device *spi, bool enable) { struct dw_spi *dws = spi_controller_get_devdata(spi->controller); - struct chip_data *chip = spi_get_ctldata(spi); - - /* Chip select logic is inverted from spi_set_cs() */ - if (chip && chip->cs_control) - chip->cs_control(!enable); + bool cs_high = !!(spi->mode & SPI_CS_HIGH); - if (!enable) + /* + * DW SPI controller demands any native CS being set in order to + * proceed with data transfer. So in order to activate the SPI + * communications we must set a corresponding bit in the Slave + * Enable register no matter whether the SPI core is configured to + * support active-high or active-low CS level. 
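The comment above explains why dw_spi_set_cs() must drive the Slave Enable register for both chip-select polarities; the check that follows it reduces to "assert SER whenever the requested line level matches the device's active level". A standalone sketch of just that decision, with a hypothetical ser_write() stub in place of the real dw_writel() accessor:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for dw_writel(dws, DW_SPI_SER, val). */
static void ser_write(unsigned int val)
{
	printf("SER <= 0x%x\n", val);
}

/*
 * cs_high : device uses an active-high chip select (SPI_CS_HIGH)
 * enable  : line level requested by the SPI core
 *
 * cs_high | enable | meaning            | SER
 * --------+--------+--------------------+-----------------------
 *    0    |   0    | assert active-low  | BIT(chip_select)
 *    0    |   1    | deassert           | 0 (only if cs_override)
 *    1    |   1    | assert active-high | BIT(chip_select)
 *    1    |   0    | deassert           | 0 (only if cs_override)
 */
static void set_cs(bool cs_high, bool enable, unsigned int chip_select,
		   bool cs_override)
{
	if (cs_high == enable)
		ser_write(1U << chip_select);
	else if (cs_override)
		ser_write(0);
}

int main(void)
{
	set_cs(false, false, 0, true);	/* active-low CS 0 asserted */
	set_cs(false, true, 0, true);	/* active-low CS 0 released */
	return 0;
}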
+ */ + if (cs_high == enable) dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select)); else if (dws->cs_override) dw_writel(dws, DW_SPI_SER, 0); @@ -265,17 +231,56 @@ static irqreturn_t dw_spi_irq(int irq, void *dev_id) return dws->transfer_handler(dws); } -/* Must be called inside pump_transfers() */ -static int poll_transfer(struct dw_spi *dws) +/* Configure CTRLR0 for DW_apb_ssi */ +u32 dw_spi_update_cr0(struct spi_controller *master, struct spi_device *spi, + struct spi_transfer *transfer) { - do { - dw_writer(dws); - dw_reader(dws); - cpu_relax(); - } while (dws->rx_end > dws->rx); + struct chip_data *chip = spi_get_ctldata(spi); + u32 cr0; - return 0; + /* Default SPI mode is SCPOL = 0, SCPH = 0 */ + cr0 = (transfer->bits_per_word - 1) + | (chip->type << SPI_FRF_OFFSET) + | ((((spi->mode & SPI_CPOL) ? 1 : 0) << SPI_SCOL_OFFSET) | + (((spi->mode & SPI_CPHA) ? 1 : 0) << SPI_SCPH_OFFSET) | + (((spi->mode & SPI_LOOP) ? 1 : 0) << SPI_SRL_OFFSET)) + | (chip->tmode << SPI_TMOD_OFFSET); + + return cr0; +} +EXPORT_SYMBOL_GPL(dw_spi_update_cr0); + +/* Configure CTRLR0 for DWC_ssi */ +u32 dw_spi_update_cr0_v1_01a(struct spi_controller *master, + struct spi_device *spi, + struct spi_transfer *transfer) +{ + struct chip_data *chip = spi_get_ctldata(spi); + u32 cr0; + + /* CTRLR0[ 4: 0] Data Frame Size */ + cr0 = (transfer->bits_per_word - 1); + + /* CTRLR0[ 7: 6] Frame Format */ + cr0 |= chip->type << DWC_SSI_CTRLR0_FRF_OFFSET; + + /* + * SPI mode (SCPOL|SCPH) + * CTRLR0[ 8] Serial Clock Phase + * CTRLR0[ 9] Serial Clock Polarity + */ + cr0 |= ((spi->mode & SPI_CPOL) ? 1 : 0) << DWC_SSI_CTRLR0_SCPOL_OFFSET; + cr0 |= ((spi->mode & SPI_CPHA) ? 1 : 0) << DWC_SSI_CTRLR0_SCPH_OFFSET; + + /* CTRLR0[11:10] Transfer Mode */ + cr0 |= chip->tmode << DWC_SSI_CTRLR0_TMOD_OFFSET; + + /* CTRLR0[13] Shift Register Loop */ + cr0 |= ((spi->mode & SPI_LOOP) ? 1 : 0) << DWC_SSI_CTRLR0_SRL_OFFSET; + + return cr0; } +EXPORT_SYMBOL_GPL(dw_spi_update_cr0_v1_01a); static int dw_spi_transfer_one(struct spi_controller *master, struct spi_device *spi, struct spi_transfer *transfer) @@ -313,34 +318,11 @@ static int dw_spi_transfer_one(struct spi_controller *master, spi_set_clk(dws, chip->clk_div); } + transfer->effective_speed_hz = dws->max_freq / chip->clk_div; dws->n_bytes = DIV_ROUND_UP(transfer->bits_per_word, BITS_PER_BYTE); - dws->dma_width = DIV_ROUND_UP(transfer->bits_per_word, BITS_PER_BYTE); - - /* Default SPI mode is SCPOL = 0, SCPH = 0 */ - cr0 = (transfer->bits_per_word - 1) - | (chip->type << SPI_FRF_OFFSET) - | ((((spi->mode & SPI_CPOL) ? 1 : 0) << SPI_SCOL_OFFSET) | - (((spi->mode & SPI_CPHA) ? 1 : 0) << SPI_SCPH_OFFSET) | - (((spi->mode & SPI_LOOP) ? 1 : 0) << SPI_SRL_OFFSET)) - | (chip->tmode << SPI_TMOD_OFFSET); - /* - * Adjust transfer mode if necessary. Requires platform dependent - * chipselect mechanism. 
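The dw_spi_update_cr0_v1_01a() helper added above documents the DWC_ssi CTRLR0 layout in its comments. A standalone sketch using those same bit positions shows the value produced for a common configuration; the 8-bit, mode 3 inputs below are example numbers only:

#include <stdint.h>
#include <stdio.h>

/* Bit positions as documented in the dw_spi_update_cr0_v1_01a() hunk:
 * DFS [4:0], FRF [7:6], SCPH bit 8, SCPOL bit 9, TMOD [11:10], SRL bit 13. */
#define DWC_SSI_CTRLR0_FRF_OFFSET	6
#define DWC_SSI_CTRLR0_SCPH_OFFSET	8
#define DWC_SSI_CTRLR0_SCPOL_OFFSET	9
#define DWC_SSI_CTRLR0_TMOD_OFFSET	10
#define DWC_SSI_CTRLR0_SRL_OFFSET	13

static uint32_t dwc_ssi_cr0(unsigned int bits_per_word, unsigned int frf,
			    int cpol, int cpha, unsigned int tmod, int loop)
{
	uint32_t cr0 = bits_per_word - 1;	/* Data Frame Size */

	cr0 |= frf << DWC_SSI_CTRLR0_FRF_OFFSET;
	cr0 |= (cpha ? 1u : 0u) << DWC_SSI_CTRLR0_SCPH_OFFSET;
	cr0 |= (cpol ? 1u : 0u) << DWC_SSI_CTRLR0_SCPOL_OFFSET;
	cr0 |= tmod << DWC_SSI_CTRLR0_TMOD_OFFSET;
	cr0 |= (loop ? 1u : 0u) << DWC_SSI_CTRLR0_SRL_OFFSET;
	return cr0;
}

int main(void)
{
	/* 8-bit frames, Motorola SPI (FRF = 0), mode 3, TX/RX (TMOD = 0),
	 * no loopback: 0x7 | (1 << 8) | (1 << 9) = 0x307 */
	printf("CTRLR0 = 0x%x\n", dwc_ssi_cr0(8, 0, 1, 1, 0, 0));
	return 0;
}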
- */ - if (chip->cs_control) { - if (dws->rx && dws->tx) - chip->tmode = SPI_TMOD_TR; - else if (dws->rx) - chip->tmode = SPI_TMOD_RO; - else - chip->tmode = SPI_TMOD_TO; - - cr0 &= ~SPI_TMOD_MASK; - cr0 |= (chip->tmode << SPI_TMOD_OFFSET); - } - - dw_writel(dws, DW_SPI_CTRL0, cr0); + cr0 = dws->update_cr0(master, spi, transfer); + dw_writel(dws, DW_SPI_CTRLR0, cr0); /* Check if current transfer is a DMA transaction */ if (master->can_dma && master->can_dma(master, spi, transfer)) @@ -359,9 +341,9 @@ static int dw_spi_transfer_one(struct spi_controller *master, spi_enable_chip(dws, 1); return ret; } - } else if (!chip->poll_mode) { + } else { txlevel = min_t(u16, dws->fifo_len / 2, dws->len / dws->n_bytes); - dw_writel(dws, DW_SPI_TXFLTR, txlevel); + dw_writel(dws, DW_SPI_TXFTLR, txlevel); /* Set the interrupt mask */ imask |= SPI_INT_TXEI | SPI_INT_TXOI | @@ -373,14 +355,8 @@ static int dw_spi_transfer_one(struct spi_controller *master, spi_enable_chip(dws, 1); - if (dws->dma_mapped) { - ret = dws->dma_ops->dma_transfer(dws, transfer); - if (ret < 0) - return ret; - } - - if (chip->poll_mode) - return poll_transfer(dws); + if (dws->dma_mapped) + return dws->dma_ops->dma_transfer(dws, transfer); return 1; } @@ -399,7 +375,6 @@ static void dw_spi_handle_err(struct spi_controller *master, /* This may be called twice for each spi dev */ static int dw_spi_setup(struct spi_device *spi) { - struct dw_spi_chip *chip_info = NULL; struct chip_data *chip; /* Only alloc on first setup */ @@ -411,21 +386,6 @@ static int dw_spi_setup(struct spi_device *spi) spi_set_ctldata(spi, chip); } - /* - * Protocol drivers may change the chip settings, so... - * if chip_info exists, use it - */ - chip_info = spi->controller_data; - - /* chip_info doesn't always exist */ - if (chip_info) { - if (chip_info->cs_control) - chip->cs_control = chip_info->cs_control; - - chip->poll_mode = chip_info->poll_mode; - chip->type = chip_info->type; - } - chip->tmode = SPI_TMOD_TR; return 0; @@ -452,11 +412,11 @@ static void spi_hw_init(struct device *dev, struct dw_spi *dws) u32 fifo; for (fifo = 1; fifo < 256; fifo++) { - dw_writel(dws, DW_SPI_TXFLTR, fifo); - if (fifo != dw_readl(dws, DW_SPI_TXFLTR)) + dw_writel(dws, DW_SPI_TXFTLR, fifo); + if (fifo != dw_readl(dws, DW_SPI_TXFTLR)) break; } - dw_writel(dws, DW_SPI_TXFLTR, 0); + dw_writel(dws, DW_SPI_TXFTLR, 0); dws->fifo_len = (fifo == 1) ? 
0 : fifo; dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len); @@ -481,7 +441,6 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) dws->master = master; dws->type = SSI_MOTO_SPI; - dws->dma_inited = 0; dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR); spin_lock_init(&dws->buf_lock); @@ -517,16 +476,16 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) spi_hw_init(dev, dws); if (dws->dma_ops && dws->dma_ops->dma_init) { - ret = dws->dma_ops->dma_init(dws); + ret = dws->dma_ops->dma_init(dev, dws); if (ret) { dev_warn(dev, "DMA init failed\n"); - dws->dma_inited = 0; } else { master->can_dma = dws->dma_ops->can_dma; + master->flags |= SPI_CONTROLLER_MUST_TX; } } - ret = devm_spi_register_controller(dev, master); + ret = spi_register_controller(master); if (ret) { dev_err(&master->dev, "problem registering spi master\n"); goto err_dma_exit; @@ -550,6 +509,8 @@ void dw_spi_remove_host(struct dw_spi *dws) { dw_spi_debugfs_remove(dws); + spi_unregister_controller(dws->master); + if (dws->dma_ops && dws->dma_ops->dma_exit) dws->dma_ops->dma_exit(dws); diff --git a/drivers/spi/spi-dw-dma.c b/drivers/spi/spi-dw-dma.c new file mode 100644 index 000000000000..5986c520b196 --- /dev/null +++ b/drivers/spi/spi-dw-dma.c @@ -0,0 +1,480 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Special handling for DW DMA core + * + * Copyright (c) 2009, 2014 Intel Corporation. + */ + +#include <linux/completion.h> +#include <linux/dma-mapping.h> +#include <linux/dmaengine.h> +#include <linux/irqreturn.h> +#include <linux/jiffies.h> +#include <linux/pci.h> +#include <linux/platform_data/dma-dw.h> +#include <linux/spi/spi.h> +#include <linux/types.h> + +#include "spi-dw.h" + +#define WAIT_RETRIES 5 +#define RX_BUSY 0 +#define RX_BURST_LEVEL 16 +#define TX_BUSY 1 +#define TX_BURST_LEVEL 16 + +static bool dw_spi_dma_chan_filter(struct dma_chan *chan, void *param) +{ + struct dw_dma_slave *s = param; + + if (s->dma_dev != chan->device->dev) + return false; + + chan->private = s; + return true; +} + +static void dw_spi_dma_maxburst_init(struct dw_spi *dws) +{ + struct dma_slave_caps caps; + u32 max_burst, def_burst; + int ret; + + def_burst = dws->fifo_len / 2; + + ret = dma_get_slave_caps(dws->rxchan, &caps); + if (!ret && caps.max_burst) + max_burst = caps.max_burst; + else + max_burst = RX_BURST_LEVEL; + + dws->rxburst = min(max_burst, def_burst); + + ret = dma_get_slave_caps(dws->txchan, &caps); + if (!ret && caps.max_burst) + max_burst = caps.max_burst; + else + max_burst = TX_BURST_LEVEL; + + dws->txburst = min(max_burst, def_burst); +} + +static int dw_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws) +{ + struct dw_dma_slave dma_tx = { .dst_id = 1 }, *tx = &dma_tx; + struct dw_dma_slave dma_rx = { .src_id = 0 }, *rx = &dma_rx; + struct pci_dev *dma_dev; + dma_cap_mask_t mask; + + /* + * Get pci device for DMA controller, currently it could only + * be the DMA controller of Medfield + */ + dma_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL); + if (!dma_dev) + return -ENODEV; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + /* 1. Init rx channel */ + rx->dma_dev = &dma_dev->dev; + dws->rxchan = dma_request_channel(mask, dw_spi_dma_chan_filter, rx); + if (!dws->rxchan) + goto err_exit; + + /* 2. 
Init tx channel */ + tx->dma_dev = &dma_dev->dev; + dws->txchan = dma_request_channel(mask, dw_spi_dma_chan_filter, tx); + if (!dws->txchan) + goto free_rxchan; + + dws->master->dma_rx = dws->rxchan; + dws->master->dma_tx = dws->txchan; + + init_completion(&dws->dma_completion); + + dw_spi_dma_maxburst_init(dws); + + return 0; + +free_rxchan: + dma_release_channel(dws->rxchan); + dws->rxchan = NULL; +err_exit: + return -EBUSY; +} + +static int dw_spi_dma_init_generic(struct device *dev, struct dw_spi *dws) +{ + dws->rxchan = dma_request_slave_channel(dev, "rx"); + if (!dws->rxchan) + return -ENODEV; + + dws->txchan = dma_request_slave_channel(dev, "tx"); + if (!dws->txchan) { + dma_release_channel(dws->rxchan); + dws->rxchan = NULL; + return -ENODEV; + } + + dws->master->dma_rx = dws->rxchan; + dws->master->dma_tx = dws->txchan; + + init_completion(&dws->dma_completion); + + dw_spi_dma_maxburst_init(dws); + + return 0; +} + +static void dw_spi_dma_exit(struct dw_spi *dws) +{ + if (dws->txchan) { + dmaengine_terminate_sync(dws->txchan); + dma_release_channel(dws->txchan); + } + + if (dws->rxchan) { + dmaengine_terminate_sync(dws->rxchan); + dma_release_channel(dws->rxchan); + } + + dw_writel(dws, DW_SPI_DMACR, 0); +} + +static irqreturn_t dw_spi_dma_transfer_handler(struct dw_spi *dws) +{ + u16 irq_status = dw_readl(dws, DW_SPI_ISR); + + if (!irq_status) + return IRQ_NONE; + + dw_readl(dws, DW_SPI_ICR); + spi_reset_chip(dws); + + dev_err(&dws->master->dev, "%s: FIFO overrun/underrun\n", __func__); + dws->master->cur_msg->status = -EIO; + complete(&dws->dma_completion); + return IRQ_HANDLED; +} + +static bool dw_spi_can_dma(struct spi_controller *master, + struct spi_device *spi, struct spi_transfer *xfer) +{ + struct dw_spi *dws = spi_controller_get_devdata(master); + + return xfer->len > dws->fifo_len; +} + +static enum dma_slave_buswidth dw_spi_dma_convert_width(u8 n_bytes) +{ + if (n_bytes == 1) + return DMA_SLAVE_BUSWIDTH_1_BYTE; + else if (n_bytes == 2) + return DMA_SLAVE_BUSWIDTH_2_BYTES; + + return DMA_SLAVE_BUSWIDTH_UNDEFINED; +} + +static int dw_spi_dma_wait(struct dw_spi *dws, struct spi_transfer *xfer) +{ + unsigned long long ms; + + ms = xfer->len * MSEC_PER_SEC * BITS_PER_BYTE; + do_div(ms, xfer->effective_speed_hz); + ms += ms + 200; + + if (ms > UINT_MAX) + ms = UINT_MAX; + + ms = wait_for_completion_timeout(&dws->dma_completion, + msecs_to_jiffies(ms)); + + if (ms == 0) { + dev_err(&dws->master->cur_msg->spi->dev, + "DMA transaction timed out\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static inline bool dw_spi_dma_tx_busy(struct dw_spi *dws) +{ + return !(dw_readl(dws, DW_SPI_SR) & SR_TF_EMPT); +} + +static int dw_spi_dma_wait_tx_done(struct dw_spi *dws, + struct spi_transfer *xfer) +{ + int retry = WAIT_RETRIES; + struct spi_delay delay; + u32 nents; + + nents = dw_readl(dws, DW_SPI_TXFLR); + delay.unit = SPI_DELAY_UNIT_SCK; + delay.value = nents * dws->n_bytes * BITS_PER_BYTE; + + while (dw_spi_dma_tx_busy(dws) && retry--) + spi_delay_exec(&delay, xfer); + + if (retry < 0) { + dev_err(&dws->master->dev, "Tx hanged up\n"); + return -EIO; + } + + return 0; +} + +/* + * dws->dma_chan_busy is set before the dma transfer starts, callback for tx + * channel will clear a corresponding bit. 
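dw_spi_dma_wait() above derives its completion timeout from the transfer length and the effective SCLK rate. The same arithmetic as a standalone sketch, with 4 KiB at 10 MHz as example inputs:

#include <stdio.h>

#define MSEC_PER_SEC	1000ULL
#define BITS_PER_BYTE	8ULL
#define UINT_MAX_ULL	0xffffffffULL

/* Time on the wire for the whole transfer, doubled, plus a 200 ms margin,
 * capped at UINT_MAX -- mirroring the do_div()-based kernel code above. */
static unsigned int dma_wait_ms(unsigned long long len_bytes,
				unsigned long long effective_speed_hz)
{
	unsigned long long ms;

	ms = len_bytes * MSEC_PER_SEC * BITS_PER_BYTE;
	ms /= effective_speed_hz;
	ms += ms + 200;

	return ms > UINT_MAX_ULL ? (unsigned int)UINT_MAX_ULL : (unsigned int)ms;
}

int main(void)
{
	/* 4096 bytes at an effective 10 MHz: 3 ms on the wire (integer
	 * division), so the helper waits up to 2 * 3 + 200 = 206 ms. */
	printf("%u ms\n", dma_wait_ms(4096, 10000000));
	return 0;
}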
+ */ +static void dw_spi_dma_tx_done(void *arg) +{ + struct dw_spi *dws = arg; + + clear_bit(TX_BUSY, &dws->dma_chan_busy); + if (test_bit(RX_BUSY, &dws->dma_chan_busy)) + return; + + dw_writel(dws, DW_SPI_DMACR, 0); + complete(&dws->dma_completion); +} + +static struct dma_async_tx_descriptor * +dw_spi_dma_prepare_tx(struct dw_spi *dws, struct spi_transfer *xfer) +{ + struct dma_slave_config txconf; + struct dma_async_tx_descriptor *txdesc; + + if (!xfer->tx_buf) + return NULL; + + memset(&txconf, 0, sizeof(txconf)); + txconf.direction = DMA_MEM_TO_DEV; + txconf.dst_addr = dws->dma_addr; + txconf.dst_maxburst = dws->txburst; + txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + txconf.dst_addr_width = dw_spi_dma_convert_width(dws->n_bytes); + txconf.device_fc = false; + + dmaengine_slave_config(dws->txchan, &txconf); + + txdesc = dmaengine_prep_slave_sg(dws->txchan, + xfer->tx_sg.sgl, + xfer->tx_sg.nents, + DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!txdesc) + return NULL; + + txdesc->callback = dw_spi_dma_tx_done; + txdesc->callback_param = dws; + + return txdesc; +} + +static inline bool dw_spi_dma_rx_busy(struct dw_spi *dws) +{ + return !!(dw_readl(dws, DW_SPI_SR) & SR_RF_NOT_EMPT); +} + +static int dw_spi_dma_wait_rx_done(struct dw_spi *dws) +{ + int retry = WAIT_RETRIES; + struct spi_delay delay; + unsigned long ns, us; + u32 nents; + + /* + * It's unlikely that DMA engine is still doing the data fetching, but + * if it's let's give it some reasonable time. The timeout calculation + * is based on the synchronous APB/SSI reference clock rate, on a + * number of data entries left in the Rx FIFO, times a number of clock + * periods normally needed for a single APB read/write transaction + * without PREADY signal utilized (which is true for the DW APB SSI + * controller). + */ + nents = dw_readl(dws, DW_SPI_RXFLR); + ns = 4U * NSEC_PER_SEC / dws->max_freq * nents; + if (ns <= NSEC_PER_USEC) { + delay.unit = SPI_DELAY_UNIT_NSECS; + delay.value = ns; + } else { + us = DIV_ROUND_UP(ns, NSEC_PER_USEC); + delay.unit = SPI_DELAY_UNIT_USECS; + delay.value = clamp_val(us, 0, USHRT_MAX); + } + + while (dw_spi_dma_rx_busy(dws) && retry--) + spi_delay_exec(&delay, NULL); + + if (retry < 0) { + dev_err(&dws->master->dev, "Rx hanged up\n"); + return -EIO; + } + + return 0; +} + +/* + * dws->dma_chan_busy is set before the dma transfer starts, callback for rx + * channel will clear a corresponding bit. 
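dw_spi_dma_wait_rx_done() above estimates the Rx FIFO drain time as roughly four reference-clock periods per entry still queued, then picks an ns- or us-based delay unit. A standalone sketch of that estimate; the 50 MHz clock and 32 pending entries are example inputs only:

#include <stdio.h>

#define NSEC_PER_SEC	1000000000UL
#define NSEC_PER_USEC	1000UL

/* ~4 APB/SSI clock periods per FIFO entry left to drain, as in the
 * dw_spi_dma_wait_rx_done() hunk above. */
static void rx_drain_delay(unsigned long apb_hz, unsigned long nents)
{
	unsigned long ns = 4UL * NSEC_PER_SEC / apb_hz * nents;

	if (ns <= NSEC_PER_USEC)
		printf("delay %lu ns\n", ns);
	else
		printf("delay %lu us\n",
		       (ns + NSEC_PER_USEC - 1) / NSEC_PER_USEC);
}

int main(void)
{
	/* 50 MHz reference clock, 32 entries in the Rx FIFO:
	 * 4 * 20 ns * 32 = 2560 ns, rounded up to 3 us. */
	rx_drain_delay(50000000UL, 32UL);
	return 0;
}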
+ */ +static void dw_spi_dma_rx_done(void *arg) +{ + struct dw_spi *dws = arg; + + clear_bit(RX_BUSY, &dws->dma_chan_busy); + if (test_bit(TX_BUSY, &dws->dma_chan_busy)) + return; + + dw_writel(dws, DW_SPI_DMACR, 0); + complete(&dws->dma_completion); +} + +static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws, + struct spi_transfer *xfer) +{ + struct dma_slave_config rxconf; + struct dma_async_tx_descriptor *rxdesc; + + if (!xfer->rx_buf) + return NULL; + + memset(&rxconf, 0, sizeof(rxconf)); + rxconf.direction = DMA_DEV_TO_MEM; + rxconf.src_addr = dws->dma_addr; + rxconf.src_maxburst = dws->rxburst; + rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + rxconf.src_addr_width = dw_spi_dma_convert_width(dws->n_bytes); + rxconf.device_fc = false; + + dmaengine_slave_config(dws->rxchan, &rxconf); + + rxdesc = dmaengine_prep_slave_sg(dws->rxchan, + xfer->rx_sg.sgl, + xfer->rx_sg.nents, + DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!rxdesc) + return NULL; + + rxdesc->callback = dw_spi_dma_rx_done; + rxdesc->callback_param = dws; + + return rxdesc; +} + +static int dw_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer) +{ + u16 imr = 0, dma_ctrl = 0; + + dw_writel(dws, DW_SPI_DMARDLR, dws->rxburst - 1); + dw_writel(dws, DW_SPI_DMATDLR, dws->fifo_len - dws->txburst); + + if (xfer->tx_buf) + dma_ctrl |= SPI_DMA_TDMAE; + if (xfer->rx_buf) + dma_ctrl |= SPI_DMA_RDMAE; + dw_writel(dws, DW_SPI_DMACR, dma_ctrl); + + /* Set the interrupt mask */ + if (xfer->tx_buf) + imr |= SPI_INT_TXOI; + if (xfer->rx_buf) + imr |= SPI_INT_RXUI | SPI_INT_RXOI; + spi_umask_intr(dws, imr); + + reinit_completion(&dws->dma_completion); + + dws->transfer_handler = dw_spi_dma_transfer_handler; + + return 0; +} + +static int dw_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer) +{ + struct dma_async_tx_descriptor *txdesc, *rxdesc; + int ret; + + /* Prepare the TX dma transfer */ + txdesc = dw_spi_dma_prepare_tx(dws, xfer); + + /* Prepare the RX dma transfer */ + rxdesc = dw_spi_dma_prepare_rx(dws, xfer); + + /* rx must be started before tx due to spi instinct */ + if (rxdesc) { + set_bit(RX_BUSY, &dws->dma_chan_busy); + dmaengine_submit(rxdesc); + dma_async_issue_pending(dws->rxchan); + } + + if (txdesc) { + set_bit(TX_BUSY, &dws->dma_chan_busy); + dmaengine_submit(txdesc); + dma_async_issue_pending(dws->txchan); + } + + ret = dw_spi_dma_wait(dws, xfer); + if (ret) + return ret; + + if (txdesc && dws->master->cur_msg->status == -EINPROGRESS) { + ret = dw_spi_dma_wait_tx_done(dws, xfer); + if (ret) + return ret; + } + + if (rxdesc && dws->master->cur_msg->status == -EINPROGRESS) + ret = dw_spi_dma_wait_rx_done(dws); + + return ret; +} + +static void dw_spi_dma_stop(struct dw_spi *dws) +{ + if (test_bit(TX_BUSY, &dws->dma_chan_busy)) { + dmaengine_terminate_sync(dws->txchan); + clear_bit(TX_BUSY, &dws->dma_chan_busy); + } + if (test_bit(RX_BUSY, &dws->dma_chan_busy)) { + dmaengine_terminate_sync(dws->rxchan); + clear_bit(RX_BUSY, &dws->dma_chan_busy); + } + + dw_writel(dws, DW_SPI_DMACR, 0); +} + +static const struct dw_spi_dma_ops dw_spi_dma_mfld_ops = { + .dma_init = dw_spi_dma_init_mfld, + .dma_exit = dw_spi_dma_exit, + .dma_setup = dw_spi_dma_setup, + .can_dma = dw_spi_can_dma, + .dma_transfer = dw_spi_dma_transfer, + .dma_stop = dw_spi_dma_stop, +}; + +void dw_spi_dma_setup_mfld(struct dw_spi *dws) +{ + dws->dma_ops = &dw_spi_dma_mfld_ops; +} +EXPORT_SYMBOL_GPL(dw_spi_dma_setup_mfld); + +static const struct dw_spi_dma_ops dw_spi_dma_generic_ops = { + 
.dma_init = dw_spi_dma_init_generic, + .dma_exit = dw_spi_dma_exit, + .dma_setup = dw_spi_dma_setup, + .can_dma = dw_spi_can_dma, + .dma_transfer = dw_spi_dma_transfer, + .dma_stop = dw_spi_dma_stop, +}; + +void dw_spi_dma_setup_generic(struct dw_spi *dws) +{ + dws->dma_ops = &dw_spi_dma_generic_ops; +} +EXPORT_SYMBOL_GPL(dw_spi_dma_setup_generic); diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c deleted file mode 100644 index 0d86c37e0aeb..000000000000 --- a/drivers/spi/spi-dw-mid.c +++ /dev/null @@ -1,322 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Special handling for DW core on Intel MID platform - * - * Copyright (c) 2009, 2014 Intel Corporation. - */ - -#include <linux/dma-mapping.h> -#include <linux/dmaengine.h> -#include <linux/interrupt.h> -#include <linux/slab.h> -#include <linux/spi/spi.h> -#include <linux/types.h> - -#include "spi-dw.h" - -#ifdef CONFIG_SPI_DW_MID_DMA -#include <linux/pci.h> -#include <linux/platform_data/dma-dw.h> - -#define RX_BUSY 0 -#define TX_BUSY 1 - -static struct dw_dma_slave mid_dma_tx = { .dst_id = 1 }; -static struct dw_dma_slave mid_dma_rx = { .src_id = 0 }; - -static bool mid_spi_dma_chan_filter(struct dma_chan *chan, void *param) -{ - struct dw_dma_slave *s = param; - - if (s->dma_dev != chan->device->dev) - return false; - - chan->private = s; - return true; -} - -static int mid_spi_dma_init(struct dw_spi *dws) -{ - struct pci_dev *dma_dev; - struct dw_dma_slave *tx = dws->dma_tx; - struct dw_dma_slave *rx = dws->dma_rx; - dma_cap_mask_t mask; - - /* - * Get pci device for DMA controller, currently it could only - * be the DMA controller of Medfield - */ - dma_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL); - if (!dma_dev) - return -ENODEV; - - dma_cap_zero(mask); - dma_cap_set(DMA_SLAVE, mask); - - /* 1. Init rx channel */ - rx->dma_dev = &dma_dev->dev; - dws->rxchan = dma_request_channel(mask, mid_spi_dma_chan_filter, rx); - if (!dws->rxchan) - goto err_exit; - dws->master->dma_rx = dws->rxchan; - - /* 2. 
Init tx channel */ - tx->dma_dev = &dma_dev->dev; - dws->txchan = dma_request_channel(mask, mid_spi_dma_chan_filter, tx); - if (!dws->txchan) - goto free_rxchan; - dws->master->dma_tx = dws->txchan; - - dws->dma_inited = 1; - return 0; - -free_rxchan: - dma_release_channel(dws->rxchan); -err_exit: - return -EBUSY; -} - -static void mid_spi_dma_exit(struct dw_spi *dws) -{ - if (!dws->dma_inited) - return; - - dmaengine_terminate_sync(dws->txchan); - dma_release_channel(dws->txchan); - - dmaengine_terminate_sync(dws->rxchan); - dma_release_channel(dws->rxchan); -} - -static irqreturn_t dma_transfer(struct dw_spi *dws) -{ - u16 irq_status = dw_readl(dws, DW_SPI_ISR); - - if (!irq_status) - return IRQ_NONE; - - dw_readl(dws, DW_SPI_ICR); - spi_reset_chip(dws); - - dev_err(&dws->master->dev, "%s: FIFO overrun/underrun\n", __func__); - dws->master->cur_msg->status = -EIO; - spi_finalize_current_transfer(dws->master); - return IRQ_HANDLED; -} - -static bool mid_spi_can_dma(struct spi_controller *master, - struct spi_device *spi, struct spi_transfer *xfer) -{ - struct dw_spi *dws = spi_controller_get_devdata(master); - - if (!dws->dma_inited) - return false; - - return xfer->len > dws->fifo_len; -} - -static enum dma_slave_buswidth convert_dma_width(u32 dma_width) { - if (dma_width == 1) - return DMA_SLAVE_BUSWIDTH_1_BYTE; - else if (dma_width == 2) - return DMA_SLAVE_BUSWIDTH_2_BYTES; - - return DMA_SLAVE_BUSWIDTH_UNDEFINED; -} - -/* - * dws->dma_chan_busy is set before the dma transfer starts, callback for tx - * channel will clear a corresponding bit. - */ -static void dw_spi_dma_tx_done(void *arg) -{ - struct dw_spi *dws = arg; - - clear_bit(TX_BUSY, &dws->dma_chan_busy); - if (test_bit(RX_BUSY, &dws->dma_chan_busy)) - return; - spi_finalize_current_transfer(dws->master); -} - -static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws, - struct spi_transfer *xfer) -{ - struct dma_slave_config txconf; - struct dma_async_tx_descriptor *txdesc; - - if (!xfer->tx_buf) - return NULL; - - txconf.direction = DMA_MEM_TO_DEV; - txconf.dst_addr = dws->dma_addr; - txconf.dst_maxburst = 16; - txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; - txconf.dst_addr_width = convert_dma_width(dws->dma_width); - txconf.device_fc = false; - - dmaengine_slave_config(dws->txchan, &txconf); - - txdesc = dmaengine_prep_slave_sg(dws->txchan, - xfer->tx_sg.sgl, - xfer->tx_sg.nents, - DMA_MEM_TO_DEV, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); - if (!txdesc) - return NULL; - - txdesc->callback = dw_spi_dma_tx_done; - txdesc->callback_param = dws; - - return txdesc; -} - -/* - * dws->dma_chan_busy is set before the dma transfer starts, callback for rx - * channel will clear a corresponding bit. 
- */ -static void dw_spi_dma_rx_done(void *arg) -{ - struct dw_spi *dws = arg; - - clear_bit(RX_BUSY, &dws->dma_chan_busy); - if (test_bit(TX_BUSY, &dws->dma_chan_busy)) - return; - spi_finalize_current_transfer(dws->master); -} - -static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws, - struct spi_transfer *xfer) -{ - struct dma_slave_config rxconf; - struct dma_async_tx_descriptor *rxdesc; - - if (!xfer->rx_buf) - return NULL; - - rxconf.direction = DMA_DEV_TO_MEM; - rxconf.src_addr = dws->dma_addr; - rxconf.src_maxburst = 16; - rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; - rxconf.src_addr_width = convert_dma_width(dws->dma_width); - rxconf.device_fc = false; - - dmaengine_slave_config(dws->rxchan, &rxconf); - - rxdesc = dmaengine_prep_slave_sg(dws->rxchan, - xfer->rx_sg.sgl, - xfer->rx_sg.nents, - DMA_DEV_TO_MEM, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); - if (!rxdesc) - return NULL; - - rxdesc->callback = dw_spi_dma_rx_done; - rxdesc->callback_param = dws; - - return rxdesc; -} - -static int mid_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer) -{ - u16 dma_ctrl = 0; - - dw_writel(dws, DW_SPI_DMARDLR, 0xf); - dw_writel(dws, DW_SPI_DMATDLR, 0x10); - - if (xfer->tx_buf) - dma_ctrl |= SPI_DMA_TDMAE; - if (xfer->rx_buf) - dma_ctrl |= SPI_DMA_RDMAE; - dw_writel(dws, DW_SPI_DMACR, dma_ctrl); - - /* Set the interrupt mask */ - spi_umask_intr(dws, SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI); - - dws->transfer_handler = dma_transfer; - - return 0; -} - -static int mid_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer) -{ - struct dma_async_tx_descriptor *txdesc, *rxdesc; - - /* Prepare the TX dma transfer */ - txdesc = dw_spi_dma_prepare_tx(dws, xfer); - - /* Prepare the RX dma transfer */ - rxdesc = dw_spi_dma_prepare_rx(dws, xfer); - - /* rx must be started before tx due to spi instinct */ - if (rxdesc) { - set_bit(RX_BUSY, &dws->dma_chan_busy); - dmaengine_submit(rxdesc); - dma_async_issue_pending(dws->rxchan); - } - - if (txdesc) { - set_bit(TX_BUSY, &dws->dma_chan_busy); - dmaengine_submit(txdesc); - dma_async_issue_pending(dws->txchan); - } - - return 0; -} - -static void mid_spi_dma_stop(struct dw_spi *dws) -{ - if (test_bit(TX_BUSY, &dws->dma_chan_busy)) { - dmaengine_terminate_sync(dws->txchan); - clear_bit(TX_BUSY, &dws->dma_chan_busy); - } - if (test_bit(RX_BUSY, &dws->dma_chan_busy)) { - dmaengine_terminate_sync(dws->rxchan); - clear_bit(RX_BUSY, &dws->dma_chan_busy); - } -} - -static const struct dw_spi_dma_ops mid_dma_ops = { - .dma_init = mid_spi_dma_init, - .dma_exit = mid_spi_dma_exit, - .dma_setup = mid_spi_dma_setup, - .can_dma = mid_spi_can_dma, - .dma_transfer = mid_spi_dma_transfer, - .dma_stop = mid_spi_dma_stop, -}; -#endif - -/* Some specific info for SPI0 controller on Intel MID */ - -/* HW info for MRST Clk Control Unit, 32b reg per controller */ -#define MRST_SPI_CLK_BASE 100000000 /* 100m */ -#define MRST_CLK_SPI_REG 0xff11d86c -#define CLK_SPI_BDIV_OFFSET 0 -#define CLK_SPI_BDIV_MASK 0x00000007 -#define CLK_SPI_CDIV_OFFSET 9 -#define CLK_SPI_CDIV_MASK 0x00000e00 -#define CLK_SPI_DISABLE_OFFSET 8 - -int dw_spi_mid_init(struct dw_spi *dws) -{ - void __iomem *clk_reg; - u32 clk_cdiv; - - clk_reg = ioremap(MRST_CLK_SPI_REG, 16); - if (!clk_reg) - return -ENOMEM; - - /* Get SPI controller operating freq info */ - clk_cdiv = readl(clk_reg + dws->bus_num * sizeof(u32)); - clk_cdiv &= CLK_SPI_CDIV_MASK; - clk_cdiv >>= CLK_SPI_CDIV_OFFSET; - dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1); - - iounmap(clk_reg); - 
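Editor's note: the deleted dw_spi_mid_init() above (and spi_mid_init(), which re-adds the same readout to spi-dw-pci.c later in this patch) derives the controller's maximum clock rate from the CDIV field of the MRST clock-control register. A minimal, hypothetical worked example of that arithmetic, not part of the patch:

	/* Hypothetical illustration of the CDIV math; the register value is assumed. */
	static unsigned int example_mrst_spi_max_freq(u32 clk_reg_val)
	{
		u32 cdiv = (clk_reg_val & 0x00000e00) >> 9;	/* CLK_SPI_CDIV field, bits [11:9] */

		return 100000000 / (cdiv + 1);			/* MRST_SPI_CLK_BASE / (cdiv + 1) */
	}

	/* e.g. a readout of 0x400 gives cdiv = 2, so max_freq = 100 MHz / 3 = 33333333 Hz. */
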
-#ifdef CONFIG_SPI_DW_MID_DMA - dws->dma_tx = &mid_dma_tx; - dws->dma_rx = &mid_dma_rx; - dws->dma_ops = &mid_dma_ops; -#endif - return 0; -} diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c index 384a3ab6dc2d..403403deae66 100644 --- a/drivers/spi/spi-dw-mmio.c +++ b/drivers/spi/spi-dw-mmio.c @@ -7,7 +7,6 @@ #include <linux/clk.h> #include <linux/err.h> -#include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/slab.h> @@ -20,6 +19,7 @@ #include <linux/acpi.h> #include <linux/property.h> #include <linux/regmap.h> +#include <linux/reset.h> #include "spi-dw.h" @@ -30,6 +30,7 @@ struct dw_spi_mmio { struct clk *clk; struct clk *pclk; void *priv; + struct reset_control *rstc; }; #define MSCC_CPU_SYSTEM_CTRL_GENERAL_CTRL 0x24 @@ -44,6 +45,13 @@ struct dw_spi_mmio { #define MSCC_SPI_MST_SW_MODE_SW_PIN_CTRL_MODE BIT(13) #define MSCC_SPI_MST_SW_MODE_SW_SPI_CS(x) (x << 5) +/* + * For Keem Bay, CTRLR0[31] is used to select controller mode. + * 0: SSI is slave + * 1: SSI is master + */ +#define KEEMBAY_CTRLR0_SSIC_IS_MST BIT(31) + struct dw_spi_mscc { struct regmap *syscon; void __iomem *spi_mst; @@ -106,6 +114,9 @@ static int dw_spi_mscc_init(struct platform_device *pdev, dwsmmio->dws.set_cs = dw_spi_mscc_set_cs; dwsmmio->priv = dwsmscc; + /* Register hook to configure CTRLR0 */ + dwsmmio->dws.update_cr0 = dw_spi_update_cr0; + return 0; } @@ -128,6 +139,49 @@ static int dw_spi_alpine_init(struct platform_device *pdev, { dwsmmio->dws.cs_override = 1; + /* Register hook to configure CTRLR0 */ + dwsmmio->dws.update_cr0 = dw_spi_update_cr0; + + return 0; +} + +static int dw_spi_dw_apb_init(struct platform_device *pdev, + struct dw_spi_mmio *dwsmmio) +{ + /* Register hook to configure CTRLR0 */ + dwsmmio->dws.update_cr0 = dw_spi_update_cr0; + + dw_spi_dma_setup_generic(&dwsmmio->dws); + + return 0; +} + +static int dw_spi_dwc_ssi_init(struct platform_device *pdev, + struct dw_spi_mmio *dwsmmio) +{ + /* Register hook to configure CTRLR0 */ + dwsmmio->dws.update_cr0 = dw_spi_update_cr0_v1_01a; + + dw_spi_dma_setup_generic(&dwsmmio->dws); + + return 0; +} + +static u32 dw_spi_update_cr0_keembay(struct spi_controller *master, + struct spi_device *spi, + struct spi_transfer *transfer) +{ + u32 cr0 = dw_spi_update_cr0_v1_01a(master, spi, transfer); + + return cr0 | KEEMBAY_CTRLR0_SSIC_IS_MST; +} + +static int dw_spi_keembay_init(struct platform_device *pdev, + struct dw_spi_mmio *dwsmmio) +{ + /* Register hook to configure CTRLR0 */ + dwsmmio->dws.update_cr0 = dw_spi_update_cr0_keembay; + return 0; } @@ -136,6 +190,7 @@ static int dw_spi_mmio_probe(struct platform_device *pdev) int (*init_func)(struct platform_device *pdev, struct dw_spi_mmio *dwsmmio); struct dw_spi_mmio *dwsmmio; + struct resource *mem; struct dw_spi *dws; int ret; int num_cs; @@ -148,11 +203,11 @@ static int dw_spi_mmio_probe(struct platform_device *pdev) dws = &dwsmmio->dws; /* Get basic io resource and map it */ - dws->regs = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(dws->regs)) { - dev_err(&pdev->dev, "SPI region map failed\n"); + dws->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &mem); + if (IS_ERR(dws->regs)) return PTR_ERR(dws->regs); - } + + dws->paddr = mem->start; dws->irq = platform_get_irq(pdev, 0); if (dws->irq < 0) @@ -175,6 +230,14 @@ static int dw_spi_mmio_probe(struct platform_device *pdev) if (ret) goto out_clk; + /* find an optional reset controller */ + dwsmmio->rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, "spi"); 
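Editor's note: the reset hook added to dw_spi_mmio_probe() just above relies on the "optional" reset API. As far as I can tell, devm_reset_control_get_optional_exclusive() returns NULL rather than an error when the device tree describes no "spi" reset line, and reset_control_deassert()/reset_control_assert() are no-ops on a NULL handle, which is why probe, its error path and remove can all call them unconditionally. A hedged sketch of that pattern; example_get_optional_reset() is an illustrative name, not a function from the patch:

	#include <linux/reset.h>

	/* Sketch only, assuming the optional-reset semantics described above. */
	static int example_get_optional_reset(struct device *dev)
	{
		struct reset_control *rstc;

		rstc = devm_reset_control_get_optional_exclusive(dev, "spi");
		if (IS_ERR(rstc))
			return PTR_ERR(rstc);		/* real error, e.g. -EPROBE_DEFER */

		reset_control_deassert(rstc);		/* no-op when rstc == NULL */
		return 0;
	}
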
+ if (IS_ERR(dwsmmio->rstc)) { + ret = PTR_ERR(dwsmmio->rstc); + goto out_clk; + } + reset_control_deassert(dwsmmio->rstc); + dws->bus_num = pdev->id; dws->max_freq = clk_get_rate(dwsmmio->clk); @@ -208,6 +271,8 @@ out: clk_disable_unprepare(dwsmmio->pclk); out_clk: clk_disable_unprepare(dwsmmio->clk); + reset_control_assert(dwsmmio->rstc); + return ret; } @@ -219,25 +284,30 @@ static int dw_spi_mmio_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); clk_disable_unprepare(dwsmmio->pclk); clk_disable_unprepare(dwsmmio->clk); + reset_control_assert(dwsmmio->rstc); return 0; } static const struct of_device_id dw_spi_mmio_of_match[] = { - { .compatible = "snps,dw-apb-ssi", }, + { .compatible = "snps,dw-apb-ssi", .data = dw_spi_dw_apb_init}, { .compatible = "mscc,ocelot-spi", .data = dw_spi_mscc_ocelot_init}, { .compatible = "mscc,jaguar2-spi", .data = dw_spi_mscc_jaguar2_init}, { .compatible = "amazon,alpine-dw-apb-ssi", .data = dw_spi_alpine_init}, - { .compatible = "renesas,rzn1-spi", }, + { .compatible = "renesas,rzn1-spi", .data = dw_spi_dw_apb_init}, + { .compatible = "snps,dwc-ssi-1.01a", .data = dw_spi_dwc_ssi_init}, + { .compatible = "intel,keembay-ssi", .data = dw_spi_keembay_init}, { /* end of table */} }; MODULE_DEVICE_TABLE(of, dw_spi_mmio_of_match); +#ifdef CONFIG_ACPI static const struct acpi_device_id dw_spi_mmio_acpi_match[] = { - {"HISI0173", 0}, + {"HISI0173", (kernel_ulong_t)dw_spi_dw_apb_init}, {}, }; MODULE_DEVICE_TABLE(acpi, dw_spi_mmio_acpi_match); +#endif static struct platform_driver dw_spi_mmio_driver = { .probe = dw_spi_mmio_probe, diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c index 12c131b5fb4e..2ea73809ca34 100644 --- a/drivers/spi/spi-dw-pci.c +++ b/drivers/spi/spi-dw-pci.c @@ -5,7 +5,6 @@ * Copyright (c) 2009, 2014 Intel Corporation. 
*/ -#include <linux/interrupt.h> #include <linux/pci.h> #include <linux/pm_runtime.h> #include <linux/slab.h> @@ -16,6 +15,15 @@ #define DRIVER_NAME "dw_spi_pci" +/* HW info for MRST Clk Control Unit, 32b reg per controller */ +#define MRST_SPI_CLK_BASE 100000000 /* 100m */ +#define MRST_CLK_SPI_REG 0xff11d86c +#define CLK_SPI_BDIV_OFFSET 0 +#define CLK_SPI_BDIV_MASK 0x00000007 +#define CLK_SPI_CDIV_OFFSET 9 +#define CLK_SPI_CDIV_MASK 0x00000e00 +#define CLK_SPI_DISABLE_OFFSET 8 + struct spi_pci_desc { int (*setup)(struct dw_spi *); u16 num_cs; @@ -23,19 +31,55 @@ struct spi_pci_desc { u32 max_freq; }; +static int spi_mid_init(struct dw_spi *dws) +{ + void __iomem *clk_reg; + u32 clk_cdiv; + + clk_reg = ioremap(MRST_CLK_SPI_REG, 16); + if (!clk_reg) + return -ENOMEM; + + /* Get SPI controller operating freq info */ + clk_cdiv = readl(clk_reg + dws->bus_num * sizeof(u32)); + clk_cdiv &= CLK_SPI_CDIV_MASK; + clk_cdiv >>= CLK_SPI_CDIV_OFFSET; + dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1); + + iounmap(clk_reg); + + /* Register hook to configure CTRLR0 */ + dws->update_cr0 = dw_spi_update_cr0; + + dw_spi_dma_setup_mfld(dws); + + return 0; +} + +static int spi_generic_init(struct dw_spi *dws) +{ + /* Register hook to configure CTRLR0 */ + dws->update_cr0 = dw_spi_update_cr0; + + dw_spi_dma_setup_generic(dws); + + return 0; +} + static struct spi_pci_desc spi_pci_mid_desc_1 = { - .setup = dw_spi_mid_init, + .setup = spi_mid_init, .num_cs = 5, .bus_num = 0, }; static struct spi_pci_desc spi_pci_mid_desc_2 = { - .setup = dw_spi_mid_init, + .setup = spi_mid_init, .num_cs = 2, .bus_num = 1, }; static struct spi_pci_desc spi_pci_ehl_desc = { + .setup = spi_generic_init, .num_cs = 2, .bus_num = -1, .max_freq = 100000000, diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h index 1bf5713e047d..151ba316619e 100644 --- a/drivers/spi/spi-dw.h +++ b/drivers/spi/spi-dw.h @@ -2,18 +2,21 @@ #ifndef DW_SPI_HEADER_H #define DW_SPI_HEADER_H +#include <linux/completion.h> +#include <linux/debugfs.h> +#include <linux/irqreturn.h> #include <linux/io.h> #include <linux/scatterlist.h> /* Register offsets */ -#define DW_SPI_CTRL0 0x00 -#define DW_SPI_CTRL1 0x04 +#define DW_SPI_CTRLR0 0x00 +#define DW_SPI_CTRLR1 0x04 #define DW_SPI_SSIENR 0x08 #define DW_SPI_MWCR 0x0c #define DW_SPI_SER 0x10 #define DW_SPI_BAUDR 0x14 -#define DW_SPI_TXFLTR 0x18 -#define DW_SPI_RXFLTR 0x1c +#define DW_SPI_TXFTLR 0x18 +#define DW_SPI_RXFTLR 0x1c #define DW_SPI_TXFLR 0x20 #define DW_SPI_RXFLR 0x24 #define DW_SPI_SR 0x28 @@ -57,6 +60,15 @@ #define SPI_SRL_OFFSET 11 #define SPI_CFS_OFFSET 12 +/* Bit fields in CTRLR0 based on DWC_ssi_databook.pdf v1.01a */ +#define DWC_SSI_CTRLR0_SRL_OFFSET 13 +#define DWC_SSI_CTRLR0_TMOD_OFFSET 10 +#define DWC_SSI_CTRLR0_TMOD_MASK GENMASK(11, 10) +#define DWC_SSI_CTRLR0_SCPOL_OFFSET 9 +#define DWC_SSI_CTRLR0_SCPH_OFFSET 8 +#define DWC_SSI_CTRLR0_FRF_OFFSET 6 +#define DWC_SSI_CTRLR0_DFS_OFFSET 0 + /* Bit fields in SR, 7 bits */ #define SR_MASK 0x7f /* cover 7 bits */ #define SR_BUSY (1 << 0) @@ -90,7 +102,7 @@ enum dw_ssi_type { struct dw_spi; struct dw_spi_dma_ops { - int (*dma_init)(struct dw_spi *dws); + int (*dma_init)(struct device *dev, struct dw_spi *dws); void (*dma_exit)(struct dw_spi *dws); int (*dma_setup)(struct dw_spi *dws, struct spi_transfer *xfer); bool (*can_dma)(struct spi_controller *master, struct spi_device *spi, @@ -114,6 +126,8 @@ struct dw_spi { u16 bus_num; u16 num_cs; /* supported slave numbers */ void (*set_cs)(struct spi_device *spi, bool enable); + u32 
(*update_cr0)(struct spi_controller *master, struct spi_device *spi, + struct spi_transfer *transfer); /* Current message transfer state info */ size_t len; @@ -124,24 +138,22 @@ struct dw_spi { void *rx_end; int dma_mapped; u8 n_bytes; /* current is a 1/2 bytes op */ - u32 dma_width; irqreturn_t (*transfer_handler)(struct dw_spi *dws); u32 current_freq; /* frequency in hz */ /* DMA info */ - int dma_inited; struct dma_chan *txchan; + u32 txburst; struct dma_chan *rxchan; + u32 rxburst; unsigned long dma_chan_busy; dma_addr_t dma_addr; /* phy address of the Data register */ const struct dw_spi_dma_ops *dma_ops; - void *dma_tx; - void *dma_rx; + struct completion dma_completion; - /* Bus interface info */ - void *priv; #ifdef CONFIG_DEBUG_FS struct dentry *debugfs; + struct debugfs_regset32 regset; #endif }; @@ -235,24 +247,28 @@ static inline void spi_shutdown_chip(struct dw_spi *dws) spi_set_clk(dws, 0); } -/* - * Each SPI slave device to work with dw_api controller should - * has such a structure claiming its working mode (poll or PIO/DMA), - * which can be save in the "controller_data" member of the - * struct spi_device. - */ -struct dw_spi_chip { - u8 poll_mode; /* 1 for controller polling mode */ - u8 type; /* SPI/SSP/MicroWire */ - void (*cs_control)(u32 command); -}; - extern void dw_spi_set_cs(struct spi_device *spi, bool enable); extern int dw_spi_add_host(struct device *dev, struct dw_spi *dws); extern void dw_spi_remove_host(struct dw_spi *dws); extern int dw_spi_suspend_host(struct dw_spi *dws); extern int dw_spi_resume_host(struct dw_spi *dws); +extern u32 dw_spi_update_cr0(struct spi_controller *master, + struct spi_device *spi, + struct spi_transfer *transfer); +extern u32 dw_spi_update_cr0_v1_01a(struct spi_controller *master, + struct spi_device *spi, + struct spi_transfer *transfer); + +#ifdef CONFIG_SPI_DW_DMA + +extern void dw_spi_dma_setup_mfld(struct dw_spi *dws); +extern void dw_spi_dma_setup_generic(struct dw_spi *dws); + +#else + +static inline void dw_spi_dma_setup_mfld(struct dw_spi *dws) {} +static inline void dw_spi_dma_setup_generic(struct dw_spi *dws) {} + +#endif /* !CONFIG_SPI_DW_DMA */ -/* platform related setup */ -extern int dw_spi_mid_init(struct dw_spi *dws); /* Intel MID platforms */ #endif /* DW_SPI_HEADER_H */ diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c index 4e1ccd4e52b6..8c854b187b1d 100644 --- a/drivers/spi/spi-ep93xx.c +++ b/drivers/spi/spi-ep93xx.c @@ -31,7 +31,8 @@ #include <linux/platform_data/spi-ep93xx.h> #define SSPCR0 0x0000 -#define SSPCR0_MODE_SHIFT 6 +#define SSPCR0_SPO BIT(6) +#define SSPCR0_SPH BIT(7) #define SSPCR0_SCR_SHIFT 8 #define SSPCR1 0x0004 @@ -159,7 +160,10 @@ static int ep93xx_spi_chip_setup(struct spi_master *master, return err; cr0 = div_scr << SSPCR0_SCR_SHIFT; - cr0 |= (spi->mode & (SPI_CPHA | SPI_CPOL)) << SSPCR0_MODE_SHIFT; + if (spi->mode & SPI_CPOL) + cr0 |= SSPCR0_SPO; + if (spi->mode & SPI_CPHA) + cr0 |= SSPCR0_SPH; cr0 |= dss; dev_dbg(&master->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n", diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 50e41f66a2d7..a35faced0456 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0+ // // Copyright 2013 Freescale Semiconductor, Inc. 
+// Copyright 2020 NXP // // Freescale DSPI driver // This file contains a driver for the Freescale DSPI @@ -26,6 +27,9 @@ #define SPI_MCR_CLR_TXF BIT(11) #define SPI_MCR_CLR_RXF BIT(10) #define SPI_MCR_XSPI BIT(3) +#define SPI_MCR_DIS_TXF BIT(13) +#define SPI_MCR_DIS_RXF BIT(12) +#define SPI_MCR_HALT BIT(0) #define SPI_TCR 0x08 #define SPI_TCR_GET_TCNT(x) (((x) & GENMASK(31, 16)) >> 16) @@ -246,13 +250,33 @@ struct fsl_dspi { static void dspi_native_host_to_dev(struct fsl_dspi *dspi, u32 *txdata) { - memcpy(txdata, dspi->tx, dspi->oper_word_size); + switch (dspi->oper_word_size) { + case 1: + *txdata = *(u8 *)dspi->tx; + break; + case 2: + *txdata = *(u16 *)dspi->tx; + break; + case 4: + *txdata = *(u32 *)dspi->tx; + break; + } dspi->tx += dspi->oper_word_size; } static void dspi_native_dev_to_host(struct fsl_dspi *dspi, u32 rxdata) { - memcpy(dspi->rx, &rxdata, dspi->oper_word_size); + switch (dspi->oper_word_size) { + case 1: + *(u8 *)dspi->rx = rxdata; + break; + case 2: + *(u16 *)dspi->rx = rxdata; + break; + case 4: + *(u32 *)dspi->rx = rxdata; + break; + } dspi->rx += dspi->oper_word_size; } @@ -1417,6 +1441,24 @@ static int dspi_remove(struct platform_device *pdev) return 0; } +static void dspi_shutdown(struct platform_device *pdev) +{ + struct spi_controller *ctlr = platform_get_drvdata(pdev); + struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr); + + /* Disable RX and TX */ + regmap_update_bits(dspi->regmap, SPI_MCR, + SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF, + SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF); + + /* Stop Running */ + regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, SPI_MCR_HALT); + + dspi_release_dma(dspi); + clk_disable_unprepare(dspi->clk); + spi_unregister_controller(dspi->ctlr); +} + static struct platform_driver fsl_dspi_driver = { .driver.name = DRIVER_NAME, .driver.of_match_table = fsl_dspi_dt_ids, @@ -1424,6 +1466,7 @@ static struct platform_driver fsl_dspi_driver = { .driver.pm = &dspi_pm, .probe = dspi_probe, .remove = dspi_remove, + .shutdown = dspi_shutdown, }; module_platform_driver(fsl_dspi_driver); diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c index 8b41b70f6f5c..1552b28b9515 100644 --- a/drivers/spi/spi-fsl-lpspi.c +++ b/drivers/spi/spi-fsl-lpspi.c @@ -186,14 +186,13 @@ static bool fsl_lpspi_can_dma(struct spi_controller *controller, bytes_per_word = fsl_lpspi_bytes_per_word(transfer->bits_per_word); - switch (bytes_per_word) - { - case 1: - case 2: - case 4: - break; - default: - return false; + switch (bytes_per_word) { + case 1: + case 2: + case 4: + break; + default: + return false; } return true; @@ -941,7 +940,7 @@ static int fsl_lpspi_probe(struct platform_device *pdev) ret = pm_runtime_get_sync(fsl_lpspi->dev); if (ret < 0) { dev_err(fsl_lpspi->dev, "failed to enable clock\n"); - goto out_controller_put; + goto out_pm_get; } temp = readl(fsl_lpspi->base + IMX7ULP_PARAM); @@ -950,13 +949,15 @@ static int fsl_lpspi_probe(struct platform_device *pdev) ret = fsl_lpspi_dma_init(&pdev->dev, fsl_lpspi, controller); if (ret == -EPROBE_DEFER) - goto out_controller_put; + goto out_pm_get; if (ret < 0) dev_err(&pdev->dev, "dma setup error %d, use pio\n", ret); return 0; +out_pm_get: + pm_runtime_put_noidle(fsl_lpspi->dev); out_controller_put: spi_controller_put(controller); diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c index 02e5cba0a5bb..6766262d7e75 100644 --- a/drivers/spi/spi-fsl-qspi.c +++ b/drivers/spi/spi-fsl-qspi.c @@ -876,14 +876,15 @@ static int fsl_qspi_probe(struct platform_device *pdev) res = 
platform_get_resource_byname(pdev, IORESOURCE_MEM, "QuadSPI-memory"); - q->ahb_addr = devm_ioremap_resource(dev, res); - if (IS_ERR(q->ahb_addr)) { - ret = PTR_ERR(q->ahb_addr); + q->memmap_phy = res->start; + /* Since there are 4 cs, map size required is 4 times ahb_buf_size */ + q->ahb_addr = devm_ioremap(dev, q->memmap_phy, + (q->devtype_data->ahb_buf_size * 4)); + if (!q->ahb_addr) { + ret = -ENOMEM; goto err_put_ctrl; } - q->memmap_phy = res->start; - /* find the clocks */ q->clk_en = devm_clk_get(dev, "qspi_en"); if (IS_ERR(q->clk_en)) { diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c index 3b81772fea0d..67f022b8c81d 100644 --- a/drivers/spi/spi-fsl-spi.c +++ b/drivers/spi/spi-fsl-spi.c @@ -588,7 +588,7 @@ static void fsl_spi_grlib_probe(struct device *dev) pdata->cs_control = fsl_spi_grlib_cs_control; } -static struct spi_master * fsl_spi_probe(struct device *dev, +static struct spi_master *fsl_spi_probe(struct device *dev, struct resource *mem, unsigned int irq) { struct fsl_spi_platform_data *pdata = dev_get_platdata(dev); diff --git a/drivers/spi/spi-hisi-sfc-v3xx.c b/drivers/spi/spi-hisi-sfc-v3xx.c index e3b57252d075..64a18d08a4d9 100644 --- a/drivers/spi/spi-hisi-sfc-v3xx.c +++ b/drivers/spi/spi-hisi-sfc-v3xx.c @@ -17,6 +17,11 @@ #define HISI_SFC_V3XX_VERSION (0x1f8) +#define HISI_SFC_V3XX_INT_STAT (0x120) +#define HISI_SFC_V3XX_INT_STAT_PP_ERR BIT(2) +#define HISI_SFC_V3XX_INT_STAT_ADDR_IACCES BIT(5) +#define HISI_SFC_V3XX_INT_CLR (0x12c) +#define HISI_SFC_V3XX_INT_CLR_CLEAR (0xff) #define HISI_SFC_V3XX_CMD_CFG (0x300) #define HISI_SFC_V3XX_CMD_CFG_DUAL_IN_DUAL_OUT (1 << 17) #define HISI_SFC_V3XX_CMD_CFG_DUAL_IO (2 << 17) @@ -163,7 +168,7 @@ static int hisi_sfc_v3xx_generic_exec_op(struct hisi_sfc_v3xx_host *host, u8 chip_select) { int ret, len = op->data.nbytes; - u32 config = 0; + u32 int_stat, config = 0; if (op->addr.nbytes) config |= HISI_SFC_V3XX_CMD_CFG_ADDR_EN_MSK; @@ -228,6 +233,25 @@ static int hisi_sfc_v3xx_generic_exec_op(struct hisi_sfc_v3xx_host *host, if (ret) return ret; + /* + * The interrupt status register indicates whether an error occurs + * after per operation. Check it, and clear the interrupts for + * next time judgement. 
+ */ + int_stat = readl(host->regbase + HISI_SFC_V3XX_INT_STAT); + writel(HISI_SFC_V3XX_INT_CLR_CLEAR, + host->regbase + HISI_SFC_V3XX_INT_CLR); + + if (int_stat & HISI_SFC_V3XX_INT_STAT_ADDR_IACCES) { + dev_err(host->dev, "fail to access protected address\n"); + return -EIO; + } + + if (int_stat & HISI_SFC_V3XX_INT_STAT_PP_ERR) { + dev_err(host->dev, "page program operation failed\n"); + return -EIO; + } + if (op->data.dir == SPI_MEM_DATA_IN) hisi_sfc_v3xx_read_databuf(host, op->data.buf.in, len); diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index f4f28a400a96..b7a85e3fe1c1 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -71,6 +71,7 @@ struct spi_imx_devtype_data { void (*reset)(struct spi_imx_data *); void (*setup_wml)(struct spi_imx_data *); void (*disable)(struct spi_imx_data *); + void (*disable_dma)(struct spi_imx_data *); bool has_dmamode; bool has_slavemode; unsigned int fifo_size; @@ -485,6 +486,11 @@ static void mx51_ecspi_trigger(struct spi_imx_data *spi_imx) writel(reg, spi_imx->base + MX51_ECSPI_CTRL); } +static void mx51_disable_dma(struct spi_imx_data *spi_imx) +{ + writel(0, spi_imx->base + MX51_ECSPI_DMA); +} + static void mx51_ecspi_disable(struct spi_imx_data *spi_imx) { u32 ctrl; @@ -987,6 +993,7 @@ static struct spi_imx_devtype_data imx51_ecspi_devtype_data = { .rx_available = mx51_ecspi_rx_available, .reset = mx51_ecspi_reset, .setup_wml = mx51_setup_wml, + .disable_dma = mx51_disable_dma, .fifo_size = 64, .has_dmamode = true, .dynamic_burst = true, @@ -1001,6 +1008,7 @@ static struct spi_imx_devtype_data imx53_ecspi_devtype_data = { .prepare_transfer = mx51_ecspi_prepare_transfer, .trigger = mx51_ecspi_trigger, .rx_available = mx51_ecspi_rx_available, + .disable_dma = mx51_disable_dma, .reset = mx51_ecspi_reset, .fifo_size = 64, .has_dmamode = true, @@ -1385,6 +1393,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc_tx) { dmaengine_terminate_all(master->dma_tx); + dmaengine_terminate_all(master->dma_rx); return -EINVAL; } @@ -1498,6 +1507,7 @@ static int spi_imx_transfer(struct spi_device *spi, struct spi_transfer *transfer) { struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); + int ret; /* flush rxfifo before transfer */ while (spi_imx->devtype_data->rx_available(spi_imx)) @@ -1506,10 +1516,23 @@ static int spi_imx_transfer(struct spi_device *spi, if (spi_imx->slave_mode) return spi_imx_pio_transfer_slave(spi, transfer); - if (spi_imx->usedma) - return spi_imx_dma_transfer(spi_imx, transfer); - else - return spi_imx_pio_transfer(spi, transfer); + /* + * fallback PIO mode if dma setup error happen, for example sdma + * firmware may not be updated as ERR009165 required. 
+ */ + if (spi_imx->usedma) { + ret = spi_imx_dma_transfer(spi_imx, transfer); + if (ret != -EINVAL) + return ret; + + spi_imx->devtype_data->disable_dma(spi_imx); + + spi_imx->usedma = false; + spi_imx->dynamic_burst = spi_imx->devtype_data->dynamic_burst; + dev_dbg(&spi->dev, "Fallback to PIO mode\n"); + } + + return spi_imx_pio_transfer(spi, transfer); } static int spi_imx_setup(struct spi_device *spi) diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c index adaa0c49f966..9a86cc27fcc0 100644 --- a/drivers/spi/spi-mem.c +++ b/drivers/spi/spi-mem.c @@ -108,15 +108,17 @@ static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx) return 0; case 2: - if ((tx && (mode & (SPI_TX_DUAL | SPI_TX_QUAD))) || - (!tx && (mode & (SPI_RX_DUAL | SPI_RX_QUAD)))) + if ((tx && + (mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL))) || + (!tx && + (mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))) return 0; break; case 4: - if ((tx && (mode & SPI_TX_QUAD)) || - (!tx && (mode & SPI_RX_QUAD))) + if ((tx && (mode & (SPI_TX_QUAD | SPI_TX_OCTAL))) || + (!tx && (mode & (SPI_RX_QUAD | SPI_RX_OCTAL)))) return 0; break; diff --git a/drivers/spi/spi-mtk-nor.c b/drivers/spi/spi-mtk-nor.c index c15a9910549f..7bc302b50396 100644 --- a/drivers/spi/spi-mtk-nor.c +++ b/drivers/spi/spi-mtk-nor.c @@ -391,7 +391,7 @@ static int mtk_nor_pp_unbuffered(struct mtk_nor *sp, return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE, 6 * BITS_PER_BYTE); } -int mtk_nor_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) +static int mtk_nor_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) { struct mtk_nor *sp = spi_controller_get_devdata(mem->spi->master); int ret; diff --git a/drivers/spi/spi-mux.c b/drivers/spi/spi-mux.c index 4f94c9127fc1..cc9ef371db14 100644 --- a/drivers/spi/spi-mux.c +++ b/drivers/spi/spi-mux.c @@ -51,6 +51,10 @@ static int spi_mux_select(struct spi_device *spi) struct spi_mux_priv *priv = spi_controller_get_devdata(spi->controller); int ret; + ret = mux_control_select(priv->mux, spi->chip_select); + if (ret) + return ret; + if (priv->current_cs == spi->chip_select) return 0; @@ -62,10 +66,6 @@ static int spi_mux_select(struct spi_device *spi) priv->spi->mode = spi->mode; priv->spi->bits_per_word = spi->bits_per_word; - ret = mux_control_select(priv->mux, spi->chip_select); - if (ret) - return ret; - priv->current_cs = spi->chip_select; return 0; diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c index 1f59beb7d27e..43f73db22f21 100644 --- a/drivers/spi/spi-orion.c +++ b/drivers/spi/spi-orion.c @@ -17,10 +17,8 @@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_device.h> -#include <linux/of_gpio.h> #include <linux/clk.h> #include <linux/sizes.h> -#include <linux/gpio.h> #include <asm/unaligned.h> #define DRIVER_NAME "orion_spi" @@ -98,7 +96,6 @@ struct orion_spi { struct clk *clk; struct clk *axi_clk; const struct orion_spi_dev *devdata; - int unused_hw_gpio; struct orion_child_options child[ORION_NUM_CHIPSELECTS]; }; @@ -325,20 +322,27 @@ orion_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) static void orion_spi_set_cs(struct spi_device *spi, bool enable) { struct orion_spi *orion_spi; - int cs; orion_spi = spi_master_get_devdata(spi->master); - if (gpio_is_valid(spi->cs_gpio)) - cs = orion_spi->unused_hw_gpio; - else - cs = spi->chip_select; - + /* + * If this line is using a GPIO to control chip select, this internal + * .set_cs() function will still be called, so we clear any previous + * chip select. 
The CS we activate will not have any elecrical effect, + * as it is handled by a GPIO, but that doesn't matter. What we need + * is to deassert the old chip select and assert some other chip select. + */ orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, ORION_SPI_CS_MASK); orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG, - ORION_SPI_CS(cs)); + ORION_SPI_CS(spi->chip_select)); - /* Chip select logic is inverted from spi_set_cs */ + /* + * Chip select logic is inverted from spi_set_cs(). For lines using a + * GPIO to do chip select SPI_CS_HIGH is enforced and inversion happens + * in the GPIO library, but we don't care about that, because in those + * cases we are dealing with an unused native CS anyways so the polarity + * doesn't matter. + */ if (!enable) orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1); else @@ -503,9 +507,6 @@ static int orion_spi_transfer_one(struct spi_master *master, static int orion_spi_setup(struct spi_device *spi) { - if (gpio_is_valid(spi->cs_gpio)) { - gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH)); - } return orion_spi_setup_transfer(spi, NULL); } @@ -622,13 +623,13 @@ static int orion_spi_probe(struct platform_device *pdev) master->setup = orion_spi_setup; master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16); master->auto_runtime_pm = true; + master->use_gpio_descriptors = true; master->flags = SPI_MASTER_GPIO_SS; platform_set_drvdata(pdev, master); spi = spi_master_get_devdata(master); spi->master = master; - spi->unused_hw_gpio = -1; of_id = of_match_device(orion_spi_of_match_table, &pdev->dev); devdata = (of_id) ? of_id->data : &orion_spi_dev_data; @@ -683,7 +684,6 @@ static int orion_spi_probe(struct platform_device *pdev) for_each_available_child_of_node(pdev->dev.of_node, np) { struct orion_direct_acc *dir_acc; u32 cs; - int cs_gpio; /* Get chip-select number from the "reg" property */ status = of_property_read_u32(np, "reg", &cs); @@ -695,44 +695,6 @@ static int orion_spi_probe(struct platform_device *pdev) } /* - * Initialize the CS GPIO: - * - properly request the actual GPIO signal - * - de-assert the logical signal so that all GPIO CS lines - * are inactive when probing for slaves - * - find an unused physical CS which will be driven for any - * slave which uses a CS GPIO - */ - cs_gpio = of_get_named_gpio(pdev->dev.of_node, "cs-gpios", cs); - if (cs_gpio > 0) { - char *gpio_name; - int cs_flags; - - if (spi->unused_hw_gpio == -1) { - dev_info(&pdev->dev, - "Selected unused HW CS#%d for any GPIO CSes\n", - cs); - spi->unused_hw_gpio = cs; - } - - gpio_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, - "%s-CS%d", dev_name(&pdev->dev), cs); - if (!gpio_name) { - status = -ENOMEM; - goto out_rel_axi_clk; - } - - cs_flags = of_property_read_bool(np, "spi-cs-high") ? - GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH; - status = devm_gpio_request_one(&pdev->dev, cs_gpio, - cs_flags, gpio_name); - if (status) { - dev_err(&pdev->dev, - "Can't request GPIO for CS %d\n", cs); - goto out_rel_axi_clk; - } - } - - /* * Check if an address is configured for this SPI device. 
If * not, the MBus mapping via the 'ranges' property in the 'soc' * node is not configured and this device should not use the diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 73d2a65d0b6e..6721910e5f2a 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -150,6 +150,7 @@ static const struct lpss_config lpss_platforms[] = { .tx_threshold_hi = 48, .cs_sel_shift = 8, .cs_sel_mask = 3 << 8, + .cs_clk_stays_gated = true, }, { /* LPSS_CNL_SSP */ .offset = 0x200, @@ -1884,7 +1885,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) /* Register with the SPI framework */ platform_set_drvdata(pdev, drv_data); - status = devm_spi_register_controller(&pdev->dev, controller); + status = spi_register_controller(controller); if (status != 0) { dev_err(&pdev->dev, "problem registering spi controller\n"); goto out_error_pm_runtime_enabled; @@ -1893,7 +1894,6 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) return status; out_error_pm_runtime_enabled: - pm_runtime_put_noidle(&pdev->dev); pm_runtime_disable(&pdev->dev); out_error_clock_enabled: @@ -1916,6 +1916,8 @@ static int pxa2xx_spi_remove(struct platform_device *pdev) pm_runtime_get_sync(&pdev->dev); + spi_unregister_controller(drv_data->controller); + /* Disable the SSP at the peripheral and SOC level */ pxa2xx_spi_write(drv_data, SSCR0, 0); clk_disable_unprepare(ssp->clk); diff --git a/drivers/spi/spi-rb4xx.c b/drivers/spi/spi-rb4xx.c index 4c9620e0d18c..8aa51beb4ff3 100644 --- a/drivers/spi/spi-rb4xx.c +++ b/drivers/spi/spi-rb4xx.c @@ -14,6 +14,7 @@ #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/spi/spi.h> +#include <linux/of.h> #include <asm/mach-ath79/ar71xx_regs.h> @@ -150,6 +151,7 @@ static int rb4xx_spi_probe(struct platform_device *pdev) if (IS_ERR(ahb_clk)) return PTR_ERR(ahb_clk); + master->dev.of_node = pdev->dev.of_node; master->bus_num = 0; master->num_chipselect = 3; master->mode_bits = SPI_TX_DUAL; @@ -158,6 +160,11 @@ static int rb4xx_spi_probe(struct platform_device *pdev) master->transfer_one = rb4xx_transfer_one; master->set_cs = rb4xx_set_cs; + rbspi = spi_master_get_devdata(master); + rbspi->base = spi_base; + rbspi->clk = ahb_clk; + platform_set_drvdata(pdev, rbspi); + err = devm_spi_register_master(&pdev->dev, master); if (err) { dev_err(&pdev->dev, "failed to register SPI master\n"); @@ -168,11 +175,6 @@ static int rb4xx_spi_probe(struct platform_device *pdev) if (err) return err; - rbspi = spi_master_get_devdata(master); - rbspi->base = spi_base; - rbspi->clk = ahb_clk; - platform_set_drvdata(pdev, rbspi); - /* Enable SPI */ rb4xx_write(rbspi, AR71XX_SPI_REG_FS, AR71XX_SPI_FS_GPIO); @@ -188,11 +190,18 @@ static int rb4xx_spi_remove(struct platform_device *pdev) return 0; } +static const struct of_device_id rb4xx_spi_dt_match[] = { + { .compatible = "mikrotik,rb4xx-spi" }, + { }, +}; +MODULE_DEVICE_TABLE(of, rb4xx_spi_dt_match); + static struct platform_driver rb4xx_spi_drv = { .probe = rb4xx_spi_probe, .remove = rb4xx_spi_remove, .driver = { .name = "rb4xx-spi", + .of_match_table = of_match_ptr(rb4xx_spi_dt_match), }, }; diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c index 70ef63e0b6b8..9b8a5e1233c0 100644 --- a/drivers/spi/spi-rockchip.c +++ b/drivers/spi/spi-rockchip.c @@ -183,6 +183,8 @@ struct rockchip_spi { u8 rsd; bool cs_asserted[ROCKCHIP_SPI_MAX_CS_NUM]; + + bool slave_abort; }; static inline void spi_enable_chip(struct rockchip_spi *rs, bool enable) @@ -219,8 +221,8 @@ static u32 get_fifo_len(struct 
rockchip_spi *rs) static void rockchip_spi_set_cs(struct spi_device *spi, bool enable) { - struct spi_master *master = spi->master; - struct rockchip_spi *rs = spi_master_get_devdata(master); + struct spi_controller *ctlr = spi->controller; + struct rockchip_spi *rs = spi_controller_get_devdata(ctlr); bool cs_asserted = !enable; /* Return immediately for no-op */ @@ -244,10 +246,10 @@ static void rockchip_spi_set_cs(struct spi_device *spi, bool enable) rs->cs_asserted[spi->chip_select] = cs_asserted; } -static void rockchip_spi_handle_err(struct spi_master *master, +static void rockchip_spi_handle_err(struct spi_controller *ctlr, struct spi_message *msg) { - struct rockchip_spi *rs = spi_master_get_devdata(master); + struct rockchip_spi *rs = spi_controller_get_devdata(ctlr); /* stop running spi transfer * this also flushes both rx and tx fifos @@ -258,10 +260,10 @@ static void rockchip_spi_handle_err(struct spi_master *master, writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR); if (atomic_read(&rs->state) & TXDMA) - dmaengine_terminate_async(master->dma_tx); + dmaengine_terminate_async(ctlr->dma_tx); if (atomic_read(&rs->state) & RXDMA) - dmaengine_terminate_async(master->dma_rx); + dmaengine_terminate_async(ctlr->dma_rx); } static void rockchip_spi_pio_writer(struct rockchip_spi *rs) @@ -319,8 +321,8 @@ static void rockchip_spi_pio_reader(struct rockchip_spi *rs) static irqreturn_t rockchip_spi_isr(int irq, void *dev_id) { - struct spi_master *master = dev_id; - struct rockchip_spi *rs = spi_master_get_devdata(master); + struct spi_controller *ctlr = dev_id; + struct rockchip_spi *rs = spi_controller_get_devdata(ctlr); if (rs->tx_left) rockchip_spi_pio_writer(rs); @@ -329,7 +331,7 @@ static irqreturn_t rockchip_spi_isr(int irq, void *dev_id) if (!rs->rx_left) { spi_enable_chip(rs, false); writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR); - spi_finalize_current_transfer(master); + spi_finalize_current_transfer(ctlr); } return IRQ_HANDLED; @@ -355,35 +357,35 @@ static int rockchip_spi_prepare_irq(struct rockchip_spi *rs, static void rockchip_spi_dma_rxcb(void *data) { - struct spi_master *master = data; - struct rockchip_spi *rs = spi_master_get_devdata(master); + struct spi_controller *ctlr = data; + struct rockchip_spi *rs = spi_controller_get_devdata(ctlr); int state = atomic_fetch_andnot(RXDMA, &rs->state); - if (state & TXDMA) + if (state & TXDMA && !rs->slave_abort) return; spi_enable_chip(rs, false); - spi_finalize_current_transfer(master); + spi_finalize_current_transfer(ctlr); } static void rockchip_spi_dma_txcb(void *data) { - struct spi_master *master = data; - struct rockchip_spi *rs = spi_master_get_devdata(master); + struct spi_controller *ctlr = data; + struct rockchip_spi *rs = spi_controller_get_devdata(ctlr); int state = atomic_fetch_andnot(TXDMA, &rs->state); - if (state & RXDMA) + if (state & RXDMA && !rs->slave_abort) return; /* Wait until the FIFO data completely. 
*/ wait_for_idle(rs); spi_enable_chip(rs, false); - spi_finalize_current_transfer(master); + spi_finalize_current_transfer(ctlr); } static int rockchip_spi_prepare_dma(struct rockchip_spi *rs, - struct spi_master *master, struct spi_transfer *xfer) + struct spi_controller *ctlr, struct spi_transfer *xfer) { struct dma_async_tx_descriptor *rxdesc, *txdesc; @@ -398,17 +400,17 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs, .src_maxburst = 1, }; - dmaengine_slave_config(master->dma_rx, &rxconf); + dmaengine_slave_config(ctlr->dma_rx, &rxconf); rxdesc = dmaengine_prep_slave_sg( - master->dma_rx, + ctlr->dma_rx, xfer->rx_sg.sgl, xfer->rx_sg.nents, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT); if (!rxdesc) return -EINVAL; rxdesc->callback = rockchip_spi_dma_rxcb; - rxdesc->callback_param = master; + rxdesc->callback_param = ctlr; } txdesc = NULL; @@ -420,27 +422,27 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs, .dst_maxburst = rs->fifo_len / 4, }; - dmaengine_slave_config(master->dma_tx, &txconf); + dmaengine_slave_config(ctlr->dma_tx, &txconf); txdesc = dmaengine_prep_slave_sg( - master->dma_tx, + ctlr->dma_tx, xfer->tx_sg.sgl, xfer->tx_sg.nents, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT); if (!txdesc) { if (rxdesc) - dmaengine_terminate_sync(master->dma_rx); + dmaengine_terminate_sync(ctlr->dma_rx); return -EINVAL; } txdesc->callback = rockchip_spi_dma_txcb; - txdesc->callback_param = master; + txdesc->callback_param = ctlr; } /* rx must be started before tx due to spi instinct */ if (rxdesc) { atomic_or(RXDMA, &rs->state); dmaengine_submit(rxdesc); - dma_async_issue_pending(master->dma_rx); + dma_async_issue_pending(ctlr->dma_rx); } spi_enable_chip(rs, true); @@ -448,7 +450,7 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs, if (txdesc) { atomic_or(TXDMA, &rs->state); dmaengine_submit(txdesc); - dma_async_issue_pending(master->dma_tx); + dma_async_issue_pending(ctlr->dma_tx); } /* 1 means the transfer is in progress */ @@ -457,7 +459,7 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs, static void rockchip_spi_config(struct rockchip_spi *rs, struct spi_device *spi, struct spi_transfer *xfer, - bool use_dma) + bool use_dma, bool slave_mode) { u32 cr0 = CR0_FRF_SPI << CR0_FRF_OFFSET | CR0_BHT_8BIT << CR0_BHT_OFFSET @@ -466,6 +468,10 @@ static void rockchip_spi_config(struct rockchip_spi *rs, u32 cr1; u32 dmacr = 0; + if (slave_mode) + cr0 |= CR0_OPM_SLAVE << CR0_OPM_OFFSET; + rs->slave_abort = false; + cr0 |= rs->rsd << CR0_RSD_OFFSET; cr0 |= (spi->mode & 0x3U) << CR0_SCPH_OFFSET; if (spi->mode & SPI_LSB_FIRST) @@ -493,7 +499,7 @@ static void rockchip_spi_config(struct rockchip_spi *rs, break; default: /* we only whitelist 4, 8 and 16 bit words in - * master->bits_per_word_mask, so this shouldn't + * ctlr->bits_per_word_mask, so this shouldn't * happen */ unreachable(); @@ -535,12 +541,22 @@ static size_t rockchip_spi_max_transfer_size(struct spi_device *spi) return ROCKCHIP_SPI_MAX_TRANLEN; } +static int rockchip_spi_slave_abort(struct spi_controller *ctlr) +{ + struct rockchip_spi *rs = spi_controller_get_devdata(ctlr); + + rs->slave_abort = true; + complete(&ctlr->xfer_completion); + + return 0; +} + static int rockchip_spi_transfer_one( - struct spi_master *master, + struct spi_controller *ctlr, struct spi_device *spi, struct spi_transfer *xfer) { - struct rockchip_spi *rs = spi_master_get_devdata(master); + struct rockchip_spi *rs = spi_controller_get_devdata(ctlr); bool use_dma; WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) && @@ -558,21 
+574,21 @@ static int rockchip_spi_transfer_one( rs->n_bytes = xfer->bits_per_word <= 8 ? 1 : 2; - use_dma = master->can_dma ? master->can_dma(master, spi, xfer) : false; + use_dma = ctlr->can_dma ? ctlr->can_dma(ctlr, spi, xfer) : false; - rockchip_spi_config(rs, spi, xfer, use_dma); + rockchip_spi_config(rs, spi, xfer, use_dma, ctlr->slave); if (use_dma) - return rockchip_spi_prepare_dma(rs, master, xfer); + return rockchip_spi_prepare_dma(rs, ctlr, xfer); return rockchip_spi_prepare_irq(rs, xfer); } -static bool rockchip_spi_can_dma(struct spi_master *master, +static bool rockchip_spi_can_dma(struct spi_controller *ctlr, struct spi_device *spi, struct spi_transfer *xfer) { - struct rockchip_spi *rs = spi_master_get_devdata(master); + struct rockchip_spi *rs = spi_controller_get_devdata(ctlr); unsigned int bytes_per_word = xfer->bits_per_word <= 8 ? 1 : 2; /* if the numbor of spi words to transfer is less than the fifo @@ -586,44 +602,55 @@ static int rockchip_spi_probe(struct platform_device *pdev) { int ret; struct rockchip_spi *rs; - struct spi_master *master; + struct spi_controller *ctlr; struct resource *mem; + struct device_node *np = pdev->dev.of_node; u32 rsd_nsecs; + bool slave_mode; + + slave_mode = of_property_read_bool(np, "spi-slave"); + + if (slave_mode) + ctlr = spi_alloc_slave(&pdev->dev, + sizeof(struct rockchip_spi)); + else + ctlr = spi_alloc_master(&pdev->dev, + sizeof(struct rockchip_spi)); - master = spi_alloc_master(&pdev->dev, sizeof(struct rockchip_spi)); - if (!master) + if (!ctlr) return -ENOMEM; - platform_set_drvdata(pdev, master); + platform_set_drvdata(pdev, ctlr); - rs = spi_master_get_devdata(master); + rs = spi_controller_get_devdata(ctlr); + ctlr->slave = slave_mode; /* Get basic io resource and map it */ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); rs->regs = devm_ioremap_resource(&pdev->dev, mem); if (IS_ERR(rs->regs)) { ret = PTR_ERR(rs->regs); - goto err_put_master; + goto err_put_ctlr; } rs->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk"); if (IS_ERR(rs->apb_pclk)) { dev_err(&pdev->dev, "Failed to get apb_pclk\n"); ret = PTR_ERR(rs->apb_pclk); - goto err_put_master; + goto err_put_ctlr; } rs->spiclk = devm_clk_get(&pdev->dev, "spiclk"); if (IS_ERR(rs->spiclk)) { dev_err(&pdev->dev, "Failed to get spi_pclk\n"); ret = PTR_ERR(rs->spiclk); - goto err_put_master; + goto err_put_ctlr; } ret = clk_prepare_enable(rs->apb_pclk); if (ret < 0) { dev_err(&pdev->dev, "Failed to enable apb_pclk\n"); - goto err_put_master; + goto err_put_ctlr; } ret = clk_prepare_enable(rs->spiclk); @@ -639,7 +666,7 @@ static int rockchip_spi_probe(struct platform_device *pdev) goto err_disable_spiclk; ret = devm_request_threaded_irq(&pdev->dev, ret, rockchip_spi_isr, NULL, - IRQF_ONESHOT, dev_name(&pdev->dev), master); + IRQF_ONESHOT, dev_name(&pdev->dev), ctlr); if (ret) goto err_disable_spiclk; @@ -673,78 +700,90 @@ static int rockchip_spi_probe(struct platform_device *pdev) pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); - master->auto_runtime_pm = true; - master->bus_num = pdev->id; - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_LSB_FIRST; - master->num_chipselect = ROCKCHIP_SPI_MAX_CS_NUM; - master->dev.of_node = pdev->dev.of_node; - master->bits_per_word_mask = SPI_BPW_MASK(16) | SPI_BPW_MASK(8) | SPI_BPW_MASK(4); - master->min_speed_hz = rs->freq / BAUDR_SCKDV_MAX; - master->max_speed_hz = min(rs->freq / BAUDR_SCKDV_MIN, MAX_SCLK_OUT); - - master->set_cs = rockchip_spi_set_cs; - master->transfer_one = rockchip_spi_transfer_one; - 
master->max_transfer_size = rockchip_spi_max_transfer_size; - master->handle_err = rockchip_spi_handle_err; - master->flags = SPI_MASTER_GPIO_SS; - - master->dma_tx = dma_request_chan(rs->dev, "tx"); - if (IS_ERR(master->dma_tx)) { + ctlr->auto_runtime_pm = true; + ctlr->bus_num = pdev->id; + ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_LSB_FIRST; + if (slave_mode) { + ctlr->mode_bits |= SPI_NO_CS; + ctlr->slave_abort = rockchip_spi_slave_abort; + } else { + ctlr->flags = SPI_MASTER_GPIO_SS; + ctlr->max_native_cs = ROCKCHIP_SPI_MAX_CS_NUM; + /* + * rk spi0 has two native cs, spi1..5 one cs only + * if num-cs is missing in the dts, default to 1 + */ + if (of_property_read_u16(np, "num-cs", &ctlr->num_chipselect)) + ctlr->num_chipselect = 1; + ctlr->use_gpio_descriptors = true; + } + ctlr->dev.of_node = pdev->dev.of_node; + ctlr->bits_per_word_mask = SPI_BPW_MASK(16) | SPI_BPW_MASK(8) | SPI_BPW_MASK(4); + ctlr->min_speed_hz = rs->freq / BAUDR_SCKDV_MAX; + ctlr->max_speed_hz = min(rs->freq / BAUDR_SCKDV_MIN, MAX_SCLK_OUT); + + ctlr->set_cs = rockchip_spi_set_cs; + ctlr->transfer_one = rockchip_spi_transfer_one; + ctlr->max_transfer_size = rockchip_spi_max_transfer_size; + ctlr->handle_err = rockchip_spi_handle_err; + + ctlr->dma_tx = dma_request_chan(rs->dev, "tx"); + if (IS_ERR(ctlr->dma_tx)) { /* Check tx to see if we need defer probing driver */ - if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER) { + if (PTR_ERR(ctlr->dma_tx) == -EPROBE_DEFER) { ret = -EPROBE_DEFER; goto err_disable_pm_runtime; } dev_warn(rs->dev, "Failed to request TX DMA channel\n"); - master->dma_tx = NULL; + ctlr->dma_tx = NULL; } - master->dma_rx = dma_request_chan(rs->dev, "rx"); - if (IS_ERR(master->dma_rx)) { - if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER) { + ctlr->dma_rx = dma_request_chan(rs->dev, "rx"); + if (IS_ERR(ctlr->dma_rx)) { + if (PTR_ERR(ctlr->dma_rx) == -EPROBE_DEFER) { ret = -EPROBE_DEFER; goto err_free_dma_tx; } dev_warn(rs->dev, "Failed to request RX DMA channel\n"); - master->dma_rx = NULL; + ctlr->dma_rx = NULL; } - if (master->dma_tx && master->dma_rx) { + if (ctlr->dma_tx && ctlr->dma_rx) { rs->dma_addr_tx = mem->start + ROCKCHIP_SPI_TXDR; rs->dma_addr_rx = mem->start + ROCKCHIP_SPI_RXDR; - master->can_dma = rockchip_spi_can_dma; + ctlr->can_dma = rockchip_spi_can_dma; } - ret = devm_spi_register_master(&pdev->dev, master); + ret = devm_spi_register_controller(&pdev->dev, ctlr); if (ret < 0) { - dev_err(&pdev->dev, "Failed to register master\n"); + dev_err(&pdev->dev, "Failed to register controller\n"); goto err_free_dma_rx; } return 0; err_free_dma_rx: - if (master->dma_rx) - dma_release_channel(master->dma_rx); + if (ctlr->dma_rx) + dma_release_channel(ctlr->dma_rx); err_free_dma_tx: - if (master->dma_tx) - dma_release_channel(master->dma_tx); + if (ctlr->dma_tx) + dma_release_channel(ctlr->dma_tx); err_disable_pm_runtime: pm_runtime_disable(&pdev->dev); err_disable_spiclk: clk_disable_unprepare(rs->spiclk); err_disable_apbclk: clk_disable_unprepare(rs->apb_pclk); -err_put_master: - spi_master_put(master); +err_put_ctlr: + spi_controller_put(ctlr); return ret; } static int rockchip_spi_remove(struct platform_device *pdev) { - struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); - struct rockchip_spi *rs = spi_master_get_devdata(master); + struct spi_controller *ctlr = spi_controller_get(platform_get_drvdata(pdev)); + struct rockchip_spi *rs = spi_controller_get_devdata(ctlr); pm_runtime_get_sync(&pdev->dev); @@ -755,12 +794,12 @@ static int rockchip_spi_remove(struct 
platform_device *pdev) pm_runtime_disable(&pdev->dev); pm_runtime_set_suspended(&pdev->dev); - if (master->dma_tx) - dma_release_channel(master->dma_tx); - if (master->dma_rx) - dma_release_channel(master->dma_rx); + if (ctlr->dma_tx) + dma_release_channel(ctlr->dma_tx); + if (ctlr->dma_rx) + dma_release_channel(ctlr->dma_rx); - spi_master_put(master); + spi_controller_put(ctlr); return 0; } @@ -769,9 +808,9 @@ static int rockchip_spi_remove(struct platform_device *pdev) static int rockchip_spi_suspend(struct device *dev) { int ret; - struct spi_master *master = dev_get_drvdata(dev); + struct spi_controller *ctlr = dev_get_drvdata(dev); - ret = spi_master_suspend(master); + ret = spi_controller_suspend(ctlr); if (ret < 0) return ret; @@ -787,8 +826,8 @@ static int rockchip_spi_suspend(struct device *dev) static int rockchip_spi_resume(struct device *dev) { int ret; - struct spi_master *master = dev_get_drvdata(dev); - struct rockchip_spi *rs = spi_master_get_devdata(master); + struct spi_controller *ctlr = dev_get_drvdata(dev); + struct rockchip_spi *rs = spi_controller_get_devdata(ctlr); pinctrl_pm_select_default_state(dev); @@ -796,7 +835,7 @@ static int rockchip_spi_resume(struct device *dev) if (ret < 0) return ret; - ret = spi_master_resume(master); + ret = spi_controller_resume(ctlr); if (ret < 0) { clk_disable_unprepare(rs->spiclk); clk_disable_unprepare(rs->apb_pclk); @@ -809,8 +848,8 @@ static int rockchip_spi_resume(struct device *dev) #ifdef CONFIG_PM static int rockchip_spi_runtime_suspend(struct device *dev) { - struct spi_master *master = dev_get_drvdata(dev); - struct rockchip_spi *rs = spi_master_get_devdata(master); + struct spi_controller *ctlr = dev_get_drvdata(dev); + struct rockchip_spi *rs = spi_controller_get_devdata(ctlr); clk_disable_unprepare(rs->spiclk); clk_disable_unprepare(rs->apb_pclk); @@ -821,8 +860,8 @@ static int rockchip_spi_runtime_suspend(struct device *dev) static int rockchip_spi_runtime_resume(struct device *dev) { int ret; - struct spi_master *master = dev_get_drvdata(dev); - struct rockchip_spi *rs = spi_master_get_devdata(master); + struct spi_controller *ctlr = dev_get_drvdata(dev); + struct rockchip_spi *rs = spi_controller_get_devdata(ctlr); ret = clk_prepare_enable(rs->apb_pclk); if (ret < 0) diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c index 5497eeb3bf3e..ee0f3edf49cd 100644 --- a/drivers/spi/spi-sc18is602.c +++ b/drivers/spi/spi-sc18is602.c @@ -345,6 +345,6 @@ static struct i2c_driver sc18is602_driver = { module_i2c_driver(sc18is602_driver); -MODULE_DESCRIPTION("SC18IC602/603 SPI Master Driver"); +MODULE_DESCRIPTION("SC18IS602/603 SPI Master Driver"); MODULE_AUTHOR("Guenter Roeck"); MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index 1c11a00a2c36..b2579af0e3eb 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c @@ -1398,7 +1398,7 @@ static int sh_msiof_spi_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(sh_msiof_spi_pm_ops, sh_msiof_spi_suspend, sh_msiof_spi_resume); -#define DEV_PM_OPS &sh_msiof_spi_pm_ops +#define DEV_PM_OPS (&sh_msiof_spi_pm_ops) #else #define DEV_PM_OPS NULL #endif /* CONFIG_PM_SLEEP */ diff --git a/drivers/spi/spi-sprd-adi.c b/drivers/spi/spi-sprd-adi.c index 87dadb6b8ebf..88e6543648cb 100644 --- a/drivers/spi/spi-sprd-adi.c +++ b/drivers/spi/spi-sprd-adi.c @@ -319,7 +319,7 @@ static int sprd_adi_transfer_one(struct spi_controller *ctlr, static void sprd_adi_set_wdt_rst_mode(struct sprd_adi *sadi) { -#ifdef 
CONFIG_SPRD_WATCHDOG +#if IS_ENABLED(CONFIG_SPRD_WATCHDOG) u32 val; /* Set default watchdog reboot mode */ diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c index d066f5144c3e..3c44bb2fd9b1 100644 --- a/drivers/spi/spi-stm32-qspi.c +++ b/drivers/spi/spi-stm32-qspi.c @@ -16,6 +16,7 @@ #include <linux/of.h> #include <linux/of_device.h> #include <linux/pinctrl/consumer.h> +#include <linux/pm_runtime.h> #include <linux/platform_device.h> #include <linux/reset.h> #include <linux/sizes.h> @@ -87,6 +88,7 @@ #define STM32_BUSY_TIMEOUT_US 100000 #define STM32_ABT_TIMEOUT_US 100000 #define STM32_COMP_TIMEOUT_MS 1000 +#define STM32_AUTOSUSPEND_DELAY -1 struct stm32_qspi_flash { struct stm32_qspi *qspi; @@ -431,10 +433,17 @@ static int stm32_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master); int ret; + ret = pm_runtime_get_sync(qspi->dev); + if (ret < 0) + return ret; + mutex_lock(&qspi->lock); ret = stm32_qspi_send(mem, op); mutex_unlock(&qspi->lock); + pm_runtime_mark_last_busy(qspi->dev); + pm_runtime_put_autosuspend(qspi->dev); + return ret; } @@ -444,6 +453,7 @@ static int stm32_qspi_setup(struct spi_device *spi) struct stm32_qspi *qspi = spi_controller_get_devdata(ctrl); struct stm32_qspi_flash *flash; u32 presc; + int ret; if (ctrl->busy) return -EBUSY; @@ -451,6 +461,10 @@ static int stm32_qspi_setup(struct spi_device *spi) if (!spi->max_speed_hz) return -EINVAL; + ret = pm_runtime_get_sync(qspi->dev); + if (ret < 0) + return ret; + presc = DIV_ROUND_UP(qspi->clk_rate, spi->max_speed_hz) - 1; flash = &qspi->flash[spi->chip_select]; @@ -467,6 +481,9 @@ static int stm32_qspi_setup(struct spi_device *spi) writel_relaxed(qspi->dcr_reg, qspi->io_base + QSPI_DCR); mutex_unlock(&qspi->lock); + pm_runtime_mark_last_busy(qspi->dev); + pm_runtime_put_autosuspend(qspi->dev); + return 0; } @@ -538,10 +555,15 @@ static const struct spi_controller_mem_ops stm32_qspi_mem_ops = { static void stm32_qspi_release(struct stm32_qspi *qspi) { + pm_runtime_get_sync(qspi->dev); /* disable qspi */ writel_relaxed(0, qspi->io_base + QSPI_CR); stm32_qspi_dma_free(qspi); mutex_destroy(&qspi->lock); + pm_runtime_put_noidle(qspi->dev); + pm_runtime_disable(qspi->dev); + pm_runtime_set_suspended(qspi->dev); + pm_runtime_dont_use_autosuspend(qspi->dev); clk_disable_unprepare(qspi->clk); } @@ -643,9 +665,20 @@ static int stm32_qspi_probe(struct platform_device *pdev) ctrl->num_chipselect = STM32_QSPI_MAX_NORCHIP; ctrl->dev.of_node = dev->of_node; + pm_runtime_set_autosuspend_delay(dev, STM32_AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + pm_runtime_get_noresume(dev); + ret = devm_spi_register_master(dev, ctrl); - if (!ret) - return 0; + if (ret) + goto err_qspi_release; + + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + + return 0; err_qspi_release: stm32_qspi_release(qspi); @@ -660,14 +693,28 @@ static int stm32_qspi_remove(struct platform_device *pdev) struct stm32_qspi *qspi = platform_get_drvdata(pdev); stm32_qspi_release(qspi); + return 0; } -static int __maybe_unused stm32_qspi_suspend(struct device *dev) +static int __maybe_unused stm32_qspi_runtime_suspend(struct device *dev) { struct stm32_qspi *qspi = dev_get_drvdata(dev); clk_disable_unprepare(qspi->clk); + + return 0; +} + +static int __maybe_unused stm32_qspi_runtime_resume(struct device *dev) +{ + struct stm32_qspi *qspi = dev_get_drvdata(dev); + + return 
clk_prepare_enable(qspi->clk); +} + +static int __maybe_unused stm32_qspi_suspend(struct device *dev) +{ pinctrl_pm_select_sleep_state(dev); return 0; @@ -683,10 +730,17 @@ static int __maybe_unused stm32_qspi_resume(struct device *dev) writel_relaxed(qspi->cr_reg, qspi->io_base + QSPI_CR); writel_relaxed(qspi->dcr_reg, qspi->io_base + QSPI_DCR); + pm_runtime_mark_last_busy(qspi->dev); + pm_runtime_put_autosuspend(qspi->dev); + return 0; } -static SIMPLE_DEV_PM_OPS(stm32_qspi_pm_ops, stm32_qspi_suspend, stm32_qspi_resume); +static const struct dev_pm_ops stm32_qspi_pm_ops = { + SET_RUNTIME_PM_OPS(stm32_qspi_runtime_suspend, + stm32_qspi_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(stm32_qspi_suspend, stm32_qspi_resume) +}; static const struct of_device_id stm32_qspi_match[] = { {.compatible = "st,stm32f469-qspi"}, diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c index 44ac6eb3298d..4c643dfc7fbb 100644 --- a/drivers/spi/spi-stm32.c +++ b/drivers/spi/spi-stm32.c @@ -811,7 +811,9 @@ static irqreturn_t stm32f4_spi_irq_event(int irq, void *dev_id) mask |= STM32F4_SPI_SR_TXE; } - if (!spi->cur_usedma && spi->cur_comm == SPI_FULL_DUPLEX) { + if (!spi->cur_usedma && (spi->cur_comm == SPI_FULL_DUPLEX || + spi->cur_comm == SPI_SIMPLEX_RX || + spi->cur_comm == SPI_3WIRE_RX)) { /* TXE flag is set and is handled when RXNE flag occurs */ sr &= ~STM32F4_SPI_SR_TXE; mask |= STM32F4_SPI_SR_RXNE | STM32F4_SPI_SR_OVR; @@ -850,7 +852,7 @@ static irqreturn_t stm32f4_spi_irq_event(int irq, void *dev_id) stm32f4_spi_read_rx(spi); if (spi->rx_len == 0) end = true; - else /* Load data for discontinuous mode */ + else if (spi->tx_buf)/* Load data for discontinuous mode */ stm32f4_spi_write_tx(spi); } @@ -1151,7 +1153,9 @@ static int stm32f4_spi_transfer_one_irq(struct stm32_spi *spi) /* Enable the interrupts relative to the current communication mode */ if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX) { cr2 |= STM32F4_SPI_CR2_TXEIE; - } else if (spi->cur_comm == SPI_FULL_DUPLEX) { + } else if (spi->cur_comm == SPI_FULL_DUPLEX || + spi->cur_comm == SPI_SIMPLEX_RX || + spi->cur_comm == SPI_3WIRE_RX) { /* In transmit-only mode, the OVR flag is set in the SR register * since the received data are never read. Therefore set OVR * interrupt only when rx buffer is available. 
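[Editor's note] The stm32-qspi hunks above follow the usual runtime-PM-with-autosuspend recipe: probe marks the device active and enables runtime PM, every register-access path brackets the hardware work with pm_runtime_get_sync()/pm_runtime_put_autosuspend(), and the runtime callbacks only gate the functional clock, while system sleep is wired into the same dev_pm_ops. The sketch below shows that recipe in isolation for a hypothetical "foo" driver with a single clock; the foo_* names and the 2000 ms delay are illustrative and not taken from the patch (the patch itself uses a negative delay), only the pm_runtime_*, clk_* and pinctrl_pm_* calls are the real kernel API.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

#define FOO_AUTOSUSPEND_DELAY_MS	2000	/* illustrative value */

struct foo_priv {
	struct clk *clk;
};

/* Runtime PM only gates the functional clock. */
static int __maybe_unused foo_runtime_suspend(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	clk_disable_unprepare(priv->clk);
	return 0;
}

static int __maybe_unused foo_runtime_resume(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	return clk_prepare_enable(priv->clk);
}

/* Called once from probe, after the hardware has been fully initialised. */
static void foo_runtime_pm_init(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, FOO_AUTOSUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);		/* clocks are already running */
	pm_runtime_enable(dev);
}

/* Every path that touches registers brackets the access like this. */
static int foo_do_io(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/* get_sync bumps the usage count even on failure */
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... program the hardware here ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;
}

/* System sleep leaves clock gating to runtime PM and only parks the pins. */
static int __maybe_unused foo_suspend(struct device *dev)
{
	pinctrl_pm_select_sleep_state(dev);
	return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
	pinctrl_pm_select_default_state(dev);
	return 0;
}

/*
 * Unlike SIMPLE_DEV_PM_OPS(), an open-coded dev_pm_ops can carry both
 * runtime and system-sleep handlers; __maybe_unused keeps the build quiet
 * when CONFIG_PM or CONFIG_PM_SLEEP is disabled.
 */
static const struct dev_pm_ops foo_pm_ops = {
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
};

The pm_runtime_put_noidle() in the error branch is the same balancing act that the Tegra hunks further down add after a failed pm_runtime_get_sync() in probe.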
@@ -1462,10 +1466,16 @@ static int stm32f4_spi_set_mode(struct stm32_spi *spi, unsigned int comm_type) stm32_spi_set_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_BIDIMODE | STM32F4_SPI_CR1_BIDIOE); - } else if (comm_type == SPI_FULL_DUPLEX) { + } else if (comm_type == SPI_FULL_DUPLEX || + comm_type == SPI_SIMPLEX_RX) { stm32_spi_clr_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_BIDIMODE | STM32F4_SPI_CR1_BIDIOE); + } else if (comm_type == SPI_3WIRE_RX) { + stm32_spi_set_bits(spi, STM32F4_SPI_CR1, + STM32F4_SPI_CR1_BIDIMODE); + stm32_spi_clr_bits(spi, STM32F4_SPI_CR1, + STM32F4_SPI_CR1_BIDIOE); } else { return -EINVAL; } @@ -1906,6 +1916,7 @@ static int stm32_spi_probe(struct platform_device *pdev) master->prepare_message = stm32_spi_prepare_msg; master->transfer_one = stm32_spi_transfer_one; master->unprepare_message = stm32_spi_unprepare_msg; + master->flags = SPI_MASTER_MUST_TX; spi->dma_tx = dma_request_chan(spi->dev, "tx"); if (IS_ERR(spi->dma_tx)) { diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c index ec7967be9e2f..ecea15534c42 100644 --- a/drivers/spi/spi-sun6i.c +++ b/drivers/spi/spi-sun6i.c @@ -470,6 +470,7 @@ static int sun6i_spi_probe(struct platform_device *pdev) master->max_speed_hz = 100 * 1000 * 1000; master->min_speed_hz = 3 * 1000; + master->use_gpio_descriptors = true; master->set_cs = sun6i_spi_set_cs; master->transfer_one = sun6i_spi_transfer_one; master->num_chipselect = 4; diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c index 83edabdb41ad..c2c58871a947 100644 --- a/drivers/spi/spi-tegra114.c +++ b/drivers/spi/spi-tegra114.c @@ -1398,6 +1398,7 @@ static int tegra_spi_probe(struct platform_device *pdev) ret = pm_runtime_get_sync(&pdev->dev); if (ret < 0) { dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret); + pm_runtime_put_noidle(&pdev->dev); goto exit_pm_disable; } diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c index 514429379206..02cf5f463ba6 100644 --- a/drivers/spi/spi-tegra20-sflash.c +++ b/drivers/spi/spi-tegra20-sflash.c @@ -491,6 +491,7 @@ static int tegra_sflash_probe(struct platform_device *pdev) ret = pm_runtime_get_sync(&pdev->dev); if (ret < 0) { dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret); + pm_runtime_put_noidle(&pdev->dev); goto exit_pm_disable; } diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c index 7f4d932dade7..a07b72e9c344 100644 --- a/drivers/spi/spi-tegra20-slink.c +++ b/drivers/spi/spi-tegra20-slink.c @@ -1118,6 +1118,7 @@ static int tegra_slink_probe(struct platform_device *pdev) ret = pm_runtime_get_sync(&pdev->dev); if (ret < 0) { dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret); + pm_runtime_put_noidle(&pdev->dev); goto exit_pm_disable; } tspi->def_command_reg = SLINK_M_S; diff --git a/drivers/spi/spi-uniphier.c b/drivers/spi/spi-uniphier.c index 0fa50979644d..6a9ef8ee3cc9 100644 --- a/drivers/spi/spi-uniphier.c +++ b/drivers/spi/spi-uniphier.c @@ -659,8 +659,7 @@ static int uniphier_spi_probe(struct platform_device *pdev) priv->master = master; priv->is_save_param = false; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->base = devm_ioremap_resource(&pdev->dev, res); + priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(priv->base)) { ret = PTR_ERR(priv->base); goto out_master_put; @@ -716,8 +715,10 @@ static int uniphier_spi_probe(struct platform_device *pdev) master->dma_tx = dma_request_chan(&pdev->dev, "tx"); if (IS_ERR_OR_NULL(master->dma_tx)) { - if 
(PTR_ERR(master->dma_tx) == -EPROBE_DEFER) + if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; goto out_disable_clk; + } master->dma_tx = NULL; dma_tx_burst = INT_MAX; } else { @@ -732,8 +733,10 @@ static int uniphier_spi_probe(struct platform_device *pdev) master->dma_rx = dma_request_chan(&pdev->dev, "rx"); if (IS_ERR_OR_NULL(master->dma_rx)) { - if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER) + if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; goto out_disable_clk; + } master->dma_rx = NULL; dma_rx_burst = INT_MAX; } else { diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index c92c89467e7e..8158e281f354 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -1023,7 +1023,8 @@ static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg) void *tmp; unsigned int max_tx, max_rx; - if (ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX)) { + if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX)) + && !(msg->spi->mode & SPI_3WIRE)) { max_tx = 0; max_rx = 0; @@ -1075,7 +1076,7 @@ static int spi_transfer_wait(struct spi_controller *ctlr, { struct spi_statistics *statm = &ctlr->statistics; struct spi_statistics *stats = &msg->spi->statistics; - unsigned long long ms = 1; + unsigned long long ms; if (spi_controller_is_slave(ctlr)) { if (wait_for_completion_interruptible(&ctlr->xfer_completion)) { @@ -1160,6 +1161,8 @@ int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer) { int delay; + might_sleep(); + if (!_delay) return -EINVAL; @@ -2111,6 +2114,7 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) } lookup->max_speed_hz = sb->connection_speed; + lookup->bits_per_word = sb->data_bit_length; if (sb->clock_phase == ACPI_SPI_SECOND_PHASE) lookup->mode |= SPI_CPHA; @@ -2760,6 +2764,8 @@ void spi_unregister_controller(struct spi_controller *ctlr) struct spi_controller *found; int id = ctlr->bus_num; + device_for_each_child(&ctlr->dev, NULL, __unregister); + /* First make sure that this controller was ever added */ mutex_lock(&board_lock); found = idr_find(&spi_master_idr, id); @@ -2772,7 +2778,6 @@ void spi_unregister_controller(struct spi_controller *ctlr) list_del(&ctlr->list); mutex_unlock(&board_lock); - device_for_each_child(&ctlr->dev, NULL, __unregister); device_unregister(&ctlr->dev); /* free bus id */ mutex_lock(&board_lock); @@ -3853,8 +3858,7 @@ static u8 *buf; * is zero for success, else a negative errno status code. * This call may only be used from a context that may sleep. * - * Parameters to this routine are always copied using a small buffer; - * portable code should never use this for more than 32 bytes. + * Parameters to this routine are always copied using a small buffer. * Performance-sensitive or bulk transfer code should instead use * spi_{async,sync}() calls with dma-safe buffers. * diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index 80dd1025b953..d753df700e9e 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -62,7 +62,8 @@ static DECLARE_BITMAP(minors, N_SPI_MINORS); #define SPI_MODE_MASK (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH \ | SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP \ | SPI_NO_CS | SPI_READY | SPI_TX_DUAL \ - | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD) + | SPI_TX_QUAD | SPI_TX_OCTAL | SPI_RX_DUAL \ + | SPI_RX_QUAD | SPI_RX_OCTAL) struct spidev_data { dev_t devt; |
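[Editor's note] The core change adding might_sleep() to spi_delay_exec() makes explicit that per-transfer delays may sleep, so messages carrying them must be submitted from process context. Below is a minimal client-side sketch under assumed names: the "foo" device, the 0x9f opcode and the 10 µs turnaround delay are illustrative only; the spi_transfer delay field, SPI_DELAY_UNIT_USECS and spi_sync_transfer() are the real API.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/string.h>

/* Reads 'len' ID bytes into 'id'; the caller provides 'id' as a DMA-safe buffer. */
static int foo_read_id(struct spi_device *spi, u8 *id, size_t len)
{
	struct spi_transfer xfers[2];
	u8 *cmd;
	int ret;

	cmd = kmalloc(1, GFP_KERNEL);	/* DMA-safe, unlike a stack variable */
	if (!cmd)
		return -ENOMEM;
	*cmd = 0x9f;			/* illustrative opcode */

	memset(xfers, 0, sizeof(xfers));
	xfers[0].tx_buf = cmd;
	xfers[0].len = 1;
	/* Give the device time to turn around before the response phase;
	 * executing this delay may sleep, hence the might_sleep() above. */
	xfers[0].delay.value = 10;
	xfers[0].delay.unit = SPI_DELAY_UNIT_USECS;

	xfers[1].rx_buf = id;
	xfers[1].len = len;

	ret = spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
	kfree(cmd);
	return ret;
}

spi_sync_transfer() wraps the transfers in a message and blocks until completion, so it is inherently process-context and safe to combine with such delays.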