| author | Johannes Berg | 2019-02-22 13:48:13 +0100 |
|---|---|---|
| committer | Johannes Berg | 2019-02-22 13:48:13 +0100 |
| commit | b7b14ec1ebef35d22f3f4087816468f22c987f75 (patch) | |
| tree | 3f99f4d7b770d7bba3ee84663b32f98dfbe7582d /drivers | |
| parent | 77ff2c6b49843b01adef1f80abb091753e4c9c65 (diff) | |
| parent | 7a25c6c0aac85bbc50d3ce49cd08888adb14508b (diff) | |
Merge remote-tracking branch 'net-next/master' into mac80211-next
Merge net-next to resolve a conflict and to get the mac80211
rhashtable fixes so further patches can be applied on top.
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Diffstat (limited to 'drivers')
737 files changed, 18663 insertions, 12133 deletions
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 5c093ce01bcd..147f6c7ea59c 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -1029,6 +1029,9 @@ void __init acpi_early_init(void) acpi_permanent_mmap = true; + /* Initialize debug output. Linux does not use ACPICA defaults */ + acpi_dbg_level = ACPI_LV_INFO | ACPI_LV_REPAIR; + #ifdef CONFIG_X86 /* * If the machine falls into the DMI check table, diff --git a/drivers/android/binder.c b/drivers/android/binder.c index cdfc87629efb..4d2b2ad1ee0e 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -5854,9 +5854,10 @@ static int __init init_binder_device(const char *name) static int __init binder_init(void) { int ret; - char *device_name, *device_names, *device_tmp; + char *device_name, *device_tmp; struct binder_device *device; struct hlist_node *tmp; + char *device_names = NULL; ret = binder_alloc_shrinker_init(); if (ret) @@ -5898,23 +5899,29 @@ static int __init binder_init(void) &transaction_log_fops); } - /* - * Copy the module_parameter string, because we don't want to - * tokenize it in-place. - */ - device_names = kstrdup(binder_devices_param, GFP_KERNEL); - if (!device_names) { - ret = -ENOMEM; - goto err_alloc_device_names_failed; - } + if (strcmp(binder_devices_param, "") != 0) { + /* + * Copy the module_parameter string, because we don't want to + * tokenize it in-place. + */ + device_names = kstrdup(binder_devices_param, GFP_KERNEL); + if (!device_names) { + ret = -ENOMEM; + goto err_alloc_device_names_failed; + } - device_tmp = device_names; - while ((device_name = strsep(&device_tmp, ","))) { - ret = init_binder_device(device_name); - if (ret) - goto err_init_binder_device_failed; + device_tmp = device_names; + while ((device_name = strsep(&device_tmp, ","))) { + ret = init_binder_device(device_name); + if (ret) + goto err_init_binder_device_failed; + } } + ret = init_binderfs(); + if (ret) + goto err_init_binder_device_failed; + return ret; err_init_binder_device_failed: diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h index 7fb97f503ef2..045b3e42d98b 100644 --- a/drivers/android/binder_internal.h +++ b/drivers/android/binder_internal.h @@ -46,4 +46,13 @@ static inline bool is_binderfs_device(const struct inode *inode) } #endif +#ifdef CONFIG_ANDROID_BINDERFS +extern int __init init_binderfs(void); +#else +static inline int __init init_binderfs(void) +{ + return 0; +} +#endif + #endif /* _LINUX_BINDER_INTERNAL_H */ diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c index 6a2185eb66c5..e773f45d19d9 100644 --- a/drivers/android/binderfs.c +++ b/drivers/android/binderfs.c @@ -395,6 +395,11 @@ static int binderfs_binder_ctl_create(struct super_block *sb) struct inode *inode = NULL; struct dentry *root = sb->s_root; struct binderfs_info *info = sb->s_fs_info; +#if defined(CONFIG_IPC_NS) + bool use_reserve = (info->ipc_ns == &init_ipc_ns); +#else + bool use_reserve = true; +#endif device = kzalloc(sizeof(*device), GFP_KERNEL); if (!device) @@ -413,7 +418,10 @@ static int binderfs_binder_ctl_create(struct super_block *sb) /* Reserve a new minor number for the new device. */ mutex_lock(&binderfs_minors_mutex); - minor = ida_alloc_max(&binderfs_minors, BINDERFS_MAX_MINOR, GFP_KERNEL); + minor = ida_alloc_max(&binderfs_minors, + use_reserve ? 
BINDERFS_MAX_MINOR : + BINDERFS_MAX_MINOR_CAPPED, + GFP_KERNEL); mutex_unlock(&binderfs_minors_mutex); if (minor < 0) { ret = minor; @@ -542,7 +550,7 @@ static struct file_system_type binder_fs_type = { .fs_flags = FS_USERNS_MOUNT, }; -static int __init init_binderfs(void) +int __init init_binderfs(void) { int ret; @@ -560,5 +568,3 @@ static int __init init_binderfs(void) return ret; } - -device_initcall(init_binderfs); diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index b8c3f9e6af89..adf28788cab5 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4554,6 +4554,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, }, { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, }, { "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM, }, + { "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM, }, /* devices that don't properly handle queued TRIM commands */ { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c index a43276c76fc6..21393ec3b9a4 100644 --- a/drivers/auxdisplay/ht16k33.c +++ b/drivers/auxdisplay/ht16k33.c @@ -509,7 +509,7 @@ static int ht16k33_remove(struct i2c_client *client) struct ht16k33_priv *priv = i2c_get_clientdata(client); struct ht16k33_fbdev *fbdev = &priv->fbdev; - cancel_delayed_work(&fbdev->work); + cancel_delayed_work_sync(&fbdev->work); unregister_framebuffer(fbdev->info); framebuffer_release(fbdev->info); free_page((unsigned long) fbdev->buffer); diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index cf78fa6d470d..a7359535caf5 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c @@ -79,8 +79,7 @@ static void cache_size(struct cacheinfo *this_leaf, struct device_node *np) ct_idx = get_cacheinfo_idx(this_leaf->type); propname = cache_type_info[ct_idx].size_prop; - if (of_property_read_u32(np, propname, &this_leaf->size)) - this_leaf->size = 0; + of_property_read_u32(np, propname, &this_leaf->size); } /* not cache_line_size() because that's a macro in include/linux/cache.h */ @@ -114,8 +113,7 @@ static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np) ct_idx = get_cacheinfo_idx(this_leaf->type); propname = cache_type_info[ct_idx].nr_sets_prop; - if (of_property_read_u32(np, propname, &this_leaf->number_of_sets)) - this_leaf->number_of_sets = 0; + of_property_read_u32(np, propname, &this_leaf->number_of_sets); } static void cache_associativity(struct cacheinfo *this_leaf) diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 457be03b744d..0ea2139c50d8 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -130,7 +130,7 @@ u64 pm_runtime_autosuspend_expiration(struct device *dev) { int autosuspend_delay; u64 last_busy, expires = 0; - u64 now = ktime_to_ns(ktime_get()); + u64 now = ktime_get_mono_fast_ns(); if (!dev->power.use_autosuspend) goto out; @@ -909,7 +909,7 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer) * If 'expires' is after the current time, we've been called * too early. */ - if (expires > 0 && expires < ktime_to_ns(ktime_get())) { + if (expires > 0 && expires < ktime_get_mono_fast_ns()) { dev->power.timer_expires = 0; rpm_suspend(dev, dev->power.timer_autosuspends ? 
(RPM_ASYNC | RPM_AUTO) : RPM_ASYNC); @@ -928,7 +928,7 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer) int pm_schedule_suspend(struct device *dev, unsigned int delay) { unsigned long flags; - ktime_t expires; + u64 expires; int retval; spin_lock_irqsave(&dev->power.lock, flags); @@ -945,8 +945,8 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay) /* Other scheduled or pending requests need to be canceled. */ pm_runtime_cancel_pending(dev); - expires = ktime_add(ktime_get(), ms_to_ktime(delay)); - dev->power.timer_expires = ktime_to_ns(expires); + expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC; + dev->power.timer_expires = expires; dev->power.timer_autosuspends = 0; hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS); diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h index a4aac370f21f..6eded32d1aac 100644 --- a/drivers/bcma/bcma_private.h +++ b/drivers/bcma/bcma_private.h @@ -10,13 +10,13 @@ #include <linux/delay.h> #define bcma_err(bus, fmt, ...) \ - pr_err("bus%d: " fmt, (bus)->num, ##__VA_ARGS__) + dev_err((bus)->dev, "bus%d: " fmt, (bus)->num, ##__VA_ARGS__) #define bcma_warn(bus, fmt, ...) \ - pr_warn("bus%d: " fmt, (bus)->num, ##__VA_ARGS__) + dev_warn((bus)->dev, "bus%d: " fmt, (bus)->num, ##__VA_ARGS__) #define bcma_info(bus, fmt, ...) \ - pr_info("bus%d: " fmt, (bus)->num, ##__VA_ARGS__) + dev_info((bus)->dev, "bus%d: " fmt, (bus)->num, ##__VA_ARGS__) #define bcma_debug(bus, fmt, ...) \ - pr_debug("bus%d: " fmt, (bus)->num, ##__VA_ARGS__) + dev_dbg((bus)->dev, "bus%d: " fmt, (bus)->num, ##__VA_ARGS__) struct bcma_bus; @@ -33,7 +33,6 @@ int __init bcma_bus_early_register(struct bcma_bus *bus); int bcma_bus_suspend(struct bcma_bus *bus); int bcma_bus_resume(struct bcma_bus *bus); #endif -struct device *bcma_bus_get_host_dev(struct bcma_bus *bus); /* scan.c */ void bcma_detect_chip(struct bcma_bus *bus); diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c index 2c0ffb77d738..a5df3d111334 100644 --- a/drivers/bcma/driver_gpio.c +++ b/drivers/bcma/driver_gpio.c @@ -183,7 +183,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc) chip->direction_input = bcma_gpio_direction_input; chip->direction_output = bcma_gpio_direction_output; chip->owner = THIS_MODULE; - chip->parent = bcma_bus_get_host_dev(bus); + chip->parent = bus->dev; #if IS_BUILTIN(CONFIG_OF) chip->of_node = cc->core->dev.of_node; #endif diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c index 63410ecfe640..f52239feb4cb 100644 --- a/drivers/bcma/host_pci.c +++ b/drivers/bcma/host_pci.c @@ -196,6 +196,8 @@ static int bcma_host_pci_probe(struct pci_dev *dev, goto err_pci_release_regions; } + bus->dev = &dev->dev; + /* Map MMIO */ err = -ENOMEM; bus->mmio = pci_iomap(dev, 0, ~0UL); diff --git a/drivers/bcma/host_soc.c b/drivers/bcma/host_soc.c index 2dce34789329..c8073b509a2b 100644 --- a/drivers/bcma/host_soc.c +++ b/drivers/bcma/host_soc.c @@ -179,7 +179,6 @@ int __init bcma_host_soc_register(struct bcma_soc *soc) /* Host specific */ bus->hosttype = BCMA_HOSTTYPE_SOC; bus->ops = &bcma_host_soc_ops; - bus->host_pdev = NULL; /* Initialize struct, detect chip */ bcma_init_bus(bus); @@ -213,6 +212,8 @@ static int bcma_host_soc_probe(struct platform_device *pdev) if (!bus) return -ENOMEM; + bus->dev = dev; + /* Map MMIO */ bus->mmio = of_iomap(np, 0); if (!bus->mmio) @@ -221,7 +222,6 @@ static int bcma_host_soc_probe(struct platform_device *pdev) /* Host specific */ bus->hosttype = BCMA_HOSTTYPE_SOC; bus->ops = 
&bcma_host_soc_ops; - bus->host_pdev = pdev; /* Initialize struct, detect chip */ bcma_init_bus(bus); diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c index fc1f4acdd189..6535614a7dc1 100644 --- a/drivers/bcma/main.c +++ b/drivers/bcma/main.c @@ -223,8 +223,8 @@ unsigned int bcma_core_irq(struct bcma_device *core, int num) mips_irq = bcma_core_mips_irq(core); return mips_irq <= 4 ? mips_irq + 2 : 0; } - if (bus->host_pdev) - return bcma_of_get_irq(&bus->host_pdev->dev, core, num); + if (bus->dev) + return bcma_of_get_irq(bus->dev, core, num); return 0; case BCMA_HOSTTYPE_SDIO: return 0; @@ -239,18 +239,18 @@ void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core) core->dev.release = bcma_release_core_dev; core->dev.bus = &bcma_bus_type; dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index); - core->dev.parent = bcma_bus_get_host_dev(bus); - if (core->dev.parent) - bcma_of_fill_device(core->dev.parent, core); + core->dev.parent = bus->dev; + if (bus->dev) + bcma_of_fill_device(bus->dev, core); switch (bus->hosttype) { case BCMA_HOSTTYPE_PCI: - core->dma_dev = &bus->host_pci->dev; + core->dma_dev = bus->dev; core->irq = bus->host_pci->irq; break; case BCMA_HOSTTYPE_SOC: - if (IS_ENABLED(CONFIG_OF) && bus->host_pdev) { - core->dma_dev = &bus->host_pdev->dev; + if (IS_ENABLED(CONFIG_OF) && bus->dev) { + core->dma_dev = bus->dev; } else { core->dev.dma_mask = &core->dev.coherent_dma_mask; core->dma_dev = &core->dev; @@ -261,28 +261,6 @@ void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core) } } -struct device *bcma_bus_get_host_dev(struct bcma_bus *bus) -{ - switch (bus->hosttype) { - case BCMA_HOSTTYPE_PCI: - if (bus->host_pci) - return &bus->host_pci->dev; - else - return NULL; - case BCMA_HOSTTYPE_SOC: - if (bus->host_pdev) - return &bus->host_pdev->dev; - else - return NULL; - case BCMA_HOSTTYPE_SDIO: - if (bus->host_sdio) - return &bus->host_sdio->dev; - else - return NULL; - } - return NULL; -} - void bcma_init_bus(struct bcma_bus *bus) { mutex_lock(&bcma_buses_mutex); @@ -402,7 +380,6 @@ int bcma_bus_register(struct bcma_bus *bus) { int err; struct bcma_device *core; - struct device *dev; /* Scan for devices (cores) */ err = bcma_bus_scan(bus); @@ -425,10 +402,8 @@ int bcma_bus_register(struct bcma_bus *bus) bcma_core_pci_early_init(&bus->drv_pci[0]); } - dev = bcma_bus_get_host_dev(bus); - if (dev) { - of_platform_default_populate(dev->of_node, NULL, dev); - } + if (bus->dev) + of_platform_default_populate(bus->dev->of_node, NULL, bus->dev); /* Cores providing flash access go before SPROM init */ list_for_each_entry(core, &bus->cores, list) { diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 6f2856c6d0f2..55481b40df9a 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -4075,7 +4075,7 @@ static unsigned int floppy_check_events(struct gendisk *disk, if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) { if (lock_fdc(drive)) - return -EINTR; + return 0; poll_drive(false, 0); process_fd_request(); } diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index f94d33525771..d299ec79e4c3 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -781,12 +781,12 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_QUIRK("smartreflex", 0, -1, 0x38, -1, 0x00000000, 0xffffffff, SYSC_QUIRK_LEGACY_IDLE), SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff, - SYSC_QUIRK_LEGACY_IDLE), + 0), /* Some timers on omap4 and later */ SYSC_QUIRK("timer", 0, 0, 0x10, -1, 
0x50002100, 0xffffffff, - SYSC_QUIRK_LEGACY_IDLE), + 0), SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x4fff1301, 0xffff00ff, - SYSC_QUIRK_LEGACY_IDLE), + 0), SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff, SYSC_QUIRK_LEGACY_IDLE), /* Uarts on omap4 and later */ diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 6ccdbedb02f3..d2477a5058ac 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -1513,9 +1513,19 @@ static int clk_fetch_parent_index(struct clk_core *core, if (!parent) return -EINVAL; - for (i = 0; i < core->num_parents; i++) - if (clk_core_get_parent_by_index(core, i) == parent) + for (i = 0; i < core->num_parents; i++) { + if (core->parents[i] == parent) + return i; + + if (core->parents[i]) + continue; + + /* Fallback to comparing globally unique names */ + if (!strcmp(parent->name, core->parent_names[i])) { + core->parents[i] = parent; return i; + } + } return -EINVAL; } diff --git a/drivers/clk/imx/clk-frac-pll.c b/drivers/clk/imx/clk-frac-pll.c index 0026c3969b1e..76b9eb15604e 100644 --- a/drivers/clk/imx/clk-frac-pll.c +++ b/drivers/clk/imx/clk-frac-pll.c @@ -155,13 +155,14 @@ static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate, { struct clk_frac_pll *pll = to_clk_frac_pll(hw); u32 val, divfi, divff; - u64 temp64 = parent_rate; + u64 temp64; int ret; parent_rate *= 8; rate *= 2; divfi = rate / parent_rate; - temp64 *= rate - divfi; + temp64 = parent_rate * divfi; + temp64 = rate - temp64; temp64 *= PLL_FRAC_DENOM; do_div(temp64, parent_rate); divff = temp64; diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c index 61fefc046ec5..d083b860f083 100644 --- a/drivers/clk/mmp/clk-of-mmp2.c +++ b/drivers/clk/mmp/clk-of-mmp2.c @@ -53,7 +53,6 @@ #define APMU_DISP1 0x110 #define APMU_CCIC0 0x50 #define APMU_CCIC1 0xf4 -#define APMU_SP 0x68 #define MPMU_UART_PLL 0x14 struct mmp2_clk_unit { @@ -210,8 +209,6 @@ static struct mmp_clk_mix_config ccic1_mix_config = { .reg_info = DEFINE_MIX_REG_INFO(4, 16, 2, 6, 32), }; -static DEFINE_SPINLOCK(sp_lock); - static struct mmp_param_mux_clk apmu_mux_clks[] = { {MMP2_CLK_DISP0_MUX, "disp0_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP0, 6, 2, 0, &disp0_lock}, {MMP2_CLK_DISP1_MUX, "disp1_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP1, 6, 2, 0, &disp1_lock}, @@ -242,7 +239,6 @@ static struct mmp_param_gate_clk apmu_gate_clks[] = { {MMP2_CLK_CCIC1, "ccic1_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x1b, 0x1b, 0x0, 0, &ccic1_lock}, {MMP2_CLK_CCIC1_PHY, "ccic1_phy_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x24, 0x24, 0x0, 0, &ccic1_lock}, {MMP2_CLK_CCIC1_SPHY, "ccic1_sphy_clk", "ccic1_sphy_div", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x300, 0x300, 0x0, 0, &ccic1_lock}, - {MMP2_CLK_SP, "sp_clk", NULL, CLK_SET_RATE_PARENT, APMU_SP, 0x1b, 0x1b, 0x0, 0, &sp_lock}, }; static void mmp2_axi_periph_clk_init(struct mmp2_clk_unit *pxa_unit) diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c index c782e62dd98b..58fa5c247af1 100644 --- a/drivers/clk/qcom/gcc-sdm845.c +++ b/drivers/clk/qcom/gcc-sdm845.c @@ -115,8 +115,8 @@ static const char * const gcc_parent_names_6[] = { "core_bi_pll_test_se", }; -static const char * const gcc_parent_names_7[] = { - "bi_tcxo", +static const char * const gcc_parent_names_7_ao[] = { + "bi_tcxo_ao", "gpll0", "gpll0_out_even", "core_bi_pll_test_se", @@ -128,6 +128,12 @@ static const char * const gcc_parent_names_8[] = { "core_bi_pll_test_se", }; +static 
const char * const gcc_parent_names_8_ao[] = { + "bi_tcxo_ao", + "gpll0", + "core_bi_pll_test_se", +}; + static const struct parent_map gcc_parent_map_10[] = { { P_BI_TCXO, 0 }, { P_GPLL0_OUT_MAIN, 1 }, @@ -210,7 +216,7 @@ static struct clk_rcg2 gcc_cpuss_ahb_clk_src = { .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src, .clkr.hw.init = &(struct clk_init_data){ .name = "gcc_cpuss_ahb_clk_src", - .parent_names = gcc_parent_names_7, + .parent_names = gcc_parent_names_7_ao, .num_parents = 4, .ops = &clk_rcg2_ops, }, @@ -229,7 +235,7 @@ static struct clk_rcg2 gcc_cpuss_rbcpr_clk_src = { .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src, .clkr.hw.init = &(struct clk_init_data){ .name = "gcc_cpuss_rbcpr_clk_src", - .parent_names = gcc_parent_names_8, + .parent_names = gcc_parent_names_8_ao, .num_parents = 3, .ops = &clk_rcg2_ops, }, diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c index 8d77090ad94a..0241450f3eb3 100644 --- a/drivers/clk/ti/divider.c +++ b/drivers/clk/ti/divider.c @@ -403,8 +403,10 @@ int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div, num_dividers = i; tmp = kcalloc(valid_div + 1, sizeof(*tmp), GFP_KERNEL); - if (!tmp) + if (!tmp) { + *table = ERR_PTR(-ENOMEM); return -ENOMEM; + } valid_div = 0; *width = 0; @@ -439,6 +441,7 @@ struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup) { struct clk_omap_divider *div; struct clk_omap_reg *reg; + int ret; if (!setup) return NULL; @@ -458,6 +461,12 @@ struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup) div->flags |= CLK_DIVIDER_POWER_OF_TWO; div->table = _get_div_table_from_setup(setup, &div->width); + if (IS_ERR(div->table)) { + ret = PTR_ERR(div->table); + kfree(div); + return ERR_PTR(ret); + } + div->shift = setup->bit_shift; div->latch = -EINVAL; diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c index 595124074821..c364027638e1 100644 --- a/drivers/clocksource/timer-ti-dm.c +++ b/drivers/clocksource/timer-ti-dm.c @@ -154,6 +154,10 @@ static int omap_dm_timer_of_set_source(struct omap_dm_timer *timer) if (IS_ERR(parent)) return -ENODEV; + /* Bail out if both clocks point to fck */ + if (clk_is_match(parent, timer->fclk)) + return 0; + ret = clk_set_parent(timer->fclk, parent); if (ret < 0) pr_err("%s: failed to set parent\n", __func__); @@ -864,7 +868,6 @@ static int omap_dm_timer_probe(struct platform_device *pdev) timer->pdev = pdev; pm_runtime_enable(dev); - pm_runtime_irq_safe(dev); if (!timer->reserved) { ret = pm_runtime_get_sync(dev); diff --git a/drivers/cpuidle/poll_state.c b/drivers/cpuidle/poll_state.c index b17d153e724f..23a1b27579a5 100644 --- a/drivers/cpuidle/poll_state.c +++ b/drivers/cpuidle/poll_state.c @@ -21,7 +21,7 @@ static int __cpuidle poll_idle(struct cpuidle_device *dev, local_irq_enable(); if (!current_set_polling_and_test()) { unsigned int loop_count = 0; - u64 limit = TICK_USEC; + u64 limit = TICK_NSEC; int i; for (i = 1; i < drv->state_count; i++) { diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c index fe070d75c842..4c97478d44bd 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c +++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c @@ -537,6 +537,8 @@ static void process_response_list(struct nitrox_cmdq *cmdq) struct nitrox_device *ndev = cmdq->ndev; struct nitrox_softreq *sr; int req_completed = 0, err = 0, budget; + completion_t callback; + void *cb_arg; /* check all pending requests */ budget = atomic_read(&cmdq->pending_count); @@ -564,13 +566,13 @@ 
static void process_response_list(struct nitrox_cmdq *cmdq)
 		smp_mb__after_atomic();
 		/* remove from response list */
 		response_list_del(sr, cmdq);
-
 		/* ORH error code */
 		err = READ_ONCE(*sr->resp.orh) & 0xff;
-
-		if (sr->callback)
-			sr->callback(sr->cb_arg, err);
+		callback = sr->callback;
+		cb_arg = sr->cb_arg;
 		softreq_destroy(sr);
+		if (callback)
+			callback(cb_arg, err);
 		req_completed++;
 	}
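The nitrox change above is a use-after-free fix for completion callbacks: the callback may free or recycle the state backing the request, so the driver now snapshots the callback pointer and its argument before softreq_destroy() and only then invokes the callback. A minimal sketch of the pattern, with the request struct simplified from the driver's nitrox_softreq:

	typedef void (*completion_t)(void *cb_arg, int err);

	struct softreq {
		completion_t callback;
		void *cb_arg;
		/* ... response words, DMA buffers, list linkage ... */
	};

	static void complete_and_destroy(struct softreq *sr, int err)
	{
		completion_t callback = sr->callback;	/* snapshot first */
		void *cb_arg = sr->cb_arg;

		softreq_destroy(sr);	/* sr must not be touched after this */

		if (callback)
			callback(cb_arg, err);	/* uses only the snapshots */
	}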
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 8ada308d72ee..b0125ad65825 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -380,7 +380,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
 	rc = cc_ivgen_init(new_drvdata);
 	if (rc) {
 		dev_err(dev, "cc_ivgen_init failed\n");
-		goto post_power_mgr_err;
+		goto post_buf_mgr_err;
 	}
 
 	/* Allocate crypto algs */
@@ -403,6 +403,9 @@ static int init_cc_resources(struct platform_device *plat_dev)
 		goto post_hash_err;
 	}
 
+	/* All set, we can allow autosuspend */
+	cc_pm_go(new_drvdata);
+
 	/* If we got here and FIPS mode is enabled
 	 * it means all FIPS test passed, so let TEE
 	 * know we're good.
@@ -417,8 +420,6 @@ post_cipher_err:
 	cc_cipher_free(new_drvdata);
 post_ivgen_err:
 	cc_ivgen_fini(new_drvdata);
-post_power_mgr_err:
-	cc_pm_fini(new_drvdata);
 post_buf_mgr_err:
 	cc_buffer_mgr_fini(new_drvdata);
 post_req_mgr_err:
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
index d990f472e89f..6ff7e75ad90e 100644
--- a/drivers/crypto/ccree/cc_pm.c
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -100,20 +100,19 @@ int cc_pm_put_suspend(struct device *dev)
 
 int cc_pm_init(struct cc_drvdata *drvdata)
 {
-	int rc = 0;
 	struct device *dev = drvdata_to_dev(drvdata);
 
 	/* must be before the enabling to avoid resdundent suspending */
 	pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
 	pm_runtime_use_autosuspend(dev);
 	/* activate the PM module */
-	rc = pm_runtime_set_active(dev);
-	if (rc)
-		return rc;
-	/* enable the PM module*/
-	pm_runtime_enable(dev);
+	return pm_runtime_set_active(dev);
+}
 
-	return rc;
+/* enable the PM module*/
+void cc_pm_go(struct cc_drvdata *drvdata)
+{
+	pm_runtime_enable(drvdata_to_dev(drvdata));
 }
 
 void cc_pm_fini(struct cc_drvdata *drvdata)
diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h
index 020a5403c58b..f62624357020 100644
--- a/drivers/crypto/ccree/cc_pm.h
+++ b/drivers/crypto/ccree/cc_pm.h
@@ -16,6 +16,7 @@ extern const struct dev_pm_ops ccree_pm;
 
 int cc_pm_init(struct cc_drvdata *drvdata);
+void cc_pm_go(struct cc_drvdata *drvdata);
 void cc_pm_fini(struct cc_drvdata *drvdata);
 int cc_pm_suspend(struct device *dev);
 int cc_pm_resume(struct device *dev);
@@ -29,6 +30,8 @@ static inline int cc_pm_init(struct cc_drvdata *drvdata)
 	return 0;
 }
 
+static void cc_pm_go(struct cc_drvdata *drvdata) {}
+
 static inline void cc_pm_fini(struct cc_drvdata *drvdata) {}
 
 static inline int cc_pm_suspend(struct device *dev)
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 4e557684f792..fe69dccfa0c0 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -203,6 +203,7 @@ struct at_xdmac_chan {
 	u32			save_cim;
 	u32			save_cnda;
 	u32			save_cndc;
+	u32			irq_status;
 	unsigned long		status;
 	struct tasklet_struct	tasklet;
 	struct dma_slave_config	sconfig;
@@ -1580,8 +1581,8 @@ static void at_xdmac_tasklet(unsigned long data)
 	struct at_xdmac_desc	*desc;
 	u32			error_mask;
 
-	dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n",
-		__func__, atchan->status);
+	dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
+		__func__, atchan->irq_status);
 
 	error_mask = AT_XDMAC_CIS_RBEIS
 		   | AT_XDMAC_CIS_WBEIS
@@ -1589,15 +1590,15 @@ static void at_xdmac_tasklet(unsigned long data)
 
 	if (at_xdmac_chan_is_cyclic(atchan)) {
 		at_xdmac_handle_cyclic(atchan);
-	} else if ((atchan->status & AT_XDMAC_CIS_LIS)
-		   || (atchan->status & error_mask)) {
+	} else if ((atchan->irq_status & AT_XDMAC_CIS_LIS)
+		   || (atchan->irq_status & error_mask)) {
 		struct dma_async_tx_descriptor  *txd;
 
-		if (atchan->status & AT_XDMAC_CIS_RBEIS)
+		if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
 			dev_err(chan2dev(&atchan->chan), "read bus error!!!");
-		if (atchan->status & AT_XDMAC_CIS_WBEIS)
+		if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
 			dev_err(chan2dev(&atchan->chan), "write bus error!!!");
-		if (atchan->status & AT_XDMAC_CIS_ROIS)
+		if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
 			dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
 
 		spin_lock(&atchan->lock);
@@ -1652,7 +1653,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
 			atchan = &atxdmac->chan[i];
 			chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
 			chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
-			atchan->status = chan_status & chan_imr;
+			atchan->irq_status = chan_status & chan_imr;
 			dev_vdbg(atxdmac->dma.dev,
 				 "%s: chan%d: imr=0x%x, status=0x%x\n",
 				 __func__, i, chan_imr, chan_status);
@@ -1666,7 +1667,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
 				 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
 				 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
 
-			if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
+			if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
 				at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
 
 			tasklet_schedule(&atchan->tasklet);
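The at_xdmac change is a related interrupt race fix: the hard IRQ handler and the tasklet previously shared the one status word, which also carries software channel-state bits, so a fresh interrupt could clobber bits the tasklet was still evaluating. The patch dedicates an irq_status field to the hardware snapshot. A condensed sketch of the resulting split (field names from the patch; hw_read_cis()/hw_read_cim() are stand-ins for the driver's at_xdmac_chan_read() calls):

	struct chan {
		u32			irq_status;	/* masked CIS, written only in hard IRQ */
		unsigned long		status;		/* software state, atomic bit ops only */
		struct tasklet_struct	tasklet;
	};

	static irqreturn_t chan_irq(int irq, void *data)
	{
		struct chan *c = data;

		/* latch the hardware status once; the tasklet reads this snapshot */
		c->irq_status = hw_read_cis(c) & hw_read_cim(c);
		tasklet_schedule(&c->tasklet);
		return IRQ_HANDLED;
	}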
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 1a44c8086d77..ae10f5614f95 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -406,38 +406,32 @@ static void bcm2835_dma_fill_cb_chain_with_sg(
 	}
 }
 
-static int bcm2835_dma_abort(void __iomem *chan_base)
+static int bcm2835_dma_abort(struct bcm2835_chan *c)
 {
-	unsigned long cs;
+	void __iomem *chan_base = c->chan_base;
 	long int timeout = 10000;
 
-	cs = readl(chan_base + BCM2835_DMA_CS);
-	if (!(cs & BCM2835_DMA_ACTIVE))
+	/*
+	 * A zero control block address means the channel is idle.
+	 * (The ACTIVE flag in the CS register is not a reliable indicator.)
+	 */
+	if (!readl(chan_base + BCM2835_DMA_ADDR))
 		return 0;
 
 	/* Write 0 to the active bit - Pause the DMA */
 	writel(0, chan_base + BCM2835_DMA_CS);
 
 	/* Wait for any current AXI transfer to complete */
-	while ((cs & BCM2835_DMA_ISPAUSED) && --timeout) {
+	while ((readl(chan_base + BCM2835_DMA_CS) &
+		BCM2835_DMA_WAITING_FOR_WRITES) && --timeout)
 		cpu_relax();
-		cs = readl(chan_base + BCM2835_DMA_CS);
-	}
 
-	/* We'll un-pause when we set of our next DMA */
+	/* Peripheral might be stuck and fail to signal AXI write responses */
 	if (!timeout)
-		return -ETIMEDOUT;
-
-	if (!(cs & BCM2835_DMA_ACTIVE))
-		return 0;
-
-	/* Terminate the control block chain */
-	writel(0, chan_base + BCM2835_DMA_NEXTCB);
-
-	/* Abort the whole DMA */
-	writel(BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE,
-	       chan_base + BCM2835_DMA_CS);
+		dev_err(c->vc.chan.device->dev,
+			"failed to complete outstanding writes\n");
 
+	writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS);
 	return 0;
 }
 
@@ -476,8 +470,15 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
 
 	spin_lock_irqsave(&c->vc.lock, flags);
 
-	/* Acknowledge interrupt */
-	writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS);
+	/*
+	 * Clear the INT flag to receive further interrupts. Keep the channel
+	 * active in case the descriptor is cyclic or in case the client has
+	 * already terminated the descriptor and issued a new one. (May happen
+	 * if this IRQ handler is threaded.) If the channel is finished, it
+	 * will remain idle despite the ACTIVE flag being set.
+	 */
+	writel(BCM2835_DMA_INT | BCM2835_DMA_ACTIVE,
+	       c->chan_base + BCM2835_DMA_CS);
 
 	d = c->desc;
 
@@ -485,11 +486,7 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
 	if (d->cyclic) {
 		/* call the cyclic callback */
 		vchan_cyclic_callback(&d->vd);
-
-		/* Keep the DMA engine running */
-		writel(BCM2835_DMA_ACTIVE,
-		       c->chan_base + BCM2835_DMA_CS);
-	} else {
+	} else if (!readl(c->chan_base + BCM2835_DMA_ADDR)) {
 		vchan_cookie_complete(&c->desc->vd);
 		bcm2835_dma_start_desc(c);
 	}
@@ -779,7 +776,6 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
 	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
 	struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
 	unsigned long flags;
-	int timeout = 10000;
 	LIST_HEAD(head);
 
 	spin_lock_irqsave(&c->vc.lock, flags);
@@ -789,27 +785,11 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
 	list_del_init(&c->node);
 	spin_unlock(&d->lock);
 
-	/*
-	 * Stop DMA activity: we assume the callback will not be called
-	 * after bcm_dma_abort() returns (even if it does, it will see
-	 * c->desc is NULL and exit.)
- */ + /* stop DMA activity */ if (c->desc) { vchan_terminate_vdesc(&c->desc->vd); c->desc = NULL; - bcm2835_dma_abort(c->chan_base); - - /* Wait for stopping */ - while (--timeout) { - if (!(readl(c->chan_base + BCM2835_DMA_CS) & - BCM2835_DMA_ACTIVE)) - break; - - cpu_relax(); - } - - if (!timeout) - dev_err(d->ddev.dev, "DMA transfer could not be terminated\n"); + bcm2835_dma_abort(c); } vchan_get_all_descriptors(&c->vc, &head); diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 2eea4ef72915..6511928b4cdf 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -711,11 +711,9 @@ static int dmatest_func(void *data) srcs[i] = um->addr[i] + src_off; ret = dma_mapping_error(dev->dev, um->addr[i]); if (ret) { - dmaengine_unmap_put(um); result("src mapping error", total_tests, src_off, dst_off, len, ret); - failed_tests++; - continue; + goto error_unmap_continue; } um->to_cnt++; } @@ -730,11 +728,9 @@ static int dmatest_func(void *data) DMA_BIDIRECTIONAL); ret = dma_mapping_error(dev->dev, dsts[i]); if (ret) { - dmaengine_unmap_put(um); result("dst mapping error", total_tests, src_off, dst_off, len, ret); - failed_tests++; - continue; + goto error_unmap_continue; } um->bidi_cnt++; } @@ -762,12 +758,10 @@ static int dmatest_func(void *data) } if (!tx) { - dmaengine_unmap_put(um); result("prep error", total_tests, src_off, dst_off, len, ret); msleep(100); - failed_tests++; - continue; + goto error_unmap_continue; } done->done = false; @@ -776,12 +770,10 @@ static int dmatest_func(void *data) cookie = tx->tx_submit(tx); if (dma_submit_error(cookie)) { - dmaengine_unmap_put(um); result("submit error", total_tests, src_off, dst_off, len, ret); msleep(100); - failed_tests++; - continue; + goto error_unmap_continue; } dma_async_issue_pending(chan); @@ -790,22 +782,20 @@ static int dmatest_func(void *data) status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); - dmaengine_unmap_put(um); - if (!done->done) { result("test timed out", total_tests, src_off, dst_off, len, 0); - failed_tests++; - continue; + goto error_unmap_continue; } else if (status != DMA_COMPLETE) { result(status == DMA_ERROR ? 
"completion error status" : "completion busy status", total_tests, src_off, dst_off, len, ret); - failed_tests++; - continue; + goto error_unmap_continue; } + dmaengine_unmap_put(um); + if (params->noverify) { verbose_result("test passed", total_tests, src_off, dst_off, len, 0); @@ -846,6 +836,12 @@ static int dmatest_func(void *data) verbose_result("test passed", total_tests, src_off, dst_off, len, 0); } + + continue; + +error_unmap_continue: + dmaengine_unmap_put(um); + failed_tests++; } ktime = ktime_sub(ktime_get(), ktime); ktime = ktime_sub(ktime, comparetime); diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index c2fff3f6c9ca..4a09af3cd546 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -618,7 +618,7 @@ static void imxdma_tasklet(unsigned long data) { struct imxdma_channel *imxdmac = (void *)data; struct imxdma_engine *imxdma = imxdmac->imxdma; - struct imxdma_desc *desc; + struct imxdma_desc *desc, *next_desc; unsigned long flags; spin_lock_irqsave(&imxdma->lock, flags); @@ -648,10 +648,10 @@ static void imxdma_tasklet(unsigned long data) list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free); if (!list_empty(&imxdmac->ld_queue)) { - desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc, - node); + next_desc = list_first_entry(&imxdmac->ld_queue, + struct imxdma_desc, node); list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active); - if (imxdma_xfer_desc(desc) < 0) + if (imxdma_xfer_desc(next_desc) < 0) dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n", __func__, imxdmac->channel); } diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c index 472c88ae1c0f..92f843eaf1e0 100644 --- a/drivers/firmware/arm_scmi/bus.c +++ b/drivers/firmware/arm_scmi/bus.c @@ -119,6 +119,11 @@ void scmi_driver_unregister(struct scmi_driver *driver) } EXPORT_SYMBOL_GPL(scmi_driver_unregister); +static void scmi_device_release(struct device *dev) +{ + kfree(to_scmi_dev(dev)); +} + struct scmi_device * scmi_device_create(struct device_node *np, struct device *parent, int protocol) { @@ -138,6 +143,7 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol) scmi_dev->dev.parent = parent; scmi_dev->dev.of_node = np; scmi_dev->dev.bus = &scmi_bus_type; + scmi_dev->dev.release = scmi_device_release; dev_set_name(&scmi_dev->dev, "scmi_dev.%d", id); retval = device_register(&scmi_dev->dev); @@ -156,9 +162,8 @@ free_mem: void scmi_device_destroy(struct scmi_device *scmi_dev) { scmi_handle_put(scmi_dev->handle); - device_unregister(&scmi_dev->dev); ida_simple_remove(&scmi_bus_id, scmi_dev->id); - kfree(scmi_dev); + device_unregister(&scmi_dev->dev); } void scmi_set_handle(struct scmi_device *scmi_dev) diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c index 23ea1ed409d1..352bd2473162 100644 --- a/drivers/firmware/efi/arm-runtime.c +++ b/drivers/firmware/efi/arm-runtime.c @@ -37,8 +37,9 @@ extern u64 efi_system_table; static struct ptdump_info efi_ptdump_info = { .mm = &efi_mm, .markers = (struct addr_marker[]){ - { 0, "UEFI runtime start" }, - { DEFAULT_MAP_WINDOW_64, "UEFI runtime end" } + { 0, "UEFI runtime start" }, + { DEFAULT_MAP_WINDOW_64, "UEFI runtime end" }, + { -1, NULL } }, .base_addr = 0, }; diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 4c46ff6f2242..55b77c576c42 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -592,11 +592,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz, 
early_memunmap(tbl, sizeof(*tbl)); } - return 0; -} -int __init efi_apply_persistent_mem_reservations(void) -{ if (efi.mem_reserve != EFI_INVALID_TABLE_ADDR) { unsigned long prsv = efi.mem_reserve; diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c index eee42d5e25ee..c037c6c5d0b7 100644 --- a/drivers/firmware/efi/libstub/arm-stub.c +++ b/drivers/firmware/efi/libstub/arm-stub.c @@ -75,9 +75,6 @@ void install_memreserve_table(efi_system_table_t *sys_table_arg) efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID; efi_status_t status; - if (IS_ENABLED(CONFIG_ARM)) - return; - status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv), (void **)&rsv); if (status != EFI_SUCCESS) { diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c index 8903b9ccfc2b..e2abfdb5cee6 100644 --- a/drivers/firmware/efi/runtime-wrappers.c +++ b/drivers/firmware/efi/runtime-wrappers.c @@ -147,6 +147,13 @@ void efi_call_virt_check_flags(unsigned long flags, const char *call) static DEFINE_SEMAPHORE(efi_runtime_lock); /* + * Expose the EFI runtime lock to the UV platform + */ +#ifdef CONFIG_X86_UV +extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock); +#endif + +/* * Calls the appropriate efi_runtime_service() with the appropriate * arguments. * diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c index a1a09e04fab8..13851b3d1c56 100644 --- a/drivers/fpga/stratix10-soc.c +++ b/drivers/fpga/stratix10-soc.c @@ -508,14 +508,11 @@ static int __init s10_init(void) return -ENODEV; np = of_find_matching_node(fw_np, s10_of_match); - if (!np) { - of_node_put(fw_np); + if (!np) return -ENODEV; - } of_node_put(np); ret = of_platform_populate(fw_np, s10_of_match, NULL, NULL); - of_node_put(fw_np); if (ret) return ret; diff --git a/drivers/gpio/gpio-altera-a10sr.c b/drivers/gpio/gpio-altera-a10sr.c index 6b11f1314248..7f9e0304b510 100644 --- a/drivers/gpio/gpio-altera-a10sr.c +++ b/drivers/gpio/gpio-altera-a10sr.c @@ -66,8 +66,10 @@ static int altr_a10sr_gpio_direction_input(struct gpio_chip *gc, static int altr_a10sr_gpio_direction_output(struct gpio_chip *gc, unsigned int nr, int value) { - if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT)) + if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT)) { + altr_a10sr_gpio_set(gc, nr, value); return 0; + } return -EINVAL; } diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c index e0d6a0a7bc69..e41223c05f6e 100644 --- a/drivers/gpio/gpio-eic-sprd.c +++ b/drivers/gpio/gpio-eic-sprd.c @@ -180,7 +180,18 @@ static void sprd_eic_free(struct gpio_chip *chip, unsigned int offset) static int sprd_eic_get(struct gpio_chip *chip, unsigned int offset) { - return sprd_eic_read(chip, offset, SPRD_EIC_DBNC_DATA); + struct sprd_eic *sprd_eic = gpiochip_get_data(chip); + + switch (sprd_eic->type) { + case SPRD_EIC_DEBOUNCE: + return sprd_eic_read(chip, offset, SPRD_EIC_DBNC_DATA); + case SPRD_EIC_ASYNC: + return sprd_eic_read(chip, offset, SPRD_EIC_ASYNC_DATA); + case SPRD_EIC_SYNC: + return sprd_eic_read(chip, offset, SPRD_EIC_SYNC_DATA); + default: + return -ENOTSUPP; + } } static int sprd_eic_direction_input(struct gpio_chip *chip, unsigned int offset) @@ -368,6 +379,7 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type) irq_set_handler_locked(data, handle_edge_irq); break; case IRQ_TYPE_EDGE_BOTH: + sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0); 
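		/*
		 * (Editor annotation, not part of the patch: the async EIC's
		 * both-edge detection via INTBOTH presumably only takes effect
		 * while the INTMODE level/edge selector is cleared, so the
		 * write added above zeroes INTMODE before INTBOTH is enabled,
		 * preventing a previously configured trigger mode from
		 * overriding the both-edge setting.)
		 */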
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 1); irq_set_handler_locked(data, handle_edge_irq); break; diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c index adf72dda25a2..68a35b65925a 100644 --- a/drivers/gpio/gpio-pcf857x.c +++ b/drivers/gpio/gpio-pcf857x.c @@ -84,6 +84,7 @@ MODULE_DEVICE_TABLE(of, pcf857x_of_table); */ struct pcf857x { struct gpio_chip chip; + struct irq_chip irqchip; struct i2c_client *client; struct mutex lock; /* protect 'out' */ unsigned out; /* software latch */ @@ -252,18 +253,6 @@ static void pcf857x_irq_bus_sync_unlock(struct irq_data *data) mutex_unlock(&gpio->lock); } -static struct irq_chip pcf857x_irq_chip = { - .name = "pcf857x", - .irq_enable = pcf857x_irq_enable, - .irq_disable = pcf857x_irq_disable, - .irq_ack = noop, - .irq_mask = noop, - .irq_unmask = noop, - .irq_set_wake = pcf857x_irq_set_wake, - .irq_bus_lock = pcf857x_irq_bus_lock, - .irq_bus_sync_unlock = pcf857x_irq_bus_sync_unlock, -}; - /*-------------------------------------------------------------------------*/ static int pcf857x_probe(struct i2c_client *client, @@ -376,8 +365,17 @@ static int pcf857x_probe(struct i2c_client *client, /* Enable irqchip if we have an interrupt */ if (client->irq) { + gpio->irqchip.name = "pcf857x", + gpio->irqchip.irq_enable = pcf857x_irq_enable, + gpio->irqchip.irq_disable = pcf857x_irq_disable, + gpio->irqchip.irq_ack = noop, + gpio->irqchip.irq_mask = noop, + gpio->irqchip.irq_unmask = noop, + gpio->irqchip.irq_set_wake = pcf857x_irq_set_wake, + gpio->irqchip.irq_bus_lock = pcf857x_irq_bus_lock, + gpio->irqchip.irq_bus_sync_unlock = pcf857x_irq_bus_sync_unlock, status = gpiochip_irqchip_add_nested(&gpio->chip, - &pcf857x_irq_chip, + &gpio->irqchip, 0, handle_level_irq, IRQ_TYPE_NONE); if (status) { @@ -392,7 +390,7 @@ static int pcf857x_probe(struct i2c_client *client, if (status) goto fail; - gpiochip_set_nested_irqchip(&gpio->chip, &pcf857x_irq_chip, + gpiochip_set_nested_irqchip(&gpio->chip, &gpio->irqchip, client->irq); gpio->irq_parent = client->irq; } diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c index 1b79ebcfce3e..541fa6ac399d 100644 --- a/drivers/gpio/gpio-vf610.c +++ b/drivers/gpio/gpio-vf610.c @@ -253,6 +253,7 @@ static int vf610_gpio_probe(struct platform_device *pdev) struct vf610_gpio_port *port; struct resource *iores; struct gpio_chip *gc; + int i; int ret; port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL); @@ -319,6 +320,10 @@ static int vf610_gpio_probe(struct platform_device *pdev) if (ret < 0) return ret; + /* Mask all GPIO interrupts */ + for (i = 0; i < gc->ngpio; i++) + vf610_gpio_writel(0, port->base + PORT_PCR(i)); + /* Clear the interrupt status register for all GPIO's */ vf610_gpio_writel(~0, port->base + PORT_ISFR); diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 1651d7f0a303..d1adfdf50fb3 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -828,7 +828,14 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p) /* Do not leak kernel stack to userspace */ memset(&ge, 0, sizeof(ge)); - ge.timestamp = le->timestamp; + /* + * We may be running from a nested threaded interrupt in which case + * we didn't get the timestamp from lineevent_irq_handler(). 
+ */ + if (!le->timestamp) + ge.timestamp = ktime_get_real_ns(); + else + ge.timestamp = le->timestamp; if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 6896dec97fc7..0ed41a9d2d77 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -1686,7 +1686,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, effective_mode &= ~S_IWUSR; if ((adev->flags & AMD_IS_APU) && - (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr || + (attr == &sensor_dev_attr_power1_average.dev_attr.attr || + attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr || attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr|| attr == &sensor_dev_attr_power1_cap.dev_attr.attr)) return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c index 71913a18d142..a38e0fb4a6fe 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c @@ -38,6 +38,7 @@ #include "amdgpu_gem.h" #include <drm/amdgpu_drm.h> #include <linux/dma-buf.h> +#include <linux/dma-fence-array.h> /** * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table @@ -187,6 +188,48 @@ error: return ERR_PTR(ret); } +static int +__reservation_object_make_exclusive(struct reservation_object *obj) +{ + struct dma_fence **fences; + unsigned int count; + int r; + + if (!reservation_object_get_list(obj)) /* no shared fences to convert */ + return 0; + + r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences); + if (r) + return r; + + if (count == 0) { + /* Now that was unexpected. */ + } else if (count == 1) { + reservation_object_add_excl_fence(obj, fences[0]); + dma_fence_put(fences[0]); + kfree(fences); + } else { + struct dma_fence_array *array; + + array = dma_fence_array_create(count, fences, + dma_fence_context_alloc(1), 0, + false); + if (!array) + goto err_fences_put; + + reservation_object_add_excl_fence(obj, &array->base); + dma_fence_put(&array->base); + } + + return 0; + +err_fences_put: + while (count--) + dma_fence_put(fences[count]); + kfree(fences); + return -ENOMEM; +} + /** * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation * @dma_buf: Shared DMA buffer @@ -218,16 +261,16 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf, if (attach->dev->driver != adev->dev->driver) { /* - * Wait for all shared fences to complete before we switch to future - * use of exclusive fence on this prime shared bo. + * We only create shared fences for internal use, but importers + * of the dmabuf rely on exclusive fences for implicitly + * tracking write hazards. As any of the current fences may + * correspond to a write, we need to convert all existing + * fences on the reservation object into a single exclusive + * fence. 
*/ - r = reservation_object_wait_timeout_rcu(bo->tbo.resv, - true, false, - MAX_SCHEDULE_TIMEOUT); - if (unlikely(r < 0)) { - DRM_DEBUG_PRIME("Fence wait failed: %li\n", r); + r = __reservation_object_make_exclusive(bo->tbo.resv); + if (r) goto error_unreserve; - } } /* pin buffer into GTT */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 8fab0d637ee5..3a9b48b227ac 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -90,8 +90,10 @@ static int psp_sw_fini(void *handle) adev->psp.sos_fw = NULL; release_firmware(adev->psp.asd_fw); adev->psp.asd_fw = NULL; - release_firmware(adev->psp.ta_fw); - adev->psp.ta_fw = NULL; + if (adev->psp.ta_fw) { + release_firmware(adev->psp.ta_fw); + adev->psp.ta_fw = NULL; + } return 0; } @@ -435,6 +437,9 @@ static int psp_xgmi_initialize(struct psp_context *psp) struct ta_xgmi_shared_memory *xgmi_cmd; int ret; + if (!psp->adev->psp.ta_fw) + return -ENOENT; + if (!psp->xgmi_context.initialized) { ret = psp_xgmi_init_shared_buf(psp); if (ret) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index d2ea5ce2cefb..7c108e687683 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -3363,14 +3363,15 @@ void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid, struct amdgpu_task_info *task_info) { struct amdgpu_vm *vm; + unsigned long flags; - spin_lock(&adev->vm_manager.pasid_lock); + spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); vm = idr_find(&adev->vm_manager.pasid_idr, pasid); if (vm) *task_info = vm->task_info; - spin_unlock(&adev->vm_manager.pasid_lock); + spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index 4cd31a276dcd..186db182f924 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -93,7 +93,20 @@ static void nbio_v7_4_enable_doorbell_aperture(struct amdgpu_device *adev, static void nbio_v7_4_enable_doorbell_selfring_aperture(struct amdgpu_device *adev, bool enable) { + u32 tmp = 0; + if (enable) { + tmp = REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_EN, 1) | + REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_MODE, 1) | + REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_SIZE, 0); + + WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_LOW, + lower_32_bits(adev->doorbell.base)); + WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_HIGH, + upper_32_bits(adev->doorbell.base)); + } + + WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_CNTL, tmp); } static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 0c6e7f9b143f..189fcb004579 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -152,18 +152,22 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); - if (err) - goto out2; - - err = amdgpu_ucode_validate(adev->psp.ta_fw); - if (err) - goto out2; - - ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data; - adev->psp.ta_xgmi_ucode_version = 
le32_to_cpu(ta_hdr->ta_xgmi_ucode_version); - adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes); - adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr + - le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); + if (err) { + release_firmware(adev->psp.ta_fw); + adev->psp.ta_fw = NULL; + dev_info(adev->dev, + "psp v11.0: Failed to load firmware \"%s\"\n", fw_name); + } else { + err = amdgpu_ucode_validate(adev->psp.ta_fw); + if (err) + goto out2; + + ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data; + adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version); + adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes); + adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr + + le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); + } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 8849b74078d6..9b639974c70c 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -729,11 +729,13 @@ static int soc15_common_early_init(void *handle) case CHIP_RAVEN: adev->asic_funcs = &soc15_asic_funcs; if (adev->rev_id >= 0x8) - adev->external_rev_id = adev->rev_id + 0x81; + adev->external_rev_id = adev->rev_id + 0x79; else if (adev->pdev->device == 0x15d8) adev->external_rev_id = adev->rev_id + 0x41; + else if (adev->rev_id == 1) + adev->external_rev_id = adev->rev_id + 0x20; else - adev->external_rev_id = 0x1; + adev->external_rev_id = adev->rev_id + 0x01; if (adev->rev_id >= 0x8) { adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 5d85ff341385..2e7c44955f43 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -863,7 +863,7 @@ static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size, return 0; } -#if CONFIG_X86_64 +#ifdef CONFIG_X86_64 static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size, uint32_t *num_entries, struct crat_subtype_iolink *sub_type_hdr) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index f4fa40c387d3..0b392bfca284 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -4082,7 +4082,8 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, } if (connector_type == DRM_MODE_CONNECTOR_HDMIA || - connector_type == DRM_MODE_CONNECTOR_DisplayPort) { + connector_type == DRM_MODE_CONNECTOR_DisplayPort || + connector_type == DRM_MODE_CONNECTOR_eDP) { drm_connector_attach_vrr_capable_property( &aconnector->base); } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 9a7ac58eb18e..ddd75a4d8ba5 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -671,6 +671,25 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us return bytes_from_user; } +/* + * Returns the min and max vrr vfreq through the connector's debugfs file. 
+ * Example usage: cat /sys/kernel/debug/dri/0/DP-1/vrr_range + */ +static int vrr_range_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + + if (connector->status != connector_status_connected) + return -ENODEV; + + seq_printf(m, "Min: %u\n", (unsigned int)aconnector->min_vfreq); + seq_printf(m, "Max: %u\n", (unsigned int)aconnector->max_vfreq); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(vrr_range); + static const struct file_operations dp_link_settings_debugfs_fops = { .owner = THIS_MODULE, .read = dp_link_settings_read, @@ -697,7 +716,8 @@ static const struct { } dp_debugfs_entries[] = { {"link_settings", &dp_link_settings_debugfs_fops}, {"phy_settings", &dp_phy_settings_debugfs_fop}, - {"test_pattern", &dp_phy_test_pattern_fops} + {"test_pattern", &dp_phy_test_pattern_fops}, + {"vrr_range", &vrr_range_fops} }; int connector_debugfs_init(struct amdgpu_dm_connector *connector) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c index afd287f08bc9..19801bdba0d2 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c @@ -591,7 +591,15 @@ static void dce11_pplib_apply_display_requirements( dc, context->bw.dce.sclk_khz); - pp_display_cfg->min_dcfclock_khz = pp_display_cfg->min_engine_clock_khz; + /* + * As workaround for >4x4K lightup set dcfclock to min_engine_clock value. + * This is not required for less than 5 displays, + * thus don't request decfclk in dc to avoid impact + * on power saving. + * + */ + pp_display_cfg->min_dcfclock_khz = (context->stream_count > 4)? + pp_display_cfg->min_engine_clock_khz : 0; pp_display_cfg->min_engine_clock_deep_sleep_khz = context->bw.dce.sclk_deep_sleep_khz; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index f95c5f50eb0f..5273de3c5b98 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -1033,6 +1033,7 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr, break; case amd_pp_dpp_clock: pclk_vol_table = pinfo->vdd_dep_on_dppclk; + break; default: return -EINVAL; } diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c index 99cba8ea5d82..5df1256618cc 100644 --- a/drivers/gpu/drm/drm_lease.c +++ b/drivers/gpu/drm/drm_lease.c @@ -528,7 +528,8 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev, object_count = cl->object_count; - object_ids = memdup_user(u64_to_user_ptr(cl->object_ids), object_count * sizeof(__u32)); + object_ids = memdup_user(u64_to_user_ptr(cl->object_ids), + array_size(object_count, sizeof(__u32))); if (IS_ERR(object_ids)) return PTR_ERR(object_ids); diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 24a750436559..f91e02c87fd8 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -758,7 +758,7 @@ int drm_mode_hsync(const struct drm_display_mode *mode) if (mode->hsync) return mode->hsync; - if (mode->htotal < 0) + if (mode->htotal <= 0) return 0; calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 216f52b744a6..c882ea94172c 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1824,6 +1824,16 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void 
*data, return 0; } +static inline bool +__vma_matches(struct vm_area_struct *vma, struct file *filp, + unsigned long addr, unsigned long size) +{ + if (vma->vm_file != filp) + return false; + + return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size; +} + /** * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address * it is mapped to. @@ -1882,7 +1892,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, return -EINTR; } vma = find_vma(mm, addr); - if (vma) + if (vma && __vma_matches(vma, obj->base.filp, addr, args->size)) vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); else diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index d6c8f8fdfda5..017fc602a10e 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -594,7 +594,8 @@ static void i915_pmu_enable(struct perf_event *event) * Update the bitmask of enabled events and increment * the event reference counter. */ - GEM_BUG_ON(bit >= I915_PMU_MASK_BITS); + BUILD_BUG_ON(ARRAY_SIZE(i915->pmu.enable_count) != I915_PMU_MASK_BITS); + GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count)); GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0); i915->pmu.enable |= BIT_ULL(bit); i915->pmu.enable_count[bit]++; @@ -615,11 +616,16 @@ static void i915_pmu_enable(struct perf_event *event) engine = intel_engine_lookup_user(i915, engine_event_class(event), engine_event_instance(event)); - GEM_BUG_ON(!engine); - engine->pmu.enable |= BIT(sample); - GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS); + BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) != + I915_ENGINE_SAMPLE_COUNT); + BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) != + I915_ENGINE_SAMPLE_COUNT); + GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count)); + GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample)); GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0); + + engine->pmu.enable |= BIT(sample); engine->pmu.enable_count[sample]++; } @@ -649,9 +655,11 @@ static void i915_pmu_disable(struct perf_event *event) engine = intel_engine_lookup_user(i915, engine_event_class(event), engine_event_instance(event)); - GEM_BUG_ON(!engine); - GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS); + + GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count)); + GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample)); GEM_BUG_ON(engine->pmu.enable_count[sample] == 0); + /* * Decrement the reference count and clear the enabled * bitmask when the last listener on an event goes away. 
@@ -660,7 +668,7 @@ static void i915_pmu_disable(struct perf_event *event) engine->pmu.enable &= ~BIT(sample); } - GEM_BUG_ON(bit >= I915_PMU_MASK_BITS); + GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count)); GEM_BUG_ON(i915->pmu.enable_count[bit] == 0); /* * Decrement the reference count and clear the enabled diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h index 7f164ca3db12..b3728c5f13e7 100644 --- a/drivers/gpu/drm/i915/i915_pmu.h +++ b/drivers/gpu/drm/i915/i915_pmu.h @@ -31,6 +31,8 @@ enum { ((1 << I915_PMU_SAMPLE_BITS) + \ (I915_PMU_LAST + 1 - __I915_PMU_OTHER(0))) +#define I915_ENGINE_SAMPLE_COUNT (I915_SAMPLE_SEMA + 1) + struct i915_pmu_sample { u64 cur; }; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 0a7d60509ca7..067054cf4a86 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1790,7 +1790,7 @@ enum i915_power_well_id { #define _CNL_PORT_TX_C_LN0_OFFSET 0x162C40 #define _CNL_PORT_TX_D_LN0_OFFSET 0x162E40 #define _CNL_PORT_TX_F_LN0_OFFSET 0x162840 -#define _CNL_PORT_TX_DW_GRP(port, dw) (_PICK((port), \ +#define _CNL_PORT_TX_DW_GRP(dw, port) (_PICK((port), \ _CNL_PORT_TX_AE_GRP_OFFSET, \ _CNL_PORT_TX_B_GRP_OFFSET, \ _CNL_PORT_TX_B_GRP_OFFSET, \ @@ -1798,7 +1798,7 @@ enum i915_power_well_id { _CNL_PORT_TX_AE_GRP_OFFSET, \ _CNL_PORT_TX_F_GRP_OFFSET) + \ 4 * (dw)) -#define _CNL_PORT_TX_DW_LN0(port, dw) (_PICK((port), \ +#define _CNL_PORT_TX_DW_LN0(dw, port) (_PICK((port), \ _CNL_PORT_TX_AE_LN0_OFFSET, \ _CNL_PORT_TX_B_LN0_OFFSET, \ _CNL_PORT_TX_B_LN0_OFFSET, \ @@ -1834,9 +1834,9 @@ enum i915_power_well_id { #define _CNL_PORT_TX_DW4_LN0_AE 0x162450 #define _CNL_PORT_TX_DW4_LN1_AE 0x1624D0 -#define CNL_PORT_TX_DW4_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 4)) -#define CNL_PORT_TX_DW4_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4)) -#define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4) + \ +#define CNL_PORT_TX_DW4_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(4, (port))) +#define CNL_PORT_TX_DW4_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port))) +#define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)) + \ ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \ _CNL_PORT_TX_DW4_LN0_AE))) #define ICL_PORT_TX_DW4_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(4, port)) @@ -1864,8 +1864,12 @@ enum i915_power_well_id { #define RTERM_SELECT(x) ((x) << 3) #define RTERM_SELECT_MASK (0x7 << 3) -#define CNL_PORT_TX_DW7_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 7)) -#define CNL_PORT_TX_DW7_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 7)) +#define CNL_PORT_TX_DW7_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(7, (port))) +#define CNL_PORT_TX_DW7_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(7, (port))) +#define ICL_PORT_TX_DW7_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(7, port)) +#define ICL_PORT_TX_DW7_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(7, port)) +#define ICL_PORT_TX_DW7_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(7, 0, port)) +#define ICL_PORT_TX_DW7_LN(port, ln) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, port)) #define N_SCALAR(x) ((x) << 24) #define N_SCALAR_MASK (0x7F << 24) diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index f3e1d6a0b7dd..7edce1b7b348 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -494,103 +494,58 @@ static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_1_05V[] = { { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */ }; -struct icl_combo_phy_ddi_buf_trans { - u32 dw2_swing_select; - u32 dw2_swing_scalar; - u32 dw4_scaling; -}; - -/* 
Voltage Swing Programming for VccIO 0.85V for DP */ -static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_85V[] = { - /* Voltage mV db */ - { 0x2, 0x98, 0x0018 }, /* 400 0.0 */ - { 0x2, 0x98, 0x3015 }, /* 400 3.5 */ - { 0x2, 0x98, 0x6012 }, /* 400 6.0 */ - { 0x2, 0x98, 0x900F }, /* 400 9.5 */ - { 0xB, 0x70, 0x0018 }, /* 600 0.0 */ - { 0xB, 0x70, 0x3015 }, /* 600 3.5 */ - { 0xB, 0x70, 0x6012 }, /* 600 6.0 */ - { 0x5, 0x00, 0x0018 }, /* 800 0.0 */ - { 0x5, 0x00, 0x3015 }, /* 800 3.5 */ - { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */ -}; - -/* FIXME - After table is updated in Bspec */ -/* Voltage Swing Programming for VccIO 0.85V for eDP */ -static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_85V[] = { - /* Voltage mV db */ - { 0x0, 0x00, 0x00 }, /* 200 0.0 */ - { 0x0, 0x00, 0x00 }, /* 200 1.5 */ - { 0x0, 0x00, 0x00 }, /* 200 4.0 */ - { 0x0, 0x00, 0x00 }, /* 200 6.0 */ - { 0x0, 0x00, 0x00 }, /* 250 0.0 */ - { 0x0, 0x00, 0x00 }, /* 250 1.5 */ - { 0x0, 0x00, 0x00 }, /* 250 4.0 */ - { 0x0, 0x00, 0x00 }, /* 300 0.0 */ - { 0x0, 0x00, 0x00 }, /* 300 1.5 */ - { 0x0, 0x00, 0x00 }, /* 350 0.0 */ -}; - -/* Voltage Swing Programming for VccIO 0.95V for DP */ -static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_95V[] = { - /* Voltage mV db */ - { 0x2, 0x98, 0x0018 }, /* 400 0.0 */ - { 0x2, 0x98, 0x3015 }, /* 400 3.5 */ - { 0x2, 0x98, 0x6012 }, /* 400 6.0 */ - { 0x2, 0x98, 0x900F }, /* 400 9.5 */ - { 0x4, 0x98, 0x0018 }, /* 600 0.0 */ - { 0x4, 0x98, 0x3015 }, /* 600 3.5 */ - { 0x4, 0x98, 0x6012 }, /* 600 6.0 */ - { 0x5, 0x76, 0x0018 }, /* 800 0.0 */ - { 0x5, 0x76, 0x3015 }, /* 800 3.5 */ - { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */ +/* icl_combo_phy_ddi_translations */ +static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hbr2[] = { + /* NT mV Trans mV db */ + { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ + { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */ + { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ + { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */ + { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ + { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */ + { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */ + { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */ + { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ }; -/* FIXME - After table is updated in Bspec */ -/* Voltage Swing Programming for VccIO 0.95V for eDP */ -static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_95V[] = { - /* Voltage mV db */ - { 0x0, 0x00, 0x00 }, /* 200 0.0 */ - { 0x0, 0x00, 0x00 }, /* 200 1.5 */ - { 0x0, 0x00, 0x00 }, /* 200 4.0 */ - { 0x0, 0x00, 0x00 }, /* 200 6.0 */ - { 0x0, 0x00, 0x00 }, /* 250 0.0 */ - { 0x0, 0x00, 0x00 }, /* 250 1.5 */ - { 0x0, 0x00, 0x00 }, /* 250 4.0 */ - { 0x0, 0x00, 0x00 }, /* 300 0.0 */ - { 0x0, 0x00, 0x00 }, /* 300 1.5 */ - { 0x0, 0x00, 0x00 }, /* 350 0.0 */ +static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr2[] = { + /* NT mV Trans mV db */ + { 0x0, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 200 0.0 */ + { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 200 250 1.9 */ + { 0x1, 0x7F, 0x33, 0x00, 0x0C }, /* 200 300 3.5 */ + { 0x9, 0x7F, 0x31, 0x00, 0x0E }, /* 200 350 4.9 */ + { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 250 0.0 */ + { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 250 300 1.6 */ + { 0x9, 0x7F, 0x35, 0x00, 0x0A }, /* 250 350 2.9 */ + { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 300 300 0.0 */ + { 0x9, 
0x7F, 0x38, 0x00, 0x07 }, /* 300 350 1.3 */ + { 0x9, 0x7F, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ }; -/* Voltage Swing Programming for VccIO 1.05V for DP */ -static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_1_05V[] = { - /* Voltage mV db */ - { 0x2, 0x98, 0x0018 }, /* 400 0.0 */ - { 0x2, 0x98, 0x3015 }, /* 400 3.5 */ - { 0x2, 0x98, 0x6012 }, /* 400 6.0 */ - { 0x2, 0x98, 0x900F }, /* 400 9.5 */ - { 0x4, 0x98, 0x0018 }, /* 600 0.0 */ - { 0x4, 0x98, 0x3015 }, /* 600 3.5 */ - { 0x4, 0x98, 0x6012 }, /* 600 6.0 */ - { 0x5, 0x71, 0x0018 }, /* 800 0.0 */ - { 0x5, 0x71, 0x3015 }, /* 800 3.5 */ - { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */ +static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr3[] = { + /* NT mV Trans mV db */ + { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ + { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */ + { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ + { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */ + { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ + { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */ + { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */ + { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */ + { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ }; -/* FIXME - After table is updated in Bspec */ -/* Voltage Swing Programming for VccIO 1.05V for eDP */ -static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_1_05V[] = { - /* Voltage mV db */ - { 0x0, 0x00, 0x00 }, /* 200 0.0 */ - { 0x0, 0x00, 0x00 }, /* 200 1.5 */ - { 0x0, 0x00, 0x00 }, /* 200 4.0 */ - { 0x0, 0x00, 0x00 }, /* 200 6.0 */ - { 0x0, 0x00, 0x00 }, /* 250 0.0 */ - { 0x0, 0x00, 0x00 }, /* 250 1.5 */ - { 0x0, 0x00, 0x00 }, /* 250 4.0 */ - { 0x0, 0x00, 0x00 }, /* 300 0.0 */ - { 0x0, 0x00, 0x00 }, /* 300 1.5 */ - { 0x0, 0x00, 0x00 }, /* 350 0.0 */ +static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi[] = { + /* NT mV Trans mV db */ + { 0xA, 0x60, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */ + { 0xB, 0x73, 0x36, 0x00, 0x09 }, /* 450 650 3.2 */ + { 0x6, 0x7F, 0x31, 0x00, 0x0E }, /* 450 850 5.5 */ + { 0xB, 0x73, 0x3F, 0x00, 0x00 }, /* 650 650 0.0 ALS */ + { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 650 850 2.3 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 850 850 0.0 */ + { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */ }; struct icl_mg_phy_ddi_buf_trans { @@ -871,43 +826,23 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) } } -static const struct icl_combo_phy_ddi_buf_trans * +static const struct cnl_ddi_buf_trans * icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port, - int type, int *n_entries) + int type, int rate, int *n_entries) { - u32 voltage = I915_READ(ICL_PORT_COMP_DW3(port)) & VOLTAGE_INFO_MASK; - - if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) { - switch (voltage) { - case VOLTAGE_INFO_0_85V: - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_0_85V); - return icl_combo_phy_ddi_translations_edp_0_85V; - case VOLTAGE_INFO_0_95V: - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_0_95V); - return icl_combo_phy_ddi_translations_edp_0_95V; - case VOLTAGE_INFO_1_05V: - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_1_05V); - return icl_combo_phy_ddi_translations_edp_1_05V; - default: - MISSING_CASE(voltage); - return NULL; - } - } else { - switch (voltage) { - case VOLTAGE_INFO_0_85V: - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_0_85V); - 
return icl_combo_phy_ddi_translations_dp_hdmi_0_85V; - case VOLTAGE_INFO_0_95V: - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_0_95V); - return icl_combo_phy_ddi_translations_dp_hdmi_0_95V; - case VOLTAGE_INFO_1_05V: - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_1_05V); - return icl_combo_phy_ddi_translations_dp_hdmi_1_05V; - default: - MISSING_CASE(voltage); - return NULL; - } + if (type == INTEL_OUTPUT_HDMI) { + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi); + return icl_combo_phy_ddi_translations_hdmi; + } else if (rate > 540000 && type == INTEL_OUTPUT_EDP) { + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3); + return icl_combo_phy_ddi_translations_edp_hbr3; + } else if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) { + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2); + return icl_combo_phy_ddi_translations_edp_hbr2; } + + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hbr2); + return icl_combo_phy_ddi_translations_dp_hbr2; } static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port) @@ -918,8 +853,8 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por if (IS_ICELAKE(dev_priv)) { if (intel_port_is_combophy(dev_priv, port)) - icl_get_combo_buf_trans(dev_priv, port, - INTEL_OUTPUT_HDMI, &n_entries); + icl_get_combo_buf_trans(dev_priv, port, INTEL_OUTPUT_HDMI, + 0, &n_entries); else n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); default_entry = n_entries - 1; @@ -1086,7 +1021,7 @@ static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder, return DDI_CLK_SEL_TBT_810; default: MISSING_CASE(clock); - break; + return DDI_CLK_SEL_NONE; } case DPLL_ID_ICL_MGPLL1: case DPLL_ID_ICL_MGPLL2: @@ -2275,13 +2210,14 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder, u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); enum port port = encoder->port; int n_entries; if (IS_ICELAKE(dev_priv)) { if (intel_port_is_combophy(dev_priv, port)) icl_get_combo_buf_trans(dev_priv, port, encoder->type, - &n_entries); + intel_dp->link_rate, &n_entries); else n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); } else if (IS_CANNONLAKE(dev_priv)) { @@ -2462,14 +2398,15 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, } static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv, - u32 level, enum port port, int type) + u32 level, enum port port, int type, + int rate) { - const struct icl_combo_phy_ddi_buf_trans *ddi_translations = NULL; + const struct cnl_ddi_buf_trans *ddi_translations = NULL; u32 n_entries, val; int ln; ddi_translations = icl_get_combo_buf_trans(dev_priv, port, type, - &n_entries); + rate, &n_entries); if (!ddi_translations) return; @@ -2478,34 +2415,23 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv, level = n_entries - 1; } - /* Set PORT_TX_DW5 Rterm Sel to 110b. 
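The rewritten icl_get_combo_buf_trans() above follows a common i915 idiom: hand back a const translation table together with its entry count through an out-parameter, so the caller can clamp the requested level. A hedged sketch with placeholder tables, names and values:

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct buf_trans { unsigned int swing; unsigned int deemph; };

static const struct buf_trans dp_hbr2_table[]  = { {0, 0}, {1, 2}, {2, 3} };
static const struct buf_trans edp_hbr2_table[] = { {0, 1}, {1, 1} };

/* Return a const table plus its length via an out-parameter; the
 * selection keys (output type, link rate, low_vswing) mirror the
 * patch's ladder but the tables here are placeholders. */
static const struct buf_trans *
get_buf_trans(int is_edp, int low_vswing, int *n_entries)
{
	if (is_edp && low_vswing) {
		*n_entries = ARRAY_SIZE(edp_hbr2_table);
		return edp_hbr2_table;
	}
	*n_entries = ARRAY_SIZE(dp_hbr2_table);
	return dp_hbr2_table;
}

A caller then clamps exactly as the vswing code below does: if (level >= n_entries) level = n_entries - 1;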
*/ + /* Set PORT_TX_DW5 */ val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); - val &= ~RTERM_SELECT_MASK; + val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK | + TAP2_DISABLE | TAP3_DISABLE); + val |= SCALING_MODE_SEL(0x2); val |= RTERM_SELECT(0x6); - I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); - - /* Program PORT_TX_DW5 */ - val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); - /* Set DisableTap2 and DisableTap3 if MIPI DSI - * Clear DisableTap2 and DisableTap3 for all other Ports - */ - if (type == INTEL_OUTPUT_DSI) { - val |= TAP2_DISABLE; - val |= TAP3_DISABLE; - } else { - val &= ~TAP2_DISABLE; - val &= ~TAP3_DISABLE; - } + val |= TAP3_DISABLE; I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); /* Program PORT_TX_DW2 */ val = I915_READ(ICL_PORT_TX_DW2_LN0(port)); val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | RCOMP_SCALAR_MASK); - val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_select); - val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_select); + val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel); + val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel); /* Program Rcomp scalar for every table entry */ - val |= RCOMP_SCALAR(ddi_translations[level].dw2_swing_scalar); + val |= RCOMP_SCALAR(0x98); I915_WRITE(ICL_PORT_TX_DW2_GRP(port), val); /* Program PORT_TX_DW4 */ @@ -2514,9 +2440,17 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv, val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln)); val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK); - val |= ddi_translations[level].dw4_scaling; + val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1); + val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2); + val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff); I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val); } + + /* Program PORT_TX_DW7 */ + val = I915_READ(ICL_PORT_TX_DW7_LN0(port)); + val &= ~N_SCALAR_MASK; + val |= N_SCALAR(ddi_translations[level].dw7_n_scalar); + I915_WRITE(ICL_PORT_TX_DW7_GRP(port), val); } static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, @@ -2581,7 +2515,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); /* 5. Program swing and de-emphasis */ - icl_ddi_combo_vswing_program(dev_priv, level, port, type); + icl_ddi_combo_vswing_program(dev_priv, level, port, type, rate); /* 6. Set training enable to trigger update */ val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3da9c0f9e948..248128126422 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -15415,16 +15415,45 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc, } } +static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + + /* + * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram + * the hardware when a high res displays plugged in. DPLL P + * divider is zero, and the pipe timings are bonkers. We'll + * try to disable everything in that case. + * + * FIXME would be nice to be able to sanitize this state + * without several WARNs, but for now let's take the easy + * road. 
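The icl_ddi_combo_vswing_program() changes above repeatedly apply one idiom: read a lane-0 register, mask out a field, OR in the table value, then write the group register to broadcast to all lanes. A self-contained sketch of that read-modify-write pattern against a fake register file (mmio_read/mmio_write and the register indices are stand-ins, not the real I915_READ/I915_WRITE):

#include <stdint.h>
#include <stdio.h>

#define N_SCALAR_MASK (0x7Fu << 24)
#define N_SCALAR(x)   (((uint32_t)(x) & 0x7Fu) << 24)

static uint32_t fake_mmio[2];	/* [0] = LN0 register, [1] = GRP register */
static uint32_t mmio_read(int r) { return fake_mmio[r]; }
static void mmio_write(int r, uint32_t v) { fake_mmio[r] = v; }

/* Read one lane's current value, clear the field, OR in the new
 * value, then write the group register to reach every lane. */
static void program_n_scalar(unsigned int n_scalar)
{
	uint32_t val = mmio_read(0);

	val &= ~N_SCALAR_MASK;
	val |= N_SCALAR(n_scalar);
	mmio_write(1, val);
}

int main(void)
{
	fake_mmio[0] = 0x55u << 24;	/* stale field contents */
	program_n_scalar(0x7E);
	printf("GRP = 0x%08x\n", fake_mmio[1]);	/* field replaced, rest kept */
	return 0;
}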
+ */ + return IS_GEN6(dev_priv) && + crtc_state->base.active && + crtc_state->shared_dpll && + crtc_state->port_clock == 0; +} + static void intel_sanitize_encoder(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_connector *connector; + struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); + struct intel_crtc_state *crtc_state = crtc ? + to_intel_crtc_state(crtc->base.state) : NULL; /* We need to check both for a crtc link (meaning that the * encoder is active and trying to read from a pipe) and the * pipe itself being active. */ - bool has_active_crtc = encoder->base.crtc && - to_intel_crtc(encoder->base.crtc)->active; + bool has_active_crtc = crtc_state && + crtc_state->base.active; + + if (crtc_state && has_bogus_dpll_config(crtc_state)) { + DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n", + pipe_name(crtc->pipe)); + has_active_crtc = false; + } connector = intel_encoder_find_connector(encoder); if (connector && !has_active_crtc) { @@ -15435,16 +15464,25 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder) /* Connector is active, but has no active pipe. This is * fallout from our resume register restoring. Disable * the encoder manually again. */ - if (encoder->base.crtc) { - struct drm_crtc_state *crtc_state = encoder->base.crtc->state; + if (crtc_state) { + struct drm_encoder *best_encoder; DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n", encoder->base.base.id, encoder->base.name); + + /* avoid oopsing in case the hooks consult best_encoder */ + best_encoder = connector->base.state->best_encoder; + connector->base.state->best_encoder = &encoder->base; + if (encoder->disable) - encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state); + encoder->disable(encoder, crtc_state, + connector->base.state); if (encoder->post_disable) - encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state); + encoder->post_disable(encoder, crtc_state, + connector->base.state); + + connector->base.state->best_encoder = best_encoder; } encoder->base.crtc = NULL; diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index fdd2cbc56fa3..22a74608c6e4 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -304,9 +304,11 @@ static int cnl_max_source_rate(struct intel_dp *intel_dp) static int icl_max_source_rate(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); enum port port = dig_port->base.port; - if (port == PORT_B) + if (intel_port_is_combophy(dev_priv, port) && + !intel_dp_is_edp(intel_dp)) return 540000; return 810000; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index f94a04b4ad87..e9ddeaf05a14 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -209,6 +209,16 @@ struct intel_fbdev { unsigned long vma_flags; async_cookie_t cookie; int preferred_bpp; + + /* Whether or not fbdev hpd processing is temporarily suspended */ + bool hpd_suspended : 1; + /* Set when a hotplug was received while HPD processing was + * suspended + */ + bool hpd_waiting : 1; + + /* Protects hpd_suspended */ + struct mutex hpd_lock; }; struct intel_encoder { diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index fb5bb5b32a60..7f365ac0b549 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ 
b/drivers/gpu/drm/i915/intel_fbdev.c @@ -679,6 +679,7 @@ int intel_fbdev_init(struct drm_device *dev) if (ifbdev == NULL) return -ENOMEM; + mutex_init(&ifbdev->hpd_lock); drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs); if (!intel_fbdev_init_bios(dev, ifbdev)) @@ -752,6 +753,26 @@ void intel_fbdev_fini(struct drm_i915_private *dev_priv) intel_fbdev_destroy(ifbdev); } +/* Suspends/resumes fbdev processing of incoming HPD events. When resuming HPD + * processing, fbdev will perform a full connector reprobe if a hotplug event + * was received while HPD was suspended. + */ +static void intel_fbdev_hpd_set_suspend(struct intel_fbdev *ifbdev, int state) +{ + bool send_hpd = false; + + mutex_lock(&ifbdev->hpd_lock); + ifbdev->hpd_suspended = state == FBINFO_STATE_SUSPENDED; + send_hpd = !ifbdev->hpd_suspended && ifbdev->hpd_waiting; + ifbdev->hpd_waiting = false; + mutex_unlock(&ifbdev->hpd_lock); + + if (send_hpd) { + DRM_DEBUG_KMS("Handling delayed fbcon HPD event\n"); + drm_fb_helper_hotplug_event(&ifbdev->helper); + } +} + void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous) { struct drm_i915_private *dev_priv = to_i915(dev); @@ -773,6 +794,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous */ if (state != FBINFO_STATE_RUNNING) flush_work(&dev_priv->fbdev_suspend_work); + console_lock(); } else { /* @@ -800,17 +822,26 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous drm_fb_helper_set_suspend(&ifbdev->helper, state); console_unlock(); + + intel_fbdev_hpd_set_suspend(ifbdev, state); } void intel_fbdev_output_poll_changed(struct drm_device *dev) { struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; + bool send_hpd; if (!ifbdev) return; intel_fbdev_sync(ifbdev); - if (ifbdev->vma || ifbdev->helper.deferred_setup) + + mutex_lock(&ifbdev->hpd_lock); + send_hpd = !ifbdev->hpd_suspended; + ifbdev->hpd_waiting = true; + mutex_unlock(&ifbdev->hpd_lock); + + if (send_hpd && (ifbdev->vma || ifbdev->helper.deferred_setup)) drm_fb_helper_hotplug_event(&ifbdev->helper); } diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index b8f106d9ecf8..3ac20153705a 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -55,7 +55,12 @@ struct opregion_header { u8 signature[16]; u32 size; - u32 opregion_ver; + struct { + u8 rsvd; + u8 revision; + u8 minor; + u8 major; + } __packed over; u8 bios_ver[32]; u8 vbios_ver[16]; u8 driver_ver[16]; @@ -119,7 +124,8 @@ struct opregion_asle { u64 fdss; u32 fdsp; u32 stat; - u64 rvda; /* Physical address of raw vbt data */ + u64 rvda; /* Physical (2.0) or relative from opregion (2.1+) + * address of raw VBT data. 
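The intel_fbdev hunks above introduce hpd_lock, hpd_suspended and hpd_waiting so that a hotplug arriving while fbdev is suspended is recorded and replayed on resume rather than dropped. A userspace sketch of that defer-and-replay pattern using a pthread mutex (all names illustrative; the driver's version also gates on deferred_setup/vma state):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t hpd_lock = PTHREAD_MUTEX_INITIALIZER;
static bool hpd_suspended;
static bool hpd_waiting;

static void handle_hotplug(void) { puts("reprobe connectors"); }

/* Event source: deliver now, or record that one arrived. */
static void on_hotplug(void)
{
	bool deliver;

	pthread_mutex_lock(&hpd_lock);
	deliver = !hpd_suspended;
	if (!deliver)
		hpd_waiting = true;
	pthread_mutex_unlock(&hpd_lock);

	if (deliver)
		handle_hotplug();
}

/* Resume path: replay at most one deferred hotplug. */
static void set_suspended(bool suspended)
{
	bool replay;

	pthread_mutex_lock(&hpd_lock);
	hpd_suspended = suspended;
	replay = !suspended && hpd_waiting;
	hpd_waiting = false;
	pthread_mutex_unlock(&hpd_lock);

	if (replay)
		handle_hotplug();
}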
*/ u32 rvds; /* Size of raw vbt data */ u8 rsvd[58]; } __packed; @@ -925,6 +931,11 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) opregion->header = base; opregion->lid_state = base + ACPI_CLID; + DRM_DEBUG_DRIVER("ACPI OpRegion version %u.%u.%u\n", + opregion->header->over.major, + opregion->header->over.minor, + opregion->header->over.revision); + mboxes = opregion->header->mboxes; if (mboxes & MBOX_ACPI) { DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); @@ -953,11 +964,26 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) if (dmi_check_system(intel_no_opregion_vbt)) goto out; - if (opregion->header->opregion_ver >= 2 && opregion->asle && + if (opregion->header->over.major >= 2 && opregion->asle && opregion->asle->rvda && opregion->asle->rvds) { - opregion->rvda = memremap(opregion->asle->rvda, - opregion->asle->rvds, + resource_size_t rvda = opregion->asle->rvda; + + /* + * opregion 2.0: rvda is the physical VBT address. + * + * opregion 2.1+: rvda is unsigned, relative offset from + * opregion base, and should never point within opregion. + */ + if (opregion->header->over.major > 2 || + opregion->header->over.minor >= 1) { + WARN_ON(rvda < OPREGION_SIZE); + + rvda += asls; + } + + opregion->rvda = memremap(rvda, opregion->asle->rvds, MEMREMAP_WB); + vbt = opregion->rvda; vbt_size = opregion->asle->rvds; if (intel_bios_is_valid_vbt(vbt, vbt_size)) { @@ -967,6 +993,8 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) goto out; } else { DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (RVDA)\n"); + memunmap(opregion->rvda); + opregion->rvda = NULL; } } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 72edaa7ff411..a1a7cc29fdd1 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -415,16 +415,17 @@ struct intel_engine_cs { /** * @enable_count: Reference count for the enabled samplers. * - * Index number corresponds to the bit number from @enable. + * Index number corresponds to @enum drm_i915_pmu_engine_sample. */ - unsigned int enable_count[I915_PMU_SAMPLE_BITS]; + unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT]; /** * @sample: Counter values for sampling events. * * Our internal timer stores the current counters in this field. + * + * Index number corresponds to @enum drm_i915_pmu_engine_sample. 
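Per the updated intel_opregion comment above, OpRegion 2.0 publishes rvda as a physical address while 2.1+ publishes an unsigned offset relative to the OpRegion base, which must never point back inside the OpRegion itself. A sketch of the resulting address computation, assuming the caller has already verified major >= 2 (as the driver does) and handles the invalid case itself rather than just warning:

#include <stdint.h>

#define OPREGION_SIZE (8 * 1024)

/* Returns the physical address to hand to memremap(), or 0 if the
 * relative offset is bogus. opregion_base plays the role of asls. */
static uint64_t vbt_phys_addr(uint8_t major, uint8_t minor,
			      uint64_t rvda, uint64_t opregion_base)
{
	if (major > 2 || (major == 2 && minor >= 1)) {
		/* 2.1+: offset from the OpRegion base. */
		if (rvda < OPREGION_SIZE)
			return 0;	/* would point inside the OpRegion */
		return opregion_base + rvda;
	}
	/* 2.0: rvda is already a physical address. */
	return rvda;
}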
*/ -#define I915_ENGINE_SAMPLE_MAX (I915_SAMPLE_SEMA + 1) - struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX]; + struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT]; } pmu; /* diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index d2e003d8f3db..5170a0f5fe7b 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -494,7 +494,7 @@ skl_program_plane(struct intel_plane *plane, keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha); - keymsk = key->channel_mask & 0x3ffffff; + keymsk = key->channel_mask & 0x7ffffff; if (alpha < 0xff) keymsk |= PLANE_KEYMSK_ALPHA_ENABLE; diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index 2c5bbe317353..e31e263cf86b 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c @@ -643,8 +643,10 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) int bus_format; ret = of_property_read_u32(child, "reg", &i); - if (ret || i < 0 || i > 1) - return -EINVAL; + if (ret || i < 0 || i > 1) { + ret = -EINVAL; + goto free_child; + } if (!of_device_is_available(child)) continue; @@ -657,7 +659,6 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) channel = &imx_ldb->channel[i]; channel->ldb = imx_ldb; channel->chno = i; - channel->child = child; /* * The output port is port@4 with an external 4-port mux or @@ -667,13 +668,13 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) imx_ldb->lvds_mux ? 4 : 2, 0, &channel->panel, &channel->bridge); if (ret && ret != -ENODEV) - return ret; + goto free_child; /* panel ddc only if there is no bridge */ if (!channel->bridge) { ret = imx_ldb_panel_ddc(dev, channel, child); if (ret) - return ret; + goto free_child; } bus_format = of_get_bus_format(dev, child); @@ -689,18 +690,26 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) if (bus_format < 0) { dev_err(dev, "could not determine data mapping: %d\n", bus_format); - return bus_format; + ret = bus_format; + goto free_child; } channel->bus_format = bus_format; + channel->child = child; ret = imx_ldb_register(drm, channel); - if (ret) - return ret; + if (ret) { + channel->child = NULL; + goto free_child; + } } dev_set_drvdata(dev, imx_ldb); return 0; + +free_child: + of_node_put(child); + return ret; } static void imx_ldb_unbind(struct device *dev, struct device *master, diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index c390924de93d..21e964f6ab5c 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c @@ -370,9 +370,9 @@ static int ipu_plane_atomic_check(struct drm_plane *plane, if (ret) return ret; - /* CRTC should be enabled */ + /* nothing to check when disabling or disabled */ if (!crtc_state->enable) - return -EINVAL; + return 0; switch (plane->type) { case DRM_PLANE_TYPE_PRIMARY: diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 00a9c2ab9e6c..64fb788b6647 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -1406,7 +1406,7 @@ static void dsi_pll_disable(struct dss_pll *pll) static int dsi_dump_dsi_clocks(struct seq_file *s, void *p) { - struct dsi_data *dsi = p; + struct dsi_data *dsi = s->private; struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo; enum dss_clk_source dispc_clk_src, dsi_clk_src; int dsi_module = dsi->module_id; @@ -1467,7 +1467,7 @@ static int 
dsi_dump_dsi_clocks(struct seq_file *s, void *p) #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS static int dsi_dump_dsi_irqs(struct seq_file *s, void *p) { - struct dsi_data *dsi = p; + struct dsi_data *dsi = s->private; unsigned long flags; struct dsi_irq_stats stats; @@ -1558,7 +1558,7 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p) static int dsi_dump_dsi_regs(struct seq_file *s, void *p) { - struct dsi_data *dsi = p; + struct dsi_data *dsi = s->private; if (dsi_runtime_get(dsi)) return 0; @@ -4751,6 +4751,17 @@ static int dsi_set_config(struct omap_dss_device *dssdev, dsi->vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH; dsi->vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW; dsi->vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH; + /* + * HACK: These flags should be handled through the omap_dss_device bus + * flags, but this will only be possible when the DSI encoder will be + * converted to the omapdrm-managed encoder model. + */ + dsi->vm.flags &= ~DISPLAY_FLAGS_PIXDATA_NEGEDGE; + dsi->vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE; + dsi->vm.flags &= ~DISPLAY_FLAGS_DE_LOW; + dsi->vm.flags |= DISPLAY_FLAGS_DE_HIGH; + dsi->vm.flags &= ~DISPLAY_FLAGS_SYNC_POSEDGE; + dsi->vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE; dss_mgr_set_timings(&dsi->output, &dsi->vm); @@ -5083,15 +5094,15 @@ static int dsi_bind(struct device *dev, struct device *master, void *data) snprintf(name, sizeof(name), "dsi%u_regs", dsi->module_id + 1); dsi->debugfs.regs = dss_debugfs_create_file(dss, name, - dsi_dump_dsi_regs, &dsi); + dsi_dump_dsi_regs, dsi); #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS snprintf(name, sizeof(name), "dsi%u_irqs", dsi->module_id + 1); dsi->debugfs.irqs = dss_debugfs_create_file(dss, name, - dsi_dump_dsi_irqs, &dsi); + dsi_dump_dsi_irqs, dsi); #endif snprintf(name, sizeof(name), "dsi%u_clks", dsi->module_id + 1); dsi->debugfs.clks = dss_debugfs_create_file(dss, name, - dsi_dump_dsi_clocks, &dsi); + dsi_dump_dsi_clocks, dsi); return 0; } @@ -5104,8 +5115,6 @@ static void dsi_unbind(struct device *dev, struct device *master, void *data) dss_debugfs_remove_file(dsi->debugfs.irqs); dss_debugfs_remove_file(dsi->debugfs.regs); - of_platform_depopulate(dev); - WARN_ON(dsi->scp_clk_refcount > 0); dss_pll_unregister(&dsi->pll); @@ -5457,6 +5466,8 @@ static int dsi_remove(struct platform_device *pdev) dsi_uninit_output(dsi); + of_platform_depopulate(&pdev->dev); + pm_runtime_disable(&pdev->dev); if (dsi->vdds_dsi_reg != NULL && dsi->vdds_dsi_enabled) { diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index d587779a80b4..a97294ac96d5 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -5676,7 +5676,7 @@ int ci_dpm_init(struct radeon_device *rdev) u16 data_offset, size; u8 frev, crev; struct ci_power_info *pi; - enum pci_bus_speed speed_cap; + enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN; struct pci_dev *root = rdev->pdev->bus->self; int ret; @@ -5685,7 +5685,8 @@ int ci_dpm_init(struct radeon_device *rdev) return -ENOMEM; rdev->pm.dpm.priv = pi; - speed_cap = pcie_get_speed_cap(root); + if (!pci_is_root_bus(rdev->pdev->bus)) + speed_cap = pcie_get_speed_cap(root); if (speed_cap == PCI_SPEED_UNKNOWN) { pi->sys_pcie_mask = 0; } else { diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 8fb60b3af015..0a785ef0ab66 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -6899,7 +6899,7 @@ int si_dpm_init(struct radeon_device *rdev) struct ni_power_info *ni_pi; struct si_power_info *si_pi; struct atom_clock_dividers 
dividers; - enum pci_bus_speed speed_cap; + enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN; struct pci_dev *root = rdev->pdev->bus->self; int ret; @@ -6911,7 +6911,8 @@ int si_dpm_init(struct radeon_device *rdev) eg_pi = &ni_pi->eg; pi = &eg_pi->rv7xx; - speed_cap = pcie_get_speed_cap(root); + if (!pci_is_root_bus(rdev->pdev->bus)) + speed_cap = pcie_get_speed_cap(root); if (speed_cap == PCI_SPEED_UNKNOWN) { si_pi->sys_pcie_mask = 0; } else { diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c index 37f93022a106..c0351abf83a3 100644 --- a/drivers/gpu/drm/rockchip/rockchip_rgb.c +++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c @@ -1,17 +1,8 @@ -//SPDX-License-Identifier: GPL-2.0+ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd * Author: * Sandy Huang <hjc@rock-chips.com> - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include <drm/drmP.h> diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.h b/drivers/gpu/drm/rockchip/rockchip_rgb.h index 38b52e63b2b0..27b9635124bc 100644 --- a/drivers/gpu/drm/rockchip/rockchip_rgb.h +++ b/drivers/gpu/drm/rockchip/rockchip_rgb.h @@ -1,17 +1,8 @@ -//SPDX-License-Identifier: GPL-2.0+ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd * Author: * Sandy Huang <hjc@rock-chips.com> - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
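Both radeon hunks above apply the same fix: initialize speed_cap to PCI_SPEED_UNKNOWN and call pcie_get_speed_cap() only when the device is not on a root bus, because bus->self is NULL there and dereferencing it oopsed. A compilable sketch of the guard with stand-in types (the real pci_is_root_bus() checks bus->parent; this simplification is illustrative only):

enum pci_bus_speed_sketch { SPEED_UNKNOWN = 0xff, SPEED_8_0GT = 0x16 };

struct bus_sketch { struct dev_sketch *self; };	/* self == NULL on a root bus */
struct dev_sketch { struct bus_sketch *bus; };

/* Default to "unknown" so the value is defined when the query is
 * skipped, and never hand a NULL upstream bridge to the query. */
static enum pci_bus_speed_sketch probe_speed_cap(struct dev_sketch *pdev)
{
	enum pci_bus_speed_sketch cap = SPEED_UNKNOWN;

	if (pdev->bus->self)		/* i.e. not a root bus */
		cap = SPEED_8_0GT;	/* stands in for pcie_get_speed_cap() */
	return cap;
}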
*/ #ifdef CONFIG_ROCKCHIP_RGB diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 4463d3826ecb..e2942c9a11a7 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -440,13 +440,10 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity) while ((entity->dependency = sched->ops->dependency(sched_job, entity))) { + trace_drm_sched_job_wait_dep(sched_job, entity->dependency); - if (drm_sched_entity_add_dependency_cb(entity)) { - - trace_drm_sched_job_wait_dep(sched_job, - entity->dependency); + if (drm_sched_entity_add_dependency_cb(entity)) return NULL; - } } /* skip jobs from entity that marked guilty */ diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index 0420f5c978b9..cf45d0f940f9 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c @@ -761,6 +761,7 @@ static int sun4i_tcon_init_clocks(struct device *dev, return PTR_ERR(tcon->sclk0); } } + clk_prepare_enable(tcon->sclk0); if (tcon->quirks->has_channel_1) { tcon->sclk1 = devm_clk_get(dev, "tcon-ch1"); @@ -775,6 +776,7 @@ static int sun4i_tcon_init_clocks(struct device *dev, static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon) { + clk_disable_unprepare(tcon->sclk0); clk_disable_unprepare(tcon->clk); } diff --git a/drivers/gpu/drm/vkms/vkms_crc.c b/drivers/gpu/drm/vkms/vkms_crc.c index 9d9e8146db90..d7b409a3c0f8 100644 --- a/drivers/gpu/drm/vkms/vkms_crc.c +++ b/drivers/gpu/drm/vkms/vkms_crc.c @@ -1,4 +1,5 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: GPL-2.0+ + #include "vkms_drv.h" #include <linux/crc32.h> #include <drm/drm_atomic.h> diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c index 177bbcb38306..eb56ee893761 100644 --- a/drivers/gpu/drm/vkms/vkms_crtc.c +++ b/drivers/gpu/drm/vkms/vkms_crtc.c @@ -1,10 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ #include "vkms_drv.h" #include <drm/drm_atomic_helper.h> diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c index 83087877565c..7dcbecb5fac2 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.c +++ b/drivers/gpu/drm/vkms/vkms_drv.c @@ -1,9 +1,4 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
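The sun4i_tcon hunk above pairs the new clk_prepare_enable(tcon->sclk0) in the init path with a clk_disable_unprepare(tcon->sclk0) in the free path. A kernel-style sketch of that pairing, assuming a devm-managed clock and a hypothetical struct foo; the patch itself ignores the enable's return value, so checking it, as below, is the stricter form:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

struct foo { struct clk *sclk; };

static int foo_init_clocks(struct foo *priv, struct device *dev)
{
	int ret;

	priv->sclk = devm_clk_get(dev, "sclk");
	if (IS_ERR(priv->sclk))
		return PTR_ERR(priv->sclk);

	/* Every clk_prepare_enable() on an init path needs a matching
	 * clk_disable_unprepare() on teardown, in reverse order. */
	ret = clk_prepare_enable(priv->sclk);
	if (ret)
		return ret;
	return 0;
}

static void foo_free_clocks(struct foo *priv)
{
	clk_disable_unprepare(priv->sclk);
}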
- */ +// SPDX-License-Identifier: GPL-2.0+ /** * DOC: vkms (Virtual Kernel Modesetting) diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h index e4469cd3d254..81f1cfbeb936 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.h +++ b/drivers/gpu/drm/vkms/vkms_drv.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + #ifndef _VKMS_DRV_H_ #define _VKMS_DRV_H_ diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c index 80311daed47a..138b0bb325cf 100644 --- a/drivers/gpu/drm/vkms/vkms_gem.c +++ b/drivers/gpu/drm/vkms/vkms_gem.c @@ -1,10 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ #include <linux/shmem_fs.h> diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c index 271a0eb9042c..4173e4f48334 100644 --- a/drivers/gpu/drm/vkms/vkms_output.c +++ b/drivers/gpu/drm/vkms/vkms_output.c @@ -1,10 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ #include "vkms_drv.h" #include <drm/drm_crtc_helper.h> diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c index 418817600ad1..0e67d2d42f0c 100644 --- a/drivers/gpu/drm/vkms/vkms_plane.c +++ b/drivers/gpu/drm/vkms/vkms_plane.c @@ -1,10 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ #include "vkms_drv.h" #include <drm/drm_plane_helper.h> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 25afb1d594e3..7ef5dcb06104 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -26,6 +26,7 @@ **************************************************************************/ #include <linux/module.h> #include <linux/console.h> +#include <linux/dma-mapping.h> #include <drm/drmP.h> #include "vmwgfx_drv.h" @@ -34,7 +35,6 @@ #include <drm/ttm/ttm_placement.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_module.h> -#include <linux/intel-iommu.h> #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices" #define VMWGFX_CHIP_SVGAII 0 @@ -546,6 +546,21 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv) } /** + * vmw_assume_iommu - Figure out whether coherent dma-remapping might be + * taking place. + * @dev: Pointer to the struct drm_device. + * + * Return: true if iommu present, false otherwise. + */ +static bool vmw_assume_iommu(struct drm_device *dev) +{ + const struct dma_map_ops *ops = get_dma_ops(dev->dev); + + return !dma_is_direct(ops) && ops && + ops->map_page != dma_direct_map_page; +} + +/** * vmw_dma_select_mode - Determine how DMA mappings should be set up for this * system. 
* @@ -565,55 +580,27 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv) [vmw_dma_alloc_coherent] = "Using coherent TTM pages.", [vmw_dma_map_populate] = "Keeping DMA mappings.", [vmw_dma_map_bind] = "Giving up DMA mappings early."}; -#ifdef CONFIG_X86 - const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev); -#ifdef CONFIG_INTEL_IOMMU - if (intel_iommu_enabled) { + if (vmw_force_coherent) + dev_priv->map_mode = vmw_dma_alloc_coherent; + else if (vmw_assume_iommu(dev_priv->dev)) dev_priv->map_mode = vmw_dma_map_populate; - goto out_fixup; - } -#endif - - if (!(vmw_force_iommu || vmw_force_coherent)) { + else if (!vmw_force_iommu) dev_priv->map_mode = vmw_dma_phys; - DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]); - return 0; - } - - dev_priv->map_mode = vmw_dma_map_populate; - - if (dma_ops && dma_ops->sync_single_for_cpu) + else if (IS_ENABLED(CONFIG_SWIOTLB) && swiotlb_nr_tbl()) dev_priv->map_mode = vmw_dma_alloc_coherent; -#ifdef CONFIG_SWIOTLB - if (swiotlb_nr_tbl() == 0) + else dev_priv->map_mode = vmw_dma_map_populate; -#endif -#ifdef CONFIG_INTEL_IOMMU -out_fixup: -#endif - if (dev_priv->map_mode == vmw_dma_map_populate && - vmw_restrict_iommu) + if (dev_priv->map_mode == vmw_dma_map_populate && vmw_restrict_iommu) dev_priv->map_mode = vmw_dma_map_bind; - if (vmw_force_coherent) - dev_priv->map_mode = vmw_dma_alloc_coherent; - -#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU) - /* - * No coherent page pool - */ - if (dev_priv->map_mode == vmw_dma_alloc_coherent) + /* No TTM coherent page pool? FIXME: Ask TTM instead! */ + if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) && + (dev_priv->map_mode == vmw_dma_alloc_coherent)) return -EINVAL; -#endif - -#else /* CONFIG_X86 */ - dev_priv->map_mode = vmw_dma_map_populate; -#endif /* CONFIG_X86 */ DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]); - return 0; } @@ -625,24 +612,20 @@ out_fixup: * With 32-bit we can only handle 32 bit PFNs. Optionally set that * restriction also for 64-bit systems. 
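The vmw_dma_select_mode() rewrite above also trades nested #ifdef CONFIG_* blocks for IS_ENABLED(), so every branch is parsed and type-checked in all configurations while dead branches are still discarded by the compiler. A kernel-style sketch of the idiom (pick_map_mode and its argument are hypothetical, condensing the patch's "no coherent page pool" check):

#include <linux/errno.h>
#include <linux/kconfig.h>
#include <linux/types.h>

/* IS_ENABLED(CONFIG_FOO) expands to 1 or 0, so this stays ordinary C
 * the compiler can see, unlike #ifdef, which hides whole branches. */
static int pick_map_mode(bool force_coherent)
{
	if (force_coherent &&
	    !(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)))
		return -EINVAL;	/* no coherent page pool available */
	return 0;
}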
*/ -#ifdef CONFIG_INTEL_IOMMU static int vmw_dma_masks(struct vmw_private *dev_priv) { struct drm_device *dev = dev_priv->dev; + int ret = 0; - if (intel_iommu_enabled && + ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)); + if (dev_priv->map_mode != vmw_dma_phys && (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) { DRM_INFO("Restricting DMA addresses to 44 bits.\n"); - return dma_set_mask(dev->dev, DMA_BIT_MASK(44)); + return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44)); } - return 0; -} -#else -static int vmw_dma_masks(struct vmw_private *dev_priv) -{ - return 0; + + return ret; } -#endif static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index f2d13a72c05d..88b8178d4687 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -3570,7 +3570,7 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv, *p_fence = NULL; } - return 0; + return ret; } /** diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index b351fb5214d3..ed2f67822f45 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1646,7 +1646,7 @@ static int vmw_kms_check_topology(struct drm_device *dev, struct drm_connector_state *conn_state; struct vmw_connector_state *vmw_conn_state; - if (!du->pref_active) { + if (!du->pref_active && new_crtc_state->enable) { ret = -EINVAL; goto clean; } @@ -2554,8 +2554,8 @@ void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv, user_fence_rep) { struct vmw_fence_obj *fence = NULL; - uint32_t handle; - int ret; + uint32_t handle = 0; + int ret = 0; if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) || out_fence) diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index 474b00e19697..0a7d4395d427 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c @@ -898,8 +898,8 @@ static struct ipu_devtype ipu_type_imx51 = { .cpmem_ofs = 0x1f000000, .srm_ofs = 0x1f040000, .tpm_ofs = 0x1f060000, - .csi0_ofs = 0x1f030000, - .csi1_ofs = 0x1f038000, + .csi0_ofs = 0x1e030000, + .csi1_ofs = 0x1e038000, .ic_ofs = 0x1e020000, .disp0_ofs = 0x1e040000, .disp1_ofs = 0x1e048000, @@ -914,8 +914,8 @@ static struct ipu_devtype ipu_type_imx53 = { .cpmem_ofs = 0x07000000, .srm_ofs = 0x07040000, .tpm_ofs = 0x07060000, - .csi0_ofs = 0x07030000, - .csi1_ofs = 0x07038000, + .csi0_ofs = 0x06030000, + .csi1_ofs = 0x06038000, .ic_ofs = 0x06020000, .disp0_ofs = 0x06040000, .disp1_ofs = 0x06048000, diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c index 2f8db9d62551..4a28f3fbb0a2 100644 --- a/drivers/gpu/ipu-v3/ipu-pre.c +++ b/drivers/gpu/ipu-v3/ipu-pre.c @@ -106,6 +106,7 @@ struct ipu_pre { void *buffer_virt; bool in_use; unsigned int safe_window_end; + unsigned int last_bufaddr; }; static DEFINE_MUTEX(ipu_pre_list_mutex); @@ -185,6 +186,7 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width, writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF); writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); + pre->last_bufaddr = bufaddr; val = IPU_PRE_PREF_ENG_CTRL_INPUT_PIXEL_FORMAT(0) | IPU_PRE_PREF_ENG_CTRL_INPUT_ACTIVE_BPP(active_bpp) | @@ -242,7 +244,11 @@ void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr) unsigned short current_yblock; u32 val; + if (bufaddr == pre->last_bufaddr) + return; + writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); + 
pre->last_bufaddr = bufaddr; do { if (time_after(jiffies, timeout)) { diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index c530476edba6..ac9fda1b5a72 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -30,6 +30,7 @@ #include <linux/debugfs.h> #include <linux/seq_file.h> +#include <linux/kfifo.h> #include <linux/sched/signal.h> #include <linux/export.h> #include <linux/slab.h> @@ -661,17 +662,12 @@ EXPORT_SYMBOL_GPL(hid_dump_device); /* enqueue string to 'events' ring buffer */ void hid_debug_event(struct hid_device *hdev, char *buf) { - unsigned i; struct hid_debug_list *list; unsigned long flags; spin_lock_irqsave(&hdev->debug_list_lock, flags); - list_for_each_entry(list, &hdev->debug_list, node) { - for (i = 0; buf[i]; i++) - list->hid_debug_buf[(list->tail + i) % HID_DEBUG_BUFSIZE] = - buf[i]; - list->tail = (list->tail + i) % HID_DEBUG_BUFSIZE; - } + list_for_each_entry(list, &hdev->debug_list, node) + kfifo_in(&list->hid_debug_fifo, buf, strlen(buf)); spin_unlock_irqrestore(&hdev->debug_list_lock, flags); wake_up_interruptible(&hdev->debug_wait); @@ -722,8 +718,7 @@ void hid_dump_input(struct hid_device *hdev, struct hid_usage *usage, __s32 valu hid_debug_event(hdev, buf); kfree(buf); - wake_up_interruptible(&hdev->debug_wait); - + wake_up_interruptible(&hdev->debug_wait); } EXPORT_SYMBOL_GPL(hid_dump_input); @@ -1083,8 +1078,8 @@ static int hid_debug_events_open(struct inode *inode, struct file *file) goto out; } - if (!(list->hid_debug_buf = kzalloc(HID_DEBUG_BUFSIZE, GFP_KERNEL))) { - err = -ENOMEM; + err = kfifo_alloc(&list->hid_debug_fifo, HID_DEBUG_FIFOSIZE, GFP_KERNEL); + if (err) { kfree(list); goto out; } @@ -1104,77 +1099,57 @@ static ssize_t hid_debug_events_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { struct hid_debug_list *list = file->private_data; - int ret = 0, len; + int ret = 0, copied; DECLARE_WAITQUEUE(wait, current); mutex_lock(&list->read_mutex); - while (ret == 0) { - if (list->head == list->tail) { - add_wait_queue(&list->hdev->debug_wait, &wait); - set_current_state(TASK_INTERRUPTIBLE); - - while (list->head == list->tail) { - if (file->f_flags & O_NONBLOCK) { - ret = -EAGAIN; - break; - } - if (signal_pending(current)) { - ret = -ERESTARTSYS; - break; - } + if (kfifo_is_empty(&list->hid_debug_fifo)) { + add_wait_queue(&list->hdev->debug_wait, &wait); + set_current_state(TASK_INTERRUPTIBLE); + + while (kfifo_is_empty(&list->hid_debug_fifo)) { + if (file->f_flags & O_NONBLOCK) { + ret = -EAGAIN; + break; + } - if (!list->hdev || !list->hdev->debug) { - ret = -EIO; - set_current_state(TASK_RUNNING); - goto out; - } + if (signal_pending(current)) { + ret = -ERESTARTSYS; + break; + } - /* allow O_NONBLOCK from other threads */ - mutex_unlock(&list->read_mutex); - schedule(); - mutex_lock(&list->read_mutex); - set_current_state(TASK_INTERRUPTIBLE); + /* if list->hdev is NULL we cannot remove_wait_queue(). + * if list->hdev->debug is 0 then hid_debug_unregister() + * was already called and list->hdev is being destroyed. + * if we add remove_wait_queue() here we can hit a race. 
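The hid-debug rewrite above replaces the open-coded head/tail ring buffer with a kfifo, whose single-reader/single-writer guarantees are what let the read path below drain with kfifo_to_user() and no extra locking. A kernel-style sketch of the core calls in one place (dbg_list, dbg_example and the 64 KiB size are illustrative; the driver uses HID_DEBUG_FIFOSIZE and splits these across open/event/read/release):

#include <linux/kfifo.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

struct dbg_list {
	struct kfifo fifo;
};

/* Allocate, enqueue from a kernel buffer, drain to userspace, free. */
static int dbg_example(struct dbg_list *list,
		       char __user *ubuf, size_t count)
{
	unsigned int copied = 0;
	char msg[] = "hello";
	int ret;

	ret = kfifo_alloc(&list->fifo, 64 * 1024, GFP_KERNEL);
	if (ret)
		return ret;

	kfifo_in(&list->fifo, msg, sizeof(msg) - 1);

	/* Safe without a lock with exactly one reader and one writer. */
	ret = kfifo_to_user(&list->fifo, ubuf, count, &copied);

	kfifo_free(&list->fifo);
	return ret ? ret : (int)copied;
}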
+ */ + if (!list->hdev || !list->hdev->debug) { + ret = -EIO; + set_current_state(TASK_RUNNING); + goto out; } - set_current_state(TASK_RUNNING); - remove_wait_queue(&list->hdev->debug_wait, &wait); + /* allow O_NONBLOCK from other threads */ + mutex_unlock(&list->read_mutex); + schedule(); + mutex_lock(&list->read_mutex); + set_current_state(TASK_INTERRUPTIBLE); } - if (ret) - goto out; + __set_current_state(TASK_RUNNING); + remove_wait_queue(&list->hdev->debug_wait, &wait); - /* pass the ringbuffer contents to userspace */ -copy_rest: - if (list->tail == list->head) + if (ret) goto out; - if (list->tail > list->head) { - len = list->tail - list->head; - if (len > count) - len = count; - - if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) { - ret = -EFAULT; - goto out; - } - ret += len; - list->head += len; - } else { - len = HID_DEBUG_BUFSIZE - list->head; - if (len > count) - len = count; - - if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) { - ret = -EFAULT; - goto out; - } - list->head = 0; - ret += len; - count -= len; - if (count > 0) - goto copy_rest; - } - } + + /* pass the fifo content to userspace, locking is not needed with only + * one concurrent reader and one concurrent writer + */ + ret = kfifo_to_user(&list->hid_debug_fifo, buffer, count, &copied); + if (ret) + goto out; + ret = copied; out: mutex_unlock(&list->read_mutex); return ret; @@ -1185,7 +1160,7 @@ static __poll_t hid_debug_events_poll(struct file *file, poll_table *wait) struct hid_debug_list *list = file->private_data; poll_wait(file, &list->hdev->debug_wait, wait); - if (list->head != list->tail) + if (!kfifo_is_empty(&list->hid_debug_fifo)) return EPOLLIN | EPOLLRDNORM; if (!list->hdev->debug) return EPOLLERR | EPOLLHUP; @@ -1200,7 +1175,7 @@ static int hid_debug_events_release(struct inode *inode, struct file *file) spin_lock_irqsave(&list->hdev->debug_list_lock, flags); list_del(&list->node); spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags); - kfree(list->hid_debug_buf); + kfifo_free(&list->hid_debug_fifo); kfree(list); return 0; @@ -1246,4 +1221,3 @@ void hid_debug_exit(void) { debugfs_remove_recursive(hid_debug_root); } - diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index 4adec4ab7d06..59ee01f3d022 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c @@ -3594,7 +3594,8 @@ nct6775_check_fan_inputs(struct nct6775_data *data) fan5pin |= cr1b & BIT(5); fan5pin |= creb & BIT(5); - fan6pin = creb & BIT(3); + fan6pin = !dsw_en && (cr2d & BIT(1)); + fan6pin |= creb & BIT(3); pwm5pin |= cr2d & BIT(7); pwm5pin |= (creb & BIT(4)) && !(cr2a & BIT(0)); diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c index ec6e69aa3a8e..d2fbb4bb4a43 100644 --- a/drivers/i2c/busses/i2c-bcm2835.c +++ b/drivers/i2c/busses/i2c-bcm2835.c @@ -183,6 +183,15 @@ static void bcm2835_i2c_start_transfer(struct bcm2835_i2c_dev *i2c_dev) bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, c); } +static void bcm2835_i2c_finish_transfer(struct bcm2835_i2c_dev *i2c_dev) +{ + i2c_dev->curr_msg = NULL; + i2c_dev->num_msgs = 0; + + i2c_dev->msg_buf = NULL; + i2c_dev->msg_buf_remaining = 0; +} + /* * Note about I2C_C_CLEAR on error: * The I2C_C_CLEAR on errors will take some time to resolve -- if you were in @@ -283,6 +292,9 @@ static int bcm2835_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], time_left = wait_for_completion_timeout(&i2c_dev->completion, adap->timeout); + + bcm2835_i2c_finish_transfer(i2c_dev); + if (!time_left) { 
bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, BCM2835_I2C_C_CLEAR); diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c index b13605718291..d917cefc5a19 100644 --- a/drivers/i2c/busses/i2c-cadence.c +++ b/drivers/i2c/busses/i2c-cadence.c @@ -382,8 +382,10 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id) * Check for the message size against FIFO depth and set the * 'hold bus' bit if it is greater than FIFO depth. */ - if (id->recv_count > CDNS_I2C_FIFO_DEPTH) + if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag) ctrl_reg |= CDNS_I2C_CR_HOLD; + else + ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD; cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); @@ -440,8 +442,11 @@ static void cdns_i2c_msend(struct cdns_i2c *id) * Check for the message size against FIFO depth and set the * 'hold bus' bit if it is greater than FIFO depth. */ - if (id->send_count > CDNS_I2C_FIFO_DEPTH) + if ((id->send_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag) ctrl_reg |= CDNS_I2C_CR_HOLD; + else + ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD; + cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); /* Clear the interrupts in interrupt status register. */ diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index b1086bfb0465..cd9c65f3d404 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -1500,8 +1500,7 @@ static int omap_i2c_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM -static int omap_i2c_runtime_suspend(struct device *dev) +static int __maybe_unused omap_i2c_runtime_suspend(struct device *dev) { struct omap_i2c_dev *omap = dev_get_drvdata(dev); @@ -1527,7 +1526,7 @@ static int omap_i2c_runtime_suspend(struct device *dev) return 0; } -static int omap_i2c_runtime_resume(struct device *dev) +static int __maybe_unused omap_i2c_runtime_resume(struct device *dev) { struct omap_i2c_dev *omap = dev_get_drvdata(dev); @@ -1542,20 +1541,18 @@ static int omap_i2c_runtime_resume(struct device *dev) } static const struct dev_pm_ops omap_i2c_pm_ops = { + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) SET_RUNTIME_PM_OPS(omap_i2c_runtime_suspend, omap_i2c_runtime_resume, NULL) }; -#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops) -#else -#define OMAP_I2C_PM_OPS NULL -#endif /* CONFIG_PM */ static struct platform_driver omap_i2c_driver = { .probe = omap_i2c_probe, .remove = omap_i2c_remove, .driver = { .name = "omap_i2c", - .pm = OMAP_I2C_PM_OPS, + .pm = &omap_i2c_pm_ops, .of_match_table = of_match_ptr(omap_i2c_of_match), }, }; diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c index c39f89d2deba..2dc628d4f1ae 100644 --- a/drivers/i3c/master.c +++ b/drivers/i3c/master.c @@ -1828,7 +1828,7 @@ int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master, ret = i3c_master_retrieve_dev_info(newdev); if (ret) - goto err_free_dev; + goto err_detach_dev; olddev = i3c_master_search_i3c_dev_duplicate(newdev); if (olddev) { diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c index f8c00b94817f..bb03079fbade 100644 --- a/drivers/i3c/master/dw-i3c-master.c +++ b/drivers/i3c/master/dw-i3c-master.c @@ -419,12 +419,9 @@ static void dw_i3c_master_enqueue_xfer(struct dw_i3c_master *master, spin_unlock_irqrestore(&master->xferqueue.lock, flags); } -static void dw_i3c_master_dequeue_xfer(struct dw_i3c_master *master, - struct dw_i3c_xfer *xfer) +static void dw_i3c_master_dequeue_xfer_locked(struct dw_i3c_master *master, + struct dw_i3c_xfer *xfer) { - unsigned long flags; - - 
spin_lock_irqsave(&master->xferqueue.lock, flags); if (master->xferqueue.cur == xfer) { u32 status; @@ -439,6 +436,15 @@ static void dw_i3c_master_dequeue_xfer(struct dw_i3c_master *master, } else { list_del_init(&xfer->node); } +} + +static void dw_i3c_master_dequeue_xfer(struct dw_i3c_master *master, + struct dw_i3c_xfer *xfer) +{ + unsigned long flags; + + spin_lock_irqsave(&master->xferqueue.lock, flags); + dw_i3c_master_dequeue_xfer_locked(master, xfer); spin_unlock_irqrestore(&master->xferqueue.lock, flags); } @@ -494,7 +500,7 @@ static void dw_i3c_master_end_xfer_locked(struct dw_i3c_master *master, u32 isr) complete(&xfer->comp); if (ret < 0) { - dw_i3c_master_dequeue_xfer(master, xfer); + dw_i3c_master_dequeue_xfer_locked(master, xfer); writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_RESUME, master->regs + DEVICE_CTRL); } diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c index da58020a144e..33a28cde126c 100644 --- a/drivers/ide/ide-atapi.c +++ b/drivers/ide/ide-atapi.c @@ -235,21 +235,28 @@ EXPORT_SYMBOL_GPL(ide_prep_sense); int ide_queue_sense_rq(ide_drive_t *drive, void *special) { - struct request *sense_rq = drive->sense_rq; + ide_hwif_t *hwif = drive->hwif; + struct request *sense_rq; + unsigned long flags; + + spin_lock_irqsave(&hwif->lock, flags); /* deferred failure from ide_prep_sense() */ if (!drive->sense_rq_armed) { printk(KERN_WARNING PFX "%s: error queuing a sense request\n", drive->name); + spin_unlock_irqrestore(&hwif->lock, flags); return -ENOMEM; } + sense_rq = drive->sense_rq; ide_req(sense_rq)->special = special; drive->sense_rq_armed = false; drive->hwif->rq = NULL; ide_insert_request_head(drive, sense_rq); + spin_unlock_irqrestore(&hwif->lock, flags); return 0; } EXPORT_SYMBOL_GPL(ide_queue_sense_rq); diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index 8445b484ae69..b137f27a34d5 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c @@ -68,8 +68,10 @@ int ide_end_rq(ide_drive_t *drive, struct request *rq, blk_status_t error, } if (!blk_update_request(rq, error, nr_bytes)) { - if (rq == drive->sense_rq) + if (rq == drive->sense_rq) { drive->sense_rq = NULL; + drive->sense_rq_active = false; + } __blk_mq_end_request(rq, error); return 0; @@ -451,16 +453,11 @@ void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq) blk_mq_delay_run_hw_queue(q->queue_hw_ctx[0], 3); } -/* - * Issue a new request to a device. - */ -blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx, - const struct blk_mq_queue_data *bd) +blk_status_t ide_issue_rq(ide_drive_t *drive, struct request *rq, + bool local_requeue) { - ide_drive_t *drive = hctx->queue->queuedata; - ide_hwif_t *hwif = drive->hwif; + ide_hwif_t *hwif = drive->hwif; struct ide_host *host = hwif->host; - struct request *rq = bd->rq; ide_startstop_t startstop; if (!blk_rq_is_passthrough(rq) && !(rq->rq_flags & RQF_DONTPREP)) { @@ -474,8 +471,6 @@ blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx, if (ide_lock_host(host, hwif)) return BLK_STS_DEV_RESOURCE; - blk_mq_start_request(rq); - spin_lock_irq(&hwif->lock); if (!ide_lock_port(hwif)) { @@ -511,18 +506,6 @@ repeat: drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED); /* - * we know that the queue isn't empty, but this can happen - * if ->prep_rq() decides to kill a request - */ - if (!rq) { - rq = bd->rq; - if (!rq) { - ide_unlock_port(hwif); - goto out; - } - } - - /* * Sanity: don't accept a request that isn't a PM request * if we are currently power managed. 
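/*
 * [Editor's note] The ide_queue_sense_rq() hunk above now takes hwif->lock
 * across both the sense_rq_armed test and the head insertion, so checking
 * and consuming the pre-armed request is a single atomic step. The
 * check-then-consume-under-one-lock shape, sketched with hypothetical
 * names:
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t hwif_lock = PTHREAD_MUTEX_INITIALIZER;
static bool sense_armed;
static void *armed_rq;

static int queue_sense(void **rq_out)
{
        pthread_mutex_lock(&hwif_lock);
        if (!sense_armed) {             /* deferred failure from prep */
                pthread_mutex_unlock(&hwif_lock);
                return -ENOMEM;
        }
        *rq_out = armed_rq;             /* consume while still holding the lock */
        sense_armed = false;
        pthread_mutex_unlock(&hwif_lock);
        return 0;
}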
This is very important as * blk_stop_queue() doesn't prevent the blk_fetch_request() @@ -560,9 +543,12 @@ repeat: } } else { plug_device: + if (local_requeue) + list_add(&rq->queuelist, &drive->rq_list); spin_unlock_irq(&hwif->lock); ide_unlock_host(host); - ide_requeue_and_plug(drive, rq); + if (!local_requeue) + ide_requeue_and_plug(drive, rq); return BLK_STS_OK; } @@ -573,6 +559,26 @@ out: return BLK_STS_OK; } +/* + * Issue a new request to a device. + */ +blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx, + const struct blk_mq_queue_data *bd) +{ + ide_drive_t *drive = hctx->queue->queuedata; + ide_hwif_t *hwif = drive->hwif; + + spin_lock_irq(&hwif->lock); + if (drive->sense_rq_active) { + spin_unlock_irq(&hwif->lock); + return BLK_STS_DEV_RESOURCE; + } + spin_unlock_irq(&hwif->lock); + + blk_mq_start_request(bd->rq); + return ide_issue_rq(drive, bd->rq, false); +} + static int drive_is_ready(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; @@ -893,13 +899,8 @@ EXPORT_SYMBOL_GPL(ide_pad_transfer); void ide_insert_request_head(ide_drive_t *drive, struct request *rq) { - ide_hwif_t *hwif = drive->hwif; - unsigned long flags; - - spin_lock_irqsave(&hwif->lock, flags); + drive->sense_rq_active = true; list_add_tail(&rq->queuelist, &drive->rq_list); - spin_unlock_irqrestore(&hwif->lock, flags); - kblockd_schedule_work(&drive->rq_work); } EXPORT_SYMBOL_GPL(ide_insert_request_head); diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c index 102aa3bc3e7f..8af7af6001eb 100644 --- a/drivers/ide/ide-park.c +++ b/drivers/ide/ide-park.c @@ -54,7 +54,9 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout) scsi_req(rq)->cmd[0] = REQ_UNPARK_HEADS; scsi_req(rq)->cmd_len = 1; ide_req(rq)->type = ATA_PRIV_MISC; + spin_lock_irq(&hwif->lock); ide_insert_request_head(drive, rq); + spin_unlock_irq(&hwif->lock); out: return; diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 63627be0811a..5aeaca24a28f 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c @@ -1159,18 +1159,27 @@ static void drive_rq_insert_work(struct work_struct *work) ide_drive_t *drive = container_of(work, ide_drive_t, rq_work); ide_hwif_t *hwif = drive->hwif; struct request *rq; + blk_status_t ret; LIST_HEAD(list); - spin_lock_irq(&hwif->lock); - if (!list_empty(&drive->rq_list)) - list_splice_init(&drive->rq_list, &list); - spin_unlock_irq(&hwif->lock); + blk_mq_quiesce_queue(drive->queue); - while (!list_empty(&list)) { - rq = list_first_entry(&list, struct request, queuelist); + ret = BLK_STS_OK; + spin_lock_irq(&hwif->lock); + while (!list_empty(&drive->rq_list)) { + rq = list_first_entry(&drive->rq_list, struct request, queuelist); list_del_init(&rq->queuelist); - blk_execute_rq_nowait(drive->queue, rq->rq_disk, rq, true, NULL); + + spin_unlock_irq(&hwif->lock); + ret = ide_issue_rq(drive, rq, true); + spin_lock_irq(&hwif->lock); } + spin_unlock_irq(&hwif->lock); + + blk_mq_unquiesce_queue(drive->queue); + + if (ret != BLK_STS_OK) + kblockd_schedule_work(&drive->rq_work); } static const u8 ide_hwif_to_major[] = diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c index 031d568b4972..4e339cfd0c54 100644 --- a/drivers/iio/adc/axp288_adc.c +++ b/drivers/iio/adc/axp288_adc.c @@ -27,9 +27,18 @@ #include <linux/iio/machine.h> #include <linux/iio/driver.h> -#define AXP288_ADC_EN_MASK 0xF1 -#define AXP288_ADC_TS_PIN_GPADC 0xF2 -#define AXP288_ADC_TS_PIN_ON 0xF3 +/* + * This mask enables all ADCs except for the battery temp-sensor (TS), that is + * left as-is 
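/*
 * [Editor's note] The axp288 rework that follows switches from whole-
 * register regmap_write() calls to regmap_update_bits(), which only
 * touches the bits covered by the mask. The underlying read-modify-write
 * arithmetic is just:
 */
#include <stdint.h>

static uint8_t update_bits(uint8_t old, uint8_t mask, uint8_t val)
{
        return (old & ~mask) | (val & mask);
}

/*
 * Example: update_bits(reg, 0x03, 0x02) moves the TS current source to
 * "on demand" (value 2 inside mask GENMASK(1, 0) == 0x03) while leaving
 * bits 7:2 untouched.
 */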
to avoid breaking charging on devices without a temp-sensor. + */ +#define AXP288_ADC_EN_MASK 0xF0 +#define AXP288_ADC_TS_ENABLE 0x01 + +#define AXP288_ADC_TS_CURRENT_ON_OFF_MASK GENMASK(1, 0) +#define AXP288_ADC_TS_CURRENT_OFF (0 << 0) +#define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING (1 << 0) +#define AXP288_ADC_TS_CURRENT_ON_ONDEMAND (2 << 0) +#define AXP288_ADC_TS_CURRENT_ON (3 << 0) enum axp288_adc_id { AXP288_ADC_TS, @@ -44,6 +53,7 @@ enum axp288_adc_id { struct axp288_adc_info { int irq; struct regmap *regmap; + bool ts_enabled; }; static const struct iio_chan_spec axp288_adc_channels[] = { @@ -115,21 +125,33 @@ static int axp288_adc_read_channel(int *val, unsigned long address, return IIO_VAL_INT; } -static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode, - unsigned long address) +/* + * The current-source used for the battery temp-sensor (TS) is shared + * with the GPADC. For proper fuel-gauge and charger operation the TS + * current-source needs to be permanently on. But to read the GPADC we + * need to temporarily switch the TS current-source to ondemand, so that + * the GPADC can use it, otherwise we will always read an all 0 value. + */ +static int axp288_adc_set_ts(struct axp288_adc_info *info, + unsigned int mode, unsigned long address) { int ret; - /* channels other than GPADC do not need to switch TS pin */ + /* No need to switch the current-source if the TS pin is disabled */ + if (!info->ts_enabled) + return 0; + + /* Channels other than GPADC do not need the current source */ if (address != AXP288_GP_ADC_H) return 0; - ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode); + ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL, + AXP288_ADC_TS_CURRENT_ON_OFF_MASK, mode); if (ret) return ret; /* When switching to the GPADC pin give things some time to settle */ - if (mode == AXP288_ADC_TS_PIN_GPADC) + if (mode == AXP288_ADC_TS_CURRENT_ON_ONDEMAND) usleep_range(6000, 10000); return 0; @@ -145,14 +167,14 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev, mutex_lock(&indio_dev->mlock); switch (mask) { case IIO_CHAN_INFO_RAW: - if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC, + if (axp288_adc_set_ts(info, AXP288_ADC_TS_CURRENT_ON_ONDEMAND, chan->address)) { dev_err(&indio_dev->dev, "GPADC mode\n"); ret = -EINVAL; break; } ret = axp288_adc_read_channel(val, chan->address, info->regmap); - if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON, + if (axp288_adc_set_ts(info, AXP288_ADC_TS_CURRENT_ON, chan->address)) dev_err(&indio_dev->dev, "TS pin restore\n"); break; @@ -164,13 +186,35 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev, return ret; } -static int axp288_adc_set_state(struct regmap *regmap) +static int axp288_adc_initialize(struct axp288_adc_info *info) { - /* ADC should be always enabled for internal FG to function */ - if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON)) - return -EIO; + int ret, adc_enable_val; + + /* + * Determine if the TS pin is enabled and set the TS current-source + * accordingly. 
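/*
 * [Editor's note] axp288_adc_read_raw() above brackets the GPADC read with
 * a switch-to-ondemand / read / restore-to-on sequence, and always
 * attempts the restore even when the read failed (the driver only logs a
 * restore failure). The pattern, sketched with assumed helpers:
 */
struct adc;
enum ts_mode { TS_CURRENT_ON_ONDEMAND, TS_CURRENT_ON };
int set_ts_mode(struct adc *adc, enum ts_mode mode);    /* assumed helper */
int read_channel(struct adc *adc, int *val);            /* assumed helper */

static int read_gpadc(struct adc *adc, int *val)
{
        int ret;

        ret = set_ts_mode(adc, TS_CURRENT_ON_ONDEMAND); /* lend the source */
        if (ret)
                return ret;
        ret = read_channel(adc, val);
        (void)set_ts_mode(adc, TS_CURRENT_ON);  /* restore unconditionally */
        return ret;                             /* keep the read's status */
}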
+ */ + ret = regmap_read(info->regmap, AXP20X_ADC_EN1, &adc_enable_val); + if (ret) + return ret; + + if (adc_enable_val & AXP288_ADC_TS_ENABLE) { + info->ts_enabled = true; + ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL, + AXP288_ADC_TS_CURRENT_ON_OFF_MASK, + AXP288_ADC_TS_CURRENT_ON); + } else { + info->ts_enabled = false; + ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL, + AXP288_ADC_TS_CURRENT_ON_OFF_MASK, + AXP288_ADC_TS_CURRENT_OFF); + } + if (ret) + return ret; - return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK); + /* Turn on the ADC for all channels except TS, leave TS as is */ + return regmap_update_bits(info->regmap, AXP20X_ADC_EN1, + AXP288_ADC_EN_MASK, AXP288_ADC_EN_MASK); } static const struct iio_info axp288_adc_iio_info = { @@ -200,7 +244,7 @@ static int axp288_adc_probe(struct platform_device *pdev) * Set ADC to enabled state at all time, including system suspend. * otherwise internal fuel gauge functionality may be affected. */ - ret = axp288_adc_set_state(axp20x->regmap); + ret = axp288_adc_initialize(info); if (ret) { dev_err(&pdev->dev, "unable to enable ADC device\n"); return ret; diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c index 184d686ebd99..8b4568edd5cb 100644 --- a/drivers/iio/adc/ti-ads8688.c +++ b/drivers/iio/adc/ti-ads8688.c @@ -41,6 +41,7 @@ #define ADS8688_VREF_MV 4096 #define ADS8688_REALBITS 16 +#define ADS8688_MAX_CHANNELS 8 /* * enum ads8688_range - ADS8688 reference voltage range @@ -385,7 +386,7 @@ static irqreturn_t ads8688_trigger_handler(int irq, void *p) { struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; - u16 buffer[8]; + u16 buffer[ADS8688_MAX_CHANNELS + sizeof(s64)/sizeof(u16)]; int i, j = 0; for (i = 0; i < indio_dev->masklength; i++) { diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c index a406ad31b096..3a20cb5d9bff 100644 --- a/drivers/iio/chemical/atlas-ph-sensor.c +++ b/drivers/iio/chemical/atlas-ph-sensor.c @@ -444,9 +444,8 @@ static int atlas_read_raw(struct iio_dev *indio_dev, case IIO_CHAN_INFO_SCALE: switch (chan->type) { case IIO_TEMP: - *val = 1; /* 0.01 */ - *val2 = 100; - break; + *val = 10; + return IIO_VAL_INT; case IIO_PH: *val = 1; /* 0.001 */ *val2 = 1000; @@ -477,7 +476,7 @@ static int atlas_write_raw(struct iio_dev *indio_dev, int val, int val2, long mask) { struct atlas_data *data = iio_priv(indio_dev); - __be32 reg = cpu_to_be32(val); + __be32 reg = cpu_to_be32(val / 10); if (val2 != 0 || val < 0 || val > 20000) return -EINVAL; diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h index 3cd830d52967..616734313f0c 100644 --- a/drivers/infiniband/core/core_priv.h +++ b/drivers/infiniband/core/core_priv.h @@ -267,7 +267,6 @@ static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map, #endif struct ib_device *ib_device_get_by_index(u32 ifindex); -void ib_device_put(struct ib_device *device); /* RDMA device netlink */ void nldev_init(void); void nldev_exit(void); diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 8872453e26c0..238ec42778ef 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -156,19 +156,26 @@ struct ib_device *ib_device_get_by_index(u32 index) down_read(&lists_rwsem); device = __ib_device_get_by_index(index); if (device) { - /* Do not return a device if unregistration has started. 
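/*
 * [Editor's note] This lookup only hands the device out if a "try get"
 * succeeds; a refcount that already hit zero means unregistration has
 * begun and the object must not be used. The same idiom with C11 atomics
 * (a sketch, not the kernel's refcount_t):
 */
#include <stdatomic.h>
#include <stdbool.h>

static bool try_get(atomic_int *refcount)
{
        int old = atomic_load(refcount);

        while (old != 0) {
                if (atomic_compare_exchange_weak(refcount, &old, old + 1))
                        return true;    /* reference successfully taken */
        }
        return false;                   /* object is dying; leave it alone */
}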
*/ - if (!refcount_inc_not_zero(&device->refcount)) + if (!ib_device_try_get(device)) device = NULL; } up_read(&lists_rwsem); return device; } +/** + * ib_device_put - Release IB device reference + * @device: device whose reference to be released + * + * ib_device_put() releases reference to the IB device to allow it to be + * unregistered and eventually free. + */ void ib_device_put(struct ib_device *device) { if (refcount_dec_and_test(&device->refcount)) complete(&device->unreg_completion); } +EXPORT_SYMBOL(ib_device_put); static struct ib_device *__ib_device_get_by_name(const char *name) { @@ -303,7 +310,6 @@ struct ib_device *ib_alloc_device(size_t size) rwlock_init(&device->client_data_lock); INIT_LIST_HEAD(&device->client_data_list); INIT_LIST_HEAD(&device->port_list); - refcount_set(&device->refcount, 1); init_completion(&device->unreg_completion); return device; @@ -620,6 +626,7 @@ int ib_register_device(struct ib_device *device, const char *name, goto cg_cleanup; } + refcount_set(&device->refcount, 1); device->reg_state = IB_DEV_REGISTERED; list_for_each_entry(client, &client_list, list) diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index a4ec43093cb3..acb882f279cb 100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c @@ -352,6 +352,8 @@ struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm, umem->writable = 1; umem->is_odp = 1; odp_data->per_mm = per_mm; + umem->owning_mm = per_mm->mm; + mmgrab(umem->owning_mm); mutex_init(&odp_data->umem_mutex); init_completion(&odp_data->notifier_completion); @@ -384,6 +386,7 @@ struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm, out_page_list: vfree(odp_data->page_list); out_odp_data: + mmdrop(umem->owning_mm); kfree(odp_data); return ERR_PTR(ret); } diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 2890a77339e1..5f366838b7ff 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -204,6 +204,9 @@ void ib_uverbs_release_file(struct kref *ref) if (atomic_dec_and_test(&file->device->refcount)) ib_uverbs_comp_dev(file->device); + if (file->async_file) + kref_put(&file->async_file->ref, + ib_uverbs_release_async_event_file); put_device(&file->device->dev); kfree(file); } @@ -964,11 +967,19 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile) /* Get an arbitrary mm pointer that hasn't been cleaned yet */ mutex_lock(&ufile->umap_lock); - if (!list_empty(&ufile->umaps)) { - mm = list_first_entry(&ufile->umaps, - struct rdma_umap_priv, list) - ->vma->vm_mm; - mmget(mm); + while (!list_empty(&ufile->umaps)) { + int ret; + + priv = list_first_entry(&ufile->umaps, + struct rdma_umap_priv, list); + mm = priv->vma->vm_mm; + ret = mmget_not_zero(mm); + if (!ret) { + list_del_init(&priv->list); + mm = NULL; + continue; + } + break; } mutex_unlock(&ufile->umap_lock); if (!mm) @@ -1096,10 +1107,6 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp) list_del_init(&file->list); mutex_unlock(&file->device->lists_mutex); - if (file->async_file) - kref_put(&file->async_file->ref, - ib_uverbs_release_async_event_file); - kref_put(&file->ref, ib_uverbs_release_file); return 0; diff --git a/drivers/infiniband/core/uverbs_std_types_device.c b/drivers/infiniband/core/uverbs_std_types_device.c index 5030ec480370..2a3f2f01028d 100644 --- a/drivers/infiniband/core/uverbs_std_types_device.c +++ 
b/drivers/infiniband/core/uverbs_std_types_device.c @@ -168,12 +168,18 @@ void copy_port_attr_to_resp(struct ib_port_attr *attr, static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_PORT)( struct uverbs_attr_bundle *attrs) { - struct ib_device *ib_dev = attrs->ufile->device->ib_dev; + struct ib_device *ib_dev; struct ib_port_attr attr = {}; struct ib_uverbs_query_port_resp_ex resp = {}; + struct ib_ucontext *ucontext; int ret; u8 port_num; + ucontext = ib_uverbs_get_ucontext(attrs); + if (IS_ERR(ucontext)) + return PTR_ERR(ucontext); + ib_dev = ucontext->device; + /* FIXME: Extend the UAPI_DEF_OBJ_NEEDS_FN stuff.. */ if (!ib_dev->ops.query_port) return -EOPNOTSUPP; diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index c22ebc774a6a..f9a7e9d29c8b 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c @@ -488,7 +488,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma) vmf = 1; break; case STATUS: - if (flags & (unsigned long)(VM_WRITE | VM_EXEC)) { + if (flags & VM_WRITE) { ret = -EPERM; goto done; } diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c index 88242fe95eaa..bf96067876c9 100644 --- a/drivers/infiniband/hw/hfi1/ud.c +++ b/drivers/infiniband/hw/hfi1/ud.c @@ -987,7 +987,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) { wc.ex.imm_data = packet->ohdr->u.ud.imm_data; wc.wc_flags = IB_WC_WITH_IMM; - tlen -= sizeof(u32); } else if (opcode == IB_OPCODE_UD_SEND_ONLY) { wc.ex.imm_data = 0; wc.wc_flags = 0; diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c index 960b1946c365..12deacf442cf 100644 --- a/drivers/infiniband/hw/hns/hns_roce_srq.c +++ b/drivers/infiniband/hw/hns/hns_roce_srq.c @@ -210,6 +210,7 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd, struct ib_udata *udata) { struct hns_roce_dev *hr_dev = to_hr_dev(pd->device); + struct hns_roce_ib_create_srq_resp resp = {}; struct hns_roce_srq *srq; int srq_desc_size; int srq_buf_size; @@ -378,16 +379,21 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd, srq->event = hns_roce_ib_srq_event; srq->ibsrq.ext.xrc.srq_num = srq->srqn; + resp.srqn = srq->srqn; if (udata) { - if (ib_copy_to_udata(udata, &srq->srqn, sizeof(__u32))) { + if (ib_copy_to_udata(udata, &resp, + min(udata->outlen, sizeof(resp)))) { ret = -EFAULT; - goto err_wrid; + goto err_srqc_alloc; } } return &srq->ibsrq; +err_srqc_alloc: + hns_roce_srq_free(hr_dev, srq); + err_wrid: kvfree(srq->wrid); diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index 25439da8976c..936ee1314bcd 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -1411,7 +1411,7 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port, sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr); if (sqp->tx_ring[wire_tx_ix].ah) - rdma_destroy_ah(sqp->tx_ring[wire_tx_ix].ah, 0); + mlx4_ib_destroy_ah(sqp->tx_ring[wire_tx_ix].ah, 0); sqp->tx_ring[wire_tx_ix].ah = ah; ib_dma_sync_single_for_cpu(&dev->ib_dev, sqp->tx_ring[wire_tx_ix].buf.map, @@ -1902,7 +1902,7 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work) if (wc.status == IB_WC_SUCCESS) { switch (wc.opcode) { case IB_WC_SEND: - rdma_destroy_ah(sqp->tx_ring[wc.wr_id & + mlx4_ib_destroy_ah(sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0); sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah = NULL; @@ -1931,7 
+1931,7 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work) " status = %d, wrid = 0x%llx\n", ctx->slave, wc.status, wc.wr_id); if (!MLX4_TUN_IS_RECV(wc.wr_id)) { - rdma_destroy_ah(sqp->tx_ring[wc.wr_id & + mlx4_ib_destroy_ah(sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0); sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah = NULL; diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c index 356bccc715ee..6bcc63aaa50b 100644 --- a/drivers/infiniband/hw/mlx5/cmd.c +++ b/drivers/infiniband/hw/mlx5/cmd.c @@ -345,3 +345,40 @@ int mlx5_cmd_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id, counter_set_id); return err; } + +int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb, + u16 opmod, u8 port) +{ + int outlen = MLX5_ST_SZ_BYTES(mad_ifc_out); + int inlen = MLX5_ST_SZ_BYTES(mad_ifc_in); + int err = -ENOMEM; + void *data; + void *resp; + u32 *out; + u32 *in; + + in = kzalloc(inlen, GFP_KERNEL); + out = kzalloc(outlen, GFP_KERNEL); + if (!in || !out) + goto out; + + MLX5_SET(mad_ifc_in, in, opcode, MLX5_CMD_OP_MAD_IFC); + MLX5_SET(mad_ifc_in, in, op_mod, opmod); + MLX5_SET(mad_ifc_in, in, port, port); + + data = MLX5_ADDR_OF(mad_ifc_in, in, mad); + memcpy(data, inb, MLX5_FLD_SZ_BYTES(mad_ifc_in, mad)); + + err = mlx5_cmd_exec(dev, in, inlen, out, outlen); + if (err) + goto out; + + resp = MLX5_ADDR_OF(mad_ifc_out, out, response_mad_packet); + memcpy(outb, resp, + MLX5_FLD_SZ_BYTES(mad_ifc_out, response_mad_packet)); + +out: + kfree(out); + kfree(in); + return err; +} diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h index 1e76dc67a369..923a7b93f507 100644 --- a/drivers/infiniband/hw/mlx5/cmd.h +++ b/drivers/infiniband/hw/mlx5/cmd.h @@ -63,4 +63,6 @@ int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid); int mlx5_cmd_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn, u16 uid); int mlx5_cmd_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id, u16 uid); +int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb, + u16 opmod, u8 port); #endif /* MLX5_IB_CMD_H */ diff --git a/drivers/infiniband/hw/mlx5/flow.c b/drivers/infiniband/hw/mlx5/flow.c index e8a1e4498e3f..798591a18484 100644 --- a/drivers/infiniband/hw/mlx5/flow.c +++ b/drivers/infiniband/hw/mlx5/flow.c @@ -630,8 +630,7 @@ const struct uapi_definition mlx5_ib_flow_defs[] = { UAPI_DEF_IS_OBJ_SUPPORTED(flow_is_supported)), UAPI_DEF_CHAIN_OBJ_TREE( UVERBS_OBJECT_FLOW, - &mlx5_ib_fs, - UAPI_DEF_IS_OBJ_SUPPORTED(flow_is_supported)), + &mlx5_ib_fs), UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION, &mlx5_ib_flow_actions), {}, diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c index 46a9ddc8ca56..4700cffb5a00 100644 --- a/drivers/infiniband/hw/mlx5/ib_rep.c +++ b/drivers/infiniband/hw/mlx5/ib_rep.c @@ -3,10 +3,11 @@ * Copyright (c) 2018 Mellanox Technologies. All rights reserved. 
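/*
 * [Editor's note] mlx5_cmd_mad_ifc() above follows the common
 * allocate-everything / single-exit-label shape: err starts out as
 * -ENOMEM, a failed allocation jumps straight to the unified cleanup, and
 * freeing a NULL pointer is harmless. Distilled into plain C:
 */
#include <errno.h>
#include <stdlib.h>
#include <string.h>

static int run_cmd(const void *inb, size_t inlen, void *outb, size_t outlen)
{
        int err = -ENOMEM;
        unsigned char *in = calloc(1, inlen);
        unsigned char *out = calloc(1, outlen);

        if (!in || !out)
                goto out;

        memcpy(in, inb, inlen);
        err = 0;                /* stand-in for the real command execution */
        if (!err)
                memcpy(outb, out, outlen);
out:
        free(out);              /* free(NULL) is a no-op, so this is safe */
        free(in);
        return err;
}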
*/ +#include <linux/mlx5/vport.h> #include "ib_rep.h" #include "srq.h" -static const struct mlx5_ib_profile rep_profile = { +static const struct mlx5_ib_profile vf_rep_profile = { STAGE_CREATE(MLX5_IB_STAGE_INIT, mlx5_ib_stage_init_init, mlx5_ib_stage_init_cleanup), @@ -46,30 +47,16 @@ static const struct mlx5_ib_profile rep_profile = { }; static int -mlx5_ib_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) -{ - struct mlx5_ib_dev *ibdev; - - ibdev = mlx5_ib_rep_to_dev(rep); - if (!__mlx5_ib_add(ibdev, ibdev->profile)) - return -EINVAL; - return 0; -} - -static void -mlx5_ib_nic_rep_unload(struct mlx5_eswitch_rep *rep) -{ - struct mlx5_ib_dev *ibdev; - - ibdev = mlx5_ib_rep_to_dev(rep); - __mlx5_ib_remove(ibdev, ibdev->profile, MLX5_IB_STAGE_MAX); -} - -static int mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) { + const struct mlx5_ib_profile *profile; struct mlx5_ib_dev *ibdev; + if (rep->vport == MLX5_VPORT_UPLINK) + profile = &uplink_rep_profile; + else + profile = &vf_rep_profile; + ibdev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*ibdev)); if (!ibdev) return -ENOMEM; @@ -78,7 +65,7 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) ibdev->mdev = dev; ibdev->num_ports = max(MLX5_CAP_GEN(dev, num_ports), MLX5_CAP_GEN(dev, num_vhca_ports)); - if (!__mlx5_ib_add(ibdev, &rep_profile)) + if (!__mlx5_ib_add(ibdev, profile)) return -EINVAL; rep->rep_if[REP_IB].priv = ibdev; @@ -105,53 +92,23 @@ static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep) return mlx5_ib_rep_to_dev(rep); } -static void mlx5_ib_rep_register_vf_vports(struct mlx5_ib_dev *dev) -{ - struct mlx5_eswitch *esw = dev->mdev->priv.eswitch; - int total_vfs = MLX5_TOTAL_VPORTS(dev->mdev); - int vport; - - for (vport = 1; vport < total_vfs; vport++) { - struct mlx5_eswitch_rep_if rep_if = {}; - - rep_if.load = mlx5_ib_vport_rep_load; - rep_if.unload = mlx5_ib_vport_rep_unload; - rep_if.get_proto_dev = mlx5_ib_vport_get_proto_dev; - mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_IB); - } -} - -static void mlx5_ib_rep_unregister_vf_vports(struct mlx5_ib_dev *dev) +void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev) { - struct mlx5_eswitch *esw = dev->mdev->priv.eswitch; - int total_vfs = MLX5_TOTAL_VPORTS(dev->mdev); - int vport; - - for (vport = 1; vport < total_vfs; vport++) - mlx5_eswitch_unregister_vport_rep(esw, vport, REP_IB); -} - -void mlx5_ib_register_vport_reps(struct mlx5_ib_dev *dev) -{ - struct mlx5_eswitch *esw = dev->mdev->priv.eswitch; + struct mlx5_eswitch *esw = mdev->priv.eswitch; struct mlx5_eswitch_rep_if rep_if = {}; - rep_if.load = mlx5_ib_nic_rep_load; - rep_if.unload = mlx5_ib_nic_rep_unload; + rep_if.load = mlx5_ib_vport_rep_load; + rep_if.unload = mlx5_ib_vport_rep_unload; rep_if.get_proto_dev = mlx5_ib_vport_get_proto_dev; - rep_if.priv = dev; - - mlx5_eswitch_register_vport_rep(esw, 0, &rep_if, REP_IB); - mlx5_ib_rep_register_vf_vports(dev); + mlx5_eswitch_register_vport_reps(esw, &rep_if, REP_IB); } -void mlx5_ib_unregister_vport_reps(struct mlx5_ib_dev *dev) +void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev) { - struct mlx5_eswitch *esw = dev->mdev->priv.eswitch; + struct mlx5_eswitch *esw = mdev->priv.eswitch; - mlx5_ib_rep_unregister_vf_vports(dev); /* VFs vports */ - mlx5_eswitch_unregister_vport_rep(esw, 0, REP_IB); /* UPLINK PF*/ + mlx5_eswitch_unregister_vport_reps(esw, REP_IB); } u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw) diff --git 
a/drivers/infiniband/hw/mlx5/ib_rep.h b/drivers/infiniband/hw/mlx5/ib_rep.h index 2ba73636a2fb..798d41e61fb4 100644 --- a/drivers/infiniband/hw/mlx5/ib_rep.h +++ b/drivers/infiniband/hw/mlx5/ib_rep.h @@ -10,14 +10,16 @@ #include "mlx5_ib.h" #ifdef CONFIG_MLX5_ESWITCH +extern const struct mlx5_ib_profile uplink_rep_profile; + u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw); struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw, int vport_index); struct mlx5_ib_dev *mlx5_ib_get_uplink_ibdev(struct mlx5_eswitch *esw); struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw, int vport_index); -void mlx5_ib_register_vport_reps(struct mlx5_ib_dev *dev); -void mlx5_ib_unregister_vport_reps(struct mlx5_ib_dev *dev); +void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev); +void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev); int create_flow_rule_vport_sq(struct mlx5_ib_dev *dev, struct mlx5_ib_sq *sq); struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw, @@ -48,8 +50,8 @@ struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw, return NULL; } -static inline void mlx5_ib_register_vport_reps(struct mlx5_ib_dev *dev) {} -static inline void mlx5_ib_unregister_vport_reps(struct mlx5_ib_dev *dev) {} +static inline void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev) {} +static inline void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev) {} static inline int create_flow_rule_vport_sq(struct mlx5_ib_dev *dev, struct mlx5_ib_sq *sq) { diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c index 558638468edb..6c529e6f3a01 100644 --- a/drivers/infiniband/hw/mlx5/mad.c +++ b/drivers/infiniband/hw/mlx5/mad.c @@ -36,6 +36,7 @@ #include <rdma/ib_smi.h> #include <rdma/ib_pma.h> #include "mlx5_ib.h" +#include "cmd.h" enum { MLX5_IB_VENDOR_CLASS1 = 0x9, @@ -51,9 +52,10 @@ static bool can_do_mad_ifc(struct mlx5_ib_dev *dev, u8 port_num, return dev->mdev->port_caps[port_num - 1].has_smi; } -int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey, - u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh, - const void *in_mad, void *response_mad) +static int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, + int ignore_bkey, u8 port, const struct ib_wc *in_wc, + const struct ib_grh *in_grh, const void *in_mad, + void *response_mad) { u8 op_modifier = 0; @@ -68,7 +70,8 @@ int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey, if (ignore_bkey || !in_wc) op_modifier |= 0x2; - return mlx5_core_mad_ifc(dev->mdev, in_mad, response_mad, op_modifier, port); + return mlx5_cmd_mad_ifc(dev->mdev, in_mad, response_mad, op_modifier, + port); } static int process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 94fe253d4956..581ae11e2fc9 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -331,8 +331,8 @@ out: spin_unlock(&port->mp.mpi_lock); } -static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed, - u8 *active_width) +static int translate_eth_legacy_proto_oper(u32 eth_proto_oper, u8 *active_speed, + u8 *active_width) { switch (eth_proto_oper) { case MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII): @@ -389,10 +389,66 @@ static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed, return 0; } +static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u8 *active_speed, + u8 *active_width) +{ + switch 
(eth_proto_oper) { + case MLX5E_PROT_MASK(MLX5E_SGMII_100M): + case MLX5E_PROT_MASK(MLX5E_1000BASE_X_SGMII): + *active_width = IB_WIDTH_1X; + *active_speed = IB_SPEED_SDR; + break; + case MLX5E_PROT_MASK(MLX5E_5GBASE_R): + *active_width = IB_WIDTH_1X; + *active_speed = IB_SPEED_DDR; + break; + case MLX5E_PROT_MASK(MLX5E_10GBASE_XFI_XAUI_1): + *active_width = IB_WIDTH_1X; + *active_speed = IB_SPEED_QDR; + break; + case MLX5E_PROT_MASK(MLX5E_40GBASE_XLAUI_4_XLPPI_4): + *active_width = IB_WIDTH_4X; + *active_speed = IB_SPEED_QDR; + break; + case MLX5E_PROT_MASK(MLX5E_25GAUI_1_25GBASE_CR_KR): + *active_width = IB_WIDTH_1X; + *active_speed = IB_SPEED_EDR; + break; + case MLX5E_PROT_MASK(MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2): + case MLX5E_PROT_MASK(MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR): + *active_width = IB_WIDTH_1X; + *active_speed = IB_SPEED_HDR; + break; + case MLX5E_PROT_MASK(MLX5E_100GAUI_2_100GBASE_CR2_KR2): + *active_width = IB_WIDTH_2X; + *active_speed = IB_SPEED_HDR; + break; + case MLX5E_PROT_MASK(MLX5E_200GAUI_4_200GBASE_CR4_KR4): + *active_width = IB_WIDTH_4X; + *active_speed = IB_SPEED_HDR; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed, + u8 *active_width, bool ext) +{ + return ext ? + translate_eth_ext_proto_oper(eth_proto_oper, active_speed, + active_width) : + translate_eth_legacy_proto_oper(eth_proto_oper, active_speed, + active_width); +} + static int mlx5_query_port_roce(struct ib_device *device, u8 port_num, struct ib_port_attr *props) { struct mlx5_ib_dev *dev = to_mdev(device); + u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0}; struct mlx5_core_dev *mdev; struct net_device *ndev, *upper; enum ib_mtu ndev_ib_mtu; @@ -400,6 +456,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num, u16 qkey_viol_cntr; u32 eth_prot_oper; u8 mdev_port_num; + bool ext; int err; mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num); @@ -416,16 +473,18 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num, /* Possible bad flows are checked before filling out props so in case * of an error it will still be zeroed out. 
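/*
 * [Editor's note] translate_eth_proto_oper() above is now a thin
 * dispatcher: a single capability bit (ptys_extended_ethernet) picks the
 * legacy or the extended speed table, and because both callees share one
 * signature the call sites never care which table was consulted. The
 * shape of that dispatch, with illustrative names:
 */
typedef int (*xlate_fn)(unsigned int proto_oper, unsigned char *speed,
                        unsigned char *width);

static int translate(unsigned int proto_oper, unsigned char *speed,
                     unsigned char *width, int ext,
                     xlate_fn legacy, xlate_fn extended)
{
        return ext ? extended(proto_oper, speed, width)
                   : legacy(proto_oper, speed, width);
}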
*/ - err = mlx5_query_port_eth_proto_oper(mdev, &eth_prot_oper, - mdev_port_num); + err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, + mdev_port_num); if (err) goto out; + ext = MLX5_CAP_PCAM_FEATURE(dev->mdev, ptys_extended_ethernet); + eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper); props->active_width = IB_WIDTH_4X; props->active_speed = IB_SPEED_QDR; translate_eth_proto_oper(eth_prot_oper, &props->active_speed, - &props->active_width); + &props->active_width, ext); props->port_cap_flags |= IB_PORT_CM_SUP; props->ip_gids = true; @@ -6386,7 +6445,7 @@ static const struct mlx5_ib_profile pf_profile = { mlx5_ib_stage_delay_drop_cleanup), }; -static const struct mlx5_ib_profile nic_rep_profile = { +const struct mlx5_ib_profile uplink_rep_profile = { STAGE_CREATE(MLX5_IB_STAGE_INIT, mlx5_ib_stage_init_init, mlx5_ib_stage_init_cleanup), @@ -6479,6 +6538,12 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) printk_once(KERN_INFO "%s", mlx5_version); + if (MLX5_ESWITCH_MANAGER(mdev) && + mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) { + mlx5_ib_register_vport_reps(mdev); + return mdev; + } + port_type_cap = MLX5_CAP_GEN(mdev, port_type); ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap); @@ -6493,14 +6558,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports), MLX5_CAP_GEN(mdev, num_vhca_ports)); - if (MLX5_ESWITCH_MANAGER(mdev) && - mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) { - dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0); - dev->profile = &nic_rep_profile; - mlx5_ib_register_vport_reps(dev); - return dev; - } - return __mlx5_ib_add(dev, &pf_profile); } @@ -6509,6 +6566,11 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context) struct mlx5_ib_multiport_info *mpi; struct mlx5_ib_dev *dev; + if (MLX5_ESWITCH_MANAGER(mdev) && context == mdev) { + mlx5_ib_unregister_vport_reps(mdev); + return; + } + if (mlx5_core_is_mp_slave(mdev)) { mpi = context; mutex_lock(&mlx5_ib_multiport_mutex); @@ -6520,10 +6582,7 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context) } dev = context; - if (dev->profile == &nic_rep_profile) - mlx5_ib_unregister_vport_reps(dev); - else - __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX); + __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX); ib_dealloc_device((struct ib_device *)dev); } diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index b06d3b1efea8..eedba0d2ec4b 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -587,6 +587,7 @@ struct mlx5_ib_mr { struct mlx5_ib_mr *parent; atomic_t num_leaf_free; wait_queue_head_t q_leaf_free; + struct mlx5_async_work cb_work; }; struct mlx5_ib_mw { @@ -944,6 +945,7 @@ struct mlx5_ib_dev { struct mlx5_memic memic; u16 devx_whitelist_uid; struct mlx5_srq_table srq_table; + struct mlx5_async_ctx async_ctx; }; static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq) @@ -1038,9 +1040,6 @@ void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db) void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq); void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq); void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index); -int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey, - u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh, - const void *in_mad, void 
*response_mad); struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr, u32 flags, struct ib_udata *udata); int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr); diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index fd6ea1f75085..bf2b6ea23851 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -123,9 +123,10 @@ static void update_odp_mr(struct mlx5_ib_mr *mr) } #endif -static void reg_mr_callback(int status, void *context) +static void reg_mr_callback(int status, struct mlx5_async_work *context) { - struct mlx5_ib_mr *mr = context; + struct mlx5_ib_mr *mr = + container_of(context, struct mlx5_ib_mr, cb_work); struct mlx5_ib_dev *dev = mr->dev; struct mlx5_mr_cache *cache = &dev->cache; int c = order2idx(dev, mr->order); @@ -216,9 +217,9 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num) ent->pending++; spin_unlock_irq(&ent->lock); err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey, - in, inlen, + &dev->async_ctx, in, inlen, mr->out, sizeof(mr->out), - reg_mr_callback, mr); + reg_mr_callback, &mr->cb_work); if (err) { spin_lock_irq(&ent->lock); ent->pending--; @@ -679,6 +680,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev) return -ENOMEM; } + mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx); timer_setup(&dev->delay_timer, delay_time_func, 0); for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { ent = &cache->ent[i]; @@ -725,33 +727,6 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev) return 0; } -static void wait_for_async_commands(struct mlx5_ib_dev *dev) -{ - struct mlx5_mr_cache *cache = &dev->cache; - struct mlx5_cache_ent *ent; - int total = 0; - int i; - int j; - - for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { - ent = &cache->ent[i]; - for (j = 0 ; j < 1000; j++) { - if (!ent->pending) - break; - msleep(50); - } - } - for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { - ent = &cache->ent[i]; - total += ent->pending; - } - - if (total) - mlx5_ib_warn(dev, "aborted while there are %d pending mr requests\n", total); - else - mlx5_ib_warn(dev, "done with all pending requests\n"); -} - int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev) { int i; @@ -763,12 +738,12 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev) flush_workqueue(dev->cache.wq); mlx5_mr_cache_debugfs_cleanup(dev); + mlx5_cmd_cleanup_async_ctx(&dev->async_ctx); for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) clean_keys(dev, i); destroy_workqueue(dev->cache.wq); - wait_for_async_commands(dev); del_timer_sync(&dev->delay_timer); return 0; diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index 01e0f6200631..4ee32964e1dd 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -1595,10 +1595,12 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *work) struct prefetch_mr_work *w = container_of(work, struct prefetch_mr_work, work); - if (w->dev->ib_dev.reg_state == IB_DEV_REGISTERED) + if (ib_device_try_get(&w->dev->ib_dev)) { mlx5_ib_prefetch_sg_list(w->dev, w->pf_flags, w->sg_list, w->num_sge); - + ib_device_put(&w->dev->ib_dev); + } + put_device(&w->dev->ib_dev.dev); kfree(w); } @@ -1617,15 +1619,13 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd, return mlx5_ib_prefetch_sg_list(dev, pf_flags, sg_list, num_sge); - if (dev->ib_dev.reg_state != IB_DEV_REGISTERED) - return -ENODEV; - work = kvzalloc(struct_size(work, sg_list, num_sge), GFP_KERNEL); if (!work) return -ENOMEM; memcpy(work->sg_list, sg_list, num_sge * sizeof(struct ib_sge)); + 
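/*
 * [Editor's note] reg_mr_callback() above recovers its mlx5_ib_mr from the
 * mlx5_async_work embedded in it via container_of(), so the async
 * machinery only ever passes around the small work struct. A userspace
 * rendition of the idiom:
 */
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct async_work {
        void (*done)(struct async_work *work);
};

struct mr {
        int mkey;
        struct async_work cb_work;      /* embedded member */
};

static void mr_done(struct async_work *work)
{
        struct mr *mr = container_of(work, struct mr, cb_work);

        (void)mr->mkey;         /* the full object is recovered here */
}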
get_device(&dev->ib_dev.dev); work->dev = dev; work->pf_flags = pf_flags; work->num_sge = num_sge; diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index dd2ae640bc84..7db778d96ef5 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -1912,14 +1912,16 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, } if (!check_flags_mask(ucmd.flags, + MLX5_QP_FLAG_ALLOW_SCATTER_CQE | + MLX5_QP_FLAG_BFREG_INDEX | + MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE | + MLX5_QP_FLAG_SCATTER_CQE | MLX5_QP_FLAG_SIGNATURE | - MLX5_QP_FLAG_SCATTER_CQE | - MLX5_QP_FLAG_TUNNEL_OFFLOADS | - MLX5_QP_FLAG_BFREG_INDEX | - MLX5_QP_FLAG_TYPE_DCT | - MLX5_QP_FLAG_TYPE_DCI | - MLX5_QP_FLAG_ALLOW_SCATTER_CQE | - MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE)) + MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC | + MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC | + MLX5_QP_FLAG_TUNNEL_OFFLOADS | + MLX5_QP_FLAG_TYPE_DCI | + MLX5_QP_FLAG_TYPE_DCT)) return -EINVAL; err = get_qp_user_index(to_mucontext(pd->uobject->context), diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c index 868da0ece7ba..445ea19a2ec8 100644 --- a/drivers/infiniband/hw/qib/qib_ud.c +++ b/drivers/infiniband/hw/qib/qib_ud.c @@ -512,7 +512,6 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr, opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) { wc.ex.imm_data = ohdr->u.ud.imm_data; wc.wc_flags = IB_WC_WITH_IMM; - tlen -= sizeof(u32); } else if (opcode == IB_OPCODE_UD_SEND_ONLY) { wc.ex.imm_data = 0; wc.wc_flags = 0; diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index a1bd8cfc2c25..c6cc3e4ab71d 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -2910,6 +2910,8 @@ send: goto op_err; if (!ret) goto rnr_nak; + if (wqe->length > qp->r_len) + goto inv_err; break; case IB_WR_RDMA_WRITE_WITH_IMM: @@ -3078,7 +3080,10 @@ op_err: goto err; inv_err: - send_status = IB_WC_REM_INV_REQ_ERR; + send_status = + sqp->ibqp.qp_type == IB_QPT_RC ? 
+ IB_WC_REM_INV_REQ_ERR : + IB_WC_SUCCESS; wc.status = IB_WC_LOC_QP_OP_ERR; goto err; diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 1da119d901a9..73e808c1e6ad 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -248,7 +248,6 @@ struct ipoib_cm_tx { struct list_head list; struct net_device *dev; struct ipoib_neigh *neigh; - struct ipoib_path *path; struct ipoib_tx_buf *tx_ring; unsigned int tx_head; unsigned int tx_tail; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 0428e01e8f69..aa9dcfc36cd3 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -1312,7 +1312,6 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path neigh->cm = tx; tx->neigh = neigh; - tx->path = path; tx->dev = dev; list_add(&tx->list, &priv->cm.start_list); set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags); @@ -1371,7 +1370,7 @@ static void ipoib_cm_tx_start(struct work_struct *work) neigh->daddr + QPN_AND_OPTIONS_OFFSET); goto free_neigh; } - memcpy(&pathrec, &p->path->pathrec, sizeof(pathrec)); + memcpy(&pathrec, &path->pathrec, sizeof(pathrec)); spin_unlock_irqrestore(&priv->lock, flags); netif_tx_unlock_bh(dev); diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index 4713957b0cbb..a878351f1643 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig @@ -420,7 +420,7 @@ config KEYBOARD_MPR121 config KEYBOARD_SNVS_PWRKEY tristate "IMX SNVS Power Key Driver" - depends on SOC_IMX6SX + depends on SOC_IMX6SX || SOC_IMX7D depends on OF help This is the snvs powerkey driver for the Freescale i.MX application diff --git a/drivers/input/keyboard/cap11xx.c b/drivers/input/keyboard/cap11xx.c index 312916f99597..73686c2460ce 100644 --- a/drivers/input/keyboard/cap11xx.c +++ b/drivers/input/keyboard/cap11xx.c @@ -75,9 +75,7 @@ struct cap11xx_led { struct cap11xx_priv *priv; struct led_classdev cdev; - struct work_struct work; u32 reg; - enum led_brightness new_brightness; }; #endif @@ -233,30 +231,21 @@ static void cap11xx_input_close(struct input_dev *idev) } #ifdef CONFIG_LEDS_CLASS -static void cap11xx_led_work(struct work_struct *work) +static int cap11xx_led_set(struct led_classdev *cdev, + enum led_brightness value) { - struct cap11xx_led *led = container_of(work, struct cap11xx_led, work); + struct cap11xx_led *led = container_of(cdev, struct cap11xx_led, cdev); struct cap11xx_priv *priv = led->priv; - int value = led->new_brightness; /* - * All LEDs share the same duty cycle as this is a HW limitation. - * Brightness levels per LED are either 0 (OFF) and 1 (ON). + * All LEDs share the same duty cycle as this is a HW + * limitation. Brightness levels per LED are either + * 0 (OFF) and 1 (ON). */ - regmap_update_bits(priv->regmap, CAP11XX_REG_LED_OUTPUT_CONTROL, - BIT(led->reg), value ? BIT(led->reg) : 0); -} - -static void cap11xx_led_set(struct led_classdev *cdev, - enum led_brightness value) -{ - struct cap11xx_led *led = container_of(cdev, struct cap11xx_led, cdev); - - if (led->new_brightness == value) - return; - - led->new_brightness = value; - schedule_work(&led->work); + return regmap_update_bits(priv->regmap, + CAP11XX_REG_LED_OUTPUT_CONTROL, + BIT(led->reg), + value ? 
BIT(led->reg) : 0); } static int cap11xx_init_leds(struct device *dev, @@ -299,7 +288,7 @@ static int cap11xx_init_leds(struct device *dev, led->cdev.default_trigger = of_get_property(child, "linux,default-trigger", NULL); led->cdev.flags = 0; - led->cdev.brightness_set = cap11xx_led_set; + led->cdev.brightness_set_blocking = cap11xx_led_set; led->cdev.max_brightness = 1; led->cdev.brightness = LED_OFF; @@ -312,8 +301,6 @@ static int cap11xx_init_leds(struct device *dev, led->reg = reg; led->priv = priv; - INIT_WORK(&led->work, cap11xx_led_work); - error = devm_led_classdev_register(dev, &led->cdev); if (error) { of_node_put(child); diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c index 403452ef00e6..3d1cb7bf5e35 100644 --- a/drivers/input/keyboard/matrix_keypad.c +++ b/drivers/input/keyboard/matrix_keypad.c @@ -222,7 +222,7 @@ static void matrix_keypad_stop(struct input_dev *dev) keypad->stopped = true; spin_unlock_irq(&keypad->lock); - flush_work(&keypad->work.work); + flush_delayed_work(&keypad->work); /* * matrix_keypad_scan() will leave IRQs enabled; * we should disable them now. diff --git a/drivers/input/keyboard/qt2160.c b/drivers/input/keyboard/qt2160.c index 43b86482dda0..d466bc07aebb 100644 --- a/drivers/input/keyboard/qt2160.c +++ b/drivers/input/keyboard/qt2160.c @@ -58,10 +58,9 @@ static unsigned char qt2160_key2code[] = { struct qt2160_led { struct qt2160_data *qt2160; struct led_classdev cdev; - struct work_struct work; char name[32]; int id; - enum led_brightness new_brightness; + enum led_brightness brightness; }; #endif @@ -74,7 +73,6 @@ struct qt2160_data { u16 key_matrix; #ifdef CONFIG_LEDS_CLASS struct qt2160_led leds[QT2160_NUM_LEDS_X]; - struct mutex led_lock; #endif }; @@ -83,46 +81,39 @@ static int qt2160_write(struct i2c_client *client, u8 reg, u8 data); #ifdef CONFIG_LEDS_CLASS -static void qt2160_led_work(struct work_struct *work) +static int qt2160_led_set(struct led_classdev *cdev, + enum led_brightness value) { - struct qt2160_led *led = container_of(work, struct qt2160_led, work); + struct qt2160_led *led = container_of(cdev, struct qt2160_led, cdev); struct qt2160_data *qt2160 = led->qt2160; struct i2c_client *client = qt2160->client; - int value = led->new_brightness; u32 drive, pwmen; - mutex_lock(&qt2160->led_lock); - - drive = qt2160_read(client, QT2160_CMD_DRIVE_X); - pwmen = qt2160_read(client, QT2160_CMD_PWMEN_X); - if (value != LED_OFF) { - drive |= (1 << led->id); - pwmen |= (1 << led->id); - - } else { - drive &= ~(1 << led->id); - pwmen &= ~(1 << led->id); - } - qt2160_write(client, QT2160_CMD_DRIVE_X, drive); - qt2160_write(client, QT2160_CMD_PWMEN_X, pwmen); + if (value != led->brightness) { + drive = qt2160_read(client, QT2160_CMD_DRIVE_X); + pwmen = qt2160_read(client, QT2160_CMD_PWMEN_X); + if (value != LED_OFF) { + drive |= BIT(led->id); + pwmen |= BIT(led->id); - /* - * Changing this register will change the brightness - * of every LED in the qt2160. It's a HW limitation. - */ - if (value != LED_OFF) - qt2160_write(client, QT2160_CMD_PWM_DUTY, value); + } else { + drive &= ~BIT(led->id); + pwmen &= ~BIT(led->id); + } + qt2160_write(client, QT2160_CMD_DRIVE_X, drive); + qt2160_write(client, QT2160_CMD_PWMEN_X, pwmen); - mutex_unlock(&qt2160->led_lock); -} + /* + * Changing this register will change the brightness + * of every LED in the qt2160. It's a HW limitation. 
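/*
 * [Editor's note] With brightness_set_blocking(), the LED core invokes the
 * handler from a context that may sleep, so the cap11xx/qt2160 private
 * work items become unnecessary and the slow I2C writes can happen
 * inline. The qt2160 version also caches the last brightness so redundant
 * writes are skipped; the shape of that, with hypothetical names:
 */
struct led {
        int brightness;         /* last value actually written */
};

int write_led_hw(struct led *led, int value);   /* assumed; may sleep */

static int led_set_blocking(struct led *led, int value)
{
        int err = 0;

        if (value != led->brightness) {
                err = write_led_hw(led, value); /* direct call, no work item */
                if (!err)
                        led->brightness = value;
        }
        return err;
}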
+ */ + if (value != LED_OFF) + qt2160_write(client, QT2160_CMD_PWM_DUTY, value); -static void qt2160_led_set(struct led_classdev *cdev, - enum led_brightness value) -{ - struct qt2160_led *led = container_of(cdev, struct qt2160_led, cdev); + led->brightness = value; + } - led->new_brightness = value; - schedule_work(&led->work); + return 0; } #endif /* CONFIG_LEDS_CLASS */ @@ -293,20 +284,16 @@ static int qt2160_register_leds(struct qt2160_data *qt2160) int ret; int i; - mutex_init(&qt2160->led_lock); - for (i = 0; i < QT2160_NUM_LEDS_X; i++) { struct qt2160_led *led = &qt2160->leds[i]; snprintf(led->name, sizeof(led->name), "qt2160:x%d", i); led->cdev.name = led->name; - led->cdev.brightness_set = qt2160_led_set; + led->cdev.brightness_set_blocking = qt2160_led_set; led->cdev.brightness = LED_OFF; led->id = i; led->qt2160 = qt2160; - INIT_WORK(&led->work, qt2160_led_work); - ret = led_classdev_register(&client->dev, &led->cdev); if (ret < 0) return ret; @@ -324,10 +311,8 @@ static void qt2160_unregister_leds(struct qt2160_data *qt2160) { int i; - for (i = 0; i < QT2160_NUM_LEDS_X; i++) { + for (i = 0; i < QT2160_NUM_LEDS_X; i++) led_classdev_unregister(&qt2160->leds[i].cdev); - cancel_work_sync(&qt2160->leds[i].work); - } } #else diff --git a/drivers/input/keyboard/st-keyscan.c b/drivers/input/keyboard/st-keyscan.c index babcfb165e4f..3b85631fde91 100644 --- a/drivers/input/keyboard/st-keyscan.c +++ b/drivers/input/keyboard/st-keyscan.c @@ -153,6 +153,8 @@ static int keyscan_probe(struct platform_device *pdev) input_dev->id.bustype = BUS_HOST; + keypad_data->input_dev = input_dev; + error = keypad_matrix_key_parse_dt(keypad_data); if (error) return error; @@ -168,8 +170,6 @@ static int keyscan_probe(struct platform_device *pdev) input_set_drvdata(input_dev, keypad_data); - keypad_data->input_dev = input_dev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); keypad_data->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(keypad_data->base)) diff --git a/drivers/input/misc/apanel.c b/drivers/input/misc/apanel.c index 094bddf56755..c1e66f45d552 100644 --- a/drivers/input/misc/apanel.c +++ b/drivers/input/misc/apanel.c @@ -22,7 +22,6 @@ #include <linux/io.h> #include <linux/input-polldev.h> #include <linux/i2c.h> -#include <linux/workqueue.h> #include <linux/leds.h> #define APANEL_NAME "Fujitsu Application Panel" @@ -59,8 +58,6 @@ struct apanel { struct i2c_client *client; unsigned short keymap[MAX_PANEL_KEYS]; u16 nkeys; - u16 led_bits; - struct work_struct led_work; struct led_classdev mail_led; }; @@ -109,25 +106,13 @@ static void apanel_poll(struct input_polled_dev *ipdev) report_key(idev, ap->keymap[i]); } -/* Track state changes of LED */ -static void led_update(struct work_struct *work) -{ - struct apanel *ap = container_of(work, struct apanel, led_work); - - i2c_smbus_write_word_data(ap->client, 0x10, ap->led_bits); -} - -static void mail_led_set(struct led_classdev *led, +static int mail_led_set(struct led_classdev *led, enum led_brightness value) { struct apanel *ap = container_of(led, struct apanel, mail_led); + u16 led_bits = value != LED_OFF ? 
0x8000 : 0x0000; - if (value != LED_OFF) - ap->led_bits |= 0x8000; - else - ap->led_bits &= ~0x8000; - - schedule_work(&ap->led_work); + return i2c_smbus_write_word_data(ap->client, 0x10, led_bits); } static int apanel_remove(struct i2c_client *client) @@ -179,7 +164,7 @@ static struct apanel apanel = { }, .mail_led = { .name = "mail:blue", - .brightness_set = mail_led_set, + .brightness_set_blocking = mail_led_set, }, }; @@ -235,7 +220,6 @@ static int apanel_probe(struct i2c_client *client, if (err) goto out3; - INIT_WORK(&ap->led_work, led_update); if (device_chip[APANEL_DEV_LED] != CHIP_NONE) { err = led_classdev_register(&client->dev, &ap->mail_led); if (err) diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c index 1efcfdf9f8a8..dd9dd4e40827 100644 --- a/drivers/input/misc/bma150.c +++ b/drivers/input/misc/bma150.c @@ -481,13 +481,14 @@ static int bma150_register_input_device(struct bma150_data *bma150) idev->close = bma150_irq_close; input_set_drvdata(idev, bma150); + bma150->input = idev; + error = input_register_device(idev); if (error) { input_free_device(idev); return error; } - bma150->input = idev; return 0; } @@ -510,15 +511,15 @@ static int bma150_register_polled_device(struct bma150_data *bma150) bma150_init_input_device(bma150, ipoll_dev->input); + bma150->input_polled = ipoll_dev; + bma150->input = ipoll_dev->input; + error = input_register_polled_device(ipoll_dev); if (error) { input_free_polled_device(ipoll_dev); return error; } - bma150->input_polled = ipoll_dev; - bma150->input = ipoll_dev->input; - return 0; } diff --git a/drivers/input/misc/pwm-vibra.c b/drivers/input/misc/pwm-vibra.c index 55da191ae550..dbb6d9e1b947 100644 --- a/drivers/input/misc/pwm-vibra.c +++ b/drivers/input/misc/pwm-vibra.c @@ -34,6 +34,7 @@ struct pwm_vibrator { struct work_struct play_work; u16 level; u32 direction_duty_cycle; + bool vcc_on; }; static int pwm_vibrator_start(struct pwm_vibrator *vibrator) @@ -42,10 +43,13 @@ static int pwm_vibrator_start(struct pwm_vibrator *vibrator) struct pwm_state state; int err; - err = regulator_enable(vibrator->vcc); - if (err) { - dev_err(pdev, "failed to enable regulator: %d", err); - return err; + if (!vibrator->vcc_on) { + err = regulator_enable(vibrator->vcc); + if (err) { + dev_err(pdev, "failed to enable regulator: %d", err); + return err; + } + vibrator->vcc_on = true; } pwm_get_state(vibrator->pwm, &state); @@ -76,11 +80,14 @@ static int pwm_vibrator_start(struct pwm_vibrator *vibrator) static void pwm_vibrator_stop(struct pwm_vibrator *vibrator) { - regulator_disable(vibrator->vcc); - if (vibrator->pwm_dir) pwm_disable(vibrator->pwm_dir); pwm_disable(vibrator->pwm); + + if (vibrator->vcc_on) { + regulator_disable(vibrator->vcc); + vibrator->vcc_on = false; + } } static void pwm_vibrator_play_work(struct work_struct *work) diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index f322a1768fbb..225ae6980182 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -1336,7 +1336,6 @@ MODULE_DEVICE_TABLE(i2c, elan_id); static const struct acpi_device_id elan_acpi_id[] = { { "ELAN0000", 0 }, { "ELAN0100", 0 }, - { "ELAN0501", 0 }, { "ELAN0600", 0 }, { "ELAN0602", 0 }, { "ELAN0605", 0 }, @@ -1346,6 +1345,7 @@ static const struct acpi_device_id elan_acpi_id[] = { { "ELAN060C", 0 }, { "ELAN0611", 0 }, { "ELAN0612", 0 }, + { "ELAN0617", 0 }, { "ELAN0618", 0 }, { "ELAN061C", 0 }, { "ELAN061D", 0 }, diff --git a/drivers/input/mouse/elantech.c 
b/drivers/input/mouse/elantech.c index 9fe075c137dc..a7f8b1614559 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -1119,6 +1119,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse, * Asus UX31 0x361f00 20, 15, 0e clickpad * Asus UX32VD 0x361f02 00, 15, 0e clickpad * Avatar AVIU-145A2 0x361f00 ? clickpad + * Fujitsu CELSIUS H760 0x570f02 40, 14, 0c 3 hw buttons (**) + * Fujitsu CELSIUS H780 0x5d0f02 41, 16, 0d 3 hw buttons (**) * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons * Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons @@ -1171,6 +1173,13 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = { DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"), }, }, + { + /* Fujitsu H780 also has a middle button */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H780"), + }, + }, #endif { } }; diff --git a/drivers/input/serio/olpc_apsp.c b/drivers/input/serio/olpc_apsp.c index bae08226e3d9..a7cfab3db9ee 100644 --- a/drivers/input/serio/olpc_apsp.c +++ b/drivers/input/serio/olpc_apsp.c @@ -23,7 +23,6 @@ #include <linux/of.h> #include <linux/slab.h> #include <linux/delay.h> -#include <linux/clk.h> /* * The OLPC XO-1.75 and XO-4 laptops do not have a hardware PS/2 controller. @@ -75,7 +74,6 @@ struct olpc_apsp { struct serio *kbio; struct serio *padio; void __iomem *base; - struct clk *clk; int open_count; int irq; }; @@ -148,17 +146,11 @@ static int olpc_apsp_open(struct serio *port) struct olpc_apsp *priv = port->port_data; unsigned int tmp; unsigned long l; - int error; if (priv->open_count++ == 0) { - error = clk_prepare_enable(priv->clk); - if (error) - return error; - l = readl(priv->base + COMMAND_FIFO_STATUS); if (!(l & CMD_STS_MASK)) { dev_err(priv->dev, "SP cannot accept commands.\n"); - clk_disable_unprepare(priv->clk); return -EIO; } @@ -179,8 +171,6 @@ static void olpc_apsp_close(struct serio *port) /* Disable interrupt 0 */ tmp = readl(priv->base + PJ_INTERRUPT_MASK); writel(tmp | INT_0, priv->base + PJ_INTERRUPT_MASK); - - clk_disable_unprepare(priv->clk); } } @@ -208,10 +198,6 @@ static int olpc_apsp_probe(struct platform_device *pdev) if (priv->irq < 0) return priv->irq; - priv->clk = devm_clk_get(&pdev->dev, "sp"); - if (IS_ERR(priv->clk)) - return PTR_ERR(priv->clk); - /* KEYBOARD */ kb_serio = kzalloc(sizeof(struct serio), GFP_KERNEL); if (!kb_serio) diff --git a/drivers/input/serio/ps2-gpio.c b/drivers/input/serio/ps2-gpio.c index c62cceb97bb1..5e8d8384aa2a 100644 --- a/drivers/input/serio/ps2-gpio.c +++ b/drivers/input/serio/ps2-gpio.c @@ -76,6 +76,7 @@ static void ps2_gpio_close(struct serio *serio) { struct ps2_gpio_data *drvdata = serio->port_data; + flush_delayed_work(&drvdata->tx_work); disable_irq(drvdata->irq); } diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 87ba23a75b38..2a7b78bb98b4 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -1991,16 +1991,13 @@ static void do_attach(struct iommu_dev_data *dev_data, static void do_detach(struct iommu_dev_data *dev_data) { + struct protection_domain *domain = dev_data->domain; struct amd_iommu *iommu; u16 alias; iommu = amd_iommu_rlookup_table[dev_data->devid]; alias = dev_data->alias; - /* decrease reference counters */ - dev_data->domain->dev_iommu[iommu->index] -= 1; - dev_data->domain->dev_cnt -= 1; - /* Update data structures */ dev_data->domain = NULL; list_del(&dev_data->list); @@ -2010,6 +2007,16 @@ 
static void do_detach(struct iommu_dev_data *dev_data) /* Flush the DTE entry */ device_flush_dte(dev_data); + + /* Flush IOTLB */ + domain_flush_tlb_pde(domain); + + /* Wait for the flushes to finish */ + domain_flush_complete(domain); + + /* decrease reference counters - needs to happen after the flushes */ + domain->dev_iommu[iommu->index] -= 1; + domain->dev_cnt -= 1; } /* @@ -2617,13 +2624,13 @@ out_unmap: bus_addr = address + s->dma_address + (j << PAGE_SHIFT); iommu_unmap_page(domain, bus_addr, PAGE_SIZE); - if (--mapped_pages) + if (--mapped_pages == 0) goto out_free_iova; } } out_free_iova: - free_iova_fast(&dma_dom->iovad, address, npages); + free_iova_fast(&dma_dom->iovad, address >> PAGE_SHIFT, npages); out_err: return 0; diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 2bd9ac285c0d..78188bf7e90d 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -363,7 +363,7 @@ static int dmar_map_gfx = 1; static int dmar_forcedac; static int intel_iommu_strict; static int intel_iommu_superpage = 1; -static int intel_iommu_sm = 1; +static int intel_iommu_sm; static int iommu_identity_mapping; #define IDENTMAP_ALL 1 @@ -456,9 +456,9 @@ static int __init intel_iommu_setup(char *str) } else if (!strncmp(str, "sp_off", 6)) { pr_info("Disable supported super page\n"); intel_iommu_superpage = 0; - } else if (!strncmp(str, "sm_off", 6)) { - pr_info("Intel-IOMMU: disable scalable mode support\n"); - intel_iommu_sm = 0; + } else if (!strncmp(str, "sm_on", 5)) { + pr_info("Intel-IOMMU: scalable mode supported\n"); + intel_iommu_sm = 1; } else if (!strncmp(str, "tboot_noforce", 13)) { printk(KERN_INFO "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n"); @@ -5294,7 +5294,7 @@ static void intel_iommu_put_resv_regions(struct device *dev, struct iommu_resv_region *entry, *next; list_for_each_entry_safe(entry, next, head, list) { - if (entry->type == IOMMU_RESV_RESERVED) + if (entry->type == IOMMU_RESV_MSI) kfree(entry); } } diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c index 730f7dabcf37..7e0df67bd3e9 100644 --- a/drivers/iommu/mtk_iommu_v1.c +++ b/drivers/iommu/mtk_iommu_v1.c @@ -441,6 +441,10 @@ static int mtk_iommu_add_device(struct device *dev) iommu_spec.args_count = count; mtk_iommu_create_mapping(dev, &iommu_spec); + + /* dev->iommu_fwspec might have changed */ + fwspec = dev_iommu_fwspec_get(dev); + of_node_put(iommu_spec.np); } diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 7f2a45445b00..c3aba3fc818d 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -97,9 +97,14 @@ struct its_device; * The ITS structure - contains most of the infrastructure, with the * top-level MSI domain, the command queue, the collections, and the * list of devices writing to it. + * + * dev_alloc_lock has to be taken for device allocations, while the + * spinlock must be taken to parse data structures such as the device + * list. 
*/ struct its_node { raw_spinlock_t lock; + struct mutex dev_alloc_lock; struct list_head entry; void __iomem *base; phys_addr_t phys_base; @@ -156,6 +161,7 @@ struct its_device { void *itt; u32 nr_ites; u32 device_id; + bool shared; }; static struct { @@ -1580,6 +1586,9 @@ static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids) nr_irqs /= 2; } while (nr_irqs > 0); + if (!nr_irqs) + err = -ENOSPC; + if (err) goto out; @@ -2059,6 +2068,29 @@ static int __init allocate_lpi_tables(void) return 0; } +static u64 its_clear_vpend_valid(void __iomem *vlpi_base) +{ + u32 count = 1000000; /* 1s! */ + bool clean; + u64 val; + + val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); + val &= ~GICR_VPENDBASER_Valid; + gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); + + do { + val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); + clean = !(val & GICR_VPENDBASER_Dirty); + if (!clean) { + count--; + cpu_relax(); + udelay(1); + } + } while (!clean && count); + + return val; +} + static void its_cpu_init_lpis(void) { void __iomem *rbase = gic_data_rdist_rd_base(); @@ -2144,6 +2176,30 @@ static void its_cpu_init_lpis(void) val |= GICR_CTLR_ENABLE_LPIS; writel_relaxed(val, rbase + GICR_CTLR); + if (gic_rdists->has_vlpis) { + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + + /* + * It's possible for a CPU to receive VLPIs before it is + * scheduled as a vPE, especially for the first CPU, and the + * VLPI with INTID larger than 2^(IDbits+1) will be considered + * as out of range and dropped by GIC. + * So we initialize IDbits to a known value to avoid VLPI drop. + */ + val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; + pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n", + smp_processor_id(), val); + gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); + + /* + * Also clear Valid bit of GICR_VPENDBASER, in case some + * ancient programming gets left in and has the possibility of + * corrupting memory. + */ + val = its_clear_vpend_valid(vlpi_base); + WARN_ON(val & GICR_VPENDBASER_Dirty); + } + /* Make sure the GIC has seen the above */ dsb(sy); out: @@ -2422,6 +2478,7 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev, struct its_device *its_dev; struct msi_domain_info *msi_info; u32 dev_id; + int err = 0; /* * We ignore "dev" entirely, and rely on the dev_id that has @@ -2444,6 +2501,7 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev, return -EINVAL; } + mutex_lock(&its->dev_alloc_lock); its_dev = its_find_device(its, dev_id); if (its_dev) { /* @@ -2451,18 +2509,22 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev, * another alias (PCI bridge of some sort). No need to * create the device.
*/ + its_dev->shared = true; pr_debug("Reusing ITT for devID %x\n", dev_id); goto out; } its_dev = its_create_device(its, dev_id, nvec, true); - if (!its_dev) - return -ENOMEM; + if (!its_dev) { + err = -ENOMEM; + goto out; + } pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec)); out: + mutex_unlock(&its->dev_alloc_lock); info->scratchpad[0].ptr = its_dev; - return 0; + return err; } static struct msi_domain_ops its_msi_domain_ops = { @@ -2566,6 +2628,7 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, { struct irq_data *d = irq_domain_get_irq_data(domain, virq); struct its_device *its_dev = irq_data_get_irq_chip_data(d); + struct its_node *its = its_dev->its; int i; for (i = 0; i < nr_irqs; i++) { @@ -2580,8 +2643,14 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, irq_domain_reset_irq_data(data); } - /* If all interrupts have been freed, start mopping the floor */ - if (bitmap_empty(its_dev->event_map.lpi_map, + mutex_lock(&its->dev_alloc_lock); + + /* + * If all interrupts have been freed, start mopping the + * floor. This is conditioned on the device not being shared. + */ + if (!its_dev->shared && + bitmap_empty(its_dev->event_map.lpi_map, its_dev->event_map.nr_lpis)) { its_lpi_free(its_dev->event_map.lpi_map, its_dev->event_map.lpi_base, @@ -2593,6 +2662,8 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, its_free_device(its_dev); } + mutex_unlock(&its->dev_alloc_lock); + irq_domain_free_irqs_parent(domain, virq, nr_irqs); } @@ -2755,26 +2826,11 @@ static void its_vpe_schedule(struct its_vpe *vpe) static void its_vpe_deschedule(struct its_vpe *vpe) { void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); - u32 count = 1000000; /* 1s! */ - bool clean; u64 val; - /* We're being scheduled out */ - val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); - val &= ~GICR_VPENDBASER_Valid; - gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); - - do { - val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); - clean = !(val & GICR_VPENDBASER_Dirty); - if (!clean) { - count--; - cpu_relax(); - udelay(1); - } - } while (!clean && count); + val = its_clear_vpend_valid(vlpi_base); - if (unlikely(!clean && !count)) { + if (unlikely(val & GICR_VPENDBASER_Dirty)) { pr_err_ratelimited("ITS virtual pending table not cleaning\n"); vpe->idai = false; vpe->pending_last = true; @@ -3517,6 +3573,7 @@ static int __init its_probe_one(struct resource *res, } raw_spin_lock_init(&its->lock); + mutex_init(&its->dev_alloc_lock); INIT_LIST_HEAD(&its->entry); INIT_LIST_HEAD(&its->its_device_list); typer = gic_read_typer(its_base + GITS_TYPER); diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c index 25f32e1d7764..3496b61a312a 100644 --- a/drivers/irqchip/irq-mmp.c +++ b/drivers/irqchip/irq-mmp.c @@ -34,6 +34,9 @@ #define SEL_INT_PENDING (1 << 6) #define SEL_INT_NUM_MASK 0x3f +#define MMP2_ICU_INT_ROUTE_PJ4_IRQ (1 << 5) +#define MMP2_ICU_INT_ROUTE_PJ4_FIQ (1 << 6) + struct icu_chip_data { int nr_irqs; unsigned int virq_base; @@ -190,7 +193,8 @@ static const struct mmp_intc_conf mmp_conf = { static const struct mmp_intc_conf mmp2_conf = { .conf_enable = 0x20, .conf_disable = 0x0, - .conf_mask = 0x7f, + .conf_mask = MMP2_ICU_INT_ROUTE_PJ4_IRQ | + MMP2_ICU_INT_ROUTE_PJ4_FIQ, }; static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs) diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c index 5385f5768345..27933338f7b3 100644 ---
a/drivers/irqchip/irq-xtensa-mx.c +++ b/drivers/irqchip/irq-xtensa-mx.c @@ -71,14 +71,17 @@ static void xtensa_mx_irq_mask(struct irq_data *d) unsigned int mask = 1u << d->hwirq; if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE | - XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) { - set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) - - HW_IRQ_MX_BASE), MIENG); - } else { - mask = __this_cpu_read(cached_irq_mask) & ~mask; - __this_cpu_write(cached_irq_mask, mask); - xtensa_set_sr(mask, intenable); + XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) { + unsigned int ext_irq = xtensa_get_ext_irq_no(d->hwirq); + + if (ext_irq >= HW_IRQ_MX_BASE) { + set_er(1u << (ext_irq - HW_IRQ_MX_BASE), MIENG); + return; + } } + mask = __this_cpu_read(cached_irq_mask) & ~mask; + __this_cpu_write(cached_irq_mask, mask); + xtensa_set_sr(mask, intenable); } static void xtensa_mx_irq_unmask(struct irq_data *d) @@ -86,14 +89,17 @@ static void xtensa_mx_irq_unmask(struct irq_data *d) unsigned int mask = 1u << d->hwirq; if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE | - XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) { - set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) - - HW_IRQ_MX_BASE), MIENGSET); - } else { - mask |= __this_cpu_read(cached_irq_mask); - __this_cpu_write(cached_irq_mask, mask); - xtensa_set_sr(mask, intenable); + XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) { + unsigned int ext_irq = xtensa_get_ext_irq_no(d->hwirq); + + if (ext_irq >= HW_IRQ_MX_BASE) { + set_er(1u << (ext_irq - HW_IRQ_MX_BASE), MIENGSET); + return; + } } + mask |= __this_cpu_read(cached_irq_mask); + __this_cpu_write(cached_irq_mask, mask); + xtensa_set_sr(mask, intenable); } static void xtensa_mx_irq_enable(struct irq_data *d) @@ -113,7 +119,11 @@ static void xtensa_mx_irq_ack(struct irq_data *d) static int xtensa_mx_irq_retrigger(struct irq_data *d) { - xtensa_set_sr(1 << d->hwirq, intset); + unsigned int mask = 1u << d->hwirq; + + if (WARN_ON(mask & ~XCHAL_INTTYPE_MASK_SOFTWARE)) + return 0; + xtensa_set_sr(mask, intset); return 1; } diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c index c200234dd2c9..ab12328be5ee 100644 --- a/drivers/irqchip/irq-xtensa-pic.c +++ b/drivers/irqchip/irq-xtensa-pic.c @@ -70,7 +70,11 @@ static void xtensa_irq_ack(struct irq_data *d) static int xtensa_irq_retrigger(struct irq_data *d) { - xtensa_set_sr(1 << d->hwirq, intset); + unsigned int mask = 1u << d->hwirq; + + if (WARN_ON(mask & ~XCHAL_INTTYPE_MASK_SOFTWARE)) + return 0; + xtensa_set_sr(mask, intset); return 1; } diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c index ab0b63a4d045..e1de8b1a1001 100644 --- a/drivers/isdn/gigaset/ser-gigaset.c +++ b/drivers/isdn/gigaset/ser-gigaset.c @@ -633,7 +633,7 @@ gigaset_tty_ioctl(struct tty_struct *tty, struct file *file, flush_send_queue(cs); break; } - /* Pass through */ + /* fall through */ default: /* pass through to underlying serial device */ diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c index 81dd465afcf4..71a8312592d6 100644 --- a/drivers/isdn/hisax/hfc_pci.c +++ b/drivers/isdn/hisax/hfc_pci.c @@ -656,7 +656,7 @@ hfcpci_fill_fifo(struct BCState *bcs) schedule_event(bcs, B_ACKPENDING); } - dev_kfree_skb_any(bcs->tx_skb); + dev_consume_skb_any(bcs->tx_skb); bcs->tx_skb = skb_dequeue(&bcs->squeue); /* fetch next data */ } test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag); diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c index dc1cded716c1..43700fc19a31 100644 --- a/drivers/isdn/i4l/isdn_tty.c +++ b/drivers/isdn/i4l/isdn_tty.c @@ -3642,7 +3642,7 @@ 
isdn_tty_edit_at(const char *p, int count, modem_info *info) break; } else m->mdmcmdl = 0; - /* Fall through, check for 'A' */ + /* Fall through - check for 'A' */ case 0: if (c == 'A') { m->mdmcmd[m->mdmcmdl] = c; diff --git a/drivers/isdn/i4l/isdn_v110.c b/drivers/isdn/i4l/isdn_v110.c index 2a5f6668756c..d11fe76f138f 100644 --- a/drivers/isdn/i4l/isdn_v110.c +++ b/drivers/isdn/i4l/isdn_v110.c @@ -354,7 +354,7 @@ EncodeMatrix(unsigned char *buf, int len, unsigned char *m, int mlen) printk(KERN_WARNING "isdn_v110 (EncodeMatrix): buffer full!\n"); return line; } - /* else: fall through */ + /* fall through */ case 128: m[line] = 128; /* leftmost -> set byte to 1000000 */ mbit = 64; /* current bit in the matrix line */ diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c index 211ed6cffd10..578978711887 100644 --- a/drivers/isdn/mISDN/timerdev.c +++ b/drivers/isdn/mISDN/timerdev.c @@ -170,8 +170,8 @@ dev_expire_timer(struct timer_list *t) spin_lock_irqsave(&timer->dev->lock, flags); if (timer->id >= 0) list_move_tail(&timer->list, &timer->dev->expired); - spin_unlock_irqrestore(&timer->dev->lock, flags); wake_up_interruptible(&timer->dev->wait); + spin_unlock_irqrestore(&timer->dev->lock, flags); } static int diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c index d713271ebf7c..a64116586b4c 100644 --- a/drivers/mailbox/bcm-flexrm-mailbox.c +++ b/drivers/mailbox/bcm-flexrm-mailbox.c @@ -1396,9 +1396,9 @@ static void flexrm_shutdown(struct mbox_chan *chan) /* Clear ring flush state */ timeout = 1000; /* timeout of 1s */ - writel_relaxed(0x0, ring + RING_CONTROL); + writel_relaxed(0x0, ring->regs + RING_CONTROL); do { - if (!(readl_relaxed(ring + RING_FLUSH_DONE) & + if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) & FLUSH_DONE_MASK)) break; mdelay(1); diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c index c6a7d4582dc6..38d9df3fb199 100644 --- a/drivers/mailbox/mailbox.c +++ b/drivers/mailbox/mailbox.c @@ -310,6 +310,7 @@ int mbox_flush(struct mbox_chan *chan, unsigned long timeout) return ret; } +EXPORT_SYMBOL_GPL(mbox_flush); /** * mbox_request_channel - Request a mailbox channel. 
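The mISDN timerdev hunk above moves the wake_up_interruptible() call inside the dev->lock critical section. A minimal sketch of that ordering follows; my_timer_dev and my_expire are made-up illustrative names, not taken from the driver. Waking the sleeper while the lock is still held means a waiter that takes the same lock before freeing the device cannot tear down the wait queue between the list update and the wake-up.

/*
 * Illustrative sketch only; my_timer_dev and my_expire are hypothetical
 * names. wake_up helpers are safe to call with a spinlock held.
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct my_timer_dev {
        spinlock_t lock;
        wait_queue_head_t wait;
        struct list_head expired;
};

static void my_expire(struct my_timer_dev *dev, struct list_head *entry)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->lock, flags);
        list_move_tail(entry, &dev->expired);
        /*
         * Wake the reader while dev->lock is still held. A release path
         * that takes dev->lock before freeing the structure therefore
         * cannot run between the list update and this wake-up, so the
         * wait queue is guaranteed to still exist here.
         */
        wake_up_interruptible(&dev->wait);
        spin_unlock_irqrestore(&dev->lock, flags);
}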
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 47d4e0d30bf0..dd538e6b2748 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -932,7 +932,7 @@ static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio) if (IS_ERR(bip)) return PTR_ERR(bip); - tag_len = io->cc->on_disk_tag_size * bio_sectors(bio); + tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift); bip->bip_iter.bi_size = tag_len; bip->bip_iter.bi_sector = io->cc->start + io->sector; diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 4eb5f8c56535..a20531e5f3b4 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -131,7 +131,7 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig) static void rq_completed(struct mapped_device *md) { /* nudge anyone waiting on suspend queue */ - if (unlikely(waitqueue_active(&md->wait))) + if (unlikely(wq_has_sleeper(&md->wait))) wake_up(&md->wait); /* diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index ca8af21bf644..e83b63608262 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -257,6 +257,7 @@ struct pool { spinlock_t lock; struct bio_list deferred_flush_bios; + struct bio_list deferred_flush_completions; struct list_head prepared_mappings; struct list_head prepared_discards; struct list_head prepared_discards_pt2; @@ -956,6 +957,39 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m) mempool_free(m, &m->tc->pool->mapping_pool); } +static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio) +{ + struct pool *pool = tc->pool; + unsigned long flags; + + /* + * If the bio has the REQ_FUA flag set we must commit the metadata + * before signaling its completion. + */ + if (!bio_triggers_commit(tc, bio)) { + bio_endio(bio); + return; + } + + /* + * Complete bio with an error if earlier I/O caused changes to the + * metadata that can't be committed, e.g., due to I/O errors on the + * metadata device. + */ + if (dm_thin_aborted_changes(tc->td)) { + bio_io_error(bio); + return; + } + + /* + * Batch together any bios that trigger commits and then issue a + * single commit for them in process_deferred_bios(). + */ + spin_lock_irqsave(&pool->lock, flags); + bio_list_add(&pool->deferred_flush_completions, bio); + spin_unlock_irqrestore(&pool->lock, flags); +} + static void process_prepared_mapping(struct dm_thin_new_mapping *m) { struct thin_c *tc = m->tc; @@ -988,7 +1022,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m) */ if (bio) { inc_remap_and_issue_cell(tc, m->cell, m->data_block); - bio_endio(bio); + complete_overwrite_bio(tc, bio); } else { inc_all_io_entry(tc->pool, m->cell->holder); remap_and_issue(tc, m->cell->holder, m->data_block); @@ -2317,7 +2351,7 @@ static void process_deferred_bios(struct pool *pool) { unsigned long flags; struct bio *bio; - struct bio_list bios; + struct bio_list bios, bio_completions; struct thin_c *tc; tc = get_first_thin(pool); @@ -2328,26 +2362,36 @@ static void process_deferred_bios(struct pool *pool) } /* - * If there are any deferred flush bios, we must commit - * the metadata before issuing them. + * If there are any deferred flush bios, we must commit the metadata + * before issuing them or signaling their completion.
*/ bio_list_init(&bios); + bio_list_init(&bio_completions); + spin_lock_irqsave(&pool->lock, flags); bio_list_merge(&bios, &pool->deferred_flush_bios); bio_list_init(&pool->deferred_flush_bios); + + bio_list_merge(&bio_completions, &pool->deferred_flush_completions); + bio_list_init(&pool->deferred_flush_completions); spin_unlock_irqrestore(&pool->lock, flags); - if (bio_list_empty(&bios) && + if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) && !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool))) return; if (commit(pool)) { + bio_list_merge(&bios, &bio_completions); + while ((bio = bio_list_pop(&bios))) bio_io_error(bio); return; } pool->last_commit_jiffies = jiffies; + while ((bio = bio_list_pop(&bio_completions))) + bio_endio(bio); + while ((bio = bio_list_pop(&bios))) generic_make_request(bio); } @@ -2954,6 +2998,7 @@ static struct pool *pool_create(struct mapped_device *pool_md, INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout); spin_lock_init(&pool->lock); bio_list_init(&pool->deferred_flush_bios); + bio_list_init(&pool->deferred_flush_completions); INIT_LIST_HEAD(&pool->prepared_mappings); INIT_LIST_HEAD(&pool->prepared_discards); INIT_LIST_HEAD(&pool->prepared_discards_pt2); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 2b53c3841b53..515e6af9bed2 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -699,7 +699,7 @@ static void end_io_acct(struct dm_io *io) true, duration, &io->stats_aux); /* nudge anyone waiting on suspend queue */ - if (unlikely(waitqueue_active(&md->wait))) + if (unlikely(wq_has_sleeper(&md->wait))) wake_up(&md->wait); } @@ -1336,7 +1336,11 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio, return r; } - bio_trim(clone, sector - clone->bi_iter.bi_sector, len); + bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); + clone->bi_iter.bi_size = to_bytes(len); + + if (bio_integrity(bio)) + bio_integrity_trim(clone); return 0; } diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 1d54109071cc..fa47249fa3e4 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1863,6 +1863,20 @@ static void end_sync_read(struct bio *bio) reschedule_retry(r1_bio); } +static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio) +{ + sector_t sync_blocks = 0; + sector_t s = r1_bio->sector; + long sectors_to_go = r1_bio->sectors; + + /* make sure these bits don't get cleared. */ + do { + md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1); + s += sync_blocks; + sectors_to_go -= sync_blocks; + } while (sectors_to_go > 0); +} + static void end_sync_write(struct bio *bio) { int uptodate = !bio->bi_status; @@ -1874,15 +1888,7 @@ static void end_sync_write(struct bio *bio) struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev; if (!uptodate) { - sector_t sync_blocks = 0; - sector_t s = r1_bio->sector; - long sectors_to_go = r1_bio->sectors; - /* make sure these bits doesn't get cleared. 
*/ - do { - md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1); - s += sync_blocks; - sectors_to_go -= sync_blocks; - } while (sectors_to_go > 0); + abort_sync_write(mddev, r1_bio); set_bit(WriteErrorSeen, &rdev->flags); if (!test_and_set_bit(WantReplacement, &rdev->flags)) set_bit(MD_RECOVERY_NEEDED, & @@ -2172,8 +2178,10 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio) (i == r1_bio->read_disk || !test_bit(MD_RECOVERY_SYNC, &mddev->recovery)))) continue; - if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) + if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) { + abort_sync_write(mddev, r1_bio); continue; + } bio_set_op_attrs(wbio, REQ_OP_WRITE, 0); if (test_bit(FailFast, &conf->mirrors[i].rdev->flags)) diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index ec3a5ef7fee0..cbbe6b6535be 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -1935,12 +1935,14 @@ out: } static struct stripe_head * -r5c_recovery_alloc_stripe(struct r5conf *conf, - sector_t stripe_sect) +r5c_recovery_alloc_stripe( + struct r5conf *conf, + sector_t stripe_sect, + int noblock) { struct stripe_head *sh; - sh = raid5_get_active_stripe(conf, stripe_sect, 0, 1, 0); + sh = raid5_get_active_stripe(conf, stripe_sect, 0, noblock, 0); if (!sh) return NULL; /* no more stripe available */ @@ -2150,7 +2152,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log, stripe_sect); if (!sh) { - sh = r5c_recovery_alloc_stripe(conf, stripe_sect); + sh = r5c_recovery_alloc_stripe(conf, stripe_sect, 1); /* * cannot get stripe from raid5_get_active_stripe * try replay some stripes @@ -2159,20 +2161,29 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log, r5c_recovery_replay_stripes( cached_stripe_list, ctx); sh = r5c_recovery_alloc_stripe( - conf, stripe_sect); + conf, stripe_sect, 1); } if (!sh) { + int new_size = conf->min_nr_stripes * 2; pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n", mdname(mddev), - conf->min_nr_stripes * 2); - raid5_set_cache_size(mddev, - conf->min_nr_stripes * 2); - sh = r5c_recovery_alloc_stripe(conf, - stripe_sect); + new_size); + ret = raid5_set_cache_size(mddev, new_size); + if (conf->min_nr_stripes <= new_size / 2) { + pr_err("md/raid:%s: Cannot increase cache size, ret=%d, new_size=%d, min_nr_stripes=%d, max_nr_stripes=%d\n", + mdname(mddev), + ret, + new_size, + conf->min_nr_stripes, + conf->max_nr_stripes); + return -ENOMEM; + } + sh = r5c_recovery_alloc_stripe( + conf, stripe_sect, 0); } if (!sh) { pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. 
Recovery failed.\n", - mdname(mddev)); + mdname(mddev)); return -ENOMEM; } list_add_tail(&sh->lru, cached_stripe_list); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 4990f0319f6c..cecea901ab8c 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -6369,6 +6369,7 @@ raid5_show_stripe_cache_size(struct mddev *mddev, char *page) int raid5_set_cache_size(struct mddev *mddev, int size) { + int result = 0; struct r5conf *conf = mddev->private; if (size <= 16 || size > 32768) @@ -6385,11 +6386,14 @@ raid5_set_cache_size(struct mddev *mddev, int size) mutex_lock(&conf->cache_size_mutex); while (size > conf->max_nr_stripes) - if (!grow_one_stripe(conf, GFP_KERNEL)) + if (!grow_one_stripe(conf, GFP_KERNEL)) { + conf->min_nr_stripes = conf->max_nr_stripes; + result = -ENOMEM; break; + } mutex_unlock(&conf->cache_size_mutex); - return 0; + return result; } EXPORT_SYMBOL(raid5_set_cache_size); diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index f461460a2aeb..76f9909cf396 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -1419,7 +1419,7 @@ config MFD_TPS65217 config MFD_TPS68470 bool "TI TPS68470 Power Management / LED chips" - depends on ACPI && I2C=y + depends on ACPI && PCI && I2C=y select MFD_CORE select REGMAP_I2C select I2C_DESIGNWARE_PLATFORM diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index 1fc8ea0f519b..ca4c9cc218a2 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -401,8 +401,11 @@ static void mei_io_list_flush_cl(struct list_head *head, struct mei_cl_cb *cb, *next; list_for_each_entry_safe(cb, next, head, list) { - if (cl == cb->cl) + if (cl == cb->cl) { list_del_init(&cb->list); + if (cb->fop_type == MEI_FOP_READ) + mei_io_cb_free(cb); + } } } diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index 23739a60517f..bb1ee9834a02 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -139,6 +139,8 @@ #define MEI_DEV_ID_CNP_H 0xA360 /* Cannon Point H */ #define MEI_DEV_ID_CNP_H_4 0xA364 /* Cannon Point H 4 (iTouch) */ +#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */ + /* * MEI HW Section */ diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index e89497f858ae..3ab946ad3257 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -105,6 +105,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)}, + /* required last entry */ {0, } }; diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c index 2bfa3a903bf9..744757f541be 100644 --- a/drivers/misc/mic/vop/vop_main.c +++ b/drivers/misc/mic/vop/vop_main.c @@ -47,7 +47,8 @@ * @dc: Virtio device control * @vpdev: VOP device which is the parent for this virtio device * @vr: Buffer for accessing the VRING - * @used: Buffer for used + * @used_virt: Virtual address of used ring + * @used: DMA address of used ring * @used_size: Size of the used buffer * @reset_done: Track whether VOP reset is complete * @virtio_cookie: Cookie returned upon requesting a interrupt @@ -61,6 +62,7 @@ struct _vop_vdev { struct mic_device_ctrl __iomem *dc; struct vop_device *vpdev; void __iomem *vr[VOP_MAX_VRINGS]; + void *used_virt[VOP_MAX_VRINGS]; dma_addr_t used[VOP_MAX_VRINGS]; int used_size[VOP_MAX_VRINGS]; struct completion reset_done; @@ -260,12 +262,12 @@ static bool vop_notify(struct virtqueue *vq) static 
void vop_del_vq(struct virtqueue *vq, int n) { struct _vop_vdev *vdev = to_vopvdev(vq->vdev); - struct vring *vr = (struct vring *)(vq + 1); struct vop_device *vpdev = vdev->vpdev; dma_unmap_single(&vpdev->dev, vdev->used[n], vdev->used_size[n], DMA_BIDIRECTIONAL); - free_pages((unsigned long)vr->used, get_order(vdev->used_size[n])); + free_pages((unsigned long)vdev->used_virt[n], + get_order(vdev->used_size[n])); vring_del_virtqueue(vq); vpdev->hw_ops->iounmap(vpdev, vdev->vr[n]); vdev->vr[n] = NULL; @@ -283,6 +285,26 @@ static void vop_del_vqs(struct virtio_device *dev) vop_del_vq(vq, idx++); } +static struct virtqueue *vop_new_virtqueue(unsigned int index, + unsigned int num, + struct virtio_device *vdev, + bool context, + void *pages, + bool (*notify)(struct virtqueue *vq), + void (*callback)(struct virtqueue *vq), + const char *name, + void *used) +{ + bool weak_barriers = false; + struct vring vring; + + vring_init(&vring, num, pages, MIC_VIRTIO_RING_ALIGN); + vring.used = used; + + return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context, + notify, callback, name); +} + /* * This routine will assign vring's allocated in host/io memory. Code in * virtio_ring.c however continues to access this io memory as if it were local @@ -302,7 +324,6 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev, struct _mic_vring_info __iomem *info; void *used; int vr_size, _vr_size, err, magic; - struct vring *vr; u8 type = ioread8(&vdev->desc->type); if (index >= ioread8(&vdev->desc->num_vq)) @@ -322,17 +343,7 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev, return ERR_PTR(-ENOMEM); vdev->vr[index] = va; memset_io(va, 0x0, _vr_size); - vq = vring_new_virtqueue( - index, - le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN, - dev, - false, - ctx, - (void __force *)va, vop_notify, callback, name); - if (!vq) { - err = -ENOMEM; - goto unmap; - } + info = va + _vr_size; magic = ioread32(&info->magic); @@ -341,18 +352,27 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev, goto unmap; } - /* Allocate and reassign used ring now */ vdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * le16_to_cpu(config.num)); used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(vdev->used_size[index])); + vdev->used_virt[index] = used; if (!used) { err = -ENOMEM; dev_err(_vop_dev(vdev), "%s %d err %d\n", __func__, __LINE__, err); - goto del_vq; + goto unmap; + } + + vq = vop_new_virtqueue(index, le16_to_cpu(config.num), dev, ctx, + (void __force *)va, vop_notify, callback, + name, used); + if (!vq) { + err = -ENOMEM; + goto free_used; } + vdev->used[index] = dma_map_single(&vpdev->dev, used, vdev->used_size[index], DMA_BIDIRECTIONAL); @@ -360,26 +380,17 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev, err = -ENOMEM; dev_err(_vop_dev(vdev), "%s %d err %d\n", __func__, __LINE__, err); - goto free_used; + goto del_vq; } writeq(vdev->used[index], &vqconfig->used_address); - /* - * To reassign the used ring here we are directly accessing - * struct vring_virtqueue which is a private data structure - * in virtio_ring.c. 
At the minimum, a BUILD_BUG_ON() in - * vring_new_virtqueue() would ensure that - * (&vq->vring == (struct vring *) (&vq->vq + 1)); - */ - vr = (struct vring *)(vq + 1); - vr->used = used; vq->priv = vdev; return vq; +del_vq: + vring_del_virtqueue(vq); free_used: free_pages((unsigned long)used, get_order(vdev->used_size[index])); -del_vq: - vring_del_virtqueue(vq); unmap: vpdev->hw_ops->iounmap(vpdev, vdev->vr[index]); return ERR_PTR(err); @@ -581,6 +592,8 @@ static int _vop_remove_device(struct mic_device_desc __iomem *d, int ret = -1; if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) { + struct device *dev = get_device(&vdev->vdev.dev); + dev_dbg(&vpdev->dev, "%s %d config_change %d type %d vdev %p\n", __func__, __LINE__, @@ -592,7 +605,7 @@ static int _vop_remove_device(struct mic_device_desc __iomem *d, iowrite8(-1, &dc->h2c_vdev_db); if (status & VIRTIO_CONFIG_S_DRIVER_OK) wait_for_completion(&vdev->reset_done); - put_device(&vdev->vdev.dev); + put_device(dev); iowrite8(1, &dc->guest_ack); dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n", __func__, __LINE__, ioread8(&dc->guest_ack)); diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index aef1185f383d..14f3fdb8c6bb 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -2112,7 +2112,7 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq) if (waiting) wake_up(&mq->wait); else - kblockd_schedule_work(&mq->complete_work); + queue_work(mq->card->complete_wq, &mq->complete_work); return; } @@ -2924,6 +2924,13 @@ static int mmc_blk_probe(struct mmc_card *card) mmc_fixup_device(card, mmc_blk_fixups); + card->complete_wq = alloc_workqueue("mmc_complete", + WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); + if (unlikely(!card->complete_wq)) { + pr_err("Failed to create mmc completion workqueue"); + return -ENOMEM; + } + md = mmc_blk_alloc(card); if (IS_ERR(md)) return PTR_ERR(md); @@ -2987,6 +2994,7 @@ static void mmc_blk_remove(struct mmc_card *card) pm_runtime_put_noidle(&card->dev); mmc_blk_remove_req(md); dev_set_drvdata(&card->dev, NULL); + destroy_workqueue(card->complete_wq); } static int _mmc_blk_suspend(struct mmc_card *card) diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c index 50293529d6de..c9e7aa50bb0a 100644 --- a/drivers/mmc/host/bcm2835.c +++ b/drivers/mmc/host/bcm2835.c @@ -1431,6 +1431,8 @@ static int bcm2835_probe(struct platform_device *pdev) err: dev_dbg(dev, "%s -> err %d\n", __func__, ret); + if (host->dma_chan_rxtx) + dma_release_channel(host->dma_chan_rxtx); mmc_free_host(mmc); return ret; diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index f19ec60bcbdc..2eba507790e4 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -1338,7 +1338,8 @@ static int meson_mmc_probe(struct platform_device *pdev) host->regs + SD_EMMC_IRQ_EN); ret = request_threaded_irq(host->irq, meson_mmc_irq, - meson_mmc_irq_thread, IRQF_SHARED, NULL, host); + meson_mmc_irq_thread, IRQF_SHARED, + dev_name(&pdev->dev), host); if (ret) goto err_init_clk; diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index 8afeaf81ae66..833ef0590af8 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c @@ -846,7 +846,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz) if (timing == MMC_TIMING_MMC_HS400 && host->dev_comp->hs400_tune) - sdr_set_field(host->base + PAD_CMD_TUNE, + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRRDLY, host->hs400_cmd_int_delay); dev_dbg(host->dev, 
"sclk: %d, timing: %d\n", host->mmc->actual_clock, diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index 279e326e397e..70fadc976795 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c @@ -1399,13 +1399,37 @@ static int sunxi_mmc_probe(struct platform_device *pdev) mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ; - if (host->cfg->clk_delays || host->use_new_timings) + /* + * Some H5 devices do not have signal traces precise enough to + * use HS DDR mode for their eMMC chips. + * + * We still enable HS DDR modes for all the other controller + * variants that support them. + */ + if ((host->cfg->clk_delays || host->use_new_timings) && + !of_device_is_compatible(pdev->dev.of_node, + "allwinner,sun50i-h5-emmc")) mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR; ret = mmc_of_parse(mmc); if (ret) goto error_free_dma; + /* + * If we don't support delay chains in the SoC, we can't use any + * of the higher speed modes. Mask them out in case the device + * tree specifies the properties for them, which gets added to + * the caps by mmc_of_parse() above. + */ + if (!(host->cfg->clk_delays || host->use_new_timings)) { + mmc->caps &= ~(MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR | + MMC_CAP_1_2V_DDR | MMC_CAP_UHS); + mmc->caps2 &= ~MMC_CAP2_HS200; + } + + /* TODO: This driver doesn't support HS400 mode yet */ + mmc->caps2 &= ~MMC_CAP2_HS400; + ret = sunxi_mmc_init_host(host); if (ret) goto error_free_dma; diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index 60104e1079c5..37f174ccbcec 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c @@ -480,6 +480,10 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent, /* let's register it anyway to preserve ordering */ slave->offset = 0; slave->mtd.size = 0; + + /* Initialize ->erasesize to make add_mtd_device() happy. */ + slave->mtd.erasesize = parent->erasesize; + printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n", part->name); goto out_register; @@ -632,7 +636,6 @@ err_remove_part: mutex_unlock(&mtd_partitions_mutex); free_partition(new); - pr_info("%s:%i\n", __func__, __LINE__); return ret; } diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c index bd4cfac6b5aa..a4768df5083f 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c @@ -155,9 +155,10 @@ int gpmi_init(struct gpmi_nand_data *this) /* * Reset BCH here, too. We got failures otherwise :( - * See later BCH reset for explanation of MX23 handling + * See later BCH reset for explanation of MX23 and MX28 handling */ - ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this)); + ret = gpmi_reset_block(r->bch_regs, + GPMI_IS_MX23(this) || GPMI_IS_MX28(this)); if (ret) goto err_out; @@ -263,12 +264,10 @@ int bch_set_geometry(struct gpmi_nand_data *this) /* * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this * chip, otherwise it will lock up. So we skip resetting BCH on the MX23. - * On the other hand, the MX28 needs the reset, because one case has been - * seen where the BCH produced ECC errors constantly after 10000 - * consecutive reboots. The latter case has not been seen on the MX23 - * yet, still we don't know if it could happen there as well. + * and MX28. 
*/ - ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this)); + ret = gpmi_reset_block(r->bch_regs, + GPMI_IS_MX23(this) || GPMI_IS_MX28(this)); if (ret) goto err_out; diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index cca4b24d2ffa..839494ac457c 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -410,6 +410,7 @@ static int nand_check_wp(struct nand_chip *chip) /** * nand_fill_oob - [INTERN] Transfer client buffer to oob + * @chip: NAND chip object * @oob: oob data buffer * @len: oob data write length * @ops: oob ops structure diff --git a/drivers/mtd/nand/raw/nand_bbt.c b/drivers/mtd/nand/raw/nand_bbt.c index 1b722fe9213c..19a2b563acdf 100644 --- a/drivers/mtd/nand/raw/nand_bbt.c +++ b/drivers/mtd/nand/raw/nand_bbt.c @@ -158,7 +158,7 @@ static u32 add_marker_len(struct nand_bbt_descr *td) /** * read_bbt - [GENERIC] Read the bad block table starting from page - * @chip: NAND chip object + * @this: NAND chip object * @buf: temporary buffer * @page: the starting page * @num: the number of bbt descriptors to read diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c index 479c2f2cf17f..fa87ae28cdfe 100644 --- a/drivers/mtd/nand/spi/core.c +++ b/drivers/mtd/nand/spi/core.c @@ -304,24 +304,30 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand, struct nand_device *nand = spinand_to_nand(spinand); struct mtd_info *mtd = nanddev_to_mtd(nand); struct nand_page_io_req adjreq = *req; - unsigned int nbytes = 0; - void *buf = NULL; + void *buf = spinand->databuf; + unsigned int nbytes; u16 column = 0; int ret; - memset(spinand->databuf, 0xff, - nanddev_page_size(nand) + - nanddev_per_page_oobsize(nand)); + /* + * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset + * the cache content to 0xFF (depends on vendor implementation), so we + * must fill the page cache entirely even if we only want to program + * the data portion of the page, otherwise we might corrupt the BBM or + * user data previously programmed in OOB area. + */ + nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand); + memset(spinand->databuf, 0xff, nbytes); + adjreq.dataoffs = 0; + adjreq.datalen = nanddev_page_size(nand); + adjreq.databuf.out = spinand->databuf; + adjreq.ooblen = nanddev_per_page_oobsize(nand); + adjreq.ooboffs = 0; + adjreq.oobbuf.out = spinand->oobbuf; - if (req->datalen) { + if (req->datalen) memcpy(spinand->databuf + req->dataoffs, req->databuf.out, req->datalen); - adjreq.dataoffs = 0; - adjreq.datalen = nanddev_page_size(nand); - adjreq.databuf.out = spinand->databuf; - nbytes = adjreq.datalen; - buf = spinand->databuf; - } if (req->ooblen) { if (req->mode == MTD_OPS_AUTO_OOB) @@ -332,14 +338,6 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand, else memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out, req->ooblen); - - adjreq.ooblen = nanddev_per_page_oobsize(nand); - adjreq.ooboffs = 0; - nbytes += nanddev_per_page_oobsize(nand); - if (!buf) { - buf = spinand->oobbuf; - column = nanddev_page_size(nand); - } } spinand_cache_op_adjust_colum(spinand, &adjreq, &column); @@ -370,8 +368,8 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand, /* * We need to use the RANDOM LOAD CACHE operation if there's - * more than one iteration, because the LOAD operation resets - * the cache to 0xff. + * more than one iteration, because the LOAD operation might + * reset the cache to 0xff. 
*/ if (nbytes) { column = op.addr.val; @@ -1018,11 +1016,11 @@ static int spinand_init(struct spinand_device *spinand) for (i = 0; i < nand->memorg.ntargets; i++) { ret = spinand_select_target(spinand, i); if (ret) - goto err_free_bufs; + goto err_manuf_cleanup; ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED); if (ret) - goto err_free_bufs; + goto err_manuf_cleanup; } ret = nanddev_init(nand, &spinand_ops, THIS_MODULE); diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index edb1c023a753..6210757772ed 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -145,13 +145,16 @@ config MACVTAP To compile this driver as a module, choose M here: the module will be called macvtap. +config IPVLAN_L3S + depends on NETFILTER + depends on IPVLAN + def_bool y + select NET_L3_MASTER_DEV config IPVLAN tristate "IP-VLAN support" depends on INET depends on IPV6 || !IPV6 - depends on NETFILTER - select NET_L3_MASTER_DEV ---help--- This allows one to create virtual devices off of a main interface and packets will be delivered based on the dest L3 (IPv6/IPv4 addr) @@ -197,9 +200,9 @@ config VXLAN config GENEVE tristate "Generic Network Virtualization Encapsulation" - depends on INET && NET_UDP_TUNNEL + depends on INET depends on IPV6 || !IPV6 - select NET_IP_TUNNEL + select NET_UDP_TUNNEL select GRO_CELLS ---help--- This allows one to create geneve virtual interfaces that provide diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c index f90bb723985f..b3c63d2f16aa 100644 --- a/drivers/net/appletalk/cops.c +++ b/drivers/net/appletalk/cops.c @@ -301,7 +301,7 @@ static int __init cops_probe1(struct net_device *dev, int ioaddr) dev->irq = cops_irq(ioaddr, board); if (dev->irq) break; - /* No IRQ found on this port, fallthrough */ + /* fall through - Once no IRQ found on this port. */ case 1: retval = -EINVAL; goto err_out; diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 4d5d01cb8141..da1fc17295d9 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -1375,6 +1375,7 @@ static int bond_option_slaves_set(struct bonding *bond, sscanf(newval->string, "%16s", command); /* IFNAMSIZ*/ ifname = command + 1; if ((strlen(command) <= 1) || + (command[0] != '+' && command[0] != '-') || !dev_valid_name(ifname)) goto err_no_cmd; @@ -1398,6 +1399,7 @@ static int bond_option_slaves_set(struct bonding *bond, break; default: + /* should not run here. */ goto err_no_cmd; } diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c index d28a1398c091..7608bc3e00df 100644 --- a/drivers/net/caif/caif_spi.c +++ b/drivers/net/caif/caif_spi.c @@ -73,35 +73,37 @@ MODULE_PARM_DESC(spi_down_tail_align, "SPI downlink tail alignment."); #define LOW_WATER_MARK 100 #define HIGH_WATER_MARK (LOW_WATER_MARK*5) -#ifdef CONFIG_UML +#ifndef CONFIG_HAS_DMA /* * We sometimes use UML for debugging, but it cannot handle * dma_alloc_coherent so we have to wrap it. 
*/ -static inline void *dma_alloc(dma_addr_t *daddr) +static inline void *dma_alloc(struct cfspi *cfspi, dma_addr_t *daddr) { return kmalloc(SPI_DMA_BUF_LEN, GFP_KERNEL); } -static inline void dma_free(void *cpu_addr, dma_addr_t handle) +static inline void dma_free(struct cfspi *cfspi, void *cpu_addr, + dma_addr_t handle) { kfree(cpu_addr); } #else -static inline void *dma_alloc(dma_addr_t *daddr) +static inline void *dma_alloc(struct cfspi *cfspi, dma_addr_t *daddr) { - return dma_alloc_coherent(NULL, SPI_DMA_BUF_LEN, daddr, + return dma_alloc_coherent(&cfspi->pdev->dev, SPI_DMA_BUF_LEN, daddr, GFP_KERNEL); } -static inline void dma_free(void *cpu_addr, dma_addr_t handle) +static inline void dma_free(struct cfspi *cfspi, void *cpu_addr, + dma_addr_t handle) { - dma_free_coherent(NULL, SPI_DMA_BUF_LEN, cpu_addr, handle); + dma_free_coherent(&cfspi->pdev->dev, SPI_DMA_BUF_LEN, cpu_addr, handle); } -#endif /* CONFIG_UML */ +#endif /* CONFIG_HAS_DMA */ #ifdef CONFIG_DEBUG_FS @@ -610,13 +612,13 @@ static int cfspi_init(struct net_device *dev) } /* Allocate DMA buffers. */ - cfspi->xfer.va_tx[0] = dma_alloc(&cfspi->xfer.pa_tx[0]); + cfspi->xfer.va_tx[0] = dma_alloc(cfspi, &cfspi->xfer.pa_tx[0]); if (!cfspi->xfer.va_tx[0]) { res = -ENODEV; goto err_dma_alloc_tx_0; } - cfspi->xfer.va_rx = dma_alloc(&cfspi->xfer.pa_rx); + cfspi->xfer.va_rx = dma_alloc(cfspi, &cfspi->xfer.pa_rx); if (!cfspi->xfer.va_rx) { res = -ENODEV; @@ -665,9 +667,9 @@ static int cfspi_init(struct net_device *dev) return 0; err_create_wq: - dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx); + dma_free(cfspi, cfspi->xfer.va_rx, cfspi->xfer.pa_rx); err_dma_alloc_rx: - dma_free(cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]); + dma_free(cfspi, cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]); err_dma_alloc_tx_0: return res; } @@ -683,8 +685,8 @@ static void cfspi_uninit(struct net_device *dev) cfspi->ndev = NULL; /* Free DMA buffers. 
*/ - dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx); - dma_free(cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]); + dma_free(cfspi, cfspi->xfer.va_rx, cfspi->xfer.pa_rx); + dma_free(cfspi, cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]); set_bit(SPI_TERMINATE, &cfspi->state); wake_up_interruptible(&cfspi->wait); destroy_workqueue(cfspi->wq); diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 0e4bbdcc614f..c76892ac4e69 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -344,7 +344,8 @@ static void b53_set_forwarding(struct b53_device *dev, int enable) b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt); } -static void b53_enable_vlan(struct b53_device *dev, bool enable) +static void b53_enable_vlan(struct b53_device *dev, bool enable, + bool enable_filtering) { u8 mgmt, vc0, vc1, vc4 = 0, vc5; @@ -369,8 +370,13 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable) vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID; vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN; vc4 &= ~VC4_ING_VID_CHECK_MASK; - vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S; - vc5 |= VC5_DROP_VTABLE_MISS; + if (enable_filtering) { + vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S; + vc5 |= VC5_DROP_VTABLE_MISS; + } else { + vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S; + vc5 &= ~VC5_DROP_VTABLE_MISS; + } if (is5325(dev)) vc0 &= ~VC0_RESERVED_1; @@ -420,6 +426,9 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable) } b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); + + dev->vlan_enabled = enable; + dev->vlan_filtering_enabled = enable_filtering; } static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100) @@ -632,25 +641,35 @@ static void b53_enable_mib(struct b53_device *dev) b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc); } +static u16 b53_default_pvid(struct b53_device *dev) +{ + if (is5325(dev) || is5365(dev)) + return 1; + else + return 0; +} + int b53_configure_vlan(struct dsa_switch *ds) { struct b53_device *dev = ds->priv; struct b53_vlan vl = { 0 }; - int i; + int i, def_vid; + + def_vid = b53_default_pvid(dev); /* clear all vlan entries */ if (is5325(dev) || is5365(dev)) { - for (i = 1; i < dev->num_vlans; i++) + for (i = def_vid; i < dev->num_vlans; i++) b53_set_vlan_entry(dev, i, &vl); } else { b53_do_vlan_op(dev, VTA_CMD_CLEAR); } - b53_enable_vlan(dev, false); + b53_enable_vlan(dev, false, dev->vlan_filtering_enabled); b53_for_each_port(dev, i) b53_write16(dev, B53_VLAN_PAGE, - B53_VLAN_PORT_DEF_TAG(i), 1); + B53_VLAN_PORT_DEF_TAG(i), def_vid); if (!is5325(dev) && !is5365(dev)) b53_set_jumbo(dev, dev->enable_jumbo, false); @@ -1255,6 +1274,46 @@ EXPORT_SYMBOL(b53_phylink_mac_link_up); int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering) { + struct b53_device *dev = ds->priv; + struct net_device *bridge_dev; + unsigned int i; + u16 pvid, new_pvid; + + /* Handle the case where multiple bridges span the same switch device + * and one of them has a different setting than what is being requested + * which would break filtering semantics for any of the other + * bridge devices.
+ */ + b53_for_each_port(dev, i) { + bridge_dev = dsa_to_port(ds, i)->bridge_dev; + if (bridge_dev && + bridge_dev != dsa_to_port(ds, port)->bridge_dev && + br_vlan_enabled(bridge_dev) != vlan_filtering) { + netdev_err(bridge_dev, + "VLAN filtering is global to the switch!\n"); + return -EINVAL; + } + } + + b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid); + new_pvid = pvid; + if (dev->vlan_filtering_enabled && !vlan_filtering) { + /* Filtering is currently enabled, use the default PVID since + * the bridge does not expect tagging anymore + */ + dev->ports[port].pvid = pvid; + new_pvid = b53_default_pvid(dev); + } else if (!dev->vlan_filtering_enabled && vlan_filtering) { + /* Filtering is currently disabled, restore the previous PVID */ + new_pvid = dev->ports[port].pvid; + } + + if (pvid != new_pvid) + b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), + new_pvid); + + b53_enable_vlan(dev, dev->vlan_enabled, vlan_filtering); + return 0; } EXPORT_SYMBOL(b53_vlan_filtering); @@ -1270,7 +1329,7 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port, if (vlan->vid_end > dev->num_vlans) return -ERANGE; - b53_enable_vlan(dev, true); + b53_enable_vlan(dev, true, dev->vlan_filtering_enabled); return 0; } @@ -1300,7 +1359,7 @@ void b53_vlan_add(struct dsa_switch *ds, int port, b53_fast_age_vlan(dev, vid); } - if (pvid) { + if (pvid && !dsa_is_cpu_port(ds, port)) { b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), vlan->vid_end); b53_fast_age_vlan(dev, vid); @@ -1326,12 +1385,8 @@ int b53_vlan_del(struct dsa_switch *ds, int port, vl->members &= ~BIT(port); - if (pvid == vid) { - if (is5325(dev) || is5365(dev)) - pvid = 1; - else - pvid = 0; - } + if (pvid == vid) + pvid = b53_default_pvid(dev); if (untagged && !dsa_is_cpu_port(ds, port)) vl->untag &= ~(BIT(port)); @@ -1644,10 +1699,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br) b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); dev->ports[port].vlan_ctl_mask = pvlan; - if (is5325(dev) || is5365(dev)) - pvid = 1; - else - pvid = 0; + pvid = b53_default_pvid(dev); /* Make this port join all VLANs without VLAN entries */ if (is58xx(dev)) { diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index ec796482792d..4dc7ee38b258 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -91,6 +91,7 @@ enum { struct b53_port { u16 vlan_ctl_mask; struct ethtool_eee eee; + u16 pvid; }; struct b53_vlan { @@ -137,6 +138,8 @@ struct b53_device { unsigned int num_vlans; struct b53_vlan *vlans; + bool vlan_enabled; + bool vlan_filtering_enabled; unsigned int num_ports; struct b53_port *ports; }; diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c index 90f514252987..d9c56a779c08 100644 --- a/drivers/net/dsa/b53/b53_srab.c +++ b/drivers/net/dsa/b53/b53_srab.c @@ -511,9 +511,6 @@ static void b53_srab_prepare_irq(struct platform_device *pdev) /* Clear all pending interrupts */ writel(0xffffffff, priv->regs + B53_SRAB_INTR); - if (dev->pdata && dev->pdata->chip_id != BCM58XX_DEVICE_ID) - return; - for (i = 0; i < B53_N_PORTS; i++) { port = &priv->port_intrs[i]; diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 361fbde76654..f91b8e77d543 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -690,7 +690,7 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds) * port, the other ones have already been disabled during * bcm_sf2_sw_setup */ - for (port = 0; port < 
DSA_MAX_PORTS; port++) { + for (port = 0; port < ds->num_ports; port++) { if (dsa_is_user_port(ds, port) || dsa_is_cpu_port(ds, port)) bcm_sf2_port_disable(ds, port, NULL); } @@ -726,10 +726,11 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port, { struct net_device *p = ds->ports[port].cpu_dp->master; struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); - struct ethtool_wolinfo pwol; + struct ethtool_wolinfo pwol = { }; /* Get the parent device WoL settings */ - p->ethtool_ops->get_wol(p, &pwol); + if (p->ethtool_ops->get_wol) + p->ethtool_ops->get_wol(p, &pwol); /* Advertise the parent device supported settings */ wol->supported = pwol.supported; @@ -750,9 +751,10 @@ static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port, struct net_device *p = ds->ports[port].cpu_dp->master; struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); s8 cpu_port = ds->ports[port].cpu_dp->index; - struct ethtool_wolinfo pwol; + struct ethtool_wolinfo pwol = { }; - p->ethtool_ops->get_wol(p, &pwol); + if (p->ethtool_ops->get_wol) + p->ethtool_ops->get_wol(p, &pwol); if (wol->wolopts & ~pwol.supported) return -EINVAL; @@ -894,12 +896,44 @@ static const struct b53_io_ops bcm_sf2_io_ops = { .write64 = bcm_sf2_core_write64, }; +static void bcm_sf2_sw_get_strings(struct dsa_switch *ds, int port, + u32 stringset, uint8_t *data) +{ + int cnt = b53_get_sset_count(ds, port, stringset); + + b53_get_strings(ds, port, stringset, data); + bcm_sf2_cfp_get_strings(ds, port, stringset, + data + cnt * ETH_GSTRING_LEN); +} + +static void bcm_sf2_sw_get_ethtool_stats(struct dsa_switch *ds, int port, + uint64_t *data) +{ + int cnt = b53_get_sset_count(ds, port, ETH_SS_STATS); + + b53_get_ethtool_stats(ds, port, data); + bcm_sf2_cfp_get_ethtool_stats(ds, port, data + cnt); +} + +static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds, int port, + int sset) +{ + int cnt = b53_get_sset_count(ds, port, sset); + + if (cnt < 0) + return cnt; + + cnt += bcm_sf2_cfp_get_sset_count(ds, port, sset); + + return cnt; +} + static const struct dsa_switch_ops bcm_sf2_ops = { .get_tag_protocol = b53_get_tag_protocol, .setup = bcm_sf2_sw_setup, - .get_strings = b53_get_strings, - .get_ethtool_stats = b53_get_ethtool_stats, - .get_sset_count = b53_get_sset_count, + .get_strings = bcm_sf2_sw_get_strings, + .get_ethtool_stats = bcm_sf2_sw_get_ethtool_stats, + .get_sset_count = bcm_sf2_sw_get_sset_count, .get_ethtool_phy_stats = b53_get_ethtool_phy_stats, .get_phy_flags = bcm_sf2_sw_get_phy_flags, .phylink_validate = bcm_sf2_sw_validate, @@ -1062,7 +1096,6 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) dev_set_drvdata(&pdev->dev, priv); spin_lock_init(&priv->indir_lock); - mutex_init(&priv->stats_mutex); mutex_init(&priv->cfp.lock); INIT_LIST_HEAD(&priv->cfp.rules_list); diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h index faaef320ec48..eb3655bea467 100644 --- a/drivers/net/dsa/bcm_sf2.h +++ b/drivers/net/dsa/bcm_sf2.h @@ -87,9 +87,6 @@ struct bcm_sf2_priv { /* Backing b53_device */ struct b53_device *dev; - /* Mutex protecting access to the MIB counters */ - struct mutex stats_mutex; - struct bcm_sf2_hw_params hw_params; struct bcm_sf2_port_status port_sts[DSA_MAX_PORTS]; @@ -216,5 +213,10 @@ int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port, int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv); void bcm_sf2_cfp_exit(struct dsa_switch *ds); int bcm_sf2_cfp_resume(struct dsa_switch *ds); +void bcm_sf2_cfp_get_strings(struct dsa_switch *ds, int port, + u32 stringset, uint8_t *data); +void 
bcm_sf2_cfp_get_ethtool_stats(struct dsa_switch *ds, int port, + uint64_t *data); +int bcm_sf2_cfp_get_sset_count(struct dsa_switch *ds, int port, int sset); #endif /* __BCM_SF2_H */ diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c index e14663ab6dbc..e6234d209787 100644 --- a/drivers/net/dsa/bcm_sf2_cfp.c +++ b/drivers/net/dsa/bcm_sf2_cfp.c @@ -16,6 +16,7 @@ #include <linux/netdevice.h> #include <net/dsa.h> #include <linux/bitmap.h> +#include <net/flow_offload.h> #include "bcm_sf2.h" #include "bcm_sf2_regs.h" @@ -212,6 +213,7 @@ static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv) static int bcm_sf2_cfp_act_pol_set(struct bcm_sf2_priv *priv, unsigned int rule_index, + int src_port, unsigned int port_num, unsigned int queue_num, bool fwd_map_change) @@ -229,6 +231,10 @@ static int bcm_sf2_cfp_act_pol_set(struct bcm_sf2_priv *priv, else reg = 0; + /* Enable looping back to the original port */ + if (src_port == port_num) + reg |= LOOP_BK_EN; + core_writel(priv, reg, CORE_ACT_POL_DATA0); /* Set classification ID that needs to be put in Broadcom tag */ @@ -257,7 +263,8 @@ static int bcm_sf2_cfp_act_pol_set(struct bcm_sf2_priv *priv, } static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv, - struct ethtool_tcpip4_spec *v4_spec, + struct flow_dissector_key_ipv4_addrs *addrs, + struct flow_dissector_key_ports *ports, unsigned int slice_num, bool mask) { @@ -278,7 +285,7 @@ static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv, * UDF_n_A6 [23:8] * UDF_n_A5 [7:0] */ - reg = be16_to_cpu(v4_spec->pdst) >> 8; + reg = be16_to_cpu(ports->dst) >> 8; if (mask) offset = CORE_CFP_MASK_PORT(3); else @@ -289,9 +296,9 @@ static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv, * UDF_n_A4 [23:8] * UDF_n_A3 [7:0] */ - reg = (be16_to_cpu(v4_spec->pdst) & 0xff) << 24 | - (u32)be16_to_cpu(v4_spec->psrc) << 8 | - (be32_to_cpu(v4_spec->ip4dst) & 0x0000ff00) >> 8; + reg = (be16_to_cpu(ports->dst) & 0xff) << 24 | + (u32)be16_to_cpu(ports->src) << 8 | + (be32_to_cpu(addrs->dst) & 0x0000ff00) >> 8; if (mask) offset = CORE_CFP_MASK_PORT(2); else @@ -302,9 +309,9 @@ static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv, * UDF_n_A2 [23:8] * UDF_n_A1 [7:0] */ - reg = (u32)(be32_to_cpu(v4_spec->ip4dst) & 0xff) << 24 | - (u32)(be32_to_cpu(v4_spec->ip4dst) >> 16) << 8 | - (be32_to_cpu(v4_spec->ip4src) & 0x0000ff00) >> 8; + reg = (u32)(be32_to_cpu(addrs->dst) & 0xff) << 24 | + (u32)(be32_to_cpu(addrs->dst) >> 16) << 8 | + (be32_to_cpu(addrs->src) & 0x0000ff00) >> 8; if (mask) offset = CORE_CFP_MASK_PORT(1); else @@ -317,8 +324,8 @@ static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv, * Slice ID [3:2] * Slice valid [1:0] */ - reg = (u32)(be32_to_cpu(v4_spec->ip4src) & 0xff) << 24 | - (u32)(be32_to_cpu(v4_spec->ip4src) >> 16) << 8 | + reg = (u32)(be32_to_cpu(addrs->src) & 0xff) << 24 | + (u32)(be32_to_cpu(addrs->src) >> 16) << 8 | SLICE_NUM(slice_num) | SLICE_VALID; if (mask) offset = CORE_CFP_MASK_PORT(0); @@ -332,9 +339,13 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port, unsigned int queue_num, struct ethtool_rx_flow_spec *fs) { - struct ethtool_tcpip4_spec *v4_spec, *v4_m_spec; + struct ethtool_rx_flow_spec_input input = {}; const struct cfp_udf_layout *layout; unsigned int slice_num, rule_index; + struct ethtool_rx_flow_rule *flow; + struct flow_match_ipv4_addrs ipv4; + struct flow_match_ports ports; + struct flow_match_ip ip; u8 ip_proto, ip_frag; u8 num_udf; u32 reg; @@ -343,13 +354,9 @@ static int 
bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port, switch (fs->flow_type & ~FLOW_EXT) { case TCP_V4_FLOW: ip_proto = IPPROTO_TCP; - v4_spec = &fs->h_u.tcp_ip4_spec; - v4_m_spec = &fs->m_u.tcp_ip4_spec; break; case UDP_V4_FLOW: ip_proto = IPPROTO_UDP; - v4_spec = &fs->h_u.udp_ip4_spec; - v4_m_spec = &fs->m_u.udp_ip4_spec; break; default: return -EINVAL; @@ -367,11 +374,22 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port, if (rule_index > bcm_sf2_cfp_rule_size(priv)) return -ENOSPC; + input.fs = fs; + flow = ethtool_rx_flow_rule_create(&input); + if (IS_ERR(flow)) + return PTR_ERR(flow); + + flow_rule_match_ipv4_addrs(flow->rule, &ipv4); + flow_rule_match_ports(flow->rule, &ports); + flow_rule_match_ip(flow->rule, &ip); + layout = &udf_tcpip4_layout; /* We only use one UDF slice for now */ slice_num = bcm_sf2_get_slice_number(layout, 0); - if (slice_num == UDF_NUM_SLICES) - return -EINVAL; + if (slice_num == UDF_NUM_SLICES) { + ret = -EINVAL; + goto out_err_flow_rule; + } num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices); @@ -398,7 +416,7 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port, * Reserved [1] * UDF_Valid[8] [0] */ - core_writel(priv, v4_spec->tos << IPTOS_SHIFT | + core_writel(priv, ip.key->tos << IPTOS_SHIFT | ip_proto << IPPROTO_SHIFT | ip_frag << IP_FRAG_SHIFT | udf_upper_bits(num_udf), CORE_CFP_DATA_PORT(6)); @@ -417,8 +435,8 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port, core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5)); /* Program the match and the mask */ - bcm_sf2_cfp_slice_ipv4(priv, v4_spec, slice_num, false); - bcm_sf2_cfp_slice_ipv4(priv, v4_m_spec, SLICE_NUM_MASK, true); + bcm_sf2_cfp_slice_ipv4(priv, ipv4.key, ports.key, slice_num, false); + bcm_sf2_cfp_slice_ipv4(priv, ipv4.mask, ports.mask, SLICE_NUM_MASK, true); /* Insert into TCAM now */ bcm_sf2_cfp_rule_addr_set(priv, rule_index); @@ -426,14 +444,14 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port, ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL); if (ret) { pr_err("TCAM entry at addr %d failed\n", rule_index); - return ret; + goto out_err_flow_rule; } /* Insert into Action and policer RAMs now */ - ret = bcm_sf2_cfp_act_pol_set(priv, rule_index, port_num, + ret = bcm_sf2_cfp_act_pol_set(priv, rule_index, port, port_num, queue_num, true); if (ret) - return ret; + goto out_err_flow_rule; /* Turn on CFP for this rule now */ reg = core_readl(priv, CORE_CFP_CTL_REG); @@ -446,6 +464,10 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port, fs->location = rule_index; return 0; + +out_err_flow_rule: + ethtool_rx_flow_rule_destroy(flow); + return ret; } static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv, @@ -581,9 +603,12 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port, unsigned int queue_num, struct ethtool_rx_flow_spec *fs) { - struct ethtool_tcpip6_spec *v6_spec, *v6_m_spec; + struct ethtool_rx_flow_spec_input input = {}; unsigned int slice_num, rule_index[2]; const struct cfp_udf_layout *layout; + struct ethtool_rx_flow_rule *flow; + struct flow_match_ipv6_addrs ipv6; + struct flow_match_ports ports; u8 ip_proto, ip_frag; int ret = 0; u8 num_udf; @@ -592,13 +617,9 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port, switch (fs->flow_type & ~FLOW_EXT) { case TCP_V6_FLOW: ip_proto = IPPROTO_TCP; - v6_spec = &fs->h_u.tcp_ip6_spec; - v6_m_spec = &fs->m_u.tcp_ip6_spec; break; 
case UDP_V6_FLOW: ip_proto = IPPROTO_UDP; - v6_spec = &fs->h_u.udp_ip6_spec; - v6_m_spec = &fs->m_u.udp_ip6_spec; break; default: return -EINVAL; @@ -645,6 +666,15 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port, goto out_err; } + input.fs = fs; + flow = ethtool_rx_flow_rule_create(&input); + if (IS_ERR(flow)) { + ret = PTR_ERR(flow); + goto out_err; + } + flow_rule_match_ipv6_addrs(flow->rule, &ipv6); + flow_rule_match_ports(flow->rule, &ports); + /* Apply the UDF layout for this filter */ bcm_sf2_cfp_udf_set(priv, layout, slice_num); @@ -688,10 +718,10 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port, core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5)); /* Slice the IPv6 source address and port */ - bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6src, v6_spec->psrc, - slice_num, false); - bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6src, v6_m_spec->psrc, - SLICE_NUM_MASK, true); + bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->src.in6_u.u6_addr32, + ports.key->src, slice_num, false); + bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->src.in6_u.u6_addr32, + ports.mask->src, SLICE_NUM_MASK, true); /* Insert into TCAM now because we need to insert a second rule */ bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]); @@ -699,20 +729,20 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port, ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL); if (ret) { pr_err("TCAM entry at addr %d failed\n", rule_index[0]); - goto out_err; + goto out_err_flow_rule; } /* Insert into Action and policer RAMs now */ - ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port_num, + ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port, port_num, queue_num, false); if (ret) - goto out_err; + goto out_err_flow_rule; /* Now deal with the second slice to chain this rule */ slice_num = bcm_sf2_get_slice_number(layout, slice_num + 1); if (slice_num == UDF_NUM_SLICES) { ret = -EINVAL; - goto out_err; + goto out_err_flow_rule; } num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices); @@ -748,10 +778,10 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port, /* Mask all */ core_writel(priv, 0, CORE_CFP_MASK_PORT(5)); - bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6dst, v6_spec->pdst, slice_num, - false); - bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6dst, v6_m_spec->pdst, - SLICE_NUM_MASK, true); + bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->dst.in6_u.u6_addr32, + ports.key->dst, slice_num, false); + bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->dst.in6_u.u6_addr32, + ports.mask->dst, SLICE_NUM_MASK, true); /* Insert into TCAM now */ bcm_sf2_cfp_rule_addr_set(priv, rule_index[1]); @@ -759,16 +789,16 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port, ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL); if (ret) { pr_err("TCAM entry at addr %d failed\n", rule_index[1]); - goto out_err; + goto out_err_flow_rule; } /* Insert into Action and policer RAMs now, set chain ID to * the one we are chained to */ - ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[1], port_num, + ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[1], port, port_num, queue_num, true); if (ret) - goto out_err; + goto out_err_flow_rule; /* Turn on CFP for this rule now */ reg = core_readl(priv, CORE_CFP_CTL_REG); @@ -784,6 +814,8 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port, return ret; +out_err_flow_rule: + ethtool_rx_flow_rule_destroy(flow); out_err: clear_bit(rule_index[1], priv->cfp.used); return ret; @@ -1169,3
+1201,91 @@ int bcm_sf2_cfp_resume(struct dsa_switch *ds) return ret; } + +static const struct bcm_sf2_cfp_stat { + unsigned int offset; + unsigned int ram_loc; + const char *name; +} bcm_sf2_cfp_stats[] = { + { + .offset = CORE_STAT_GREEN_CNTR, + .ram_loc = GREEN_STAT_RAM, + .name = "Green" + }, + { + .offset = CORE_STAT_YELLOW_CNTR, + .ram_loc = YELLOW_STAT_RAM, + .name = "Yellow" + }, + { + .offset = CORE_STAT_RED_CNTR, + .ram_loc = RED_STAT_RAM, + .name = "Red" + }, +}; + +void bcm_sf2_cfp_get_strings(struct dsa_switch *ds, int port, + u32 stringset, uint8_t *data) +{ + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + unsigned int s = ARRAY_SIZE(bcm_sf2_cfp_stats); + char buf[ETH_GSTRING_LEN]; + unsigned int i, j, iter; + + if (stringset != ETH_SS_STATS) + return; + + for (i = 1; i < priv->num_cfp_rules; i++) { + for (j = 0; j < s; j++) { + snprintf(buf, sizeof(buf), + "CFP%03d_%sCntr", + i, bcm_sf2_cfp_stats[j].name); + iter = (i - 1) * s + j; + strlcpy(data + iter * ETH_GSTRING_LEN, + buf, ETH_GSTRING_LEN); + } + } +} + +void bcm_sf2_cfp_get_ethtool_stats(struct dsa_switch *ds, int port, + uint64_t *data) +{ + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + unsigned int s = ARRAY_SIZE(bcm_sf2_cfp_stats); + const struct bcm_sf2_cfp_stat *stat; + unsigned int i, j, iter; + struct cfp_rule *rule; + int ret; + + mutex_lock(&priv->cfp.lock); + for (i = 1; i < priv->num_cfp_rules; i++) { + rule = bcm_sf2_cfp_rule_find(priv, port, i); + if (!rule) + continue; + + for (j = 0; j < s; j++) { + stat = &bcm_sf2_cfp_stats[j]; + + bcm_sf2_cfp_rule_addr_set(priv, i); + ret = bcm_sf2_cfp_op(priv, stat->ram_loc | OP_SEL_READ); + if (ret) + continue; + + iter = (i - 1) * s + j; + data[iter] = core_readl(priv, stat->offset); + } + + } + mutex_unlock(&priv->cfp.lock); +} + +int bcm_sf2_cfp_get_sset_count(struct dsa_switch *ds, int port, int sset) +{ + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + + if (sset != ETH_SS_STATS) + return 0; + + /* 3 counters per CFP rule */ + return (priv->num_cfp_rules - 1) * ARRAY_SIZE(bcm_sf2_cfp_stats); +} diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h index 0a1e530d52b7..67f056206f37 100644 --- a/drivers/net/dsa/bcm_sf2_regs.h +++ b/drivers/net/dsa/bcm_sf2_regs.h @@ -400,6 +400,10 @@ enum bcm_sf2_reg_offs { #define CORE_RATE_METER6 0x281e0 #define CIR_REF_CNT_MASK 0x7ffff +#define CORE_STAT_GREEN_CNTR 0x28200 +#define CORE_STAT_YELLOW_CNTR 0x28210 +#define CORE_STAT_RED_CNTR 0x28220 + #define CORE_CFP_CTL_REG 0x28400 #define CFP_EN_MAP_MASK 0x1ff diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index 89ed059bb576..674d77e1b029 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -397,6 +397,7 @@ static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port, struct ksz_port *p = &dev->ports[port]; u8 data; int member = -1; + int forward = dev->member; ksz_pread8(dev, port, P_STP_CTRL, &data); data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE); @@ -464,10 +465,10 @@ static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port, } /* When the topology has changed, the function ksz_update_port_member - * should be called to modify port forwarding behavior. However - * as the offload_fwd_mark indication cannot be reported here - * the switch forwarding function is not enabled. + * should be called to modify port forwarding behavior. */ + if (forward != dev->member) + ksz_update_port_member(dev, port); } static void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
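Aside: the ksz9477 hunk above snapshots the switch-wide membership word (dev->member) before the new STP state is programmed, then reapplies forwarding only if handling that state actually changed it. A minimal standalone sketch of the snapshot-and-compare pattern; struct sw_state and reprogram_forwarding() are hypothetical stand-ins for struct ksz_device and ksz_update_port_member(), not the driver's real types:

/* Sketch only: reapply forwarding only when membership changed. */
struct sw_state { int member; };

static void reprogram_forwarding(struct sw_state *dev, int port)
{
        /* would rewrite the port-membership registers here */
}

static void stp_state_set(struct sw_state *dev, int port, int new_member)
{
        int forward = dev->member;      /* snapshot before reconfiguring */

        dev->member = new_member;       /* state handling may change membership */
        if (forward != dev->member)     /* topology changed: reapply forwarding */
                reprogram_forwarding(dev, port);
}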
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 8dca2c949e73..cc7ce06b6d58 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -261,6 +261,7 @@ static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip) unsigned int sub_irq; unsigned int n; u16 reg; + u16 ctl1; int err; mutex_lock(&chip->reg_lock); @@ -270,13 +271,28 @@ static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip) if (err) goto out; - for (n = 0; n < chip->g1_irq.nirqs; ++n) { - if (reg & (1 << n)) { - sub_irq = irq_find_mapping(chip->g1_irq.domain, n); - handle_nested_irq(sub_irq); - ++nhandled; + do { + for (n = 0; n < chip->g1_irq.nirqs; ++n) { + if (reg & (1 << n)) { + sub_irq = irq_find_mapping(chip->g1_irq.domain, + n); + handle_nested_irq(sub_irq); + ++nhandled; + } } - } + + mutex_lock(&chip->reg_lock); + err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &ctl1); + if (err) + goto unlock; + err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &reg); +unlock: + mutex_unlock(&chip->reg_lock); + if (err) + goto out; + ctl1 &= GENMASK(chip->g1_irq.nirqs, 0); + } while (reg & ctl1); + out: return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE); } @@ -647,8 +663,10 @@ static void mv88e6390_phylink_validate(struct mv88e6xxx_chip *chip, int port, unsigned long *mask, struct phylink_link_state *state) { - if (port >= 9) + if (port >= 9) { phylink_set(mask, 2500baseX_Full); + phylink_set(mask, 2500baseT_Full); + } /* No ethtool bits for 200Mbps */ phylink_set(mask, 1000baseT_Full); @@ -4674,6 +4692,22 @@ static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port, return err; } +static int mv88e6xxx_port_egress_floods(struct dsa_switch *ds, int port, + bool unicast, bool multicast) +{ + struct mv88e6xxx_chip *chip = ds->priv; + int err = -EOPNOTSUPP; + + mutex_lock(&chip->reg_lock); + if (chip->info->ops->port_set_egress_floods) + err = chip->info->ops->port_set_egress_floods(chip, port, + unicast, + multicast); + mutex_unlock(&chip->reg_lock); + + return err; +} + static const struct dsa_switch_ops mv88e6xxx_switch_ops = { #if IS_ENABLED(CONFIG_NET_DSA_LEGACY) .probe = mv88e6xxx_drv_probe, @@ -4701,6 +4735,7 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .set_ageing_time = mv88e6xxx_set_ageing_time, .port_bridge_join = mv88e6xxx_port_bridge_join, .port_bridge_leave = mv88e6xxx_port_bridge_leave, + .port_egress_floods = mv88e6xxx_port_egress_floods, .port_stp_state_set = mv88e6xxx_port_stp_state_set, .port_fast_age = mv88e6xxx_port_fast_age, .port_vlan_filtering = mv88e6xxx_port_vlan_filtering, @@ -4764,6 +4799,21 @@ static const void *pdata_device_get_match_data(struct device *dev) return NULL; } +/* There is no suspend to RAM support at the DSA level yet; the switch + * configuration would be lost after a power cycle, so prevent it from being + * suspended. + */ +static int __maybe_unused mv88e6xxx_suspend(struct device *dev) +{ + return -EOPNOTSUPP; +} + +static int __maybe_unused mv88e6xxx_resume(struct device *dev) +{ + return 0; +} + +static SIMPLE_DEV_PM_OPS(mv88e6xxx_pm_ops, mv88e6xxx_suspend, mv88e6xxx_resume); + static int mv88e6xxx_probe(struct mdio_device *mdiodev) { struct dsa_mv88e6xxx_pdata *pdata = mdiodev->dev.platform_data; @@ -4948,6 +4998,7 @@ static struct mdio_driver mv88e6xxx_driver = { .mdiodrv.driver = { .name = "mv88e6085", .of_match_table = mv88e6xxx_of_match, + .pm = &mv88e6xxx_pm_ops, }, };
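Aside: the interrupt-thread change in chip.c above turns a single pass over the sub-interrupt sources into a loop: it re-reads the global status register under the register lock, masks it with the sources currently enabled in CTL1, and keeps dispatching until no enabled source is still pending, so an interrupt that re-asserts during servicing is not lost. A sketch of the loop shape; read_ctl1() and read_status() are hypothetical stand-ins for the locked mv88e6xxx_g1_read() calls, while irq_find_mapping() and handle_nested_irq() are the kernel APIs the driver really uses:

/* Sketch only: service nested IRQs until enabled-and-pending reads empty. */
do {
        for (n = 0; n < nirqs; n++) {
                if (status & BIT(n))
                        handle_nested_irq(irq_find_mapping(domain, n));
        }

        enabled = read_ctl1() & GENMASK(nirqs, 0); /* currently enabled sources */
        status = read_status();                    /* re-sample pending bits */
} while (status & enabled);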
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c index 5200e4bdce93..ea243840ee0f 100644 --- a/drivers/net/dsa/mv88e6xxx/global1_atu.c +++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c @@ -314,6 +314,7 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id) { struct mv88e6xxx_chip *chip = dev_id; struct mv88e6xxx_atu_entry entry; + int spid; int err; u16 val; @@ -336,6 +337,8 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id) if (err) goto out; + spid = entry.state; + if (val & MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION) { dev_err_ratelimited(chip->dev, "ATU age out violation for %pM\n", @@ -344,23 +347,23 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id) if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) { dev_err_ratelimited(chip->dev, - "ATU member violation for %pM portvec %x\n", - entry.mac, entry.portvec); - chip->ports[entry.portvec].atu_member_violation++; + "ATU member violation for %pM portvec %x spid %d\n", + entry.mac, entry.portvec, spid); + chip->ports[spid].atu_member_violation++; } if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) { dev_err_ratelimited(chip->dev, - "ATU miss violation for %pM portvec %x\n", - entry.mac, entry.portvec); - chip->ports[entry.portvec].atu_miss_violation++; + "ATU miss violation for %pM portvec %x spid %d\n", + entry.mac, entry.portvec, spid); + chip->ports[spid].atu_miss_violation++; } if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION) { dev_err_ratelimited(chip->dev, - "ATU full violation for %pM portvec %x\n", - entry.mac, entry.portvec); - chip->ports[entry.portvec].atu_full_violation++; + "ATU full violation for %pM portvec %x spid %d\n", + entry.mac, entry.portvec, spid); + chip->ports[spid].atu_full_violation++; } mutex_unlock(&chip->reg_lock); diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index a4b6cda38016..195a8a87b984 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -443,6 +443,18 @@ qca8k_set_pad_ctrl(struct qca8k_priv *priv, int port, int mode) val = QCA8K_PORT_PAD_RGMII_EN; qca8k_write(priv, reg, val); break; + case PHY_INTERFACE_MODE_RGMII_ID: + /* RGMII_ID needs internal delay.
This is enabled through + * PORT5_PAD_CTRL for all ports, rather than individual port + * registers + */ + qca8k_write(priv, reg, + QCA8K_PORT_PAD_RGMII_EN | + QCA8K_PORT_PAD_RGMII_TX_DELAY(QCA8K_MAX_DELAY) | + QCA8K_PORT_PAD_RGMII_RX_DELAY(QCA8K_MAX_DELAY)); + qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL, + QCA8K_PORT_PAD_RGMII_RX_DELAY_EN); + break; case PHY_INTERFACE_MODE_SGMII: qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN); break; diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h index 613fe5c50236..d146e54c8a6c 100644 --- a/drivers/net/dsa/qca8k.h +++ b/drivers/net/dsa/qca8k.h @@ -40,6 +40,7 @@ ((0x8 + (x & 0x3)) << 22) #define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) \ ((0x10 + (x & 0x3)) << 20) +#define QCA8K_MAX_DELAY 3 #define QCA8K_PORT_PAD_RGMII_RX_DELAY_EN BIT(24) #define QCA8K_PORT_PAD_SGMII_EN BIT(7) #define QCA8K_REG_MODULE_EN 0x030 diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c index b648e3f95c01..808abb6b3671 100644 --- a/drivers/net/ethernet/3com/3c515.c +++ b/drivers/net/ethernet/3com/3c515.c @@ -1177,7 +1177,7 @@ static irqreturn_t corkscrew_interrupt(int irq, void *dev_id) if (inl(ioaddr + DownListPtr) == isa_virt_to_bus(&lp->tx_ring[entry])) break; /* It still hasn't been processed. */ if (lp->tx_skbuff[entry]) { - dev_kfree_skb_irq(lp->tx_skbuff[entry]); + dev_consume_skb_irq(lp->tx_skbuff[entry]); lp->tx_skbuff[entry] = NULL; } dirty_tx++; @@ -1192,7 +1192,7 @@ static irqreturn_t corkscrew_interrupt(int irq, void *dev_id) #ifdef VORTEX_BUS_MASTER if (status & DMADone) { outw(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */ - dev_kfree_skb_irq(lp->tx_skb); /* Release the transferred buffer */ + dev_consume_skb_irq(lp->tx_skb); /* Release the transferred buffer */ netif_wake_queue(dev); } #endif diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 40f421dbdf57..147051404194 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -2307,7 +2307,7 @@ _vortex_interrupt(int irq, struct net_device *dev) dma_unmap_single(vp->gendev, vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, DMA_TO_DEVICE); pkts_compl++; bytes_compl += vp->tx_skb->len; - dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */ + dev_consume_skb_irq(vp->tx_skb); /* Release the transferred buffer */ if (ioread16(ioaddr + TxFree) > 1536) { /* * AKPM: FIXME: I don't think we need this. 
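Aside: the dev_kfree_skb_irq() to dev_consume_skb_irq() conversions running through these TX-completion handlers are semantic rather than functional. Both free the skb from interrupt context, but the consume variant fires the consume_skb tracepoint instead of kfree_skb, so drop-monitoring tools no longer report every successfully transmitted packet as a drop. The recurring shape, sketched against a hypothetical TX ring entry:

/* TX completion: the frame left the wire, so freeing its skb is a
 * consume (successful delivery), not a drop.
 */
if (ring->tx_skb[entry]) {
        dev_consume_skb_irq(ring->tx_skb[entry]); /* was dev_kfree_skb_irq() */
        ring->tx_skb[entry] = NULL;
}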
If the queue was stopped due to @@ -2449,7 +2449,7 @@ _boomerang_interrupt(int irq, struct net_device *dev) #endif pkts_compl++; bytes_compl += skb->len; - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); vp->tx_skbuff[entry] = NULL; } else { pr_debug("boomerang_interrupt: no skb!\n"); diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c index 097467f44b0d..816540e6beac 100644 --- a/drivers/net/ethernet/adaptec/starfire.c +++ b/drivers/net/ethernet/adaptec/starfire.c @@ -1390,7 +1390,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance) } } - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); } np->tx_done_q[np->tx_done].status = 0; np->tx_done = (np->tx_done + 1) % DONE_Q_SIZE; diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index a70bb1bb90e7..a6eacf2099c3 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -2663,11 +2663,6 @@ static int ena_restore_device(struct ena_adapter *adapter) goto err_device_destroy; } - clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); - /* Make sure we don't have a race with AENQ Links state handler */ - if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags)) - netif_carrier_on(adapter->netdev); - rc = ena_enable_msix_and_set_admin_interrupts(adapter, adapter->num_queues); if (rc) { @@ -2684,6 +2679,11 @@ static int ena_restore_device(struct ena_adapter *adapter) } set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); + + clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); + if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags)) + netif_carrier_on(adapter->netdev); + mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); dev_err(&pdev->dev, "Device reset completed successfully, Driver info: %s\n", diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index dc8b6173d8d8..63870072cbbd 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -45,7 +45,7 @@ #define DRV_MODULE_VER_MAJOR 2 #define DRV_MODULE_VER_MINOR 0 -#define DRV_MODULE_VER_SUBMINOR 2 +#define DRV_MODULE_VER_SUBMINOR 3 #define DRV_MODULE_NAME "ena" #ifndef DRV_MODULE_VERSION diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index e833d1b3fe18..e5073aeea06a 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -1167,7 +1167,7 @@ static int au1000_probe(struct platform_device *pdev) /* Allocate the data buffers * Snooping works fine with eth on all au1xxx */ - aup->vaddr = (u32)dma_alloc_attrs(NULL, MAX_BUF_SIZE * + aup->vaddr = (u32)dma_alloc_attrs(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS), &aup->dma_addr, 0, DMA_ATTR_NON_CONSISTENT); @@ -1349,7 +1349,7 @@ err_remap3: err_remap2: iounmap(aup->mac); err_remap1: - dma_free_attrs(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS), + dma_free_attrs(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS), (void *)aup->vaddr, aup->dma_addr, DMA_ATTR_NON_CONSISTENT); err_vaddr: @@ -1383,7 +1383,7 @@ static int au1000_remove(struct platform_device *pdev) if (aup->tx_db_inuse[i]) au1000_ReleaseDB(aup, aup->tx_db_inuse[i]); - dma_free_attrs(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS), + dma_free_attrs(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS), (void *)aup->vaddr, aup->dma_addr, DMA_ATTR_NON_CONSISTENT); diff --git a/drivers/net/ethernet/amd/lance.c 
b/drivers/net/ethernet/amd/lance.c index b56d84c7df46..f90b454b1642 100644 --- a/drivers/net/ethernet/amd/lance.c +++ b/drivers/net/ethernet/amd/lance.c @@ -1084,7 +1084,7 @@ static irqreturn_t lance_interrupt(int irq, void *dev_id) /* We must free the original skb if it's not a data-only copy in the bounce buffer. */ if (lp->tx_skbuff[entry]) { - dev_kfree_skb_irq(lp->tx_skbuff[entry]); + dev_consume_skb_irq(lp->tx_skbuff[entry]); lp->tx_skbuff[entry] = NULL; } dirty_tx++; diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c index 8931ce6bab7b..87ff5d6d1b22 100644 --- a/drivers/net/ethernet/amd/ni65.c +++ b/drivers/net/ethernet/amd/ni65.c @@ -1028,7 +1028,7 @@ static void ni65_xmit_intr(struct net_device *dev,int csr0) #ifdef XMT_VIA_SKB if(p->tmd_skb[p->tmdlast]) { - dev_kfree_skb_irq(p->tmd_skb[p->tmdlast]); + dev_consume_skb_irq(p->tmd_skb[p->tmdlast]); p->tmd_skb[p->tmdlast] = NULL; } #endif diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c index 68b9ee489489..4d9819d2894d 100644 --- a/drivers/net/ethernet/apple/mace.c +++ b/drivers/net/ethernet/apple/mace.c @@ -764,7 +764,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id) dev->stats.tx_bytes += mp->tx_bufs[i]->len; ++dev->stats.tx_packets; } - dev_kfree_skb_irq(mp->tx_bufs[i]); + dev_consume_skb_irq(mp->tx_bufs[i]); --mp->tx_active; if (++i >= N_TX_RING) i = 0; diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index 4406325fdd9f..ff3d68532f5f 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -148,7 +148,7 @@ static void arc_emac_tx_clean(struct net_device *ndev) dma_unmap_len(tx_buff, len), DMA_TO_DEVICE); /* return the sk_buff to system */ - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); txbd->data = 0; txbd->info = 0; diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index 3164aad29bcf..9dfe6a9431e6 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -1259,7 +1259,7 @@ static bool atl1e_clean_tx_irq(struct atl1e_adapter *adapter) } if (tx_buffer->skb) { - dev_kfree_skb_irq(tx_buffer->skb); + dev_consume_skb_irq(tx_buffer->skb); tx_buffer->skb = NULL; } diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index 63edc5706c09..9e07b469066a 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -2088,7 +2088,7 @@ static int atl1_intr_tx(struct atl1_adapter *adapter) } if (buffer_info->skb) { - dev_kfree_skb_irq(buffer_info->skb); + dev_consume_skb_irq(buffer_info->skb); buffer_info->skb = NULL; } diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index f9521d0274b7..bc3ac369cbe3 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -134,6 +134,10 @@ static void bcm_sysport_set_rx_csum(struct net_device *dev, priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM); reg = rxchk_readl(priv, RXCHK_CONTROL); + /* Clear L2 header checks, which would prevent BPDUs + * from being received. 
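Aside: the bcmsysport hunk opening here is a read-modify-write fix; RXCHK_L2_HDR_DIS is now cleared before RXCHK_EN is applied, so a stale L2-header-check-disable bit can no longer survive reconfiguration and block BPDU reception. The shape of the fix, using the driver's rxchk register helpers (the final write-back sits outside the quoted hunk):

/* Clear the bit unconditionally so no previous configuration leaks
 * through the read-modify-write.
 */
reg = rxchk_readl(priv, RXCHK_CONTROL);
reg &= ~RXCHK_L2_HDR_DIS;
if (priv->rx_chk_en)
        reg |= RXCHK_EN;
else
        reg &= ~RXCHK_EN;
rxchk_writel(priv, reg, RXCHK_CONTROL);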
+ */ + reg &= ~RXCHK_L2_HDR_DIS; if (priv->rx_chk_en) reg |= RXCHK_EN; else @@ -520,7 +524,6 @@ static void bcm_sysport_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct bcm_sysport_priv *priv = netdev_priv(dev); - u32 reg; wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; wol->wolopts = priv->wolopts; @@ -528,11 +531,7 @@ static void bcm_sysport_get_wol(struct net_device *dev, if (!(priv->wolopts & WAKE_MAGICSECURE)) return; - /* Return the programmed SecureOn password */ - reg = umac_readl(priv, UMAC_PSW_MS); - put_unaligned_be16(reg, &wol->sopass[0]); - reg = umac_readl(priv, UMAC_PSW_LS); - put_unaligned_be32(reg, &wol->sopass[2]); + memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass)); } static int bcm_sysport_set_wol(struct net_device *dev, @@ -548,13 +547,8 @@ static int bcm_sysport_set_wol(struct net_device *dev, if (wol->wolopts & ~supported) return -EINVAL; - /* Program the SecureOn password */ - if (wol->wolopts & WAKE_MAGICSECURE) { - umac_writel(priv, get_unaligned_be16(&wol->sopass[0]), - UMAC_PSW_MS); - umac_writel(priv, get_unaligned_be32(&wol->sopass[2]), - UMAC_PSW_LS); - } + if (wol->wolopts & WAKE_MAGICSECURE) + memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass)); /* Flag the device and relevant IRQ as wakeup capable */ if (wol->wolopts) { @@ -2649,13 +2643,18 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv) unsigned int index, i = 0; u32 reg; - /* Password has already been programmed */ reg = umac_readl(priv, UMAC_MPD_CTRL); if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) reg |= MPD_EN; reg &= ~PSW_EN; - if (priv->wolopts & WAKE_MAGICSECURE) + if (priv->wolopts & WAKE_MAGICSECURE) { + /* Program the SecureOn password */ + umac_writel(priv, get_unaligned_be16(&priv->sopass[0]), + UMAC_PSW_MS); + umac_writel(priv, get_unaligned_be32(&priv->sopass[2]), + UMAC_PSW_LS); reg |= PSW_EN; + } umac_writel(priv, reg, UMAC_MPD_CTRL); if (priv->wolopts & WAKE_FILTER) { diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index 0887e6356649..0b192fea9c5d 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -12,6 +12,7 @@ #define __BCM_SYSPORT_H #include <linux/bitmap.h> +#include <linux/ethtool.h> #include <linux/if_vlan.h> #include <linux/net_dim.h> @@ -778,6 +779,7 @@ struct bcm_sysport_priv { unsigned int crc_fwd:1; u16 rev; u32 wolopts; + u8 sopass[SOPASS_MAX]; unsigned int wol_irq_disabled:1; /* MIB related fields */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 0cec82450e19..626b491f7674 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -11998,7 +11998,7 @@ static void validate_set_si_mode(struct bnx2x *bp) static int bnx2x_get_hwinfo(struct bnx2x *bp) { int /*abs*/func = BP_ABS_FUNC(bp); - int vn, mfw_vn; + int vn; u32 val = 0, val2 = 0; int rc = 0; @@ -12083,12 +12083,10 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp) /* * Initialize MF configuration */ - bp->mf_ov = 0; bp->mf_mode = 0; bp->mf_sub_mode = 0; vn = BP_VN(bp); - mfw_vn = BP_FW_MB_IDX(bp); if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) { BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n", diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 8e0a317b31f7..a9bdc21873d3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ 
b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -1654,13 +1654,9 @@ static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp, { int i, j; struct bnx2x_vf_mac_vlan_filters *fl = NULL; - size_t fsz; - fsz = tlv->n_mac_vlan_filters * - sizeof(struct bnx2x_vf_mac_vlan_filter) + - sizeof(struct bnx2x_vf_mac_vlan_filters); - - fl = kzalloc(fsz, GFP_KERNEL); + fl = kzalloc(struct_size(fl, filters, tlv->n_mac_vlan_filters), + GFP_KERNEL); if (!fl) return -ENOMEM; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 6a512871176b..a9edf946d3bf 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1,7 +1,7 @@ /* Broadcom NetXtreme-C/E network driver. * * Copyright (c) 2014-2016 Broadcom Corporation - * Copyright (c) 2016-2018 Broadcom Limited + * Copyright (c) 2016-2019 Broadcom Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -31,6 +31,7 @@ #include <asm/page.h> #include <linux/time.h> #include <linux/mii.h> +#include <linux/mdio.h> #include <linux/if.h> #include <linux/if_vlan.h> #include <linux/if_bridge.h> @@ -112,6 +113,7 @@ enum board_idx { BCM57454, BCM5745x_NPAR, BCM57508, + BCM57504, BCM58802, BCM58804, BCM58808, @@ -155,6 +157,7 @@ static const struct { [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" }, [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, + [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" }, [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, @@ -201,6 +204,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = { { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 }, { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 }, { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 }, + { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 }, { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 }, { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 }, #ifdef CONFIG_BNXT_SRIOV @@ -4973,12 +4977,18 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; u32 map_idx = ring->map_idx; + unsigned int vector; + vector = bp->irq_tbl[map_idx].vector; + disable_irq_nosync(vector); rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); - if (rc) + if (rc) { + enable_irq(vector); goto err_out; + } bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id); bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); + enable_irq(vector); bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; if (!i) { @@ -6674,6 +6684,10 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp) VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED) bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE; + if (dev_caps_cfg & + VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF; + hwrm_ver_get_exit: mutex_unlock(&bp->hwrm_cmd_lock); return rc; @@ -8608,24 +8622,88 @@ static int bnxt_close(struct net_device *dev) return 0; } +static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg, + u16 *val) +{ + 
struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_port_phy_mdio_read_input req = {0}; + int rc; + + if (bp->hwrm_spec_code < 0x10a00) + return -EOPNOTSUPP; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1); + req.port_id = cpu_to_le16(bp->pf.port_id); + req.phy_addr = phy_addr; + req.reg_addr = cpu_to_le16(reg & 0x1f); + if (bp->link_info.support_speeds & BNXT_LINK_SPEED_MSK_10GB) { + req.cl45_mdio = 1; + req.phy_addr = mdio_phy_id_prtad(phy_addr); + req.dev_addr = mdio_phy_id_devad(phy_addr); + req.reg_addr = cpu_to_le16(reg); + } + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) + *val = le16_to_cpu(resp->reg_data); + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg, + u16 val) +{ + struct hwrm_port_phy_mdio_write_input req = {0}; + + if (bp->hwrm_spec_code < 0x10a00) + return -EOPNOTSUPP; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1); + req.port_id = cpu_to_le16(bp->pf.port_id); + req.phy_addr = phy_addr; + req.reg_addr = cpu_to_le16(reg & 0x1f); + if (bp->link_info.support_speeds & BNXT_LINK_SPEED_MSK_10GB) { + req.cl45_mdio = 1; + req.phy_addr = mdio_phy_id_prtad(phy_addr); + req.dev_addr = mdio_phy_id_devad(phy_addr); + req.reg_addr = cpu_to_le16(reg); + } + req.reg_data = cpu_to_le16(val); + + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + /* rtnl_lock held */ static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { + struct mii_ioctl_data *mdio = if_mii(ifr); + struct bnxt *bp = netdev_priv(dev); + int rc; + switch (cmd) { case SIOCGMIIPHY: + mdio->phy_id = bp->link_info.phy_addr; + /* fallthru */ case SIOCGMIIREG: { + u16 mii_regval = 0; + if (!netif_running(dev)) return -EAGAIN; - return 0; + rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num, + &mii_regval); + mdio->val_out = mii_regval; + return rc; } case SIOCSMIIREG: if (!netif_running(dev)) return -EAGAIN; - return 0; + return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num, + mdio->val_in); default: /* do nothing */ @@ -9981,8 +10059,11 @@ static int bnxt_get_phys_port_name(struct net_device *dev, char *buf, return 0; } -int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr) +int bnxt_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) { + struct bnxt *bp = netdev_priv(dev); + if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) return -EOPNOTSUPP; @@ -9990,27 +10071,12 @@ int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr) if (!BNXT_PF(bp)) return -EOPNOTSUPP; - switch (attr->id) { - case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: - attr->u.ppid.id_len = sizeof(bp->switch_id); - memcpy(attr->u.ppid.id, bp->switch_id, attr->u.ppid.id_len); - break; - default: - return -EOPNOTSUPP; - } - return 0; -} + ppid->id_len = sizeof(bp->switch_id); + memcpy(ppid->id, bp->switch_id, ppid->id_len); -static int bnxt_swdev_port_attr_get(struct net_device *dev, - struct switchdev_attr *attr) -{ - return bnxt_port_attr_get(netdev_priv(dev), attr); + return 0; } -static const struct switchdev_ops bnxt_switchdev_ops = { - .switchdev_port_attr_get = bnxt_swdev_port_attr_get -}; - static const struct net_device_ops bnxt_netdev_ops = { .ndo_open = bnxt_open, .ndo_start_xmit = bnxt_start_xmit, @@ -10042,6 +10108,7 @@ static const struct net_device_ops bnxt_netdev_ops = { .ndo_bpf = bnxt_xdp, .ndo_bridge_getlink = 
bnxt_bridge_getlink, .ndo_bridge_setlink = bnxt_bridge_setlink, + .ndo_get_port_parent_id = bnxt_get_port_parent_id, .ndo_get_phys_port_name = bnxt_get_phys_port_name }; @@ -10400,7 +10467,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->netdev_ops = &bnxt_netdev_ops; dev->watchdog_timeo = BNXT_TX_TIMEOUT; dev->ethtool_ops = &bnxt_ethtool_ops; - SWITCHDEV_SET_OPS(dev, &bnxt_switchdev_ops); pci_set_drvdata(pdev, dev); rc = bnxt_alloc_hwrm_resources(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 5c886a700bcc..ecbe7d28a723 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -22,7 +22,6 @@ #include <linux/rhashtable.h> #include <net/devlink.h> #include <net/dst_metadata.h> -#include <net/switchdev.h> #include <net/xdp.h> #include <linux/net_dim.h> @@ -946,6 +945,7 @@ struct bnxt_vf_info { * stored by PF. */ u16 vlan; + u16 func_qcfg_flags; u32 flags; #define BNXT_VF_QOS 0x1 #define BNXT_VF_SPOOFCHK 0x2 @@ -1479,6 +1479,7 @@ struct bnxt { #define BNXT_FW_CAP_IF_CHANGE 0x00000010 #define BNXT_FW_CAP_KONG_MB_CHNL 0x00000080 #define BNXT_FW_CAP_OVS_64BIT_HANDLE 0x00000400 + #define BNXT_FW_CAP_TRUSTED_VF 0x00000800 #define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM) u32 hwrm_spec_code; @@ -1795,7 +1796,8 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, int bnxt_setup_mq_tc(struct net_device *dev, u8 tc); int bnxt_get_max_rings(struct bnxt *, int *, int *, bool); int bnxt_restore_pf_fw_resources(struct bnxt *bp); -int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr); +int bnxt_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid); void bnxt_dim_work(struct work_struct *work); int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index a6abfa4fb168..e1feb97bcd81 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -37,8 +37,6 @@ static const struct bnxt_dl_nvm_param nvm_params[] = { NVM_OFF_MSIX_VEC_PER_PF_MIN, BNXT_NVM_SHARED_CFG, 7}, {BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, NVM_OFF_DIS_GRE_VER_CHECK, BNXT_NVM_SHARED_CFG, 1}, - - {DEVLINK_PARAM_GENERIC_ID_WOL, NVM_OFF_WOL, BNXT_NVM_PORT_CFG, 1}, }; static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, @@ -72,8 +70,7 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, bytesize = roundup(nvm_param.num_bits, BITS_PER_BYTE) / BITS_PER_BYTE; switch (bytesize) { case 1: - if (nvm_param.num_bits == 1 && - nvm_param.id != DEVLINK_PARAM_GENERIC_ID_WOL) + if (nvm_param.num_bits == 1) buf = &val->vbool; else buf = &val->vu8; @@ -167,17 +164,6 @@ static int bnxt_dl_msix_validate(struct devlink *dl, u32 id, return 0; } -static int bnxt_dl_wol_validate(struct devlink *dl, u32 id, - union devlink_param_value val, - struct netlink_ext_ack *extack) -{ - if (val.vu8 && val.vu8 != DEVLINK_PARAM_WAKE_MAGIC) { - NL_SET_ERR_MSG_MOD(extack, "WOL type is not supported"); - return -EINVAL; - } - return 0; -} - static const struct devlink_param bnxt_dl_params[] = { DEVLINK_PARAM_GENERIC(ENABLE_SRIOV, BIT(DEVLINK_PARAM_CMODE_PERMANENT), @@ -203,9 +189,6 @@ static const struct devlink_param bnxt_dl_params[] = { }; static const struct devlink_param bnxt_dl_port_params[] = { - DEVLINK_PARAM_GENERIC(WOL, 
BIT(DEVLINK_PARAM_CMODE_PERMANENT), - bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set, - bnxt_dl_wol_validate), }; int bnxt_dl_register(struct bnxt *bp) @@ -258,6 +241,9 @@ int bnxt_dl_register(struct bnxt *bp) netdev_err(bp->dev, "devlink_port_params_register failed"); goto err_dl_port_unreg; } + + devlink_params_publish(dl); + return 0; err_dl_port_unreg: diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h index da065ca84cc7..5b6b2c7d97cf 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h @@ -35,7 +35,6 @@ static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl) #define NVM_OFF_MSIX_VEC_PER_PF_MAX 108 #define NVM_OFF_MSIX_VEC_PER_PF_MIN 114 -#define NVM_OFF_WOL 152 #define NVM_OFF_IGNORE_ARI 164 #define NVM_OFF_DIS_GRE_VER_CHECK 171 #define NVM_OFF_ENABLE_SRIOV 401 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index 0a0995894ddb..b6c610339501 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -1,7 +1,7 @@ /* Broadcom NetXtreme-C/E network driver. * * Copyright (c) 2014-2016 Broadcom Corporation - * Copyright (c) 2016-2018 Broadcom Limited + * Copyright (c) 2016-2019 Broadcom Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -98,6 +98,7 @@ struct hwrm_short_input { struct cmd_nums { __le16 req_type; #define HWRM_VER_GET 0x0UL + #define HWRM_ERROR_RECOVERY_QCFG 0xcUL #define HWRM_FUNC_DRV_IF_CHANGE 0xdUL #define HWRM_FUNC_BUF_UNRGTR 0xeUL #define HWRM_FUNC_VF_CFG 0xfUL @@ -221,6 +222,7 @@ struct cmd_nums { #define HWRM_CFA_METER_PROFILE_CFG 0xf7UL #define HWRM_CFA_METER_INSTANCE_ALLOC 0xf8UL #define HWRM_CFA_METER_INSTANCE_FREE 0xf9UL + #define HWRM_CFA_METER_INSTANCE_CFG 0xfaUL #define HWRM_CFA_VFR_ALLOC 0xfdUL #define HWRM_CFA_VFR_FREE 0xfeUL #define HWRM_CFA_VF_PAIR_ALLOC 0x100UL @@ -269,6 +271,7 @@ struct cmd_nums { #define HWRM_ENGINE_CKV_FLUSH 0x133UL #define HWRM_ENGINE_CKV_RNG_GET 0x134UL #define HWRM_ENGINE_CKV_KEY_GEN 0x135UL + #define HWRM_ENGINE_CKV_KEY_LABEL_CFG 0x136UL #define HWRM_ENGINE_QG_CONFIG_QUERY 0x13cUL #define HWRM_ENGINE_QG_QUERY 0x13dUL #define HWRM_ENGINE_QG_METER_PROFILE_CONFIG_QUERY 0x13eUL @@ -296,6 +299,7 @@ struct cmd_nums { #define HWRM_ENGINE_NQ_ALLOC 0x162UL #define HWRM_ENGINE_NQ_FREE 0x163UL #define HWRM_ENGINE_ON_DIE_RQE_CREDITS 0x164UL + #define HWRM_ENGINE_FUNC_QCFG 0x165UL #define HWRM_FUNC_RESOURCE_QCAPS 0x190UL #define HWRM_FUNC_VF_RESOURCE_CFG 0x191UL #define HWRM_FUNC_BACKING_STORE_QCAPS 0x192UL @@ -379,15 +383,15 @@ struct hwrm_err_output { }; #define HWRM_NA_SIGNATURE ((__le32)(-1)) #define HWRM_MAX_REQ_LEN 128 -#define HWRM_MAX_RESP_LEN 280 +#define HWRM_MAX_RESP_LEN 704 #define HW_HASH_INDEX_SIZE 0x80 #define HW_HASH_KEY_SIZE 40 #define HWRM_RESP_VALID_KEY 1 #define HWRM_VERSION_MAJOR 1 #define HWRM_VERSION_MINOR 10 #define HWRM_VERSION_UPDATE 0 -#define HWRM_VERSION_RSVD 35 -#define HWRM_VERSION_STR "1.10.0.35" +#define HWRM_VERSION_RSVD 47 +#define HWRM_VERSION_STR "1.10.0.47" /* hwrm_ver_get_input (size:192b/24B) */ struct hwrm_ver_get_input { @@ -580,6 +584,7 @@ struct hwrm_async_event_cmpl { #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL #define ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY 0x8UL + 
#define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY 0x9UL #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL @@ -595,6 +600,9 @@ struct hwrm_async_event_cmpl { #define ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION 0x37UL #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL + #define ASYNC_EVENT_CMPL_EVENT_ID_TCP_FLAG_ACTION_CHANGE 0x3aUL + #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_FLOW_ACTIVE 0x3bUL + #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CFG_CHANGE 0x3cUL #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR @@ -724,6 +732,30 @@ struct hwrm_async_event_cmpl_reset_notify { #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT 16 }; +/* hwrm_async_event_cmpl_error_recovery (size:128b/16B) */ +struct hwrm_async_event_cmpl_error_recovery { + __le16 type; + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_ERROR_RECOVERY 0x9UL + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_ERROR_RECOVERY + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_V 0x1UL + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASK 0xffUL + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC 0x1UL + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED 0x2UL +}; + /* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */ struct hwrm_async_event_cmpl_vf_cfg_change { __le16 type; @@ -1014,6 +1046,7 @@ struct hwrm_func_qcaps_output { #define FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE 0x100000UL #define FUNC_QCAPS_RESP_FLAGS_DYNAMIC_TX_RING_ALLOC 0x200000UL #define FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE 0x400000UL + #define FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE 0x800000UL u8 mac_address[6]; __le16 max_rsscos_ctx; __le16 max_cmpl_rings; @@ -1185,6 +1218,7 @@ struct hwrm_func_cfg_input { #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL #define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL #define FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST 0x800000UL + #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE 0x1000000UL __le32 enables; #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL @@ -1390,6 +1424,7 @@ struct hwrm_func_drv_rgtr_input { #define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL #define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE 0x8UL #define FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT 0x10UL + #define FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT 0x20UL __le32 enables; #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL @@ -2024,6 +2059,89 @@ struct hwrm_func_backing_store_cfg_output { u8 valid; }; +/* hwrm_error_recovery_qcfg_input (size:192b/24B) */ +struct 
hwrm_error_recovery_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 unused_0[8]; +}; + +/* hwrm_error_recovery_qcfg_output (size:1664b/208B) */ +struct hwrm_error_recovery_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST 0x1UL + #define ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU 0x2UL + __le32 driver_polling_freq; + __le32 master_func_wait_period; + __le32 normal_func_wait_period; + __le32 master_func_wait_period_after_reset; + __le32 max_bailout_time_after_reset; + __le32 fw_health_status_reg; + #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_MASK 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_SFT 0 + #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_PCIE_CFG 0x0UL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_GRC 0x1UL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR0 0x2UL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR1 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR1 + #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_MASK 0xfffffffcUL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SFT 2 + __le32 fw_heartbeat_reg; + #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_MASK 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_SFT 0 + #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_PCIE_CFG 0x0UL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_GRC 0x1UL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR0 0x2UL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR1 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR1 + #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_MASK 0xfffffffcUL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SFT 2 + __le32 fw_reset_cnt_reg; + #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_MASK 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_SFT 0 + #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_PCIE_CFG 0x0UL + #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_GRC 0x1UL + #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR0 0x2UL + #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR1 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR1 + #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_MASK 0xfffffffcUL + #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SFT 2 + __le32 reset_inprogress_reg; + #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_MASK 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_SFT 0 + #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_PCIE_CFG 0x0UL + #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_GRC 0x1UL + #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR0 0x2UL + #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR1 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR1 + #define 
ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_MASK 0xfffffffcUL + #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SFT 2 + __le32 reset_inprogress_reg_mask; + u8 unused_0[3]; + u8 reg_array_cnt; + __le32 reset_reg[16]; + #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_MASK 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_SFT 0 + #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_PCIE_CFG 0x0UL + #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_GRC 0x1UL + #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR0 0x2UL + #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR1 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR1 + #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_MASK 0xfffffffcUL + #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SFT 2 + __le32 reset_reg_val[16]; + u8 delay_after_reset[16]; + u8 unused_1[7]; + u8 valid; +}; + /* hwrm_func_drv_if_change_input (size:192b/24B) */ struct hwrm_func_drv_if_change_input { __le16 req_type; @@ -2955,6 +3073,7 @@ struct hwrm_port_phy_qcaps_output { #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_200GB 0x4000UL __le16 supported_speeds_auto_mode; #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL @@ -2970,6 +3089,7 @@ struct hwrm_port_phy_qcaps_output { #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_200GB 0x4000UL __le16 supported_speeds_eee_mode; #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL @@ -4919,6 +5039,35 @@ struct hwrm_ring_free_output { u8 valid; }; +/* hwrm_ring_reset_input (size:192b/24B) */ +struct hwrm_ring_reset_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 ring_type; + #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL + #define RING_RESET_REQ_RING_TYPE_TX 0x1UL + #define RING_RESET_REQ_RING_TYPE_RX 0x2UL + #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL + #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_ROCE_CMPL + u8 unused_0; + __le16 ring_id; + u8 unused_1[4]; +}; + +/* hwrm_ring_reset_output (size:128b/16B) */ +struct hwrm_ring_reset_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[4]; + u8 consumer_idx[3]; + u8 valid; +}; + /* hwrm_ring_aggint_qcaps_input (size:128b/16B) */ struct hwrm_ring_aggint_qcaps_input { __le16 req_type; @@ -5446,19 +5595,21 @@ struct hwrm_cfa_encap_record_alloc_input { __le64 resp_addr; __le32 flags; #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_EXTERNAL 0x2UL u8 encap_type; - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL - #define 
CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_V4 0x9UL - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE_V1 0xaUL - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2_ETYPE 0xbUL - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2_ETYPE + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_V4 0x9UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE_V1 0xaUL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2_ETYPE 0xbUL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE_V6 0xcUL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE_V6 u8 unused_0[3]; __le32 encap_data[20]; }; @@ -5506,6 +5657,7 @@ struct hwrm_cfa_ntuple_filter_alloc_input { #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_FID 0x8UL __le32 enables; #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL @@ -5627,7 +5779,8 @@ struct hwrm_cfa_ntuple_filter_cfg_input { #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL - u8 unused_0[4]; + __le32 flags; + #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_FID 0x1UL __le64 ntuple_filter_id; __le32 new_dst_id; __le32 new_mirror_vnic_id; @@ -5892,13 +6045,15 @@ struct hwrm_cfa_flow_info_input { __le64 ext_flow_handle; }; -/* hwrm_cfa_flow_info_output (size:448b/56B) */ +/* hwrm_cfa_flow_info_output (size:5632b/704B) */ struct hwrm_cfa_flow_info_output { __le16 error_code; __le16 req_type; __le16 seq_id; __le16 resp_len; u8 flags; + #define CFA_FLOW_INFO_RESP_FLAGS_PATH_TX 0x1UL + #define CFA_FLOW_INFO_RESP_FLAGS_PATH_RX 0x2UL u8 profile; __le16 src_fid; __le16 dst_fid; @@ -5910,7 +6065,10 @@ struct hwrm_cfa_flow_info_output { __le16 flow_handle; __le32 tunnel_handle; __le16 flow_timer; - u8 unused_0[5]; + u8 unused_0[6]; + __le32 flow_key_data[130]; + __le32 flow_action_info[30]; + u8 unused_1[7]; u8 valid; }; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index d80f5c981d90..2b90a2bb1a1d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -121,6 +121,54 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting) return rc; } +static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf) +{ + struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_qcfg_input req = {0}; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); + req.fid = cpu_to_le16(vf->fw_fid); + 
mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) { + mutex_unlock(&bp->hwrm_cmd_lock); + return -EIO; + } + vf->func_qcfg_flags = le16_to_cpu(resp->flags); + mutex_unlock(&bp->hwrm_cmd_lock); + return 0; +} + +static bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf) +{ + if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF)) + return !!(vf->flags & BNXT_VF_TRUST); + + bnxt_hwrm_func_qcfg_flags(bp, vf); + return !!(vf->func_qcfg_flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF); +} + +static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf) +{ + struct hwrm_func_cfg_input req = {0}; + int rc; + + if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF)) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); + req.fid = cpu_to_le16(vf->fw_fid); + if (vf->flags & BNXT_VF_TRUST) + req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE); + else + req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE); + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + return -EIO; + return 0; +} + int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted) { struct bnxt *bp = netdev_priv(dev); @@ -135,6 +183,7 @@ int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted) else vf->flags &= ~BNXT_VF_TRUST; + bnxt_hwrm_set_trusted_vf(bp, vf); return 0; } @@ -164,7 +213,7 @@ int bnxt_get_vf_config(struct net_device *dev, int vf_id, else ivi->qos = 0; ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK); - ivi->trusted = !!(vf->flags & BNXT_VF_TRUST); + ivi->trusted = bnxt_is_trusted_vf(bp, vf); if (!(vf->flags & BNXT_VF_LINK_FORCED)) ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; else if (vf->flags & BNXT_VF_LINK_UP) @@ -935,9 +984,10 @@ static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf) * if the PF assigned MAC address is zero */ if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) { + bool trust = bnxt_is_trusted_vf(bp, vf); + if (is_valid_ether_addr(req->dflt_mac_addr) && - ((vf->flags & BNXT_VF_TRUST) || - !is_valid_ether_addr(vf->mac_addr) || + (trust || !is_valid_ether_addr(vf->mac_addr) || ether_addr_equal(req->dflt_mac_addr, vf->mac_addr))) { ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr); return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size); @@ -962,7 +1012,7 @@ static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf) * Otherwise, it must match the VF MAC address if firmware spec >= * 1.2.2 */ - if (vf->flags & BNXT_VF_TRUST) { + if (bnxt_is_trusted_vf(bp, vf)) { mac_ok = true; } else if (is_valid_ether_addr(vf->mac_addr)) { if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr)) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index c683b5e96b1d..44d6c5743fb9 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -45,7 +45,7 @@ static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev) struct bnxt *bp; /* check if dev belongs to the same switch */ - if (!switchdev_port_same_parent_id(pf_bp->dev, dev)) { + if (!netdev_port_same_parent_id(pf_bp->dev, dev)) { netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch", dev->ifindex); return BNXT_FID_INVALID; @@ -61,9 +61,9 @@ static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev) static int bnxt_tc_parse_redir(struct bnxt *bp, struct bnxt_tc_actions *actions, - const struct tc_action *tc_act) + const 
struct flow_action_entry *act) { - struct net_device *dev = tcf_mirred_dev(tc_act); + struct net_device *dev = act->dev; if (!dev) { netdev_info(bp->dev, "no dev in mirred action"); @@ -77,16 +77,16 @@ static int bnxt_tc_parse_redir(struct bnxt *bp, static int bnxt_tc_parse_vlan(struct bnxt *bp, struct bnxt_tc_actions *actions, - const struct tc_action *tc_act) + const struct flow_action_entry *act) { - switch (tcf_vlan_action(tc_act)) { - case TCA_VLAN_ACT_POP: + switch (act->id) { + case FLOW_ACTION_VLAN_POP: actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN; break; - case TCA_VLAN_ACT_PUSH: + case FLOW_ACTION_VLAN_PUSH: actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN; - actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act)); - actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act); + actions->push_vlan_tci = htons(act->vlan.vid); + actions->push_vlan_tpid = act->vlan.proto; break; default: return -EOPNOTSUPP; @@ -96,10 +96,10 @@ static int bnxt_tc_parse_vlan(struct bnxt *bp, static int bnxt_tc_parse_tunnel_set(struct bnxt *bp, struct bnxt_tc_actions *actions, - const struct tc_action *tc_act) + const struct flow_action_entry *act) { - struct ip_tunnel_info *tun_info = tcf_tunnel_info(tc_act); - struct ip_tunnel_key *tun_key = &tun_info->key; + const struct ip_tunnel_info *tun_info = act->tunnel; + const struct ip_tunnel_key *tun_key = &tun_info->key; if (ip_tunnel_info_af(tun_info) != AF_INET) { netdev_info(bp->dev, "only IPv4 tunnel-encap is supported"); @@ -113,51 +113,43 @@ static int bnxt_tc_parse_tunnel_set(struct bnxt *bp, static int bnxt_tc_parse_actions(struct bnxt *bp, struct bnxt_tc_actions *actions, - struct tcf_exts *tc_exts) + struct flow_action *flow_action) { - const struct tc_action *tc_act; + struct flow_action_entry *act; int i, rc; - if (!tcf_exts_has_actions(tc_exts)) { + if (!flow_action_has_entries(flow_action)) { netdev_info(bp->dev, "no actions"); return -EINVAL; } - tcf_exts_for_each_action(i, tc_act, tc_exts) { - /* Drop action */ - if (is_tcf_gact_shot(tc_act)) { + flow_action_for_each(i, act, flow_action) { + switch (act->id) { + case FLOW_ACTION_DROP: actions->flags |= BNXT_TC_ACTION_FLAG_DROP; return 0; /* don't bother with other actions */ - } - - /* Redirect action */ - if (is_tcf_mirred_egress_redirect(tc_act)) { - rc = bnxt_tc_parse_redir(bp, actions, tc_act); + case FLOW_ACTION_REDIRECT: + rc = bnxt_tc_parse_redir(bp, actions, act); if (rc) return rc; - continue; - } - - /* Push/pop VLAN */ - if (is_tcf_vlan(tc_act)) { - rc = bnxt_tc_parse_vlan(bp, actions, tc_act); + break; + case FLOW_ACTION_VLAN_POP: + case FLOW_ACTION_VLAN_PUSH: + case FLOW_ACTION_VLAN_MANGLE: + rc = bnxt_tc_parse_vlan(bp, actions, act); if (rc) return rc; - continue; - } - - /* Tunnel encap */ - if (is_tcf_tunnel_set(tc_act)) { - rc = bnxt_tc_parse_tunnel_set(bp, actions, tc_act); + break; + case FLOW_ACTION_TUNNEL_ENCAP: + rc = bnxt_tc_parse_tunnel_set(bp, actions, act); if (rc) return rc; - continue; - } - - /* Tunnel decap */ - if (is_tcf_tunnel_release(tc_act)) { + break; + case FLOW_ACTION_TUNNEL_DECAP: actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_DECAP; - continue; + break; + default: + break; } } @@ -177,18 +169,12 @@ static int bnxt_tc_parse_actions(struct bnxt *bp, return 0; } -#define GET_KEY(flow_cmd, key_type) \ - skb_flow_dissector_target((flow_cmd)->dissector, key_type,\ - (flow_cmd)->key) -#define GET_MASK(flow_cmd, key_type) \ - skb_flow_dissector_target((flow_cmd)->dissector, key_type,\ - (flow_cmd)->mask) - static int bnxt_tc_parse_flow(struct bnxt *bp, struct 
tc_cls_flower_offload *tc_flow_cmd, struct bnxt_tc_flow *flow) { - struct flow_dissector *dissector = tc_flow_cmd->dissector; + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(tc_flow_cmd); + struct flow_dissector *dissector = rule->match.dissector; /* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */ if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 || @@ -198,143 +184,123 @@ static int bnxt_tc_parse_flow(struct bnxt *bp, return -EOPNOTSUPP; } - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) { - struct flow_dissector_key_basic *key = - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC); - struct flow_dissector_key_basic *mask = - GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; - flow->l2_key.ether_type = key->n_proto; - flow->l2_mask.ether_type = mask->n_proto; + flow_rule_match_basic(rule, &match); + flow->l2_key.ether_type = match.key->n_proto; + flow->l2_mask.ether_type = match.mask->n_proto; - if (key->n_proto == htons(ETH_P_IP) || - key->n_proto == htons(ETH_P_IPV6)) { - flow->l4_key.ip_proto = key->ip_proto; - flow->l4_mask.ip_proto = mask->ip_proto; + if (match.key->n_proto == htons(ETH_P_IP) || + match.key->n_proto == htons(ETH_P_IPV6)) { + flow->l4_key.ip_proto = match.key->ip_proto; + flow->l4_mask.ip_proto = match.mask->ip_proto; } } - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { - struct flow_dissector_key_eth_addrs *key = - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS); - struct flow_dissector_key_eth_addrs *mask = - GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; + flow_rule_match_eth_addrs(rule, &match); flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS; - ether_addr_copy(flow->l2_key.dmac, key->dst); - ether_addr_copy(flow->l2_mask.dmac, mask->dst); - ether_addr_copy(flow->l2_key.smac, key->src); - ether_addr_copy(flow->l2_mask.smac, mask->src); + ether_addr_copy(flow->l2_key.dmac, match.key->dst); + ether_addr_copy(flow->l2_mask.dmac, match.mask->dst); + ether_addr_copy(flow->l2_key.smac, match.key->src); + ether_addr_copy(flow->l2_mask.smac, match.mask->src); } - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN)) { - struct flow_dissector_key_vlan *key = - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN); - struct flow_dissector_key_vlan *mask = - GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; + flow_rule_match_vlan(rule, &match); flow->l2_key.inner_vlan_tci = - cpu_to_be16(VLAN_TCI(key->vlan_id, key->vlan_priority)); + cpu_to_be16(VLAN_TCI(match.key->vlan_id, + match.key->vlan_priority)); flow->l2_mask.inner_vlan_tci = - cpu_to_be16((VLAN_TCI(mask->vlan_id, mask->vlan_priority))); + cpu_to_be16((VLAN_TCI(match.mask->vlan_id, + match.mask->vlan_priority))); flow->l2_key.inner_vlan_tpid = htons(ETH_P_8021Q); flow->l2_mask.inner_vlan_tpid = htons(0xffff); flow->l2_key.num_vlans = 1; } - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { - struct flow_dissector_key_ipv4_addrs *key = - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS); - struct flow_dissector_key_ipv4_addrs *mask = - GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { + struct flow_match_ipv4_addrs match; + flow_rule_match_ipv4_addrs(rule, &match); 
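	/* Each flow_rule_match_*() helper fills match.key and match.mask with
	 * pointers into the rule, replacing the GET_KEY()/GET_MASK()
	 * skb_flow_dissector_target() lookups removed above.
	 */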
flow->flags |= BNXT_TC_FLOW_FLAGS_IPV4_ADDRS; - flow->l3_key.ipv4.daddr.s_addr = key->dst; - flow->l3_mask.ipv4.daddr.s_addr = mask->dst; - flow->l3_key.ipv4.saddr.s_addr = key->src; - flow->l3_mask.ipv4.saddr.s_addr = mask->src; - } else if (dissector_uses_key(dissector, - FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { - struct flow_dissector_key_ipv6_addrs *key = - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS); - struct flow_dissector_key_ipv6_addrs *mask = - GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS); - + flow->l3_key.ipv4.daddr.s_addr = match.key->dst; + flow->l3_mask.ipv4.daddr.s_addr = match.mask->dst; + flow->l3_key.ipv4.saddr.s_addr = match.key->src; + flow->l3_mask.ipv4.saddr.s_addr = match.mask->src; + } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { + struct flow_match_ipv6_addrs match; + + flow_rule_match_ipv6_addrs(rule, &match); flow->flags |= BNXT_TC_FLOW_FLAGS_IPV6_ADDRS; - flow->l3_key.ipv6.daddr = key->dst; - flow->l3_mask.ipv6.daddr = mask->dst; - flow->l3_key.ipv6.saddr = key->src; - flow->l3_mask.ipv6.saddr = mask->src; + flow->l3_key.ipv6.daddr = match.key->dst; + flow->l3_mask.ipv6.daddr = match.mask->dst; + flow->l3_key.ipv6.saddr = match.key->src; + flow->l3_mask.ipv6.saddr = match.mask->src; } - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS)) { - struct flow_dissector_key_ports *key = - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS); - struct flow_dissector_key_ports *mask = - GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports match; + flow_rule_match_ports(rule, &match); flow->flags |= BNXT_TC_FLOW_FLAGS_PORTS; - flow->l4_key.ports.dport = key->dst; - flow->l4_mask.ports.dport = mask->dst; - flow->l4_key.ports.sport = key->src; - flow->l4_mask.ports.sport = mask->src; + flow->l4_key.ports.dport = match.key->dst; + flow->l4_mask.ports.dport = match.mask->dst; + flow->l4_key.ports.sport = match.key->src; + flow->l4_mask.ports.sport = match.mask->src; } - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ICMP)) { - struct flow_dissector_key_icmp *key = - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP); - struct flow_dissector_key_icmp *mask = - GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) { + struct flow_match_icmp match; + flow_rule_match_icmp(rule, &match); flow->flags |= BNXT_TC_FLOW_FLAGS_ICMP; - flow->l4_key.icmp.type = key->type; - flow->l4_key.icmp.code = key->code; - flow->l4_mask.icmp.type = mask->type; - flow->l4_mask.icmp.code = mask->code; + flow->l4_key.icmp.type = match.key->type; + flow->l4_key.icmp.code = match.key->code; + flow->l4_mask.icmp.type = match.mask->type; + flow->l4_mask.icmp.code = match.mask->code; } - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) { - struct flow_dissector_key_ipv4_addrs *key = - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS); - struct flow_dissector_key_ipv4_addrs *mask = - GET_MASK(tc_flow_cmd, - FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) { + struct flow_match_ipv4_addrs match; + flow_rule_match_enc_ipv4_addrs(rule, &match); flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS; - flow->tun_key.u.ipv4.dst = key->dst; - flow->tun_mask.u.ipv4.dst = mask->dst; - flow->tun_key.u.ipv4.src = key->src; - flow->tun_mask.u.ipv4.src = mask->src; - } else if (dissector_uses_key(dissector, + flow->tun_key.u.ipv4.dst = match.key->dst; + 
flow->tun_mask.u.ipv4.dst = match.mask->dst; + flow->tun_key.u.ipv4.src = match.key->src; + flow->tun_mask.u.ipv4.src = match.mask->src; + } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) { return -EOPNOTSUPP; } - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) { - struct flow_dissector_key_keyid *key = - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID); - struct flow_dissector_key_keyid *mask = - GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { + struct flow_match_enc_keyid match; + flow_rule_match_enc_keyid(rule, &match); flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ID; - flow->tun_key.tun_id = key32_to_tunnel_id(key->keyid); - flow->tun_mask.tun_id = key32_to_tunnel_id(mask->keyid); + flow->tun_key.tun_id = key32_to_tunnel_id(match.key->keyid); + flow->tun_mask.tun_id = key32_to_tunnel_id(match.mask->keyid); } - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) { - struct flow_dissector_key_ports *key = - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS); - struct flow_dissector_key_ports *mask = - GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) { + struct flow_match_ports match; + flow_rule_match_enc_ports(rule, &match); flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_PORTS; - flow->tun_key.tp_dst = key->dst; - flow->tun_mask.tp_dst = mask->dst; - flow->tun_key.tp_src = key->src; - flow->tun_mask.tp_src = mask->src; + flow->tun_key.tp_dst = match.key->dst; + flow->tun_mask.tp_dst = match.mask->dst; + flow->tun_key.tp_src = match.key->src; + flow->tun_mask.tp_src = match.mask->src; } - return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts); + return bnxt_tc_parse_actions(bp, &flow->actions, &rule->action); } static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, @@ -1324,7 +1290,7 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid, bnxt_tc_set_flow_dir(bp, flow, src_fid); if (!bnxt_tc_can_offload(bp, flow)) { - rc = -ENOSPC; + rc = -EOPNOTSUPP; goto free_node; } @@ -1422,8 +1388,8 @@ static int bnxt_tc_get_flow_stats(struct bnxt *bp, lastused = flow->lastused; spin_unlock(&flow->stats_lock); - tcf_exts_stats_update(tc_flow_cmd->exts, stats.bytes, stats.packets, - lastused); + flow_stats_update(&tc_flow_cmd->stats, stats.bytes, stats.packets, + lastused); return 0; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index 9a25c05aa571..2bdd2da9aac7 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -237,21 +237,17 @@ static void bnxt_vf_rep_get_drvinfo(struct net_device *dev, strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); } -static int bnxt_vf_rep_port_attr_get(struct net_device *dev, - struct switchdev_attr *attr) +static int bnxt_vf_rep_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) { struct bnxt_vf_rep *vf_rep = netdev_priv(dev); /* as only PORT_PARENT_ID is supported currently use common code * between PF and VF-rep for now. 
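 * The switchdev_port_attr_get() hook is replaced by the
 * ndo_get_port_parent_id() netdev op throughout this patch, so the
 * parent ID is returned through a struct netdev_phys_item_id directly;
 * the liquidio PF and VF-rep drivers below get the same conversion.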
*/ - return bnxt_port_attr_get(vf_rep->bp, attr); + return bnxt_get_port_parent_id(vf_rep->bp->dev, ppid); } -static const struct switchdev_ops bnxt_vf_rep_switchdev_ops = { - .switchdev_port_attr_get = bnxt_vf_rep_port_attr_get -}; - static const struct ethtool_ops bnxt_vf_rep_ethtool_ops = { .get_drvinfo = bnxt_vf_rep_get_drvinfo }; @@ -262,6 +258,7 @@ static const struct net_device_ops bnxt_vf_rep_netdev_ops = { .ndo_start_xmit = bnxt_vf_rep_xmit, .ndo_get_stats64 = bnxt_vf_rep_get_stats64, .ndo_setup_tc = bnxt_vf_rep_setup_tc, + .ndo_get_port_parent_id = bnxt_vf_rep_get_port_parent_id, .ndo_get_phys_port_name = bnxt_vf_rep_get_phys_port_name }; @@ -392,7 +389,6 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep, dev->netdev_ops = &bnxt_vf_rep_netdev_ops; dev->ethtool_ops = &bnxt_vf_rep_ethtool_ops; - SWITCHDEV_SET_OPS(dev, &bnxt_vf_rep_switchdev_ops); /* Just inherit all the features of the parent PF as the VF-R * uses the RX/TX rings of the parent PF */ diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index 5db9f4158e62..134ae2862efa 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -1288,7 +1288,7 @@ static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d, * for transmits, we just free buffers. */ - dev_kfree_skb_irq(sb); + dev_consume_skb_irq(sb); /* * .. and advance to the next buffer. diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 2b2882615e8b..f2915f2fe21a 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -1734,7 +1734,7 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev) if (!nskb) return -ENOMEM; - dev_kfree_skb_any(*skb); + dev_consume_skb_any(*skb); *skb = nskb; } @@ -3673,9 +3673,9 @@ static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb, /* Store packet information (to free when Tx completed) */ lp->skb = skb; lp->skb_length = skb->len; - lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len, - DMA_TO_DEVICE); - if (dma_mapping_error(NULL, lp->skb_physaddr)) { + lp->skb_physaddr = dma_map_single(&lp->pdev->dev, skb->data, + skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(&lp->pdev->dev, lp->skb_physaddr)) { dev_kfree_skb_any(skb); dev->stats.tx_dropped++; netdev_err(dev, "%s: DMA mapping error\n", __func__); @@ -3763,9 +3763,9 @@ static irqreturn_t at91ether_interrupt(int irq, void *dev_id) dev->stats.tx_errors++; if (lp->skb) { - dev_kfree_skb_irq(lp->skb); + dev_consume_skb_irq(lp->skb); lp->skb = NULL; - dma_unmap_single(NULL, lp->skb_physaddr, + dma_unmap_single(&lp->pdev->dev, lp->skb_physaddr, lp->skb_length, DMA_TO_DEVICE); dev->stats.tx_packets++; dev->stats.tx_bytes += lp->skb_length; @@ -3943,6 +3943,7 @@ static const struct of_device_id macb_dt_ids[] = { { .compatible = "cdns,np4-macb", .data = &np4_config }, { .compatible = "cdns,pc302-gem", .data = &pc302gem_config }, { .compatible = "cdns,gem", .data = &pc302gem_config }, + { .compatible = "cdns,sam9x60-macb", .data = &at91sam9260_config }, { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config }, { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config }, { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config }, diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig index 5f03199a3acf..05f4a3b21e29 100644 --- a/drivers/net/ethernet/cavium/Kconfig +++ 
b/drivers/net/ethernet/cavium/Kconfig @@ -54,7 +54,6 @@ config CAVIUM_PTP tristate "Cavium PTP coprocessor as PTP clock" depends on 64BIT && PCI imply PTP_1588_CLOCK - default y ---help--- This driver adds support for the Precision Time Protocol Clocks and Timestamping coprocessor (PTP) found on Cavium processors. diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c index 9f4f3c1d5043..43d11c38b38a 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c @@ -1450,7 +1450,7 @@ void cn23xx_tell_vf_its_macaddr_changed(struct octeon_device *oct, int vfidx, mbox_cmd.recv_len = 0; mbox_cmd.recv_status = 0; mbox_cmd.fn = NULL; - mbox_cmd.fn_arg = 0; + mbox_cmd.fn_arg = NULL; ether_addr_copy(mbox_cmd.msg.s.params, mac); mbox_cmd.q_no = vfidx * oct->sriov_info.rings_per_vf; octeon_mbox_write(oct, &mbox_cmd); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 3d24133e5e49..9b7819fdc9de 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -21,7 +21,6 @@ #include <linux/firmware.h> #include <net/vxlan.h> #include <linux/kthread.h> -#include <net/switchdev.h> #include "liquidio_common.h" #include "octeon_droq.h" #include "octeon_iq.h" @@ -2908,7 +2907,7 @@ static int liquidio_set_vf_spoofchk(struct net_device *netdev, int vfidx, nctrl.ncmd.s.param2 = enable; nctrl.ncmd.s.more = 0; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.cb_fn = 0; + nctrl.cb_fn = NULL; retval = octnet_send_nic_ctrl_pkt(oct, &nctrl); @@ -3184,7 +3183,8 @@ static const struct devlink_ops liquidio_devlink_ops = { }; static int -lio_pf_switchdev_attr_get(struct net_device *dev, struct switchdev_attr *attr) +liquidio_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) { struct lio *lio = GET_LIO(dev); struct octeon_device *oct = lio->oct_dev; @@ -3192,24 +3192,12 @@ lio_pf_switchdev_attr_get(struct net_device *dev, struct switchdev_attr *attr) if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) return -EOPNOTSUPP; - switch (attr->id) { - case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: - attr->u.ppid.id_len = ETH_ALEN; - ether_addr_copy(attr->u.ppid.id, - (void *)&lio->linfo.hw_addr + 2); - break; - - default: - return -EOPNOTSUPP; - } + ppid->id_len = ETH_ALEN; + ether_addr_copy(ppid->id, (void *)&lio->linfo.hw_addr + 2); return 0; } -static const struct switchdev_ops lio_pf_switchdev_ops = { - .switchdev_port_attr_get = lio_pf_switchdev_attr_get, -}; - static int liquidio_get_vf_stats(struct net_device *netdev, int vfidx, struct ifla_vf_stats *vf_stats) { @@ -3259,6 +3247,7 @@ static const struct net_device_ops lionetdevops = { .ndo_set_vf_trust = liquidio_set_vf_trust, .ndo_set_vf_link_state = liquidio_set_vf_link_state, .ndo_get_vf_stats = liquidio_get_vf_stats, + .ndo_get_port_parent_id = liquidio_get_port_parent_id, }; /** \brief Entry point for the liquidio module @@ -3534,7 +3523,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) * netdev tasks. 
*/ netdev->netdev_ops = &lionetdevops; - SWITCHDEV_SET_OPS(netdev, &lio_pf_switchdev_ops); retval = netif_set_real_num_rx_queues(netdev, num_oqueues); if (retval) { diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c index de61060721c4..f3f2e71431ac 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c @@ -25,7 +25,6 @@ #include "octeon_nic.h" #include "octeon_main.h" #include "octeon_network.h" -#include <net/switchdev.h> #include "lio_vf_rep.h" static int lio_vf_rep_open(struct net_device *ndev); @@ -38,6 +37,8 @@ static int lio_vf_rep_phys_port_name(struct net_device *dev, static void lio_vf_rep_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64); static int lio_vf_rep_change_mtu(struct net_device *ndev, int new_mtu); +static int lio_vf_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid); static const struct net_device_ops lio_vf_rep_ndev_ops = { .ndo_open = lio_vf_rep_open, @@ -47,6 +48,7 @@ static const struct net_device_ops lio_vf_rep_ndev_ops = { .ndo_get_phys_port_name = lio_vf_rep_phys_port_name, .ndo_get_stats64 = lio_vf_rep_get_stats64, .ndo_change_mtu = lio_vf_rep_change_mtu, + .ndo_get_port_parent_id = lio_vf_get_port_parent_id, }; static int @@ -443,31 +445,19 @@ xmit_failed: return NETDEV_TX_OK; } -static int -lio_vf_rep_attr_get(struct net_device *dev, struct switchdev_attr *attr) +static int lio_vf_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) { struct lio_vf_rep_desc *vf_rep = netdev_priv(dev); struct net_device *parent_ndev = vf_rep->parent_ndev; struct lio *lio = GET_LIO(parent_ndev); - switch (attr->id) { - case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: - attr->u.ppid.id_len = ETH_ALEN; - ether_addr_copy(attr->u.ppid.id, - (void *)&lio->linfo.hw_addr + 2); - break; - - default: - return -EOPNOTSUPP; - } + ppid->id_len = ETH_ALEN; + ether_addr_copy(ppid->id, (void *)&lio->linfo.hw_addr + 2); return 0; } -static const struct switchdev_ops lio_vf_rep_switchdev_ops = { - .switchdev_port_attr_get = lio_vf_rep_attr_get, -}; - static void lio_vf_rep_fetch_stats(struct work_struct *work) { @@ -524,7 +514,6 @@ lio_vf_rep_create(struct octeon_device *oct) ndev->min_mtu = LIO_MIN_MTU_SIZE; ndev->max_mtu = LIO_MAX_MTU_SIZE; ndev->netdev_ops = &lio_vf_rep_ndev_ops; - SWITCHDEV_SET_OPS(ndev, &lio_vf_rep_switchdev_ops); vf_rep = netdev_priv(ndev); memset(vf_rep, 0, sizeof(*vf_rep)); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 568715a13b5c..b7b0eb104430 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -617,6 +617,7 @@ enum { /* adapter flags */ FW_OFLD_CONN = (1 << 9), ROOT_NO_RELAXED_ORDERING = (1 << 10), SHUTTING_DOWN = (1 << 11), + SGE_DBQ_TIMER = (1 << 12), }; enum { @@ -756,6 +757,8 @@ struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */ #ifdef CONFIG_CHELSIO_T4_DCB u8 dcb_prio; /* DCB Priority bound to queue */ #endif + u8 dbqt; /* SGE Doorbell Queue Timer in use */ + unsigned int dbqtimerix; /* SGE Doorbell Queue Timer Index */ unsigned long tso; /* # of TSO requests */ unsigned long tx_cso; /* # of Tx checksum offloads */ unsigned long vlan_ins; /* # of Tx VLAN insertions */ @@ -816,6 +819,8 @@ struct sge { u16 nqs_per_uld; /* # of Rx queues per ULD */ u16 timer_val[SGE_NTIMERS]; u8 counter_val[SGE_NCOUNTERS]; + u16 dbqtimer_tick; + u16 
dbqtimer_val[SGE_NDBQTIMERS]; u32 fl_pg_order; /* large page allocation size */ u32 stat_len; /* length of status page at ring end */ u32 pktshift; /* padding between CPL & packet data */ @@ -1402,7 +1407,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, rspq_flush_handler_t flush_handler, int cong); int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, struct net_device *dev, struct netdev_queue *netdevq, - unsigned int iqid); + unsigned int iqid, u8 dbqt); int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, struct net_device *dev, unsigned int iqid, unsigned int cmplqid); @@ -1415,6 +1420,8 @@ irqreturn_t t4_sge_intr_msix(int irq, void *cookie); int t4_sge_init(struct adapter *adap); void t4_sge_start(struct adapter *adap); void t4_sge_stop(struct adapter *adap); +int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *q, + int maxreclaim); void cxgb4_set_ethtool_ops(struct net_device *netdev); int cxgb4_write_rss(const struct port_info *pi, const u16 *queues); enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb); @@ -1821,6 +1828,8 @@ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int eqid); int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type); +int t4_read_sge_dbqtimers(struct adapter *adap, unsigned int ndbqtimers, + u16 *dbqtimers); void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl); int t4_update_port_info(struct port_info *pi); int t4_get_link_params(struct port_info *pi, unsigned int *link_okp, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 796043544fc3..65b8dc706c1d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -932,11 +932,190 @@ static int get_adaptive_rx_setting(struct net_device *dev) return q->rspq.adaptive_rx; } -static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) +/* Return the current global Adapter SGE Doorbell Queue Timer Tick for all + * Ethernet TX Queues. + */ +static int get_dbqtimer_tick(struct net_device *dev) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + + if (!(adap->flags & SGE_DBQ_TIMER)) + return 0; + + return adap->sge.dbqtimer_tick; +} + +/* Return the SGE Doorbell Queue Timer Value for the Ethernet TX Queues + * associated with a Network Device. + */ +static int get_dbqtimer(struct net_device *dev) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + struct sge_eth_txq *txq; + + txq = &adap->sge.ethtxq[pi->first_qset]; + + if (!(adap->flags & SGE_DBQ_TIMER)) + return 0; + + /* all of the TX Queues use the same Timer Index */ + return adap->sge.dbqtimer_val[txq->dbqtimerix]; +} + +/* Set the global Adapter SGE Doorbell Queue Timer Tick for all Ethernet TX + * Queues. This is the fundamental "Tick" that sets the scale of values which + * can be used. Individual Ethernet TX Queues index into a relatively small + * array of Tick Multipliers. Changing the base Tick will thus change all of + * the resulting Timer Values associated with those multipliers for all + * Ethernet TX Queues. 
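 * (For illustration only: were the Tick, say, 5us with multipliers of
 * 1, 2, 4 and 8, the selectable Timer Values would be 5, 10, 20 and
 * 40us; doubling the Tick to 10us would double every one of them.)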
+ */ +static int set_dbqtimer_tick(struct net_device *dev, int usecs) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + struct sge *s = &adap->sge; + u32 param, val; + int ret; + + if (!(adap->flags & SGE_DBQ_TIMER)) + return 0; + + /* return early if it's the same Timer Tick we're already using */ + if (s->dbqtimer_tick == usecs) + return 0; + + /* attempt to set the new Timer Tick value */ + param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK)); + val = usecs; + ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val); + if (ret) + return ret; + s->dbqtimer_tick = usecs; + + /* if successful, reread resulting dependent Timer values */ + ret = t4_read_sge_dbqtimers(adap, ARRAY_SIZE(s->dbqtimer_val), + s->dbqtimer_val); + return ret; +} + +/* Set the SGE Doorbell Queue Timer Value for the Ethernet TX Queues + * associated with a Network Device. There is a relatively small array of + * possible Timer Values so we need to pick the closest value available. + */ +static int set_dbqtimer(struct net_device *dev, int usecs) +{ + int qix, timerix, min_timerix, delta, min_delta; + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + struct sge *s = &adap->sge; + struct sge_eth_txq *txq; + u32 param, val; + int ret; + + if (!(adap->flags & SGE_DBQ_TIMER)) + return 0; + + /* Find the SGE Doorbell Timer Value that's closest to the requested + * value. + */ + min_delta = INT_MAX; + min_timerix = 0; + for (timerix = 0; timerix < ARRAY_SIZE(s->dbqtimer_val); timerix++) { + delta = s->dbqtimer_val[timerix] - usecs; + if (delta < 0) + delta = -delta; + if (delta < min_delta) { + min_delta = delta; + min_timerix = timerix; + } + } + + /* Return early if it's the same Timer Index we're already using. + * We use the same Timer Index for all of the TX Queues for an + * interface so it's only necessary to check the first one. + */ + txq = &s->ethtxq[pi->first_qset]; + if (txq->dbqtimerix == min_timerix) + return 0; + + for (qix = 0; qix < pi->nqsets; qix++, txq++) { + if (adap->flags & FULL_INIT_DONE) { + param = + (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_TIMERIX) | + FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id)); + val = min_timerix; + ret = t4_set_params(adap, adap->mbox, adap->pf, 0, + 1, &param, &val); + if (ret) + return ret; + } + txq->dbqtimerix = min_timerix; + } + return 0; +} + +/* Set the global Adapter SGE Doorbell Queue Timer Tick for all Ethernet TX + * Queues and the Timer Value for the Ethernet TX Queues associated with a + * Network Device. Since changing the global Tick changes all of the + * available Timer Values, we need to do this first before selecting the + * resulting closest Timer Value. Moreover, since the Tick is global, + * changing it affects the Timer Values for all Network Devices on the + * adapter. So, before changing the Tick, we grab all of the current Timer + * Values for other Network Devices on this Adapter and then attempt to select + * new Timer Values which are close to the old values ... + */ +static int set_dbqtimer_tickval(struct net_device *dev, + int tick_usecs, int timer_usecs) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + int timer[MAX_NPORTS]; + unsigned int port; + int ret; + + /* Grab the other adapter Network Interface current timers and fill in + * the new one for this Network Interface. 
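 * Because only a small set of multiplier-derived Timer Values exists,
 * the per-port values re-selected after a Tick change are best-effort
 * nearest matches rather than exact restorations of the old settings.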
+ */ + for_each_port(adap, port) + if (port == pi->port_id) + timer[port] = timer_usecs; + else + timer[port] = get_dbqtimer(adap->port[port]); + + /* Change the global Tick first ... */ + ret = set_dbqtimer_tick(dev, tick_usecs); + if (ret) + return ret; + + /* ... and then set all of the Network Interface Timer Values ... */ + for_each_port(adap, port) { + ret = set_dbqtimer(adap->port[port], timer[port]); + if (ret) + return ret; + } + + return 0; +} + +static int set_coalesce(struct net_device *dev, + struct ethtool_coalesce *coalesce) { - set_adaptive_rx_setting(dev, c->use_adaptive_rx_coalesce); - return set_rx_intr_params(dev, c->rx_coalesce_usecs, - c->rx_max_coalesced_frames); + int ret; + + set_adaptive_rx_setting(dev, coalesce->use_adaptive_rx_coalesce); + + ret = set_rx_intr_params(dev, coalesce->rx_coalesce_usecs, + coalesce->rx_max_coalesced_frames); + if (ret) + return ret; + + return set_dbqtimer_tickval(dev, + coalesce->tx_coalesce_usecs_irq, + coalesce->tx_coalesce_usecs); } static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) @@ -949,6 +1128,8 @@ static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN_F) ? adap->sge.counter_val[rq->pktcnt_idx] : 0; c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev); + c->tx_coalesce_usecs_irq = get_dbqtimer_tick(dev); + c->tx_coalesce_usecs = get_dbqtimer(dev); return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index adf75d16e6d3..bcbac247a73d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -575,7 +575,7 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp, struct sge_eth_txq *eq; eq = container_of(txq, struct sge_eth_txq, q); - netif_tx_wake_queue(eq->txq); + t4_sge_eth_txq_egress_update(q->adap, eq, -1); } else { struct sge_uld_txq *oq; @@ -933,10 +933,13 @@ static int setup_sge_queues(struct adapter *adap) q->rspq.idx = j; memset(&q->stats, 0, sizeof(q->stats)); } - for (j = 0; j < pi->nqsets; j++, t++) { + + q = &s->ethrxq[pi->first_qset]; + for (j = 0; j < pi->nqsets; j++, t++, q++) { err = t4_sge_alloc_eth_txq(adap, t, dev, netdev_get_tx_queue(dev, j), - s->fw_evtq.cntxt_id); + q->rspq.cntxt_id, + !!(adap->flags & SGE_DBQ_TIMER)); if (err) goto freeout; } @@ -958,7 +961,7 @@ static int setup_sge_queues(struct adapter *adap) if (!is_t4(adap->params.chip)) { err = t4_sge_alloc_eth_txq(adap, &s->ptptxq, adap->port[0], netdev_get_tx_queue(adap->port[0], 0) - , s->fw_evtq.cntxt_id); + , s->fw_evtq.cntxt_id, false); if (err) goto freeout; } @@ -4325,6 +4328,24 @@ static int adap_init0(struct adapter *adap) if (ret < 0) goto bye; + /* Grab the SGE Doorbell Queue Timer values. If successful, that + * indicates that the Firmware and Hardware support this. 
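 * This probe-by-query idiom is how the optional feature is detected:
 * only when the FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK query succeeds is
 * the SGE_DBQ_TIMER flag set just below.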
+ */ + params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK)); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, + 1, params, val); + + if (!ret) { + adap->sge.dbqtimer_tick = val[0]; + ret = t4_read_sge_dbqtimers(adap, + ARRAY_SIZE(adap->sge.dbqtimer_val), + adap->sge.dbqtimer_val); + } + + if (!ret) + adap->flags |= SGE_DBQ_TIMER; + if (is_bypass_device(adap->pdev->device)) adap->params.bypass = 1; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index c116f96956fe..82a8d1970060 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -83,28 +83,23 @@ static void cxgb4_process_flow_match(struct net_device *dev, struct tc_cls_flower_offload *cls, struct ch_filter_specification *fs) { + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(cls); u16 addr_type = 0; - if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { - struct flow_dissector_key_control *key = - skb_flow_dissector_target(cls->dissector, - FLOW_DISSECTOR_KEY_CONTROL, - cls->key); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_match_control match; - addr_type = key->addr_type; + flow_rule_match_control(rule, &match); + addr_type = match.key->addr_type; } - if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_BASIC)) { - struct flow_dissector_key_basic *key = - skb_flow_dissector_target(cls->dissector, - FLOW_DISSECTOR_KEY_BASIC, - cls->key); - struct flow_dissector_key_basic *mask = - skb_flow_dissector_target(cls->dissector, - FLOW_DISSECTOR_KEY_BASIC, - cls->mask); - u16 ethtype_key = ntohs(key->n_proto); - u16 ethtype_mask = ntohs(mask->n_proto); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; + u16 ethtype_key, ethtype_mask; + + flow_rule_match_basic(rule, &match); + ethtype_key = ntohs(match.key->n_proto); + ethtype_mask = ntohs(match.mask->n_proto); if (ethtype_key == ETH_P_ALL) { ethtype_key = 0; @@ -116,115 +111,89 @@ static void cxgb4_process_flow_match(struct net_device *dev, fs->val.ethtype = ethtype_key; fs->mask.ethtype = ethtype_mask; - fs->val.proto = key->ip_proto; - fs->mask.proto = mask->ip_proto; + fs->val.proto = match.key->ip_proto; + fs->mask.proto = match.mask->ip_proto; } if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { - struct flow_dissector_key_ipv4_addrs *key = - skb_flow_dissector_target(cls->dissector, - FLOW_DISSECTOR_KEY_IPV4_ADDRS, - cls->key); - struct flow_dissector_key_ipv4_addrs *mask = - skb_flow_dissector_target(cls->dissector, - FLOW_DISSECTOR_KEY_IPV4_ADDRS, - cls->mask); + struct flow_match_ipv4_addrs match; + + flow_rule_match_ipv4_addrs(rule, &match); fs->type = 0; - memcpy(&fs->val.lip[0], &key->dst, sizeof(key->dst)); - memcpy(&fs->val.fip[0], &key->src, sizeof(key->src)); - memcpy(&fs->mask.lip[0], &mask->dst, sizeof(mask->dst)); - memcpy(&fs->mask.fip[0], &mask->src, sizeof(mask->src)); + memcpy(&fs->val.lip[0], &match.key->dst, sizeof(match.key->dst)); + memcpy(&fs->val.fip[0], &match.key->src, sizeof(match.key->src)); + memcpy(&fs->mask.lip[0], &match.mask->dst, sizeof(match.mask->dst)); + memcpy(&fs->mask.fip[0], &match.mask->src, sizeof(match.mask->src)); /* also initialize nat_lip/fip to same values */ - memcpy(&fs->nat_lip[0], &key->dst, sizeof(key->dst)); - memcpy(&fs->nat_fip[0], &key->src, sizeof(key->src)); - + memcpy(&fs->nat_lip[0], &match.key->dst, 
sizeof(match.key->dst)); + memcpy(&fs->nat_fip[0], &match.key->src, sizeof(match.key->src)); } if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { - struct flow_dissector_key_ipv6_addrs *key = - skb_flow_dissector_target(cls->dissector, - FLOW_DISSECTOR_KEY_IPV6_ADDRS, - cls->key); - struct flow_dissector_key_ipv6_addrs *mask = - skb_flow_dissector_target(cls->dissector, - FLOW_DISSECTOR_KEY_IPV6_ADDRS, - cls->mask); + struct flow_match_ipv6_addrs match; + flow_rule_match_ipv6_addrs(rule, &match); fs->type = 1; - memcpy(&fs->val.lip[0], key->dst.s6_addr, sizeof(key->dst)); - memcpy(&fs->val.fip[0], key->src.s6_addr, sizeof(key->src)); - memcpy(&fs->mask.lip[0], mask->dst.s6_addr, sizeof(mask->dst)); - memcpy(&fs->mask.fip[0], mask->src.s6_addr, sizeof(mask->src)); + memcpy(&fs->val.lip[0], match.key->dst.s6_addr, + sizeof(match.key->dst)); + memcpy(&fs->val.fip[0], match.key->src.s6_addr, + sizeof(match.key->src)); + memcpy(&fs->mask.lip[0], match.mask->dst.s6_addr, + sizeof(match.mask->dst)); + memcpy(&fs->mask.fip[0], match.mask->src.s6_addr, + sizeof(match.mask->src)); /* also initialize nat_lip/fip to same values */ - memcpy(&fs->nat_lip[0], key->dst.s6_addr, sizeof(key->dst)); - memcpy(&fs->nat_fip[0], key->src.s6_addr, sizeof(key->src)); + memcpy(&fs->nat_lip[0], match.key->dst.s6_addr, + sizeof(match.key->dst)); + memcpy(&fs->nat_fip[0], match.key->src.s6_addr, + sizeof(match.key->src)); } - if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_PORTS)) { - struct flow_dissector_key_ports *key, *mask; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports match; - key = skb_flow_dissector_target(cls->dissector, - FLOW_DISSECTOR_KEY_PORTS, - cls->key); - mask = skb_flow_dissector_target(cls->dissector, - FLOW_DISSECTOR_KEY_PORTS, - cls->mask); - fs->val.lport = cpu_to_be16(key->dst); - fs->mask.lport = cpu_to_be16(mask->dst); - fs->val.fport = cpu_to_be16(key->src); - fs->mask.fport = cpu_to_be16(mask->src); + flow_rule_match_ports(rule, &match); + fs->val.lport = cpu_to_be16(match.key->dst); + fs->mask.lport = cpu_to_be16(match.mask->dst); + fs->val.fport = cpu_to_be16(match.key->src); + fs->mask.fport = cpu_to_be16(match.mask->src); /* also initialize nat_lport/fport to same values */ - fs->nat_lport = cpu_to_be16(key->dst); - fs->nat_fport = cpu_to_be16(key->src); + fs->nat_lport = cpu_to_be16(match.key->dst); + fs->nat_fport = cpu_to_be16(match.key->src); } - if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_IP)) { - struct flow_dissector_key_ip *key, *mask; - - key = skb_flow_dissector_target(cls->dissector, - FLOW_DISSECTOR_KEY_IP, - cls->key); - mask = skb_flow_dissector_target(cls->dissector, - FLOW_DISSECTOR_KEY_IP, - cls->mask); - fs->val.tos = key->tos; - fs->mask.tos = mask->tos; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { + struct flow_match_ip match; + + flow_rule_match_ip(rule, &match); + fs->val.tos = match.key->tos; + fs->mask.tos = match.mask->tos; } - if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) { - struct flow_dissector_key_keyid *key, *mask; - - key = skb_flow_dissector_target(cls->dissector, - FLOW_DISSECTOR_KEY_ENC_KEYID, - cls->key); - mask = skb_flow_dissector_target(cls->dissector, - FLOW_DISSECTOR_KEY_ENC_KEYID, - cls->mask); - fs->val.vni = be32_to_cpu(key->keyid); - fs->mask.vni = be32_to_cpu(mask->keyid); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { + struct flow_match_enc_keyid match; + + flow_rule_match_enc_keyid(rule, &match); + fs->val.vni = 
be32_to_cpu(match.key->keyid); + fs->mask.vni = be32_to_cpu(match.mask->keyid); if (fs->mask.vni) { fs->val.encap_vld = 1; fs->mask.encap_vld = 1; } } - if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_VLAN)) { - struct flow_dissector_key_vlan *key, *mask; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; u16 vlan_tci, vlan_tci_mask; - key = skb_flow_dissector_target(cls->dissector, - FLOW_DISSECTOR_KEY_VLAN, - cls->key); - mask = skb_flow_dissector_target(cls->dissector, - FLOW_DISSECTOR_KEY_VLAN, - cls->mask); - vlan_tci = key->vlan_id | (key->vlan_priority << - VLAN_PRIO_SHIFT); - vlan_tci_mask = mask->vlan_id | (mask->vlan_priority << - VLAN_PRIO_SHIFT); + flow_rule_match_vlan(rule, &match); + vlan_tci = match.key->vlan_id | (match.key->vlan_priority << + VLAN_PRIO_SHIFT); + vlan_tci_mask = match.mask->vlan_id | (match.mask->vlan_priority << + VLAN_PRIO_SHIFT); fs->val.ivlan = vlan_tci; fs->mask.ivlan = vlan_tci_mask; @@ -255,10 +224,12 @@ static void cxgb4_process_flow_match(struct net_device *dev, static int cxgb4_validate_flow_match(struct net_device *dev, struct tc_cls_flower_offload *cls) { + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(cls); + struct flow_dissector *dissector = rule->match.dissector; u16 ethtype_mask = 0; u16 ethtype_key = 0; - if (cls->dissector->used_keys & + if (dissector->used_keys & ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | BIT(FLOW_DISSECTOR_KEY_BASIC) | BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | @@ -268,36 +239,29 @@ static int cxgb4_validate_flow_match(struct net_device *dev, BIT(FLOW_DISSECTOR_KEY_VLAN) | BIT(FLOW_DISSECTOR_KEY_IP))) { netdev_warn(dev, "Unsupported key used: 0x%x\n", - cls->dissector->used_keys); + dissector->used_keys); return -EOPNOTSUPP; } - if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_BASIC)) { - struct flow_dissector_key_basic *key = - skb_flow_dissector_target(cls->dissector, - FLOW_DISSECTOR_KEY_BASIC, - cls->key); - struct flow_dissector_key_basic *mask = - skb_flow_dissector_target(cls->dissector, - FLOW_DISSECTOR_KEY_BASIC, - cls->mask); - ethtype_key = ntohs(key->n_proto); - ethtype_mask = ntohs(mask->n_proto); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; + + flow_rule_match_basic(rule, &match); + ethtype_key = ntohs(match.key->n_proto); + ethtype_mask = ntohs(match.mask->n_proto); } - if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_IP)) { + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { u16 eth_ip_type = ethtype_key & ethtype_mask; - struct flow_dissector_key_ip *mask; + struct flow_match_ip match; if (eth_ip_type != ETH_P_IP && eth_ip_type != ETH_P_IPV6) { netdev_err(dev, "IP Key supported only with IPv4/v6"); return -EINVAL; } - mask = skb_flow_dissector_target(cls->dissector, - FLOW_DISSECTOR_KEY_IP, - cls->mask); - if (mask->ttl) { + flow_rule_match_ip(rule, &match); + if (match.mask->ttl) { netdev_warn(dev, "ttl match unsupported for offload"); return -EOPNOTSUPP; } @@ -328,7 +292,7 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val, u32 mask, u32 offset, u8 htype) { switch (htype) { - case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH: + case FLOW_ACT_MANGLE_HDR_TYPE_ETH: switch (offset) { case PEDIT_ETH_DMAC_31_0: fs->newdmac = 1; @@ -346,7 +310,7 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val, offload_pedit(fs, val, mask, ETH_SMAC_47_16); } break; - case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4: + case FLOW_ACT_MANGLE_HDR_TYPE_IP4: switch (offset) { case 
PEDIT_IP4_SRC: offload_pedit(fs, val, mask, IP4_SRC); @@ -356,7 +320,7 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val, } fs->nat_mode = NAT_MODE_ALL; break; - case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6: + case FLOW_ACT_MANGLE_HDR_TYPE_IP6: switch (offset) { case PEDIT_IP6_SRC_31_0: offload_pedit(fs, val, mask, IP6_SRC_31_0); @@ -384,7 +348,7 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val, } fs->nat_mode = NAT_MODE_ALL; break; - case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP: + case FLOW_ACT_MANGLE_HDR_TYPE_TCP: switch (offset) { case PEDIT_TCP_SPORT_DPORT: if (~mask & PEDIT_TCP_UDP_SPORT_MASK) @@ -397,7 +361,7 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val, } fs->nat_mode = NAT_MODE_ALL; break; - case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP: + case FLOW_ACT_MANGLE_HDR_TYPE_UDP: switch (offset) { case PEDIT_UDP_SPORT_DPORT: if (~mask & PEDIT_TCP_UDP_SPORT_MASK) @@ -416,56 +380,63 @@ static void cxgb4_process_flow_actions(struct net_device *in, struct tc_cls_flower_offload *cls, struct ch_filter_specification *fs) { - const struct tc_action *a; + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(cls); + struct flow_action_entry *act; int i; - tcf_exts_for_each_action(i, a, cls->exts) { - if (is_tcf_gact_ok(a)) { + flow_action_for_each(i, act, &rule->action) { + switch (act->id) { + case FLOW_ACTION_ACCEPT: fs->action = FILTER_PASS; - } else if (is_tcf_gact_shot(a)) { + break; + case FLOW_ACTION_DROP: fs->action = FILTER_DROP; - } else if (is_tcf_mirred_egress_redirect(a)) { - struct net_device *out = tcf_mirred_dev(a); + break; + case FLOW_ACTION_REDIRECT: { + struct net_device *out = act->dev; struct port_info *pi = netdev_priv(out); fs->action = FILTER_SWITCH; fs->eport = pi->port_id; - } else if (is_tcf_vlan(a)) { - u32 vlan_action = tcf_vlan_action(a); - u8 prio = tcf_vlan_push_prio(a); - u16 vid = tcf_vlan_push_vid(a); + } + break; + case FLOW_ACTION_VLAN_POP: + case FLOW_ACTION_VLAN_PUSH: + case FLOW_ACTION_VLAN_MANGLE: { + u8 prio = act->vlan.prio; + u16 vid = act->vlan.vid; u16 vlan_tci = (prio << VLAN_PRIO_SHIFT) | vid; - - switch (vlan_action) { - case TCA_VLAN_ACT_POP: + switch (act->id) { + case FLOW_ACTION_VLAN_POP: fs->newvlan |= VLAN_REMOVE; break; - case TCA_VLAN_ACT_PUSH: + case FLOW_ACTION_VLAN_PUSH: fs->newvlan |= VLAN_INSERT; fs->vlan = vlan_tci; break; - case TCA_VLAN_ACT_MODIFY: + case FLOW_ACTION_VLAN_MANGLE: fs->newvlan |= VLAN_REWRITE; fs->vlan = vlan_tci; break; default: break; } - } else if (is_tcf_pedit(a)) { + } + break; + case FLOW_ACTION_MANGLE: { u32 mask, val, offset; - int nkeys, i; u8 htype; - nkeys = tcf_pedit_nkeys(a); - for (i = 0; i < nkeys; i++) { - htype = tcf_pedit_htype(a, i); - mask = tcf_pedit_mask(a, i); - val = tcf_pedit_val(a, i); - offset = tcf_pedit_offset(a, i); + htype = act->mangle.htype; + mask = act->mangle.mask; + val = act->mangle.val; + offset = act->mangle.offset; - process_pedit_field(fs, val, mask, offset, - htype); + process_pedit_field(fs, val, mask, offset, htype); } + break; + default: + break; } } } @@ -484,101 +455,89 @@ static bool valid_l4_mask(u32 mask) } static bool valid_pedit_action(struct net_device *dev, - const struct tc_action *a) + const struct flow_action_entry *act) { u32 mask, offset; - u8 cmd, htype; - int nkeys, i; - - nkeys = tcf_pedit_nkeys(a); - for (i = 0; i < nkeys; i++) { - htype = tcf_pedit_htype(a, i); - cmd = tcf_pedit_cmd(a, i); - mask = tcf_pedit_mask(a, i); - offset = tcf_pedit_offset(a, i); - - if (cmd != 
TCA_PEDIT_KEY_EX_CMD_SET) { - netdev_err(dev, "%s: Unsupported pedit cmd\n", + u8 htype; + + htype = act->mangle.htype; + mask = act->mangle.mask; + offset = act->mangle.offset; + + switch (htype) { + case FLOW_ACT_MANGLE_HDR_TYPE_ETH: + switch (offset) { + case PEDIT_ETH_DMAC_31_0: + case PEDIT_ETH_DMAC_47_32_SMAC_15_0: + case PEDIT_ETH_SMAC_47_16: + break; + default: + netdev_err(dev, "%s: Unsupported pedit field\n", __func__); return false; } - - switch (htype) { - case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH: - switch (offset) { - case PEDIT_ETH_DMAC_31_0: - case PEDIT_ETH_DMAC_47_32_SMAC_15_0: - case PEDIT_ETH_SMAC_47_16: - break; - default: - netdev_err(dev, "%s: Unsupported pedit field\n", - __func__); - return false; - } - break; - case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4: - switch (offset) { - case PEDIT_IP4_SRC: - case PEDIT_IP4_DST: - break; - default: - netdev_err(dev, "%s: Unsupported pedit field\n", - __func__); - return false; - } + break; + case FLOW_ACT_MANGLE_HDR_TYPE_IP4: + switch (offset) { + case PEDIT_IP4_SRC: + case PEDIT_IP4_DST: break; - case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6: - switch (offset) { - case PEDIT_IP6_SRC_31_0: - case PEDIT_IP6_SRC_63_32: - case PEDIT_IP6_SRC_95_64: - case PEDIT_IP6_SRC_127_96: - case PEDIT_IP6_DST_31_0: - case PEDIT_IP6_DST_63_32: - case PEDIT_IP6_DST_95_64: - case PEDIT_IP6_DST_127_96: - break; - default: - netdev_err(dev, "%s: Unsupported pedit field\n", - __func__); - return false; - } + default: + netdev_err(dev, "%s: Unsupported pedit field\n", + __func__); + return false; + } + break; + case FLOW_ACT_MANGLE_HDR_TYPE_IP6: + switch (offset) { + case PEDIT_IP6_SRC_31_0: + case PEDIT_IP6_SRC_63_32: + case PEDIT_IP6_SRC_95_64: + case PEDIT_IP6_SRC_127_96: + case PEDIT_IP6_DST_31_0: + case PEDIT_IP6_DST_63_32: + case PEDIT_IP6_DST_95_64: + case PEDIT_IP6_DST_127_96: break; - case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP: - switch (offset) { - case PEDIT_TCP_SPORT_DPORT: - if (!valid_l4_mask(~mask)) { - netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n", - __func__); - return false; - } - break; - default: - netdev_err(dev, "%s: Unsupported pedit field\n", + default: + netdev_err(dev, "%s: Unsupported pedit field\n", + __func__); + return false; + } + break; + case FLOW_ACT_MANGLE_HDR_TYPE_TCP: + switch (offset) { + case PEDIT_TCP_SPORT_DPORT: + if (!valid_l4_mask(~mask)) { + netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n", __func__); return false; } break; - case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP: - switch (offset) { - case PEDIT_UDP_SPORT_DPORT: - if (!valid_l4_mask(~mask)) { - netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n", - __func__); - return false; - } - break; - default: - netdev_err(dev, "%s: Unsupported pedit field\n", + default: + netdev_err(dev, "%s: Unsupported pedit field\n", + __func__); + return false; + } + break; + case FLOW_ACT_MANGLE_HDR_TYPE_UDP: + switch (offset) { + case PEDIT_UDP_SPORT_DPORT: + if (!valid_l4_mask(~mask)) { + netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n", __func__); return false; } break; default: - netdev_err(dev, "%s: Unsupported pedit type\n", + netdev_err(dev, "%s: Unsupported pedit field\n", __func__); return false; } + break; + default: + netdev_err(dev, "%s: Unsupported pedit type\n", __func__); + return false; } return true; } @@ -586,24 +545,26 @@ static bool valid_pedit_action(struct net_device *dev, static int cxgb4_validate_flow_actions(struct net_device *dev, struct tc_cls_flower_offload *cls) { - const struct tc_action *a; + struct flow_rule *rule = 
tc_cls_flower_offload_flow_rule(cls); + struct flow_action_entry *act; bool act_redir = false; bool act_pedit = false; bool act_vlan = false; int i; - tcf_exts_for_each_action(i, a, cls->exts) { - if (is_tcf_gact_ok(a)) { - /* Do nothing */ - } else if (is_tcf_gact_shot(a)) { + flow_action_for_each(i, act, &rule->action) { + switch (act->id) { + case FLOW_ACTION_ACCEPT: + case FLOW_ACTION_DROP: /* Do nothing */ - } else if (is_tcf_mirred_egress_redirect(a)) { + break; + case FLOW_ACTION_REDIRECT: { struct adapter *adap = netdev2adap(dev); struct net_device *n_dev, *target_dev; unsigned int i; bool found = false; - target_dev = tcf_mirred_dev(a); + target_dev = act->dev; for_each_port(adap, i) { n_dev = adap->port[i]; if (target_dev == n_dev) { @@ -621,15 +582,18 @@ static int cxgb4_validate_flow_actions(struct net_device *dev, return -EINVAL; } act_redir = true; - } else if (is_tcf_vlan(a)) { - u16 proto = be16_to_cpu(tcf_vlan_push_proto(a)); - u32 vlan_action = tcf_vlan_action(a); + } + break; + case FLOW_ACTION_VLAN_POP: + case FLOW_ACTION_VLAN_PUSH: + case FLOW_ACTION_VLAN_MANGLE: { + u16 proto = be16_to_cpu(act->vlan.proto); - switch (vlan_action) { - case TCA_VLAN_ACT_POP: + switch (act->id) { + case FLOW_ACTION_VLAN_POP: break; - case TCA_VLAN_ACT_PUSH: - case TCA_VLAN_ACT_MODIFY: + case FLOW_ACTION_VLAN_PUSH: + case FLOW_ACTION_VLAN_MANGLE: if (proto != ETH_P_8021Q) { netdev_err(dev, "%s: Unsupported vlan proto\n", __func__); @@ -642,13 +606,17 @@ static int cxgb4_validate_flow_actions(struct net_device *dev, return -EOPNOTSUPP; } act_vlan = true; - } else if (is_tcf_pedit(a)) { - bool pedit_valid = valid_pedit_action(dev, a); + } + break; + case FLOW_ACTION_MANGLE: { + bool pedit_valid = valid_pedit_action(dev, act); if (!pedit_valid) return -EOPNOTSUPP; act_pedit = true; - } else { + } + break; + default: netdev_err(dev, "%s: Unsupported action\n", __func__); return -EOPNOTSUPP; } @@ -843,9 +811,9 @@ int cxgb4_tc_flower_stats(struct net_device *dev, if (ofld_stats->packet_count != packets) { if (ofld_stats->prev_packet_count != packets) ofld_stats->last_used = jiffies; - tcf_exts_stats_update(cls->exts, bytes - ofld_stats->byte_count, - packets - ofld_stats->packet_count, - ofld_stats->last_used); + flow_stats_update(&cls->stats, bytes - ofld_stats->byte_count, + packets - ofld_stats->packet_count, + ofld_stats->last_used); ofld_stats->packet_count = packets; ofld_stats->byte_count = bytes; diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index fc0bc6458e84..f18493fb32aa 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -80,9 +80,10 @@ * Max number of Tx descriptors we clean up at a time. Should be modest as * freeing skbs isn't cheap and it happens while holding locks. We just need * to free packets faster than they arrive, we eventually catch up and keep - * the amortized cost reasonable. Must be >= 2 * TXQ_STOP_THRES. + * the amortized cost reasonable. Must be >= 2 * TXQ_STOP_THRES. It should + * also match the CIDX Flush Threshold. */ -#define MAX_TX_RECLAIM 16 +#define MAX_TX_RECLAIM 32 /* * Max number of Rx buffers we replenish at a time. 
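/* A minimal sketch (not part of the patch) of the budgeted-reclaim
 * convention the hunks below introduce; "example_tx_maintenance" is a
 * hypothetical caller.  Hot paths pass -1 to take the default
 * MAX_TX_RECLAIM cap; periodic maintenance may pass a smaller explicit
 * budget to bound Tx lock hold time even further.
 */
static void example_tx_maintenance(struct adapter *adap,
				   struct sge_eth_txq *eq)
{
	/* -1: reclaim up to the default MAX_TX_RECLAIM (32) descriptors */
	t4_sge_eth_txq_egress_update(adap, eq, -1);

	/* explicit budget: reclaim at most 8 descriptors on this pass */
	t4_sge_eth_txq_egress_update(adap, eq, 8);
}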
Again keep this modest, @@ -401,31 +402,52 @@ static inline int reclaimable(const struct sge_txq *q) } /** - * cxgb4_reclaim_completed_tx - reclaims completed Tx descriptors + * reclaim_completed_tx - reclaims completed TX Descriptors * @adap: the adapter * @q: the Tx queue to reclaim completed descriptors from + * @maxreclaim: the maximum number of TX Descriptors to reclaim or -1 * @unmap: whether the buffers should be unmapped for DMA * - * Reclaims Tx descriptors that the SGE has indicated it has processed, - * and frees the associated buffers if possible. Called with the Tx - * queue locked. + * Reclaims Tx Descriptors that the SGE has indicated it has processed, + * and frees the associated buffers if possible. If @maxreclaim == -1, then + * we'll use a default maximum. Called with the TX Queue locked. */ -inline void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q, - bool unmap) +static inline int reclaim_completed_tx(struct adapter *adap, struct sge_txq *q, + int maxreclaim, bool unmap) { - int avail = reclaimable(q); + int reclaim = reclaimable(q); - if (avail) { + if (reclaim) { /* * Limit the amount of clean up work we do at a time to keep * the Tx lock hold time O(1). */ - if (avail > MAX_TX_RECLAIM) - avail = MAX_TX_RECLAIM; + if (maxreclaim < 0) + maxreclaim = MAX_TX_RECLAIM; + if (reclaim > maxreclaim) + reclaim = maxreclaim; - free_tx_desc(adap, q, avail, unmap); - q->in_use -= avail; + free_tx_desc(adap, q, reclaim, unmap); + q->in_use -= reclaim; } + + return reclaim; +} + +/** + * cxgb4_reclaim_completed_tx - reclaims completed Tx descriptors + * @adap: the adapter + * @q: the Tx queue to reclaim completed descriptors from + * @unmap: whether the buffers should be unmapped for DMA + * + * Reclaims Tx descriptors that the SGE has indicated it has processed, + * and frees the associated buffers if possible. Called with the Tx + * queue locked. + */ +void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q, + bool unmap) +{ + (void)reclaim_completed_tx(adap, q, -1, unmap); } EXPORT_SYMBOL(cxgb4_reclaim_completed_tx);
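The refactoring above turns the old exported reclaim routine into a budgeted core plus a thin wrapper. A minimal sketch of the two calling patterns this enables (editor's illustration, not part of the patch; adap, q and budget stand in for a caller's variables):

	/* Hot TX path: reclaim with the default cap (MAX_TX_RECLAIM). */
	reclaim_completed_tx(adap, &q->q, -1, true);

	/* Maintenance path: bound the work by a budget and account for
	 * what was actually freed.
	 */
	budget -= reclaim_completed_tx(adap, &q->q, budget, true);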
@@ -1288,6 +1310,44 @@ static inline void t6_fill_tnl_lso(struct sk_buff *skb, } /** + * t4_sge_eth_txq_egress_update - handle Ethernet TX Queue update + * @adap: the adapter + * @eq: the Ethernet TX Queue + * @maxreclaim: the maximum number of TX Descriptors to reclaim or -1 + * + * We're typically called here to update the state of an Ethernet TX + * Queue with respect to the hardware's progress in consuming the TX + * Work Requests that we've put on that Egress Queue. This happens + * when we get Egress Queue Update messages and also prophylactically + * in regular timer-based Ethernet TX Queue maintenance. + */ +int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq, + int maxreclaim) +{ + struct sge_txq *q = &eq->q; + unsigned int reclaimed; + + if (!q->in_use || !__netif_tx_trylock(eq->txq)) + return 0; + + /* Reclaim pending completed TX Descriptors. */ + reclaimed = reclaim_completed_tx(adap, &eq->q, maxreclaim, true); + + /* If the TX Queue is currently stopped and there's now more than half + * the queue available, restart it. Otherwise bail out since the rest + * of what we want to do here deals with the possibility of shipping any + * currently buffered Coalesced TX Work Request. + */ + if (netif_tx_queue_stopped(eq->txq) && txq_avail(q) > (q->size / 2)) { + netif_tx_wake_queue(eq->txq); + eq->q.restarts++; + } + + __netif_tx_unlock(eq->txq); + return reclaimed; +} + +/** * cxgb4_eth_xmit - add a packet to an Ethernet Tx queue * @skb: the packet * @dev: the egress net device @@ -1357,7 +1417,7 @@ out_free: dev_kfree_skb_any(skb); } skb_tx_timestamp(skb); - cxgb4_reclaim_completed_tx(adap, &q->q, true); + reclaim_completed_tx(adap, &q->q, -1, true); cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F; #ifdef CONFIG_CHELSIO_T4_FCOE @@ -1400,8 +1460,25 @@ out_free: dev_kfree_skb_any(skb); wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2)); if (unlikely(credits < ETHTXQ_STOP_THRES)) { + /* After we're done injecting the Work Request for this + * packet, we'll be below our "stop threshold" so stop the TX + * Queue now and schedule a request for an SGE Egress Queue + * Update message. The queue will get started later on when + * the firmware processes this Work Request and sends us an + * Egress Queue Status Update message indicating that space + * has opened up. + */ eth_txq_stop(q); - wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; + + /* If we're using the SGE Doorbell Queue Timer facility, we + * don't need to ask the Firmware to send us Egress Queue CIDX + * Updates: the Hardware will do this automatically. And + * since we send the Ingress Queue CIDX Updates to the + * corresponding Ethernet Response Queue, we'll get them very + * quickly. + */ + if (!q->dbqt) + wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; } wr = (void *)&q->q.desc[q->q.pidx]; @@ -1671,7 +1748,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb, /* Take this opportunity to reclaim any TX Descriptors whose DMA * transfers have completed. */ - cxgb4_reclaim_completed_tx(adapter, &txq->q, true); + reclaim_completed_tx(adapter, &txq->q, -1, true); /* Calculate the number of flits and TX Descriptors we're going to * need along with how many TX Descriptors will be left over after @@ -1715,7 +1792,16 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb, * has opened up. */ eth_txq_stop(txq); - wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; + + /* If we're using the SGE Doorbell Queue Timer facility, we + * don't need to ask the Firmware to send us Egress Queue CIDX + * Updates: the Hardware will do this automatically. And + * since we send the Ingress Queue CIDX Updates to the + * corresponding Ethernet Response Queue, we'll get them very + * quickly. + */ + if (!txq->dbqt) + wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; } /* Start filling in our Work Request. Note that we do _not_ handle @@ -2794,6 +2880,74 @@ static int t4_tx_hststamp(struct adapter *adapter, struct sk_buff *skb, } /** + * t4_tx_completion_handler - handle CPL_SGE_EGR_UPDATE messages + * @rspq: Ethernet RX Response Queue associated with Ethernet TX Queue + * @rsp: Response Entry pointer into Response Queue + * @gl: Gather List pointer + * + * For adapters which support the SGE Doorbell Queue Timer facility, + * we configure the Ethernet TX Queues to send CIDX Updates to the + * Associated Ethernet RX Response Queue with CPL_SGE_EGR_UPDATE + * messages. This adds a small load to PCIe Link RX bandwidth and, + * potentially, higher CPU Interrupt load, but allows us to respond + * much more quickly to the CIDX Updates. This is important for + * Upper Layer Software which isn't willing to have a large amount + * of TX Data outstanding before receiving DMA Completions.
+ */ +static void t4_tx_completion_handler(struct sge_rspq *rspq, + const __be64 *rsp, + const struct pkt_gl *gl) +{ + u8 opcode = ((const struct rss_header *)rsp)->opcode; + struct port_info *pi = netdev_priv(rspq->netdev); + struct adapter *adapter = rspq->adap; + struct sge *s = &adapter->sge; + struct sge_eth_txq *txq; + + /* skip RSS header */ + rsp++; + + /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG. + */ + if (unlikely(opcode == CPL_FW4_MSG && + ((const struct cpl_fw4_msg *)rsp)->type == + FW_TYPE_RSSCPL)) { + rsp++; + opcode = ((const struct rss_header *)rsp)->opcode; + rsp++; + } + + if (unlikely(opcode != CPL_SGE_EGR_UPDATE)) { + pr_info("%s: unexpected FW4/CPL %#x on Rx queue\n", + __func__, opcode); + return; + } + + txq = &s->ethtxq[pi->first_qset + rspq->idx]; + + /* We've got the Hardware Consumer Index Update in the Egress Update + * message. If we're using the SGE Doorbell Queue Timer mechanism, + * these Egress Update messages will be our sole CIDX Updates we get + * since we don't want to chew up PCIe bandwidth for both Ingress + * Messages and Status Page writes. However, the code which manages + * reclaiming successfully DMA'ed TX Work Requests uses the CIDX value + * stored in the Status Page at the end of the TX Queue. It's easiest + * to simply copy the CIDX Update value from the Egress Update message + * to the Status Page. Also note that no Endian issues need to be + * considered here since both are Big Endian and we're just copying + * bytes consistently ... + */ + if (txq->dbqt) { + struct cpl_sge_egr_update *egr; + + egr = (struct cpl_sge_egr_update *)rsp; + WRITE_ONCE(txq->q.stat->cidx, egr->cidx); + } + + t4_sge_eth_txq_egress_update(adapter, txq, -1); +} +
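Copying egr->cidx into the Status Page above works because the reclaim math only ever consults the Status Page copy of the hardware Consumer Index. A sketch of that computation, along the lines of the driver's existing reclaimable() helper (editor's illustration; both CIDX values are Big Endian 16-bit, which is why the verbatim byte copy is safe):

	/* Completed descriptors = hardware CIDX (from the Status Page)
	 * minus our software CIDX, modulo the ring size.
	 */
	static inline int sketch_reclaimable(const struct sge_txq *q)
	{
		int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
		int delta = hw_cidx - q->cidx;

		return delta >= 0 ? delta : delta + q->size;
	}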
+/** * t4_ethrx_handler - process an ingress ethernet packet * @q: the response queue that received the packet * @rsp: the response queue descriptor holding the RX_PKT message @@ -2816,6 +2970,15 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, struct port_info *pi; int ret = 0; + /* If we're looking at TX Queue CIDX Update, handle that separately + * and return. + */ + if (unlikely((*(u8 *)rsp == CPL_FW4_MSG) || + (*(u8 *)rsp == CPL_SGE_EGR_UPDATE))) { + t4_tx_completion_handler(q, rsp, si); + return 0; + } + if (unlikely(*(u8 *)rsp == cpl_trace_pkt)) return handle_trace_pkt(q->adap, si); @@ -3289,10 +3452,10 @@ done: static void sge_tx_timer_cb(struct timer_list *t) { - unsigned long m; - unsigned int i, budget; struct adapter *adap = from_timer(adap, t, sge.tx_timer); struct sge *s = &adap->sge; + unsigned long m, period; + unsigned int i, budget; for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++) for (m = s->txq_maperr[i]; m; m &= m - 1) { @@ -3320,29 +3483,29 @@ static void sge_tx_timer_cb(struct timer_list *t) budget = MAX_TIMER_TX_RECLAIM; i = s->ethtxq_rover; do { - struct sge_eth_txq *q = &s->ethtxq[i]; - - if (q->q.in_use && - time_after_eq(jiffies, q->txq->trans_start + HZ / 100) && - __netif_tx_trylock(q->txq)) { - int avail = reclaimable(&q->q); - - if (avail) { - if (avail > budget) - avail = budget; - - free_tx_desc(adap, &q->q, avail, true); - q->q.in_use -= avail; - budget -= avail; - } - __netif_tx_unlock(q->txq); - } + budget -= t4_sge_eth_txq_egress_update(adap, &s->ethtxq[i], + budget); + if (!budget) + break; if (++i >= s->ethqsets) i = 0; - } while (budget && i != s->ethtxq_rover); + } while (i != s->ethtxq_rover); s->ethtxq_rover = i; - mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2)); + + if (budget == 0) { + /* If we found too many reclaimable packets, schedule a timer + * in the near future to continue where we left off. + */ + period = 2; + } else { + /* We reclaimed all reclaimable TX Descriptors, so reschedule + * at the normal period. + */ + period = TX_QCHECK_PERIOD; + } + + mod_timer(&s->tx_timer, jiffies + period); } /** @@ -3421,7 +3584,8 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, : FW_IQ_IQTYPE_OFLD)); if (fl) { - enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip); + unsigned int chip_ver = + CHELSIO_CHIP_VERSION(adap->params.chip); /* Allocate the ring for the hardware free list (with space * for its status page) along with the associated software @@ -3459,10 +3623,10 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, * the smaller 64-byte value there). */ c.fl0dcaen_to_fl0cidxfthresh = - htons(FW_IQ_CMD_FL0FBMIN_V(chip <= CHELSIO_T5 ? + htons(FW_IQ_CMD_FL0FBMIN_V(chip_ver <= CHELSIO_T5 ? FETCHBURSTMIN_128B_X : - FETCHBURSTMIN_64B_X) | - FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ? + FETCHBURSTMIN_64B_T6_X) | + FW_IQ_CMD_FL0FBMAX_V((chip_ver <= CHELSIO_T5) ? FETCHBURSTMAX_512B_X : FETCHBURSTMAX_256B_X)); c.fl0size = htons(flsz); @@ -3584,14 +3748,24 @@ static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) adap->sge.egr_map[id - adap->sge.egr_start] = q; } +/** + * t4_sge_alloc_eth_txq - allocate an Ethernet TX Queue + * @adap: the adapter + * @txq: the SGE Ethernet TX Queue to initialize + * @dev: the Linux Network Device + * @netdevq: the corresponding Linux TX Queue + * @iqid: the Ingress Queue to which to deliver CIDX Update messages + * @dbqt: whether this TX Queue will use the SGE Doorbell Queue Timers + */ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, struct net_device *dev, struct netdev_queue *netdevq, - unsigned int iqid) + unsigned int iqid, u8 dbqt) { - int ret, nentries; - struct fw_eq_eth_cmd c; - struct sge *s = &adap->sge; + unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); struct port_info *pi = netdev_priv(dev); + struct sge *s = &adap->sge; + struct fw_eq_eth_cmd c; + int ret, nentries; /* Add status entries */ nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); @@ -3610,19 +3784,47 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, FW_EQ_ETH_CMD_VFN_V(0)); c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F | FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c)); - c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F | - FW_EQ_ETH_CMD_VIID_V(pi->viid)); + + /* For TX Ethernet Queues using the SGE Doorbell Queue Timer + * mechanism, we use Ingress Queue messages for Hardware Consumer + * Index Updates on the TX Queue. Otherwise we have the Hardware + * write the CIDX Updates into the Status Page at the end of the + * TX Queue. + */ + c.autoequiqe_to_viid = htonl((dbqt + ? FW_EQ_ETH_CMD_AUTOEQUIQE_F + : FW_EQ_ETH_CMD_AUTOEQUEQE_F) | + FW_EQ_ETH_CMD_VIID_V(pi->viid)); + c.fetchszm_to_iqid = - htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) | + htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(dbqt + ? HOSTFCMODE_INGRESS_QUEUE_X + : HOSTFCMODE_STATUS_PAGE_X) | FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) | FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid)); + + /* Note that the CIDX Flush Threshold should match MAX_TX_RECLAIM. */ c.dcaen_to_eqsize = - htonl(FW_EQ_ETH_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) | + htonl(FW_EQ_ETH_CMD_FBMIN_V(chip_ver <= CHELSIO_T5 + ?
FETCHBURSTMIN_64B_X + : FETCHBURSTMIN_64B_T6_X) | FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) | FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) | FW_EQ_ETH_CMD_EQSIZE_V(nentries)); + c.eqaddr = cpu_to_be64(txq->q.phys_addr); + /* If we're using the SGE Doorbell Queue Timer mechanism, pass in the + * currently configured Timer Index. This can be changed later via an + * ethtool -C tx-usecs {Timer Val} command. Note that the SGE + * Doorbell Queue mode is currently automatically enabled in the + * Firmware by setting either AUTOEQUEQE or AUTOEQUIQE ... + */ + if (dbqt) + c.timeren_timerix = + cpu_to_be32(FW_EQ_ETH_CMD_TIMEREN_F | + FW_EQ_ETH_CMD_TIMERIX_V(txq->dbqtimerix)); + ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); if (ret) { kfree(txq->q.sdesc); @@ -3639,6 +3841,8 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, txq->txq = netdevq; txq->tso = txq->tx_cso = txq->vlan_ins = 0; txq->mapping_err = 0; + txq->dbqt = dbqt; + return 0; }
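With the new @dbqt argument, the choice between Status Page CIDX writes and Ingress Queue CIDX Update messages is made per TX Queue at allocation time. A hedged sketch of what a call site might look like (editor's illustration: the dbq_timer_capable flag is hypothetical, and steering @iqid at the companion Ethernet RX Response Queue follows from t4_tx_completion_handler() above):

	err = t4_sge_alloc_eth_txq(adap, &s->ethtxq[i], dev,
				   netdev_get_tx_queue(dev, i),
				   s->ethrxq[i].rspq.cntxt_id, /* CIDX Updates land here */
				   dbq_timer_capable);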
@@ -3646,10 +3850,11 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, struct net_device *dev, unsigned int iqid, unsigned int cmplqid) { - int ret, nentries; - struct fw_eq_ctrl_cmd c; - struct sge *s = &adap->sge; + unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); struct port_info *pi = netdev_priv(dev); + struct sge *s = &adap->sge; + struct fw_eq_ctrl_cmd c; + int ret, nentries; /* Add status entries */ nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); @@ -3673,7 +3878,9 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) | FW_EQ_CTRL_CMD_FETCHRO_F | FW_EQ_CTRL_CMD_IQID_V(iqid)); c.dcaen_to_eqsize = - htonl(FW_EQ_CTRL_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) | + htonl(FW_EQ_CTRL_CMD_FBMIN_V(chip_ver <= CHELSIO_T5 + ? FETCHBURSTMIN_64B_X + : FETCHBURSTMIN_64B_T6_X) | FW_EQ_CTRL_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) | FW_EQ_CTRL_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) | FW_EQ_CTRL_CMD_EQSIZE_V(nentries)); @@ -3713,6 +3920,7 @@ int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq, struct net_device *dev, unsigned int iqid, unsigned int uld_type) { + unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); int ret, nentries; struct fw_eq_ofld_cmd c; struct sge *s = &adap->sge; @@ -3743,7 +3951,9 @@ int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq, FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) | FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid)); c.dcaen_to_eqsize = - htonl(FW_EQ_OFLD_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) | + htonl(FW_EQ_OFLD_CMD_FBMIN_V(chip_ver <= CHELSIO_T5 + ? FETCHBURSTMIN_64B_X + : FETCHBURSTMIN_64B_T6_X) | FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) | FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) | FW_EQ_OFLD_CMD_EQSIZE_V(nentries)); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index c5e5466ee38b..49e4374d584c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -4962,7 +4962,13 @@ static void pl_intr_handler(struct adapter *adap) */ int t4_slow_intr_handler(struct adapter *adapter) { - u32 cause = t4_read_reg(adapter, PL_INT_CAUSE_A); + /* There are rare cases where a PL_INT_CAUSE bit may end up getting + * set when the corresponding PL_INT_ENABLE bit isn't set. It's + * easiest just to mask that case here. + */ + u32 raw_cause = t4_read_reg(adapter, PL_INT_CAUSE_A); + u32 enable = t4_read_reg(adapter, PL_INT_ENABLE_A); + u32 cause = raw_cause & enable; if (!(cause & GLBL_INTR_MASK)) return 0; @@ -5014,7 +5020,7 @@ int t4_slow_intr_handler(struct adapter *adapter) ulptx_intr_handler(adapter); /* Clear the interrupts just processed for which we are the master. */ - t4_write_reg(adapter, PL_INT_CAUSE_A, cause & GLBL_INTR_MASK); + t4_write_reg(adapter, PL_INT_CAUSE_A, raw_cause & GLBL_INTR_MASK); (void)t4_read_reg(adapter, PL_INT_CAUSE_A); /* flush */ return 1; } @@ -6713,6 +6719,47 @@ int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type) } /** + * t4_read_sge_dbqtimers - read SGE Doorbell Queue Timer values + * @adap: the adapter + * @ndbqtimers: size of the provided SGE Doorbell Queue Timer table + * @dbqtimers: SGE Doorbell Queue Timer table + * + * Reads the SGE Doorbell Queue Timer values into the provided table. + * Returns 0 on success (Firmware and Hardware support this feature), + * an error on failure. + */ +int t4_read_sge_dbqtimers(struct adapter *adap, unsigned int ndbqtimers, + u16 *dbqtimers) +{ + int ret, dbqtimerix; + + ret = 0; + dbqtimerix = 0; + while (dbqtimerix < ndbqtimers) { + int nparams, param; + u32 params[7], vals[7]; + + nparams = ndbqtimers - dbqtimerix; + if (nparams > ARRAY_SIZE(params)) + nparams = ARRAY_SIZE(params); + + for (param = 0; param < nparams; param++) + params[param] = + (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMER) | + FW_PARAMS_PARAM_Y_V(dbqtimerix + param)); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, + nparams, params, vals); + if (ret) + break; + + for (param = 0; param < nparams; param++) + dbqtimers[dbqtimerix++] = vals[param]; + } + return ret; +} + +/** * t4_fw_hello - establish communication with FW * @adap: the adapter * @mbox: mailbox to use for the FW command diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h index 361d5032c288..002fc62ea726 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h @@ -91,6 +91,7 @@ enum { SGE_CTXT_SIZE = 24, /* size of SGE context */ SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */ SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */ + SGE_NDBQTIMERS = 8, /* # of Doorbell Queue Timer values */ SGE_MAX_IQ_SIZE = 65520, SGE_TIMER_RSTRT_CNTR = 6, /* restart RX packet threshold counter */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index bf7325f6d553..0c5373462ced 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -218,6 +218,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x6088), /* Custom T62100-CR */ CH_PCI_ID_TABLE_FENTRY(0x6089), /* Custom T62100-KR */ CH_PCI_ID_TABLE_FENTRY(0x608a), /* Custom T62100-CR */ + CH_PCI_ID_TABLE_FENTRY(0x608b), /* Custom T6225-CR */ CH_PCI_DEVICE_ID_TABLE_DEFINE_END; #endif /* __T4_PCI_ID_TBL_H__ */
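Since t4_read_sge_dbqtimers() fails when the firmware doesn't implement FW_PARAMS_PARAM_DEV_DBQ_TIMER, it doubles as a capability probe. A sketch of how an initialization path might use it together with the new SGE_NDBQTIMERS constant (editor's illustration; the dbqtimer_val destination field is hypothetical here):

	static int sketch_probe_dbqtimers(struct adapter *adap)
	{
		u16 vals[SGE_NDBQTIMERS];
		int ret;

		ret = t4_read_sge_dbqtimers(adap, ARRAY_SIZE(vals), vals);
		if (ret)
			return ret;	/* no Doorbell Queue Timer support */

		/* Firmware supports the feature; remember the timer table. */
		memcpy(adap->sge.dbqtimer_val, vals, sizeof(vals));
		return 0;
	}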
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h index f6558cbfc54e..eb1aa82149db 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h @@ -71,12 +71,18 @@ #define FETCHBURSTMIN_64B_X 2 #define FETCHBURSTMIN_128B_X 3 +/* T6 and later use a single-bit encoding for FetchBurstMin */ +#define FETCHBURSTMIN_64B_T6_X 0 +#define FETCHBURSTMIN_128B_T6_X 1 + #define FETCHBURSTMAX_256B_X 2 #define FETCHBURSTMAX_512B_X 3 +#define HOSTFCMODE_INGRESS_QUEUE_X 1 #define HOSTFCMODE_STATUS_PAGE_X 2 #define CIDXFLUSHTHRESH_32_X 5 +#define CIDXFLUSHTHRESH_128_X 7 #define UPDATEDELIVERY_INTERRUPT_X 1 diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 1d9b3e1e5f94..631f1663f4e0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -1254,6 +1254,8 @@ enum fw_params_param_dev { FW_PARAMS_PARAM_DEV_RDMA_WRITE_WITH_IMM = 0x21, FW_PARAMS_PARAM_DEV_RI_WRITE_CMPL_WR = 0x24, FW_PARAMS_PARAM_DEV_OPAQUE_VIID_SMT_EXTN = 0x27, + FW_PARAMS_PARAM_DEV_DBQ_TIMER = 0x29, + FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK = 0x2A, }; /* @@ -1322,6 +1324,7 @@ enum fw_params_param_dmaq { FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL = 0x11, FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH = 0x12, FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH = 0x13, + FW_PARAMS_PARAM_DMAQ_EQ_TIMERIX = 0x15, FW_PARAMS_PARAM_DMAQ_CONM_CTXT = 0x20, }; @@ -1751,8 +1754,8 @@ struct fw_eq_eth_cmd { __be32 fetchszm_to_iqid; __be32 dcaen_to_eqsize; __be64 eqaddr; - __be32 viid_pkd; - __be32 r8_lo; + __be32 autoequiqe_to_viid; + __be32 timeren_timerix; __be64 r9; }; @@ -1847,6 +1850,10 @@ struct fw_eq_eth_cmd { #define FW_EQ_ETH_CMD_EQSIZE_S 0 #define FW_EQ_ETH_CMD_EQSIZE_V(x) ((x) << FW_EQ_ETH_CMD_EQSIZE_S) +#define FW_EQ_ETH_CMD_AUTOEQUIQE_S 31 +#define FW_EQ_ETH_CMD_AUTOEQUIQE_V(x) ((x) << FW_EQ_ETH_CMD_AUTOEQUIQE_S) +#define FW_EQ_ETH_CMD_AUTOEQUIQE_F FW_EQ_ETH_CMD_AUTOEQUIQE_V(1U) + #define FW_EQ_ETH_CMD_AUTOEQUEQE_S 30 #define FW_EQ_ETH_CMD_AUTOEQUEQE_V(x) ((x) << FW_EQ_ETH_CMD_AUTOEQUEQE_S) #define FW_EQ_ETH_CMD_AUTOEQUEQE_F FW_EQ_ETH_CMD_AUTOEQUEQE_V(1U) @@ -1854,6 +1861,19 @@ struct fw_eq_eth_cmd { #define FW_EQ_ETH_CMD_VIID_S 16 #define FW_EQ_ETH_CMD_VIID_V(x) ((x) << FW_EQ_ETH_CMD_VIID_S) +#define FW_EQ_ETH_CMD_TIMEREN_S 3 +#define FW_EQ_ETH_CMD_TIMEREN_M 0x1 +#define FW_EQ_ETH_CMD_TIMEREN_V(x) ((x) << FW_EQ_ETH_CMD_TIMEREN_S) +#define FW_EQ_ETH_CMD_TIMEREN_G(x) \ + (((x) >> FW_EQ_ETH_CMD_TIMEREN_S) & FW_EQ_ETH_CMD_TIMEREN_M) +#define FW_EQ_ETH_CMD_TIMEREN_F FW_EQ_ETH_CMD_TIMEREN_V(1U) + +#define FW_EQ_ETH_CMD_TIMERIX_S 0 +#define FW_EQ_ETH_CMD_TIMERIX_M 0x7 +#define FW_EQ_ETH_CMD_TIMERIX_V(x) ((x) << FW_EQ_ETH_CMD_TIMERIX_S) +#define FW_EQ_ETH_CMD_TIMERIX_G(x) \ + (((x) >> FW_EQ_ETH_CMD_TIMERIX_S) & FW_EQ_ETH_CMD_TIMERIX_M) + struct fw_eq_ctrl_cmd { __be32 op_to_vfn; __be32 alloc_to_len16; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h index a844296135b4..9125ddd89dd1 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h @@ -36,8 +36,8 @@ #define __T4FW_VERSION_H__ #define T4FW_VERSION_MAJOR 0x01 -#define T4FW_VERSION_MINOR 0x14 -#define T4FW_VERSION_MICRO 0x08 +#define T4FW_VERSION_MINOR 0x16 +#define T4FW_VERSION_MICRO 0x09 #define T4FW_VERSION_BUILD 0x00 #define T4FW_MIN_VERSION_MAJOR 0x01 @@ -45,8 +45,8 @@ #define T4FW_MIN_VERSION_MICRO 0x00 #define T5FW_VERSION_MAJOR 0x01 -#define T5FW_VERSION_MINOR 0x14 -#define T5FW_VERSION_MICRO 0x08 +#define T5FW_VERSION_MINOR 0x16 +#define T5FW_VERSION_MICRO 0x09 #define T5FW_VERSION_BUILD 0x00 #define T5FW_MIN_VERSION_MAJOR 0x00 @@ -54,8 +54,8 @@ #define T5FW_MIN_VERSION_MICRO 0x00 #define T6FW_VERSION_MAJOR 0x01 -#define T6FW_VERSION_MINOR 0x14 -#define T6FW_VERSION_MICRO
0x08 +#define T6FW_VERSION_MINOR 0x16 +#define T6FW_VERSION_MICRO 0x09 #define T6FW_VERSION_BUILD 0x00 #define T6FW_MIN_VERSION_MAJOR 0x00 diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 84be3313fede..3300b69a42b3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -1408,7 +1408,7 @@ static void fw_caps_to_lmm(enum fw_port_type port_type, case FW_PORT_TYPE_CR4_QSFP: SET_LMM(FIBRE); FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full); - FW_CAPS_TO_LMM(SPEED_10G, 10000baseSR_Full); + FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full); FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full); FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full); FW_CAPS_TO_LMM(SPEED_50G, 50000baseCR2_Full); @@ -1419,6 +1419,13 @@ static void fw_caps_to_lmm(enum fw_port_type port_type, break; } + if (fw_caps & FW_PORT_CAP32_FEC_V(FW_PORT_CAP32_FEC_M)) { + FW_CAPS_TO_LMM(FEC_RS, FEC_RS); + FW_CAPS_TO_LMM(FEC_BASER_RS, FEC_BASER); + } else { + SET_LMM(FEC_NONE); + } + FW_CAPS_TO_LMM(ANEG, Autoneg); FW_CAPS_TO_LMM(802_3_PAUSE, Pause); FW_CAPS_TO_LMM(802_3_ASM_DIR, Asym_Pause); diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 1d534f0baa69..11d2ba0a2bf5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -2268,7 +2268,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, cmd.iqaddr = cpu_to_be64(rspq->phys_addr); if (fl) { - enum chip_type chip = + unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip); /* * Allocate the ring for the hardware free list (with space @@ -2319,10 +2319,10 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, */ cmd.fl0dcaen_to_fl0cidxfthresh = cpu_to_be16( - FW_IQ_CMD_FL0FBMIN_V(chip <= CHELSIO_T5 ? - FETCHBURSTMIN_128B_X : - FETCHBURSTMIN_64B_X) | - FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ? + FW_IQ_CMD_FL0FBMIN_V(chip_ver <= CHELSIO_T5 + ? FETCHBURSTMIN_128B_X + : FETCHBURSTMIN_64B_T6_X) | + FW_IQ_CMD_FL0FBMAX_V((chip_ver <= CHELSIO_T5) ? FETCHBURSTMAX_512B_X : FETCHBURSTMAX_256B_X)); cmd.fl0size = cpu_to_be16(flsz); @@ -2411,10 +2411,11 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, struct net_device *dev, struct netdev_queue *devq, unsigned int iqid) { + unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip); + struct port_info *pi = netdev_priv(dev); + struct fw_eq_eth_cmd cmd, rpl; struct sge *s = &adapter->sge; int ret, nentries; - struct fw_eq_eth_cmd cmd, rpl; - struct port_info *pi = netdev_priv(dev); /* * Calculate the size of the hardware TX Queue (including the Status @@ -2448,17 +2449,19 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC_F | FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(cmd)); - cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE_F | - FW_EQ_ETH_CMD_VIID_V(pi->viid)); + cmd.autoequiqe_to_viid = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE_F | + FW_EQ_ETH_CMD_VIID_V(pi->viid)); cmd.fetchszm_to_iqid = cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE_V(SGE_HOSTFCMODE_STPG) | FW_EQ_ETH_CMD_PCIECHN_V(pi->port_id) | FW_EQ_ETH_CMD_IQID_V(iqid)); cmd.dcaen_to_eqsize = - cpu_to_be32(FW_EQ_ETH_CMD_FBMIN_V(SGE_FETCHBURSTMIN_64B) | - FW_EQ_ETH_CMD_FBMAX_V(SGE_FETCHBURSTMAX_512B) | + cpu_to_be32(FW_EQ_ETH_CMD_FBMIN_V(chip_ver <= CHELSIO_T5 + ? 
FETCHBURSTMIN_64B_X + : FETCHBURSTMIN_64B_T6_X) | + FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) | FW_EQ_ETH_CMD_CIDXFTHRESH_V( - SGE_CIDXFLUSHTHRESH_32) | + CIDXFLUSHTHRESH_32_X) | FW_EQ_ETH_CMD_EQSIZE_V(nentries)); cmd.eqaddr = cpu_to_be64(txq->q.phys_addr); diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 60641e202534..9a7f70db20c7 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -1434,7 +1434,8 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, * csum is correct or is zero. */ if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc && - tcp_udp_csum_ok && ipv4_csum_ok && outer_csum_ok) { + tcp_udp_csum_ok && outer_csum_ok && + (ipv4_csum_ok || ipv6)) { skb->ip_summed = CHECKSUM_UNNECESSARY; skb->csum_level = encap; } diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c index 13430f75496c..f1a2da15dd0a 100644 --- a/drivers/net/ethernet/dec/tulip/de2104x.c +++ b/drivers/net/ethernet/dec/tulip/de2104x.c @@ -585,7 +585,7 @@ static void de_tx (struct de_private *de) netif_dbg(de, tx_done, de->dev, "tx done, slot %d\n", tx_tail); } - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); } next: diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index d8d423f22c4f..cfcdfee718b0 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -843,9 +843,9 @@ rio_free_tx (struct net_device *dev, int irq) desc_to_dma(&np->tx_ring[entry]), skb->len, PCI_DMA_TODEVICE); if (irq) - dev_kfree_skb_irq (skb); + dev_consume_skb_irq(skb); else - dev_kfree_skb (skb); + dev_kfree_skb(skb); np->tx_skbuff[entry] = NULL; entry = (entry + 1) % TX_RING_SIZE; diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c index 1a27176381fb..4a37a69764ce 100644 --- a/drivers/net/ethernet/dlink/sundance.c +++ b/drivers/net/ethernet/dlink/sundance.c @@ -1193,7 +1193,6 @@ static irqreturn_t intr_handler(int irq, void *dev_instance) int handled = 0; int i; - do { int intr_status = ioread16(ioaddr + IntrStatus); iowrite16(intr_status, ioaddr + IntrStatus); @@ -1286,7 +1285,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance) dma_unmap_single(&np->pci_dev->dev, le32_to_cpu(np->tx_ring[entry].frag[0].addr), skb->len, DMA_TO_DEVICE); - dev_kfree_skb_irq (np->tx_skbuff[entry]); + dev_consume_skb_irq(np->tx_skbuff[entry]); np->tx_skbuff[entry] = NULL; np->tx_ring[entry].frag[0].addr = 0; np->tx_ring[entry].frag[0].length = 0; @@ -1305,7 +1304,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance) dma_unmap_single(&np->pci_dev->dev, le32_to_cpu(np->tx_ring[entry].frag[0].addr), skb->len, DMA_TO_DEVICE); - dev_kfree_skb_irq (np->tx_skbuff[entry]); + dev_consume_skb_irq(np->tx_skbuff[entry]); np->tx_skbuff[entry] = NULL; np->tx_ring[entry].frag[0].addr = 0; np->tx_ring[entry].frag[0].length = 0; diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c index ae55da60ed0e..c24fd56a2c71 100644 --- a/drivers/net/ethernet/fealnx.c +++ b/drivers/net/ethernet/fealnx.c @@ -1531,7 +1531,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance) /* Free the original skb. 
*/ pci_unmap_single(np->pci_dev, np->cur_tx->buffer, np->cur_tx->skbuff->len, PCI_DMA_TODEVICE); - dev_kfree_skb_irq(np->cur_tx->skbuff); + dev_consume_skb_irq(np->cur_tx->skbuff); np->cur_tx->skbuff = NULL; --np->really_tx_count; if (np->cur_tx->control & TXLD) { diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c index 62497119c85f..bdee441bc3b7 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c @@ -501,7 +501,7 @@ static int dpaa_get_ts_info(struct net_device *net_dev, struct device_node *mac_node = dev->of_node; struct device_node *fman_node = NULL, *ptp_node = NULL; struct platform_device *ptp_dev = NULL; - struct qoriq_ptp *ptp = NULL; + struct ptp_qoriq *ptp = NULL; info->phc_index = -1; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 04925c731f0b..87777b09f5e0 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -86,16 +86,16 @@ static void free_rx_fd(struct dpaa2_eth_priv *priv, for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { addr = dpaa2_sg_get_addr(&sgt[i]); sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); - dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, - DMA_BIDIRECTIONAL); + dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE, + DMA_BIDIRECTIONAL); - skb_free_frag(sg_vaddr); + free_pages((unsigned long)sg_vaddr, 0); if (dpaa2_sg_is_final(&sgt[i])) break; } free_buf: - skb_free_frag(vaddr); + free_pages((unsigned long)vaddr, 0); } /* Build a linear skb based on a single-buffer frame descriptor */ @@ -109,7 +109,7 @@ static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch, ch->buf_count--; - skb = build_skb(fd_vaddr, DPAA2_ETH_SKB_SIZE); + skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE); if (unlikely(!skb)) return NULL; @@ -144,19 +144,19 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv, /* Get the address and length from the S/G entry */ sg_addr = dpaa2_sg_get_addr(sge); sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr); - dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE, - DMA_BIDIRECTIONAL); + dma_unmap_page(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE, + DMA_BIDIRECTIONAL); sg_length = dpaa2_sg_get_len(sge); if (i == 0) { /* We build the skb around the first data buffer */ - skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE); + skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE); if (unlikely(!skb)) { /* Free the first SG entry now, since we already * unmapped it and obtained the virtual address */ - skb_free_frag(sg_vaddr); + free_pages((unsigned long)sg_vaddr, 0); /* We still need to subtract the buffers used * by this FD from our software counter @@ -211,9 +211,9 @@ static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count) for (i = 0; i < count; i++) { vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]); - dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE, - DMA_BIDIRECTIONAL); - skb_free_frag(vaddr); + dma_unmap_page(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE, + DMA_BIDIRECTIONAL); + free_pages((unsigned long)vaddr, 0); } } @@ -264,9 +264,7 @@ static int xdp_enqueue(struct dpaa2_eth_priv *priv, struct dpaa2_fd *fd, fq = &priv->fq[queue_id]; for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) { - err = dpaa2_io_service_enqueue_qd(fq->channel->dpio, - priv->tx_qdid, 0, - fq->tx_qdbin, fd); + err = priv->enqueue(priv, fq, fd, 
0); if (err != -EBUSY) break; } @@ -378,16 +376,16 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, return; } - dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, - DMA_BIDIRECTIONAL); + dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE, + DMA_BIDIRECTIONAL); skb = build_linear_skb(ch, fd, vaddr); } else if (fd_format == dpaa2_fd_sg) { WARN_ON(priv->xdp_prog); - dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, - DMA_BIDIRECTIONAL); + dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE, + DMA_BIDIRECTIONAL); skb = build_frag_skb(priv, ch, buf_data); - skb_free_frag(vaddr); + free_pages((unsigned long)vaddr, 0); percpu_extras->rx_sg_frames++; percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd); } else { @@ -657,7 +655,7 @@ static int build_single_fd(struct dpaa2_eth_priv *priv, * dpaa2_eth_tx(). */ static void free_tx_fd(const struct dpaa2_eth_priv *priv, - const struct dpaa2_fd *fd) + const struct dpaa2_fd *fd, bool in_napi) { struct device *dev = priv->net_dev->dev.parent; dma_addr_t fd_addr; @@ -712,7 +710,7 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv, skb_free_frag(skbh); /* Move on with skb release */ - dev_kfree_skb(skb); + napi_consume_skb(skb, in_napi); } static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) @@ -785,9 +783,7 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) queue_mapping = skb_get_queue_mapping(skb); fq = &priv->fq[queue_mapping]; for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) { - err = dpaa2_io_service_enqueue_qd(fq->channel->dpio, - priv->tx_qdid, 0, - fq->tx_qdbin, &fd); + err = priv->enqueue(priv, fq, &fd, 0); if (err != -EBUSY) break; } @@ -795,7 +791,7 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) if (unlikely(err < 0)) { percpu_stats->tx_errors++; /* Clean up everything, including freeing the skb */ - free_tx_fd(priv, &fd); + free_tx_fd(priv, &fd, false); } else { fd_len = dpaa2_fd_get_len(&fd); percpu_stats->tx_packets++; @@ -837,7 +833,7 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv, /* Check frame errors in the FD field */ fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK; - free_tx_fd(priv, fd); + free_tx_fd(priv, fd, true); if (likely(!fd_errors)) return; @@ -903,7 +899,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv, { struct device *dev = priv->net_dev->dev.parent; u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; - void *buf; + struct page *page; dma_addr_t addr; int i, err; @@ -911,14 +907,16 @@ static int add_bufs(struct dpaa2_eth_priv *priv, /* Allocate buffer visible to WRIOP + skb shared info + * alignment padding */ - buf = napi_alloc_frag(dpaa2_eth_buf_raw_size(priv)); - if (unlikely(!buf)) + /* allocate one page for each Rx buffer. 
WRIOP sees + * the entire page except for a tailroom reserved for + * skb shared info + */ + page = dev_alloc_pages(0); + if (!page) goto err_alloc; - buf = PTR_ALIGN(buf, priv->rx_buf_align); - - addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE, - DMA_BIDIRECTIONAL); + addr = dma_map_page(dev, page, 0, DPAA2_ETH_RX_BUF_SIZE, + DMA_BIDIRECTIONAL); if (unlikely(dma_mapping_error(dev, addr))) goto err_map; @@ -926,7 +924,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv, /* tracing point */ trace_dpaa2_eth_buf_seed(priv->net_dev, - buf, dpaa2_eth_buf_raw_size(priv), + page, DPAA2_ETH_RX_BUF_RAW_SIZE, addr, DPAA2_ETH_RX_BUF_SIZE, bpid); } @@ -948,7 +946,7 @@ release_bufs: return i; err_map: - skb_free_frag(buf); + __free_pages(page, 0); err_alloc: /* If we managed to allocate at least some buffers, * release them to hardware @@ -2134,6 +2132,7 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv) { struct device *dev = priv->net_dev->dev.parent; struct dpni_buffer_layout buf_layout = {0}; + u16 rx_buf_align; int err; /* We need to check for WRIOP version 1.0.0, but depending on the MC @@ -2142,9 +2141,9 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv) */ if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) || priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0)) - priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1; + rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1; else - priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN; + rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN; /* tx buffer */ buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE; @@ -2184,7 +2183,7 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv) /* rx buffer */ buf_layout.pass_frame_status = true; buf_layout.pass_parser_result = true; - buf_layout.data_align = priv->rx_buf_align; + buf_layout.data_align = rx_buf_align; buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv); buf_layout.private_data_size = 0; buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT | @@ -2202,6 +2201,36 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv) return 0; } +#define DPNI_ENQUEUE_FQID_VER_MAJOR 7 +#define DPNI_ENQUEUE_FQID_VER_MINOR 9 + +static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_fq *fq, + struct dpaa2_fd *fd, u8 prio) +{ + return dpaa2_io_service_enqueue_qd(fq->channel->dpio, + priv->tx_qdid, prio, + fq->tx_qdbin, fd); +} + +static inline int dpaa2_eth_enqueue_fq(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_fq *fq, + struct dpaa2_fd *fd, + u8 prio __always_unused) +{ + return dpaa2_io_service_enqueue_fq(fq->channel->dpio, + fq->tx_fqid, fd); +} + +static void set_enqueue_mode(struct dpaa2_eth_priv *priv) +{ + if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR, + DPNI_ENQUEUE_FQID_VER_MINOR) < 0) + priv->enqueue = dpaa2_eth_enqueue_qd; + else + priv->enqueue = dpaa2_eth_enqueue_fq; +} + /* Configure the DPNI object this interface is associated with */ static int setup_dpni(struct fsl_mc_device *ls_dev) { @@ -2255,6 +2284,8 @@ static int setup_dpni(struct fsl_mc_device *ls_dev) if (err) goto close; + set_enqueue_mode(priv); + priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) * dpaa2_eth_fs_count(priv), GFP_KERNEL); if (!priv->cls_rules) @@ -2339,6 +2370,7 @@ static int setup_tx_flow(struct dpaa2_eth_priv *priv, } fq->tx_qdbin = qid.qdbin; + fq->tx_fqid = qid.fqid; err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h 
b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h index 31fe486ec25f..9510928b7cca 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h @@ -53,7 +53,8 @@ */ #define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64) #define DPAA2_ETH_NUM_BUFS (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256) -#define DPAA2_ETH_REFILL_THRESH DPAA2_ETH_MAX_FRAMES_PER_QUEUE +#define DPAA2_ETH_REFILL_THRESH \ + (DPAA2_ETH_NUM_BUFS - DPAA2_ETH_BUFS_PER_CMD) /* Maximum number of buffers that can be acquired/released through a single * QBMan command @@ -63,9 +64,11 @@ /* Hardware requires alignment for ingress/egress buffer addresses */ #define DPAA2_ETH_TX_BUF_ALIGN 64 -#define DPAA2_ETH_RX_BUF_SIZE 2048 -#define DPAA2_ETH_SKB_SIZE \ - (DPAA2_ETH_RX_BUF_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +#define DPAA2_ETH_RX_BUF_RAW_SIZE PAGE_SIZE +#define DPAA2_ETH_RX_BUF_TAILROOM \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +#define DPAA2_ETH_RX_BUF_SIZE \ + (DPAA2_ETH_RX_BUF_RAW_SIZE - DPAA2_ETH_RX_BUF_TAILROOM) /* Hardware annotation area in RX/TX buffers */ #define DPAA2_ETH_RX_HWA_SIZE 64 @@ -274,6 +277,7 @@ struct dpaa2_eth_priv; struct dpaa2_eth_fq { u32 fqid; u32 tx_qdbin; + u32 tx_fqid; u16 flowid; int target_cpu; u32 dq_frames; @@ -326,6 +330,9 @@ struct dpaa2_eth_priv { u8 num_fqs; struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES]; + int (*enqueue)(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_fq *fq, + struct dpaa2_fd *fd, u8 prio); u8 num_channels; struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS]; @@ -343,7 +350,6 @@ struct dpaa2_eth_priv { bool rx_tstamp; /* Rx timestamping enabled */ u16 tx_qdid; - u16 rx_buf_align; struct fsl_mc_io *mc_io; /* Cores which have an affine DPIO/DPCON. * This is the cpu set on which Rx and Tx conf frames are processed @@ -418,15 +424,6 @@ enum dpaa2_eth_rx_dist { DPAA2_ETH_RX_DIST_CLS }; -/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but the skb built around - * the buffer also needs space for its shared info struct, and we need - * to allocate enough to accommodate hardware alignment restrictions - */ -static inline unsigned int dpaa2_eth_buf_raw_size(struct dpaa2_eth_priv *priv) -{ - return DPAA2_ETH_SKB_SIZE + priv->rx_buf_align; -} - static inline unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv, struct sk_buff *skb) @@ -451,8 +448,7 @@ unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv, */ static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv) { - return priv->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN - - DPAA2_ETH_RX_HWA_SIZE; + return priv->tx_data_offset - DPAA2_ETH_RX_HWA_SIZE; } int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags); diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig index f9dd26fbfc83..8429f5c1d810 100644 --- a/drivers/net/ethernet/freescale/enetc/Kconfig +++ b/drivers/net/ethernet/freescale/enetc/Kconfig @@ -17,3 +17,15 @@ config FSL_ENETC_VF virtual function (VF) devices enabled by the ENETC PF driver. If compiled as module (M), the module name is fsl-enetc-vf. + +config FSL_ENETC_PTP_CLOCK + tristate "ENETC PTP clock driver" + depends on PTP_1588_CLOCK_QORIQ && (FSL_ENETC || FSL_ENETC_VF) + default y + help + This driver adds support for using the ENETC 1588 timer + as a PTP clock. This clock is only useful if your PTP + programs are getting hardware time stamps on the PTP Ethernet + packets using the SO_TIMESTAMPING API. 
+ + If compiled as module (M), the module name is fsl-enetc-ptp. diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile index 9529b01872ca..697660294dbc 100644 --- a/drivers/net/ethernet/freescale/enetc/Makefile +++ b/drivers/net/ethernet/freescale/enetc/Makefile @@ -13,3 +13,6 @@ fsl-enetc-vf-$(CONFIG_FSL_ENETC_VF) += enetc.o enetc_cbdr.o \ enetc_ethtool.o fsl-enetc-vf-objs := enetc_vf.o $(fsl-enetc-vf-y) endif + +obj-$(CONFIG_FSL_ENETC_PTP_CLOCK) += fsl-enetc-ptp.o +fsl-enetc-ptp-$(CONFIG_FSL_ENETC_PTP_CLOCK) += enetc_ptp.o diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h index efa0b1a5ef4f..df8eb8882d92 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h +++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h @@ -4,8 +4,9 @@ #include <linux/bitops.h> /* ENETC device IDs */ -#define ENETC_DEV_ID_PF 0xe100 -#define ENETC_DEV_ID_VF 0xef00 +#define ENETC_DEV_ID_PF 0xe100 +#define ENETC_DEV_ID_VF 0xef00 +#define ENETC_DEV_ID_PTP 0xee02 /* ENETC register block BAR */ #define ENETC_BAR_REGS 0 diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c new file mode 100644 index 000000000000..8c1497e7d9c5 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c @@ -0,0 +1,144 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2019 NXP */ + +#include <linux/module.h> +#include <linux/of.h> +#include <linux/fsl/ptp_qoriq.h> + +#include "enetc.h" + +static struct ptp_clock_info enetc_ptp_caps = { + .owner = THIS_MODULE, + .name = "ENETC PTP clock", + .max_adj = 512000, + .n_alarm = 0, + .n_ext_ts = 2, + .n_per_out = 0, + .n_pins = 0, + .pps = 1, + .adjfine = ptp_qoriq_adjfine, + .adjtime = ptp_qoriq_adjtime, + .gettime64 = ptp_qoriq_gettime, + .settime64 = ptp_qoriq_settime, + .enable = ptp_qoriq_enable, +}; + +static int enetc_ptp_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct ptp_qoriq *ptp_qoriq; + void __iomem *base; + int err, len, n; + + if (pdev->dev.of_node && !of_device_is_available(pdev->dev.of_node)) { + dev_info(&pdev->dev, "device is disabled, skipping\n"); + return -ENODEV; + } + + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, "device enable failed\n"); + return err; + } + + /* set up for high or low dma */ + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "DMA configuration failed: 0x%x\n", err); + goto err_dma; + } + } + + err = pci_request_mem_regions(pdev, KBUILD_MODNAME); + if (err) { + dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err); + goto err_pci_mem_reg; + } + + pci_set_master(pdev); + + ptp_qoriq = kzalloc(sizeof(*ptp_qoriq), GFP_KERNEL); + if (!ptp_qoriq) { + err = -ENOMEM; + goto err_alloc_ptp; + } + + len = pci_resource_len(pdev, ENETC_BAR_REGS); + + base = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len); + if (!base) { + err = -ENXIO; + dev_err(&pdev->dev, "ioremap() failed\n"); + goto err_ioremap; + } + + /* Allocate 1 interrupt */ + n = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX); + if (n != 1) { + err = -EPERM; + goto err_irq; + } + + ptp_qoriq->irq = pci_irq_vector(pdev, 0); + + err = request_irq(ptp_qoriq->irq, ptp_qoriq_isr, 0, DRIVER, ptp_qoriq); + if (err) { + dev_err(&pdev->dev, "request_irq() failed!\n"); + goto err_irq; + } + + ptp_qoriq->dev = 
&pdev->dev; + + err = ptp_qoriq_init(ptp_qoriq, base, &enetc_ptp_caps); + if (err) + goto err_no_clock; + + pci_set_drvdata(pdev, ptp_qoriq); + + return 0; + +err_no_clock: + free_irq(ptp_qoriq->irq, ptp_qoriq); +err_irq: + iounmap(base); +err_ioremap: + kfree(ptp_qoriq); +err_alloc_ptp: + pci_release_mem_regions(pdev); +err_pci_mem_reg: +err_dma: + pci_disable_device(pdev); + + return err; +} + +static void enetc_ptp_remove(struct pci_dev *pdev) +{ + struct ptp_qoriq *ptp_qoriq = pci_get_drvdata(pdev); + + ptp_qoriq_free(ptp_qoriq); + kfree(ptp_qoriq); + + pci_release_mem_regions(pdev); + pci_disable_device(pdev); +} + +static const struct pci_device_id enetc_ptp_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_PTP) }, + { 0, } /* End of table. */ +}; +MODULE_DEVICE_TABLE(pci, enetc_ptp_id_table); + +static struct pci_driver enetc_ptp_driver = { + .name = KBUILD_MODNAME, + .id_table = enetc_ptp_id_table, + .probe = enetc_ptp_probe, + .remove = enetc_ptp_remove, +}; +module_pci_driver(enetc_ptp_driver); + +MODULE_DESCRIPTION("ENETC PTP clock driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 2370dc204202..697c2427f2b7 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -2098,6 +2098,7 @@ static int fec_enet_get_regs_len(struct net_device *ndev) #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST) +static __u32 fec_enet_register_version = 2; static u32 fec_enet_register_offset[] = { FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, @@ -2128,6 +2129,7 @@ static u32 fec_enet_register_offset[] = { IEEE_R_FDXFC, IEEE_R_OCTETS_OK }; #else +static __u32 fec_enet_register_version = 1; static u32 fec_enet_register_offset[] = { FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0, @@ -2149,6 +2151,8 @@ static void fec_enet_get_regs(struct net_device *ndev, u32 *buf = (u32 *)regbuf; u32 i, off; + regs->version = fec_enet_register_version; + memset(buf, 0, regs->len); for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) { diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index b90bab72efdb..c1968b3ecec8 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -369,7 +369,7 @@ static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id) dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len, DMA_TO_DEVICE); - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); } spin_unlock(&priv->lock); diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 241325c35cb4..27ed995f439a 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -1492,7 +1492,7 @@ static int gfar_get_ts_info(struct net_device *dev, struct gfar_private *priv = netdev_priv(dev); struct platform_device *ptp_dev; struct device_node *ptp_node; - struct qoriq_ptp *ptp = NULL; + struct ptp_qoriq *ptp = NULL; info->phc_index = -1; diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c 
index c3d539e209ed..eb3e65e8868f 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -1879,6 +1879,8 @@ static void ucc_geth_free_tx(struct ucc_geth_private *ugeth) u16 i, j; u8 __iomem *bd; + netdev_reset_queue(ugeth->ndev); + ug_info = ugeth->ug_info; uf_info = &ug_info->uf_info; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 3b9e74be5fbd..ac55db065f16 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -3081,6 +3081,7 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset) dsaf_dev = dev_get_drvdata(&pdev->dev); if (!dsaf_dev) { dev_err(&pdev->dev, "dsaf_dev is NULL\n"); + put_device(&pdev->dev); return -ENODEV; } @@ -3088,6 +3089,7 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset) if (AE_IS_VER1(dsaf_dev->dsaf_ver)) { dev_err(dsaf_dev->dev, "%s v1 chip doesn't support RoCE!\n", dsaf_dev->ae_dev.name); + put_device(&pdev->dev); return -ENODEV; } @@ -3126,6 +3128,9 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset) dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1); dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit); } + + put_device(&pdev->dev); + return 0; } EXPORT_SYMBOL(hns_dsaf_roce_reset); diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index 53089cd980d9..9b6b7b45dde3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -42,6 +42,8 @@ enum HCLGE_MBX_OPCODE { HCLGE_MBX_GET_QID_IN_PF, /* (VF -> PF) get queue id in pf */ HCLGE_MBX_LINK_STAT_MODE, /* (PF -> VF) link mode has changed */ HCLGE_MBX_GET_LINK_MODE, /* (VF -> PF) get the link mode of pf */ + + HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf reset status */ }; /* below are per-VF mac-vlan subcodes */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index 50011aafbae4..17ab4f4af6ad 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -32,6 +32,9 @@ static bool hnae3_client_match(enum hnae3_client_type client_type, void hnae3_set_client_init_flag(struct hnae3_client *client, struct hnae3_ae_dev *ae_dev, int inited) { + if (!client || !ae_dev) + return; + switch (client->type) { case HNAE3_CLIENT_KNIC: hnae3_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited); @@ -109,6 +112,9 @@ int hnae3_register_client(struct hnae3_client *client) struct hnae3_ae_dev *ae_dev; int ret = 0; + if (!client) + return -ENODEV; + mutex_lock(&hnae3_common_lock); /* one system should only have one client for every type */ list_for_each_entry(client_tmp, &hnae3_client_list, node) { @@ -141,6 +147,9 @@ void hnae3_unregister_client(struct hnae3_client *client) { struct hnae3_ae_dev *ae_dev; + if (!client) + return; + mutex_lock(&hnae3_common_lock); /* un-initialize the client on every matched port */ list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { @@ -163,6 +172,9 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) struct hnae3_client *client; int ret = 0; + if (!ae_algo) + return; + mutex_lock(&hnae3_common_lock); list_add_tail(&ae_algo->node, &hnae3_ae_algo_list); @@ -173,8 +185,12 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) if (!id) continue; - /* ae_dev init should set flag */ + if 
(!ae_algo->ops) { + dev_err(&ae_dev->pdev->dev, "ae_algo ops are null\n"); + continue; + } ae_dev->ops = ae_algo->ops; + ret = ae_algo->ops->init_ae_dev(ae_dev); if (ret) { dev_err(&ae_dev->pdev->dev, @@ -182,6 +198,7 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) continue; } + /* ae_dev init should set flag */ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1); /* check the client list for the match with this ae_dev type and @@ -209,6 +226,9 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo) struct hnae3_ae_dev *ae_dev; struct hnae3_client *client; + if (!ae_algo) + return; + mutex_lock(&hnae3_common_lock); /* Check if there are matched ae_dev */ list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { @@ -245,6 +265,9 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) struct hnae3_client *client; int ret = 0; + if (!ae_dev) + return -ENODEV; + mutex_lock(&hnae3_common_lock); list_add_tail(&ae_dev->node, &hnae3_ae_dev_list); @@ -255,15 +278,13 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) if (!id) continue; - ae_dev->ops = ae_algo->ops; - - if (!ae_dev->ops) { - dev_err(&ae_dev->pdev->dev, "ae_dev ops are null\n"); + if (!ae_algo->ops) { + dev_err(&ae_dev->pdev->dev, "ae_algo ops are null\n"); ret = -EOPNOTSUPP; goto out_err; } + ae_dev->ops = ae_algo->ops; - /* ae_dev init should set flag */ ret = ae_dev->ops->init_ae_dev(ae_dev); if (ret) { dev_err(&ae_dev->pdev->dev, @@ -271,6 +292,7 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) goto out_err; } + /* ae_dev init should set flag */ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1); break; } @@ -307,6 +329,9 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev) struct hnae3_ae_algo *ae_algo; struct hnae3_client *client; + if (!ae_dev) + return; + mutex_lock(&hnae3_common_lock); /* Check if there are matched ae_algo */ list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 63f5f56bda94..d94c90a38563 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -621,12 +621,11 @@ static int hns3_get_link_ksettings(struct net_device *netdev, hns3_get_ksettings(h, cmd); break; case HNAE3_MEDIA_TYPE_COPPER: - if (!netdev->phydev) - return -EOPNOTSUPP; - cmd->base.port = PORT_TP; - phy_ethtool_ksettings_get(netdev->phydev, cmd); - + if (!netdev->phydev) + hns3_get_ksettings(h, cmd); + else + phy_ethtool_ksettings_get(netdev->phydev, cmd); break; default: diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index 1dddfcba41a9..3a093a92eac5 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -390,6 +390,20 @@ int hclge_cmd_init(struct hclge_dev *hdev) return 0; } +static void hclge_cmd_uninit_regs(struct hclge_hw *hw) +{ + hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0); +} + static 
void hclge_destroy_queue(struct hclge_cmq_ring *ring) { spin_lock(&ring->lock); @@ -402,3 +416,15 @@ void hclge_destroy_cmd_queue(struct hclge_hw *hw) hclge_destroy_queue(&hw->cmq.csq); hclge_destroy_queue(&hw->cmq.crq); } + +void hclge_cmd_uninit(struct hclge_dev *hdev) +{ + spin_lock_bh(&hdev->hw.cmq.csq.lock); + spin_lock(&hdev->hw.cmq.crq.lock); + set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + hclge_cmd_uninit_regs(&hdev->hw); + spin_unlock(&hdev->hw.cmq.crq.lock); + spin_unlock_bh(&hdev->hw.cmq.csq.lock); + + hclge_destroy_cmd_queue(&hdev->hw); +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index ac2cd219a9fb..bad21c89f6b7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -975,6 +975,6 @@ enum hclge_cmd_status hclge_cmd_mdio_write(struct hclge_hw *hw, enum hclge_cmd_status hclge_cmd_mdio_read(struct hclge_hw *hw, struct hclge_desc *desc); -void hclge_destroy_cmd_queue(struct hclge_hw *hw); +void hclge_cmd_uninit(struct hclge_dev *hdev); int hclge_cmd_queue_init(struct hclge_dev *hdev); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c index d0f654123b9b..b9d363fa595a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c @@ -80,7 +80,7 @@ static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = { { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err" }, { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err" }, { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err" }, - { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_erre" }, + { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err" }, { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err" }, { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err" }, { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err" }, @@ -219,6 +219,12 @@ static const struct hclge_hw_error hclge_mac_afifo_tnl_int[] = { { .int_msk = BIT(5), .msg = "cge_igu_afifo_ecc_mbit_err" }, { .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err" }, { .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err" }, + { .int_msk = BIT(8), .msg = "cge_igu_afifo_overflow_err" }, + { .int_msk = BIT(9), .msg = "lge_igu_afifo_overflow_err" }, + { .int_msk = BIT(10), .msg = "egu_cge_afifo_underrun_err" }, + { .int_msk = BIT(11), .msg = "egu_lge_afifo_underrun_err" }, + { .int_msk = BIT(12), .msg = "egu_ge_afifo_underrun_err" }, + { .int_msk = BIT(13), .msg = "ge_igu_afifo_overflow_err" }, { /* sentinel */ } }; @@ -277,6 +283,45 @@ static const struct hclge_hw_error hclge_ssu_com_err_int[] = { { /* sentinel */ } }; +#define HCLGE_SSU_MEM_ECC_ERR(x) \ + { .int_msk = BIT(x), .msg = "ssu_mem" #x "_ecc_mbit_err" } + +static const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = { + HCLGE_SSU_MEM_ECC_ERR(0), + HCLGE_SSU_MEM_ECC_ERR(1), + HCLGE_SSU_MEM_ECC_ERR(2), + HCLGE_SSU_MEM_ECC_ERR(3), + HCLGE_SSU_MEM_ECC_ERR(4), + HCLGE_SSU_MEM_ECC_ERR(5), + HCLGE_SSU_MEM_ECC_ERR(6), + HCLGE_SSU_MEM_ECC_ERR(7), + HCLGE_SSU_MEM_ECC_ERR(8), + HCLGE_SSU_MEM_ECC_ERR(9), + HCLGE_SSU_MEM_ECC_ERR(10), + HCLGE_SSU_MEM_ECC_ERR(11), + HCLGE_SSU_MEM_ECC_ERR(12), + HCLGE_SSU_MEM_ECC_ERR(13), + HCLGE_SSU_MEM_ECC_ERR(14), + HCLGE_SSU_MEM_ECC_ERR(15), + HCLGE_SSU_MEM_ECC_ERR(16), + HCLGE_SSU_MEM_ECC_ERR(17), + HCLGE_SSU_MEM_ECC_ERR(18), + HCLGE_SSU_MEM_ECC_ERR(19), + 
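
A note on the HCLGE_SSU_MEM_ECC_ERR() helper that builds the table above: it relies on preprocessor stringizing, where #x turns the macro argument into a string literal and adjacent string literals are concatenated. A minimal sketch of the same pattern, assuming a simplified error-table layout (BIT(), struct hw_error and main() are illustrative stand-ins, not the driver's definitions):

#include <stdio.h>

#define BIT(n)	(1U << (n))

/* Expands ERR_ENTRY(3) to { .int_msk = BIT(3), .msg = "mem3_ecc_mbit_err" } */
#define ERR_ENTRY(x) \
	{ .int_msk = BIT(x), .msg = "mem" #x "_ecc_mbit_err" }

struct hw_error {
	unsigned int int_msk;
	const char *msg;
};

static const struct hw_error err_tbl[] = {
	ERR_ENTRY(0),
	ERR_ENTRY(1),
	ERR_ENTRY(2),
	{ /* sentinel */ }
};

int main(void)
{
	printf("%s (mask 0x%x)\n", err_tbl[2].msg, err_tbl[2].int_msk);
	return 0;
}

The macro keeps the bit position and the message text in lockstep, which is why the patch can add thirty-two entries without risking a mismatched index in a hand-typed string.
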
HCLGE_SSU_MEM_ECC_ERR(20), + HCLGE_SSU_MEM_ECC_ERR(21), + HCLGE_SSU_MEM_ECC_ERR(22), + HCLGE_SSU_MEM_ECC_ERR(23), + HCLGE_SSU_MEM_ECC_ERR(24), + HCLGE_SSU_MEM_ECC_ERR(25), + HCLGE_SSU_MEM_ECC_ERR(26), + HCLGE_SSU_MEM_ECC_ERR(27), + HCLGE_SSU_MEM_ECC_ERR(28), + HCLGE_SSU_MEM_ECC_ERR(29), + HCLGE_SSU_MEM_ECC_ERR(30), + HCLGE_SSU_MEM_ECC_ERR(31), + { /* sentinel */ } +}; + static const struct hclge_hw_error hclge_ssu_port_based_err_int[] = { { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port" }, { .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port" }, @@ -835,13 +880,15 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev, desc_data = (__le32 *)&desc[2]; status = le32_to_cpu(*(desc_data + 2)); if (status) { - dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_0 ssu_ecc_mbit_int[31:0]\n"); + hclge_log_error(dev, "SSU_ECC_MULTI_BIT_INT_0", + &hclge_ssu_mem_ecc_err_int[0], status); HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); } status = le32_to_cpu(*(desc_data + 3)) & BIT(0); if (status) { - dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_ecc_mbit_int[32]\n"); + dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n", + status); HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); } @@ -997,6 +1044,13 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev, hclge_log_error(dev, "IGU_EGU_TNL_INT_STS", &hclge_igu_egu_tnl_int[0], status); + /* log PPU(RCB) errors */ + desc_data = (__le32 *)&desc[3]; + status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_RAS_MASK; + if (status) + hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0", + &hclge_ppu_pf_abnormal_int[0], status); + /* clear all PF RAS errors */ hclge_cmd_reuse_desc(&desc[0], false); desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); @@ -1332,14 +1386,13 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev, set_bit(HNAE3_GLOBAL_RESET, reset_requests); } - /* log PPU(RCB) errors */ + /* log PPU(RCB) MPF errors */ desc_data = (__le32 *)&desc[5]; status = le32_to_cpu(*(desc_data + 2)) & HCLGE_PPU_MPF_INT_ST2_MSIX_MASK; if (status) { - dev_warn(dev, - "PPU_MPF_ABNORMAL_INT_ST2[28:29], err_status(0x%x)\n", - status); + hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2", + &hclge_ppu_mpf_abnormal_int_st2[0], status); set_bit(HNAE3_CORE_RESET, reset_requests); } @@ -1386,7 +1439,7 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev, hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0", &hclge_ppp_pf_abnormal_int[0], status); - /* PPU(RCB) PF errors */ + /* log PPU(RCB) PF errors */ desc_data = (__le32 *)&desc[3]; status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_MSIX_MASK; if (status) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h index 51a7d4eb066a..fc068280d391 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h @@ -45,8 +45,8 @@ #define HCLGE_TM_QCN_MEM_ERR_INT_EN 0xFFFFFF #define HCLGE_NCSI_ERR_INT_EN 0x3 #define HCLGE_NCSI_ERR_INT_TYPE 0x9 -#define HCLGE_MAC_COMMON_ERR_INT_EN GENMASK(7, 0) -#define HCLGE_MAC_COMMON_ERR_INT_EN_MASK GENMASK(7, 0) +#define HCLGE_MAC_COMMON_ERR_INT_EN 0x107FF +#define HCLGE_MAC_COMMON_ERR_INT_EN_MASK 0x107FF #define HCLGE_PPU_MPF_ABNORMAL_INT0_EN GENMASK(31, 0) #define HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK GENMASK(31, 0) #define HCLGE_PPU_MPF_ABNORMAL_INT1_EN GENMASK(31, 0) @@ -79,6 +79,7 @@ #define HCLGE_PPP_MPF_INT_ST3_MASK GENMASK(5, 0) #define HCLGE_PPU_MPF_INT_ST3_MASK GENMASK(7, 0) #define HCLGE_PPU_MPF_INT_ST2_MSIX_MASK 
GENMASK(29, 28) +#define HCLGE_PPU_PF_INT_RAS_MASK 0x18 #define HCLGE_PPU_PF_INT_MSIX_MASK 0x27 #define HCLGE_QCN_FIFO_INT_MASK GENMASK(17, 0) #define HCLGE_QCN_ECC_INT_MASK GENMASK(21, 0) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 0a64c7f2dc6c..5c8f2e4d71c9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -839,37 +839,67 @@ static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev, unsigned long *supported = hdev->hw.mac.supported; if (speed_ability & HCLGE_SUPPORT_1G_BIT) - set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, - supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, + supported); if (speed_ability & HCLGE_SUPPORT_10G_BIT) - set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, - supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, + supported); if (speed_ability & HCLGE_SUPPORT_25G_BIT) - set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, - supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, + supported); if (speed_ability & HCLGE_SUPPORT_50G_BIT) - set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, - supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, + supported); if (speed_ability & HCLGE_SUPPORT_100G_BIT) - set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, - supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, + supported); - set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported); - set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported); +} + +static void hclge_parse_copper_link_mode(struct hclge_dev *hdev, + u8 speed_ability) +{ + unsigned long *supported = hdev->hw.mac.supported; + + /* default to support all speed for GE port */ + if (!speed_ability) + speed_ability = HCLGE_SUPPORT_GE; + + if (speed_ability & HCLGE_SUPPORT_1G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + supported); + + if (speed_ability & HCLGE_SUPPORT_100M_BIT) { + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, + supported); + } + + if (speed_ability & HCLGE_SUPPORT_10M_BIT) { + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported); + } + + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported); } static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability) { u8 media_type = hdev->hw.mac.media_type; - if (media_type != HNAE3_MEDIA_TYPE_FIBER) - return; - - hclge_parse_fiber_link_mode(hdev, speed_ability); + if (media_type == HNAE3_MEDIA_TYPE_FIBER) + hclge_parse_fiber_link_mode(hdev, speed_ability); + else if (media_type == HNAE3_MEDIA_TYPE_COPPER) + hclge_parse_copper_link_mode(hdev, speed_ability); } static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) @@ -1299,6 +1329,9 @@ static int hclge_alloc_vport(struct hclge_dev *hdev) vport->back = hdev; vport->vport_id = i; vport->mps = HCLGE_MAC_DEFAULT_FRAME; + INIT_LIST_HEAD(&vport->vlan_list); + INIT_LIST_HEAD(&vport->uc_mac_list); + INIT_LIST_HEAD(&vport->mc_mac_list); if (i == 0) ret = hclge_vport_setup(vport, tqp_main_vport); @@ -2599,7 +2632,7 @@ static int 
hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset) return hclge_cmd_send(&hdev->hw, &desc, 1); } -int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset) +static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset) { int i; @@ -6044,6 +6077,103 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport, return status; } +void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, + enum HCLGE_MAC_ADDR_TYPE mac_type) +{ + struct hclge_vport_mac_addr_cfg *mac_cfg; + struct list_head *list; + + if (!vport->vport_id) + return; + + mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL); + if (!mac_cfg) + return; + + mac_cfg->hd_tbl_status = true; + memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN); + + list = (mac_type == HCLGE_MAC_ADDR_UC) ? + &vport->uc_mac_list : &vport->mc_mac_list; + + list_add_tail(&mac_cfg->node, list); +} + +void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, + bool is_write_tbl, + enum HCLGE_MAC_ADDR_TYPE mac_type) +{ + struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp; + struct list_head *list; + bool uc_flag, mc_flag; + + list = (mac_type == HCLGE_MAC_ADDR_UC) ? + &vport->uc_mac_list : &vport->mc_mac_list; + + uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC; + mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC; + + list_for_each_entry_safe(mac_cfg, tmp, list, node) { + if (strncmp(mac_cfg->mac_addr, mac_addr, ETH_ALEN) == 0) { + if (uc_flag && mac_cfg->hd_tbl_status) + hclge_rm_uc_addr_common(vport, mac_addr); + + if (mc_flag && mac_cfg->hd_tbl_status) + hclge_rm_mc_addr_common(vport, mac_addr); + + list_del(&mac_cfg->node); + kfree(mac_cfg); + break; + } + } +} + +void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list, + enum HCLGE_MAC_ADDR_TYPE mac_type) +{ + struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp; + struct list_head *list; + + list = (mac_type == HCLGE_MAC_ADDR_UC) ? 
+ &vport->uc_mac_list : &vport->mc_mac_list; + + list_for_each_entry_safe(mac_cfg, tmp, list, node) { + if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status) + hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr); + + if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status) + hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr); + + mac_cfg->hd_tbl_status = false; + if (is_del_list) { + list_del(&mac_cfg->node); + kfree(mac_cfg); + } + } +} + +void hclge_uninit_vport_mac_table(struct hclge_dev *hdev) +{ + struct hclge_vport_mac_addr_cfg *mac, *tmp; + struct hclge_vport *vport; + int i; + + mutex_lock(&hdev->vport_cfg_mutex); + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) { + list_del(&mac->node); + kfree(mac); + } + + list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) { + list_del(&mac->node); + kfree(mac); + } + } + mutex_unlock(&hdev->vport_cfg_mutex); +} + static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev, u16 cmdq_resp, u8 resp_code) { @@ -6617,6 +6747,84 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev) return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); } +void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id) +{ + struct hclge_vport_vlan_cfg *vlan; + + /* vlan 0 is reserved */ + if (!vlan_id) + return; + + vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); + if (!vlan) + return; + + vlan->hd_tbl_status = true; + vlan->vlan_id = vlan_id; + + list_add_tail(&vlan->node, &vport->vlan_list); +} + +void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, + bool is_write_tbl) +{ + struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_dev *hdev = vport->back; + + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + if (vlan->vlan_id == vlan_id) { + if (is_write_tbl && vlan->hd_tbl_status) + hclge_set_vlan_filter_hw(hdev, + htons(ETH_P_8021Q), + vport->vport_id, + vlan_id, 0, + true); + + list_del(&vlan->node); + kfree(vlan); + break; + } + } +} + +void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list) +{ + struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_dev *hdev = vport->back; + + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + if (vlan->hd_tbl_status) + hclge_set_vlan_filter_hw(hdev, + htons(ETH_P_8021Q), + vport->vport_id, + vlan->vlan_id, 0, + true); + + vlan->hd_tbl_status = false; + if (is_del_list) { + list_del(&vlan->node); + kfree(vlan); + } + } +} + +void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev) +{ + struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_vport *vport; + int i; + + mutex_lock(&hdev->vport_cfg_mutex); + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + list_del(&vlan->node); + kfree(vlan); + } + } + mutex_unlock(&hdev->vport_cfg_mutex); +} + int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) { struct hclge_vport *vport = hclge_get_vport(handle); @@ -7299,6 +7507,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN; mutex_init(&hdev->vport_lock); + mutex_init(&hdev->vport_cfg_mutex); ret = hclge_pci_init(hdev); if (ret) { @@ -7460,7 +7669,7 @@ err_msi_irq_uninit: err_msi_uninit: pci_free_irq_vectors(pdev); err_cmd_uninit: - hclge_destroy_cmd_queue(&hdev->hw); + hclge_cmd_uninit(hdev); err_pci_uninit: pcim_iounmap(pdev, hdev->hw.io_base); 
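
The uninit helpers above walk each vport list with list_for_each_entry_safe() so nodes can be freed mid-iteration. A self-contained sketch of the same free-while-walking pattern on a plain singly-linked list (the types and names are illustrative, not the driver's):

#include <stdio.h>
#include <stdlib.h>

struct vlan_node {
	struct vlan_node *next;
	unsigned short vlan_id;
};

/* Free every node; 'tmp' plays the role of the extra cursor that
 * list_for_each_entry_safe() keeps so deletion cannot lose the chain. */
static void uninit_vlan_list(struct vlan_node **head)
{
	struct vlan_node *cur = *head, *tmp;

	while (cur) {
		tmp = cur->next;
		free(cur);
		cur = tmp;
	}
	*head = NULL;
}

int main(void)
{
	struct vlan_node *head = NULL;

	for (unsigned short id = 1; id <= 3; id++) {
		struct vlan_node *n = malloc(sizeof(*n));

		if (!n)
			return 1;
		n->vlan_id = id;
		n->next = head;
		head = n;
	}
	uninit_vlan_list(&head);
	printf("list drained: %s\n", head ? "no" : "yes");
	return 0;
}

A plain single-cursor loop would dereference freed memory on the step after free(); the second cursor is what makes teardown of the MAC and VLAN tables safe.
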
pci_clear_master(pdev); @@ -7587,10 +7796,13 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) synchronize_irq(hdev->misc_vector.vector_irq); hclge_hw_error_set_state(hdev, false); - hclge_destroy_cmd_queue(&hdev->hw); + hclge_cmd_uninit(hdev); hclge_misc_irq_uninit(hdev); hclge_pci_uninit(hdev); mutex_destroy(&hdev->vport_lock); + hclge_uninit_vport_mac_table(hdev); + hclge_uninit_vport_vlan_table(hdev); + mutex_destroy(&hdev->vport_cfg_mutex); ae_dev->priv = NULL; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index c939f4a7f5f0..abbc58ea4864 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -188,6 +188,10 @@ enum HLCGE_PORT_TYPE { #define HCLGE_SUPPORT_25G_BIT BIT(2) #define HCLGE_SUPPORT_50G_BIT BIT(3) #define HCLGE_SUPPORT_100G_BIT BIT(4) +#define HCLGE_SUPPORT_100M_BIT BIT(6) +#define HCLGE_SUPPORT_10M_BIT BIT(7) +#define HCLGE_SUPPORT_GE \ + (HCLGE_SUPPORT_1G_BIT | HCLGE_SUPPORT_100M_BIT | HCLGE_SUPPORT_10M_BIT) enum HCLGE_DEV_STATE { HCLGE_STATE_REINITING, @@ -628,6 +632,23 @@ struct hclge_fd_ad_data { u16 rule_id; }; +struct hclge_vport_mac_addr_cfg { + struct list_head node; + int hd_tbl_status; + u8 mac_addr[ETH_ALEN]; +}; + +enum HCLGE_MAC_ADDR_TYPE { + HCLGE_MAC_ADDR_UC, + HCLGE_MAC_ADDR_MC +}; + +struct hclge_vport_vlan_cfg { + struct list_head node; + int hd_tbl_status; + u16 vlan_id; +}; + /* For each bit of TCAM entry, it uses a pair of 'x' and * 'y' to indicate which value to match, like below: * ---------------------------------- @@ -767,6 +788,8 @@ struct hclge_dev { /* unicast mac vlan space shared by PF and its VFs */ u16 share_umv_size; struct mutex umv_mutex; /* protect share_umv_size */ + + struct mutex vport_cfg_mutex; /* Protect stored vf table */ }; /* VPort level vlan tag configuration for TX direction */ @@ -834,6 +857,10 @@ struct hclge_vport { unsigned long state; unsigned long last_active_jiffies; u32 mps; /* Max packet size */ + + struct list_head uc_mac_list; /* Store VF unicast table */ + struct list_head mc_mac_list; /* Store VF multicast table */ + struct list_head vlan_list; /* Store VF vlan table */ }; void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, @@ -888,4 +915,17 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf); u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id); int hclge_notify_client(struct hclge_dev *hdev, enum hnae3_reset_notify_type type); +void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, + enum HCLGE_MAC_ADDR_TYPE mac_type); +void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, + bool is_write_tbl, + enum HCLGE_MAC_ADDR_TYPE mac_type); +void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list, + enum HCLGE_MAC_ADDR_TYPE mac_type); +void hclge_uninit_vport_mac_table(struct hclge_dev *hdev); +void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id); +void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, + bool is_write_tbl); +void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list); +void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index 7e4a104582d6..9e0952c329bb 100644 --- 
a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -224,12 +224,24 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport, hclge_rm_uc_addr_common(vport, old_addr); status = hclge_add_uc_addr_common(vport, mac_addr); - if (status) + if (status) { hclge_add_uc_addr_common(vport, old_addr); + } else { + hclge_rm_vport_mac_table(vport, mac_addr, + false, HCLGE_MAC_ADDR_UC); + hclge_add_vport_mac_table(vport, mac_addr, + HCLGE_MAC_ADDR_UC); + } } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_ADD) { status = hclge_add_uc_addr_common(vport, mac_addr); + if (!status) + hclge_add_vport_mac_table(vport, mac_addr, + HCLGE_MAC_ADDR_UC); } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_REMOVE) { status = hclge_rm_uc_addr_common(vport, mac_addr); + if (!status) + hclge_rm_vport_mac_table(vport, mac_addr, + false, HCLGE_MAC_ADDR_UC); } else { dev_err(&hdev->pdev->dev, "failed to set unicast mac addr, unknown subcode %d\n", @@ -255,8 +267,14 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport, if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_ADD) { status = hclge_add_mc_addr_common(vport, mac_addr); + if (!status) + hclge_add_vport_mac_table(vport, mac_addr, + HCLGE_MAC_ADDR_MC); } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_REMOVE) { status = hclge_rm_mc_addr_common(vport, mac_addr); + if (!status) + hclge_rm_vport_mac_table(vport, mac_addr, + false, HCLGE_MAC_ADDR_MC); } else { dev_err(&hdev->pdev->dev, "failed to set mcast mac addr, unknown subcode %d\n", @@ -287,6 +305,9 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport, memcpy(&proto, &mbx_req->msg[5], sizeof(proto)); status = hclge_set_vlan_filter(handle, cpu_to_be16(proto), vlan, is_kill); + if (!status) + is_kill ? hclge_rm_vport_vlan_table(vport, vlan, false) + : hclge_add_vport_vlan_table(vport, vlan); } else if (mbx_req->msg[1] == HCLGE_MBX_VLAN_RX_OFF_CFG) { struct hnae3_handle *handle = &vport->nic; bool en = mbx_req->msg[2] ? 
true : false; @@ -585,6 +606,15 @@ void hclge_mbx_handler(struct hclge_dev *hdev) case HCLGE_MBX_GET_LINK_MODE: hclge_get_link_mode(vport, req); break; + case HCLGE_MBX_GET_VF_FLR_STATUS: + mutex_lock(&hdev->vport_cfg_mutex); + hclge_rm_vport_all_mac_table(vport, true, + HCLGE_MAC_ADDR_UC); + hclge_rm_vport_all_mac_table(vport, true, + HCLGE_MAC_ADDR_MC); + hclge_rm_vport_all_vlan_table(vport, true); + mutex_unlock(&hdev->vport_cfg_mutex); + break; default: dev_err(&hdev->pdev->dev, "un-supported mailbox message, code = %d\n", diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index 84f28785ba28..48eda2c6fdae 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -8,12 +8,6 @@ #include "hclge_main.h" #include "hclge_mdio.h" -#define HCLGE_PHY_SUPPORTED_FEATURES (SUPPORTED_Autoneg | \ - SUPPORTED_TP | \ - PHY_10BT_FEATURES | \ - PHY_100BT_FEATURES | \ - SUPPORTED_1000baseT_Full) - enum hclge_mdio_c22_op_seq { HCLGE_MDIO_C22_WRITE = 1, HCLGE_MDIO_C22_READ = 2 @@ -217,16 +211,9 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle) return ret; } - linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask); - linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, mask); - linkmode_set_bit_array(phy_10_100_features_array, - ARRAY_SIZE(phy_10_100_features_array), - mask); - linkmode_set_bit_array(phy_gbit_features_array, - ARRAY_SIZE(phy_gbit_features_array), - mask); + linkmode_copy(mask, hdev->hw.mac.supported); linkmode_and(phydev->supported, phydev->supported, mask); - phy_support_asym_pause(phydev); + linkmode_copy(phydev->advertising, phydev->supported); return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c index 4e78e8812a04..9441b453d38d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c @@ -362,8 +362,28 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev) return 0; } +static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw) +{ + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0); +} + void hclgevf_cmd_uninit(struct hclgevf_dev *hdev) { + spin_lock_bh(&hdev->hw.cmq.csq.lock); + spin_lock(&hdev->hw.cmq.crq.lock); + clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); + hclgevf_cmd_uninit_regs(&hdev->hw); + spin_unlock(&hdev->hw.cmq.crq.lock); + spin_unlock_bh(&hdev->hw.cmq.csq.lock); hclgevf_free_cmd_desc(&hdev->hw.cmq.csq); hclgevf_free_cmd_desc(&hdev->hw.cmq.crq); } diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c index 2f7ae118217f..1274ad24d6af 100644 --- a/drivers/net/ethernet/i825xx/lib82596.c +++ b/drivers/net/ethernet/i825xx/lib82596.c @@ -1194,7 +1194,7 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id) dma_unmap_single(dev->dev.parent, tx_cmd->dma_addr, skb->len, DMA_TO_DEVICE); - dev_kfree_skb_irq(skb); + 
dev_consume_skb_irq(skb); tx_cmd->cmd.command = 0; /* Mark free */ break; diff --git a/drivers/net/ethernet/ibm/emac/Kconfig b/drivers/net/ethernet/ibm/emac/Kconfig index 90d49191beb3..eacf7e141fdc 100644 --- a/drivers/net/ethernet/ibm/emac/Kconfig +++ b/drivers/net/ethernet/ibm/emac/Kconfig @@ -28,18 +28,6 @@ config IBM_EMAC_RX_COPY_THRESHOLD depends on IBM_EMAC default "256" -config IBM_EMAC_RX_SKB_HEADROOM - int "Additional RX skb headroom (bytes)" - depends on IBM_EMAC - default "0" - help - Additional receive skb headroom. Note, that driver - will always reserve at least 2 bytes to make IP header - aligned, so usually there is no need to add any additional - headroom. - - If unsure, set to 0. - config IBM_EMAC_DEBUG bool "Debugging" depends on IBM_EMAC diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 209255495bc9..3c2a5759844a 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -1071,7 +1071,9 @@ static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu) /* Second pass, allocate new skbs */ for (i = 0; i < NUM_RX_BUFF; ++i) { - struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC); + struct sk_buff *skb; + + skb = netdev_alloc_skb_ip_align(dev->ndev, rx_skb_size); if (!skb) { ret = -ENOMEM; goto oom; @@ -1080,10 +1082,10 @@ static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu) BUG_ON(!dev->rx_skb[i]); dev_kfree_skb(dev->rx_skb[i]); - skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2); dev->rx_desc[i].data_ptr = - dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size, - DMA_FROM_DEVICE) + 2; + dma_map_single(&dev->ofdev->dev, skb->data - NET_IP_ALIGN, + rx_sync_size, DMA_FROM_DEVICE) + + NET_IP_ALIGN; dev->rx_skb[i] = skb; } skip: @@ -1174,20 +1176,18 @@ static void emac_clean_rx_ring(struct emac_instance *dev) } } -static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot, - gfp_t flags) +static int +__emac_prepare_rx_skb(struct sk_buff *skb, struct emac_instance *dev, int slot) { - struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags); if (unlikely(!skb)) return -ENOMEM; dev->rx_skb[slot] = skb; dev->rx_desc[slot].data_len = 0; - skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2); dev->rx_desc[slot].data_ptr = - dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size, - DMA_FROM_DEVICE) + 2; + dma_map_single(&dev->ofdev->dev, skb->data - NET_IP_ALIGN, + dev->rx_sync_size, DMA_FROM_DEVICE) + NET_IP_ALIGN; wmb(); dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY | (slot == (NUM_RX_BUFF - 1) ? 
MAL_RX_CTRL_WRAP : 0); @@ -1195,6 +1195,27 @@ static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot, return 0; } +static int +emac_alloc_rx_skb(struct emac_instance *dev, int slot) +{ + struct sk_buff *skb; + + skb = __netdev_alloc_skb_ip_align(dev->ndev, dev->rx_skb_size, + GFP_KERNEL); + + return __emac_prepare_rx_skb(skb, dev, slot); +} + +static int +emac_alloc_rx_skb_napi(struct emac_instance *dev, int slot) +{ + struct sk_buff *skb; + + skb = napi_alloc_skb(&dev->mal->napi, dev->rx_skb_size); + + return __emac_prepare_rx_skb(skb, dev, slot); +} + static void emac_print_link_status(struct emac_instance *dev) { if (netif_carrier_ok(dev->ndev)) @@ -1225,7 +1246,7 @@ static int emac_open(struct net_device *ndev) /* Allocate RX ring */ for (i = 0; i < NUM_RX_BUFF; ++i) - if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) { + if (emac_alloc_rx_skb(dev, i)) { printk(KERN_ERR "%s: failed to allocate RX ring\n", ndev->name); goto oom; @@ -1660,8 +1681,9 @@ static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot, DBG2(dev, "recycle %d %d" NL, slot, len); if (len) - dma_map_single(&dev->ofdev->dev, skb->data - 2, - EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE); + dma_map_single(&dev->ofdev->dev, skb->data - NET_IP_ALIGN, + SKB_DATA_ALIGN(len + NET_IP_ALIGN), + DMA_FROM_DEVICE); dev->rx_desc[slot].data_len = 0; wmb(); @@ -1713,7 +1735,7 @@ static inline int emac_rx_sg_append(struct emac_instance *dev, int slot) int len = dev->rx_desc[slot].data_len; int tot_len = dev->rx_sg_skb->len + len; - if (unlikely(tot_len + 2 > dev->rx_skb_size)) { + if (unlikely(tot_len + NET_IP_ALIGN > dev->rx_skb_size)) { ++dev->estats.rx_dropped_mtu; dev_kfree_skb(dev->rx_sg_skb); dev->rx_sg_skb = NULL; @@ -1769,16 +1791,18 @@ static int emac_poll_rx(void *param, int budget) } if (len && len < EMAC_RX_COPY_THRESH) { - struct sk_buff *copy_skb = - alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC); + struct sk_buff *copy_skb; + + copy_skb = napi_alloc_skb(&dev->mal->napi, len); if (unlikely(!copy_skb)) goto oom; - skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2); - memcpy(copy_skb->data - 2, skb->data - 2, len + 2); + memcpy(copy_skb->data - NET_IP_ALIGN, + skb->data - NET_IP_ALIGN, + len + NET_IP_ALIGN); emac_recycle_rx_skb(dev, slot, len); skb = copy_skb; - } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) + } else if (unlikely(emac_alloc_rx_skb_napi(dev, slot))) goto oom; skb_put(skb, len); @@ -1799,7 +1823,7 @@ static int emac_poll_rx(void *param, int budget) sg: if (ctrl & MAL_RX_CTRL_FIRST) { BUG_ON(dev->rx_sg_skb); - if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) { + if (unlikely(emac_alloc_rx_skb_napi(dev, slot))) { DBG(dev, "rx OOM %d" NL, slot); ++dev->estats.rx_dropped_oom; emac_recycle_rx_skb(dev, slot, 0); diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h index 84caa4a3fc52..187689cd8212 100644 --- a/drivers/net/ethernet/ibm/emac/core.h +++ b/drivers/net/ethernet/ibm/emac/core.h @@ -68,22 +68,18 @@ static inline int emac_rx_size(int mtu) return mal_rx_size(ETH_DATA_LEN + EMAC_MTU_OVERHEAD); } -#define EMAC_DMA_ALIGN(x) ALIGN((x), dma_get_cache_alignment()) - -#define EMAC_RX_SKB_HEADROOM \ - EMAC_DMA_ALIGN(CONFIG_IBM_EMAC_RX_SKB_HEADROOM) - /* Size of RX skb for the given MTU */ static inline int emac_rx_skb_size(int mtu) { int size = max(mtu + EMAC_MTU_OVERHEAD, emac_rx_size(mtu)); - return EMAC_DMA_ALIGN(size + 2) + EMAC_RX_SKB_HEADROOM; + + return SKB_DATA_ALIGN(size + NET_IP_ALIGN) + NET_SKB_PAD; } /* RX DMA sync 
size */ static inline int emac_rx_sync_size(int mtu) { - return EMAC_DMA_ALIGN(emac_rx_size(mtu) + 2); + return SKB_DATA_ALIGN(emac_rx_size(mtu) + NET_IP_ALIGN); } /* Driver statistcs is split into two parts to make it more cache friendly: diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c index 257bd59bc9c6..f86d55657959 100644 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c @@ -696,11 +696,16 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) ret_val = e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, &kum_reg_data); - if (ret_val) - return ret_val; - kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; - e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, - kum_reg_data); + if (!ret_val) { + kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_INBAND_PARAM, + kum_reg_data); + if (ret_val) + e_dbg("Error disabling far-end loopback\n"); + } else { + e_dbg("Error disabling far-end loopback\n"); + } ret_val = e1000e_get_auto_rd_done(hw); if (ret_val) @@ -754,11 +759,19 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) return ret_val; /* Disable IBIST slave mode (far-end loopback) */ - e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, - &kum_reg_data); - kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; - e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, - kum_reg_data); + ret_val = + e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, + &kum_reg_data); + if (!ret_val) { + kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_INBAND_PARAM, + kum_reg_data); + if (ret_val) + e_dbg("Error disabling far-end loopback\n"); + } else { + e_dbg("Error disabling far-end loopback\n"); + } /* Set the transmit descriptor write-back policy */ reg_data = er32(TXDCTL(0)); diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 189f231075c2..736fa51878f8 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -5309,8 +5309,13 @@ static void e1000_watchdog_task(struct work_struct *work) /* 8000ES2LAN requires a Rx packet buffer work-around * on link down event; reset the controller to flush * the Rx packet buffer. + * + * If the link is lost the controller stops DMA, but + * if there is queued Tx work it cannot be done. So + * reset the controller to flush the Tx packet buffers. */ - if (adapter->flags & FLAG_RX_NEEDS_RESTART) + if ((adapter->flags & FLAG_RX_NEEDS_RESTART) || + e1000_desc_unused(tx_ring) + 1 < tx_ring->count) adapter->flags |= FLAG_RESTART_NOW; else pm_schedule_suspend(netdev->dev.parent, @@ -5333,14 +5338,6 @@ link_up: adapter->gotc_old = adapter->stats.gotc; spin_unlock(&adapter->stats64_lock); - /* If the link is lost the controller stops DMA, but - * if there is queued Tx work it cannot be done. So - * reset the controller to flush the Tx packet buffers. - */ - if (!netif_carrier_ok(netdev) && - (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) - adapter->flags |= FLAG_RESTART_NOW; - /* If reset is necessary, do it outside of interrupt context. 
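
The e1000e watchdog change above folds the Tx-flush check into the link-down path: when e1000_desc_unused(tx_ring) + 1 < tx_ring->count, descriptors are still queued, so the controller is reset to flush them. A small model of that ring-occupancy arithmetic, with desc_unused() written to the usual next_to_clean/next_to_use convention (an assumption here, not code copied from the driver):

#include <stdio.h>

/* Unused descriptors in a ring of 'count' slots given the clean/use
 * cursors; one slot is always kept empty to distinguish full from empty. */
static int desc_unused(int count, int next_to_clean, int next_to_use)
{
	if (next_to_clean > next_to_use)
		return next_to_clean - next_to_use - 1;
	return count + next_to_clean - next_to_use - 1;
}

int main(void)
{
	int count = 256;

	/* 4 descriptors still queued for transmit */
	int unused = desc_unused(count, 10, 14);

	printf("unused=%d -> tx pending: %s\n", unused,
	       unused + 1 < count ? "yes" : "no");

	/* ring empty: next_to_clean == next_to_use */
	unused = desc_unused(count, 20, 20);
	printf("unused=%d -> tx pending: %s\n", unused,
	       unused + 1 < count ? "yes" : "no");
	return 0;
}

An empty ring reports count - 1 unused slots, so "unused + 1 < count" is true exactly when at least one descriptor is outstanding.
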
*/ if (adapter->flags & FLAG_RESTART_NOW) { schedule_work(&adapter->reset_task); @@ -7351,6 +7348,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) e1000_print_device_info(adapter); + dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP); + if (pci_dev_run_wake(pdev)) pm_runtime_put_noidle(&pdev->dev); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 6fd15a734324..5a0419421511 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -1603,14 +1603,12 @@ static int fm10k_alloc_q_vector(struct fm10k_intfc *interface, { struct fm10k_q_vector *q_vector; struct fm10k_ring *ring; - int ring_count, size; + int ring_count; ring_count = txr_count + rxr_count; - size = sizeof(struct fm10k_q_vector) + - (sizeof(struct fm10k_ring) * ring_count); /* allocate q_vector and rings */ - q_vector = kzalloc(size, GFP_KERNEL); + q_vector = kzalloc(struct_size(q_vector, ring, ring_count), GFP_KERNEL); if (!q_vector) return -ENOMEM; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index 8f0a99b6a537..cb4d02629b86 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -1148,7 +1148,7 @@ static void fm10k_iov_update_stats_pf(struct fm10k_hw *hw, * @results: Pointer array to message, results[0] is pointer to message * @mbx: Pointer to mailbox information structure * - * This function is a default handler for MSI-X requests from the VF. The + * This function is a default handler for MSI-X requests from the VF. The * assumption is that in this case it is acceptable to just directly * hand off the message from the VF to the underlying shared code. 
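
The fm10k allocation above is converted to struct_size(), which computes the size of a structure ending in a flexible array member. A simplified stand-in for the arithmetic (the kernel helper additionally saturates on multiplication overflow, which this sketch omits):

#include <stdio.h>
#include <stdlib.h>

struct ring {
	int id;
};

struct q_vector {
	int nr_rings;
	struct ring ring[];	/* flexible array member */
};

/* What struct_size(q, ring, n) evaluates to, minus the overflow check */
static size_t q_vector_size(size_t n)
{
	return sizeof(struct q_vector) + n * sizeof(struct ring);
}

int main(void)
{
	size_t n = 4;
	struct q_vector *q = calloc(1, q_vector_size(n));

	if (!q)
		return 1;
	q->nr_rings = (int)n;
	printf("%zu bytes for %d rings\n", q_vector_size(n), q->nr_rings);
	free(q);
	return 0;
}

Replacing the open-coded "sizeof(header) + n * sizeof(element)" with the helper is purely a hardening change; the allocated size is the same when no overflow occurs.
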
**/ diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 5c6731b97059..5e74a5127849 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -7169,11 +7169,13 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi, struct tc_cls_flower_offload *f, struct i40e_cloud_filter *filter) { + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f); + struct flow_dissector *dissector = rule->match.dissector; u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0; struct i40e_pf *pf = vsi->back; u8 field_flags = 0; - if (f->dissector->used_keys & + if (dissector->used_keys & ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | BIT(FLOW_DISSECTOR_KEY_BASIC) | BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | @@ -7183,143 +7185,109 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi, BIT(FLOW_DISSECTOR_KEY_PORTS) | BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) { dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n", - f->dissector->used_keys); + dissector->used_keys); return -EOPNOTSUPP; } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) { - struct flow_dissector_key_keyid *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ENC_KEYID, - f->key); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { + struct flow_match_enc_keyid match; - struct flow_dissector_key_keyid *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ENC_KEYID, - f->mask); - - if (mask->keyid != 0) + flow_rule_match_enc_keyid(rule, &match); + if (match.mask->keyid != 0) field_flags |= I40E_CLOUD_FIELD_TEN_ID; - filter->tenant_id = be32_to_cpu(key->keyid); + filter->tenant_id = be32_to_cpu(match.key->keyid); } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { - struct flow_dissector_key_basic *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_BASIC, - f->key); - - struct flow_dissector_key_basic *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_BASIC, - f->mask); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; - n_proto_key = ntohs(key->n_proto); - n_proto_mask = ntohs(mask->n_proto); + flow_rule_match_basic(rule, &match); + n_proto_key = ntohs(match.key->n_proto); + n_proto_mask = ntohs(match.mask->n_proto); if (n_proto_key == ETH_P_ALL) { n_proto_key = 0; n_proto_mask = 0; } filter->n_proto = n_proto_key & n_proto_mask; - filter->ip_proto = key->ip_proto; + filter->ip_proto = match.key->ip_proto; } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { - struct flow_dissector_key_eth_addrs *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ETH_ADDRS, - f->key); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; - struct flow_dissector_key_eth_addrs *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ETH_ADDRS, - f->mask); + flow_rule_match_eth_addrs(rule, &match); /* use is_broadcast and is_zero to check for all 0xf or 0 */ - if (!is_zero_ether_addr(mask->dst)) { - if (is_broadcast_ether_addr(mask->dst)) { + if (!is_zero_ether_addr(match.mask->dst)) { + if (is_broadcast_ether_addr(match.mask->dst)) { field_flags |= I40E_CLOUD_FIELD_OMAC; } else { dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n", - mask->dst); + match.mask->dst); return I40E_ERR_CONFIG; } } - if (!is_zero_ether_addr(mask->src)) { - if (is_broadcast_ether_addr(mask->src)) { + if 
(!is_zero_ether_addr(match.mask->src)) { + if (is_broadcast_ether_addr(match.mask->src)) { field_flags |= I40E_CLOUD_FIELD_IMAC; } else { dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n", - mask->src); + match.mask->src); return I40E_ERR_CONFIG; } } - ether_addr_copy(filter->dst_mac, key->dst); - ether_addr_copy(filter->src_mac, key->src); + ether_addr_copy(filter->dst_mac, match.key->dst); + ether_addr_copy(filter->src_mac, match.key->src); } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) { - struct flow_dissector_key_vlan *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_VLAN, - f->key); - struct flow_dissector_key_vlan *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_VLAN, - f->mask); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; - if (mask->vlan_id) { - if (mask->vlan_id == VLAN_VID_MASK) { + flow_rule_match_vlan(rule, &match); + if (match.mask->vlan_id) { + if (match.mask->vlan_id == VLAN_VID_MASK) { field_flags |= I40E_CLOUD_FIELD_IVLAN; } else { dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n", - mask->vlan_id); + match.mask->vlan_id); return I40E_ERR_CONFIG; } } - filter->vlan_id = cpu_to_be16(key->vlan_id); + filter->vlan_id = cpu_to_be16(match.key->vlan_id); } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { - struct flow_dissector_key_control *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_CONTROL, - f->key); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_match_control match; - addr_type = key->addr_type; + flow_rule_match_control(rule, &match); + addr_type = match.key->addr_type; } if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { - struct flow_dissector_key_ipv4_addrs *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV4_ADDRS, - f->key); - struct flow_dissector_key_ipv4_addrs *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV4_ADDRS, - f->mask); - - if (mask->dst) { - if (mask->dst == cpu_to_be32(0xffffffff)) { + struct flow_match_ipv4_addrs match; + + flow_rule_match_ipv4_addrs(rule, &match); + if (match.mask->dst) { + if (match.mask->dst == cpu_to_be32(0xffffffff)) { field_flags |= I40E_CLOUD_FIELD_IIP; } else { dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n", - &mask->dst); + &match.mask->dst); return I40E_ERR_CONFIG; } } - if (mask->src) { - if (mask->src == cpu_to_be32(0xffffffff)) { + if (match.mask->src) { + if (match.mask->src == cpu_to_be32(0xffffffff)) { field_flags |= I40E_CLOUD_FIELD_IIP; } else { dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n", - &mask->src); + &match.mask->src); return I40E_ERR_CONFIG; } } @@ -7328,70 +7296,60 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi, dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n"); return I40E_ERR_CONFIG; } - filter->dst_ipv4 = key->dst; - filter->src_ipv4 = key->src; + filter->dst_ipv4 = match.key->dst; + filter->src_ipv4 = match.key->src; } if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { - struct flow_dissector_key_ipv6_addrs *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV6_ADDRS, - f->key); - struct flow_dissector_key_ipv6_addrs *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV6_ADDRS, - f->mask); + struct flow_match_ipv6_addrs match; + + flow_rule_match_ipv6_addrs(rule, &match); /* src and dest IPV6 address should not be LOOPBACK * (0:0:0:0:0:0:0:1), which can be represented as ::1 */ - if 
(ipv6_addr_loopback(&key->dst) || - ipv6_addr_loopback(&key->src)) { + if (ipv6_addr_loopback(&match.key->dst) || + ipv6_addr_loopback(&match.key->src)) { dev_err(&pf->pdev->dev, "Bad ipv6, addr is LOOPBACK\n"); return I40E_ERR_CONFIG; } - if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src)) + if (!ipv6_addr_any(&match.mask->dst) || + !ipv6_addr_any(&match.mask->src)) field_flags |= I40E_CLOUD_FIELD_IIP; - memcpy(&filter->src_ipv6, &key->src.s6_addr32, + memcpy(&filter->src_ipv6, &match.key->src.s6_addr32, sizeof(filter->src_ipv6)); - memcpy(&filter->dst_ipv6, &key->dst.s6_addr32, + memcpy(&filter->dst_ipv6, &match.key->dst.s6_addr32, sizeof(filter->dst_ipv6)); } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) { - struct flow_dissector_key_ports *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_PORTS, - f->key); - struct flow_dissector_key_ports *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_PORTS, - f->mask); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports match; - if (mask->src) { - if (mask->src == cpu_to_be16(0xffff)) { + flow_rule_match_ports(rule, &match); + if (match.mask->src) { + if (match.mask->src == cpu_to_be16(0xffff)) { field_flags |= I40E_CLOUD_FIELD_IIP; } else { dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n", - be16_to_cpu(mask->src)); + be16_to_cpu(match.mask->src)); return I40E_ERR_CONFIG; } } - if (mask->dst) { - if (mask->dst == cpu_to_be16(0xffff)) { + if (match.mask->dst) { + if (match.mask->dst == cpu_to_be16(0xffff)) { field_flags |= I40E_CLOUD_FIELD_IIP; } else { dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n", - be16_to_cpu(mask->dst)); + be16_to_cpu(match.mask->dst)); return I40E_ERR_CONFIG; } } - filter->dst_port = key->dst; - filter->src_port = key->src; + filter->dst_port = match.key->dst; + filter->src_port = match.key->src; switch (filter->ip_proto) { case IPPROTO_TCP: @@ -12170,9 +12128,6 @@ static int i40e_xdp(struct net_device *dev, case XDP_QUERY_PROG: xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0; return 0; - case XDP_QUERY_XSK_UMEM: - return i40e_xsk_umem_query(vsi, &xdp->xsk.umem, - xdp->xsk.queue_id); case XDP_SETUP_XSK_UMEM: return i40e_xsk_umem_setup(vsi, xdp->xsk.umem, xdp->xsk.queue_id); diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index 96d849460d9b..e190a2c2b9ff 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -155,34 +155,6 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid) } /** - * i40e_xsk_umem_query - Queries a certain ring/qid for its UMEM - * @vsi: Current VSI - * @umem: UMEM associated to the ring, if any - * @qid: Rx ring to associate UMEM to - * - * This function will store, if any, the UMEM associated to certain ring. 
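
The removed i40e_xsk_umem_query() was itself a thin wrapper around xdp_get_umem_from_qid(), so the core can perform the per-queue lookup without calling back into the driver. A toy model of that kind of core-owned qid-to-UMEM table (all names here are illustrative, not the kernel's):

#include <stdio.h>

#define MAX_QUEUES 8

/* Core-owned per-queue table; drivers no longer answer the query. */
static void *umem_tbl[MAX_QUEUES];

static void *get_umem_from_qid(unsigned int qid)
{
	return qid < MAX_QUEUES ? umem_tbl[qid] : NULL;
}

int main(void)
{
	static int dummy_umem;

	umem_tbl[3] = &dummy_umem;	/* bound at setup time */
	printf("qid 3: %s\n", get_umem_from_qid(3) ? "bound" : "unbound");
	printf("qid 5: %s\n", get_umem_from_qid(5) ? "bound" : "unbound");
	return 0;
}
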
- * - * Returns 0 on success, <0 on failure - **/ -int i40e_xsk_umem_query(struct i40e_vsi *vsi, struct xdp_umem **umem, - u16 qid) -{ - struct net_device *netdev = vsi->netdev; - struct xdp_umem *queried_umem; - - if (vsi->type != I40E_VSI_MAIN) - return -EINVAL; - - queried_umem = xdp_get_umem_from_qid(netdev, qid); - - if (!queried_umem) - return -EINVAL; - - *umem = queried_umem; - return 0; -} - -/** * i40e_xsk_umem_setup - Enable/disassociate a UMEM to/from a ring/qid * @vsi: Current VSI * @umem: UMEM to enable/associate to a ring, or NULL to disable diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h index 9038c5d5cf08..8cc0a2e7d9a2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h @@ -10,8 +10,6 @@ struct zero_copy_allocator; int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair); int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair); -int i40e_xsk_umem_query(struct i40e_vsi *vsi, struct xdp_umem **umem, - u16 qid); int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem, u16 qid); void i40e_zca_free(struct zero_copy_allocator *alloc, unsigned long handle); diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 9f2b7b7adf6b..4569d69a2b55 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -2439,6 +2439,8 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, struct tc_cls_flower_offload *f, struct iavf_cloud_filter *filter) { + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f); + struct flow_dissector *dissector = rule->match.dissector; u16 n_proto_mask = 0; u16 n_proto_key = 0; u8 field_flags = 0; @@ -2447,7 +2449,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, int i = 0; struct virtchnl_filter *vf = &filter->f; - if (f->dissector->used_keys & + if (dissector->used_keys & ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | BIT(FLOW_DISSECTOR_KEY_BASIC) | BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | @@ -2457,32 +2459,24 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, BIT(FLOW_DISSECTOR_KEY_PORTS) | BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) { dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n", - f->dissector->used_keys); + dissector->used_keys); return -EOPNOTSUPP; } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) { - struct flow_dissector_key_keyid *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ENC_KEYID, - f->mask); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { + struct flow_match_enc_keyid match; - if (mask->keyid != 0) + flow_rule_match_enc_keyid(rule, &match); + if (match.mask->keyid != 0) field_flags |= IAVF_CLOUD_FIELD_TEN_ID; } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { - struct flow_dissector_key_basic *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_BASIC, - f->key); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; - struct flow_dissector_key_basic *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_BASIC, - f->mask); - n_proto_key = ntohs(key->n_proto); - n_proto_mask = ntohs(mask->n_proto); + flow_rule_match_basic(rule, &match); + n_proto_key = ntohs(match.key->n_proto); + n_proto_mask = ntohs(match.mask->n_proto); if (n_proto_key == ETH_P_ALL) { n_proto_key = 0; @@ -2496,122 +2490,103 @@ static int 
iavf_parse_cls_flower(struct iavf_adapter *adapter, vf->flow_type = VIRTCHNL_TCP_V6_FLOW; } - if (key->ip_proto != IPPROTO_TCP) { + if (match.key->ip_proto != IPPROTO_TCP) { dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n"); return -EINVAL; } } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { - struct flow_dissector_key_eth_addrs *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ETH_ADDRS, - f->key); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; + + flow_rule_match_eth_addrs(rule, &match); - struct flow_dissector_key_eth_addrs *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ETH_ADDRS, - f->mask); /* use is_broadcast and is_zero to check for all 0xf or 0 */ - if (!is_zero_ether_addr(mask->dst)) { - if (is_broadcast_ether_addr(mask->dst)) { + if (!is_zero_ether_addr(match.mask->dst)) { + if (is_broadcast_ether_addr(match.mask->dst)) { field_flags |= IAVF_CLOUD_FIELD_OMAC; } else { dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n", - mask->dst); + match.mask->dst); return I40E_ERR_CONFIG; } } - if (!is_zero_ether_addr(mask->src)) { - if (is_broadcast_ether_addr(mask->src)) { + if (!is_zero_ether_addr(match.mask->src)) { + if (is_broadcast_ether_addr(match.mask->src)) { field_flags |= IAVF_CLOUD_FIELD_IMAC; } else { dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n", - mask->src); + match.mask->src); return I40E_ERR_CONFIG; } } - if (!is_zero_ether_addr(key->dst)) - if (is_valid_ether_addr(key->dst) || - is_multicast_ether_addr(key->dst)) { + if (!is_zero_ether_addr(match.key->dst)) + if (is_valid_ether_addr(match.key->dst) || + is_multicast_ether_addr(match.key->dst)) { /* set the mask if a valid dst_mac address */ for (i = 0; i < ETH_ALEN; i++) vf->mask.tcp_spec.dst_mac[i] |= 0xff; ether_addr_copy(vf->data.tcp_spec.dst_mac, - key->dst); + match.key->dst); } - if (!is_zero_ether_addr(key->src)) - if (is_valid_ether_addr(key->src) || - is_multicast_ether_addr(key->src)) { + if (!is_zero_ether_addr(match.key->src)) + if (is_valid_ether_addr(match.key->src) || + is_multicast_ether_addr(match.key->src)) { /* set the mask if a valid dst_mac address */ for (i = 0; i < ETH_ALEN; i++) vf->mask.tcp_spec.src_mac[i] |= 0xff; ether_addr_copy(vf->data.tcp_spec.src_mac, - key->src); + match.key->src); } } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) { - struct flow_dissector_key_vlan *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_VLAN, - f->key); - struct flow_dissector_key_vlan *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_VLAN, - f->mask); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; - if (mask->vlan_id) { - if (mask->vlan_id == VLAN_VID_MASK) { + flow_rule_match_vlan(rule, &match); + if (match.mask->vlan_id) { + if (match.mask->vlan_id == VLAN_VID_MASK) { field_flags |= IAVF_CLOUD_FIELD_IVLAN; } else { dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n", - mask->vlan_id); + match.mask->vlan_id); return I40E_ERR_CONFIG; } } vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff); - vf->data.tcp_spec.vlan_id = cpu_to_be16(key->vlan_id); + vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id); } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { - struct flow_dissector_key_control *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_CONTROL, - f->key); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) 
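
Throughout these flower conversions the stored filter value is derived as key & mask, and a field participates in matching only where mask bits are set (see the n_proto_key & n_proto_mask assignment above). A minimal demonstration of that key/mask convention (pkt_matches() and the sample values are illustrative):

#include <stdio.h>

/* A field matches when it agrees with the key on every bit the mask
 * selects; a zero mask therefore wildcards the field entirely. */
static int pkt_matches(unsigned short field,
		       unsigned short key, unsigned short mask)
{
	return (field & mask) == (key & mask);
}

int main(void)
{
	unsigned short ipv6 = 0x86dd, ipv4_key = 0x0800;

	printf("wildcard mask: %d\n", pkt_matches(ipv6, ipv4_key, 0x0000));
	printf("full mask:     %d\n", pkt_matches(ipv6, ipv4_key, 0xffff));
	return 0;
}

This is also why the drivers reject partial masks they cannot offload: the hardware tables only support all-or-nothing matching on these fields.
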
{ + struct flow_match_control match; - addr_type = key->addr_type; + flow_rule_match_control(rule, &match); + addr_type = match.key->addr_type; } if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { - struct flow_dissector_key_ipv4_addrs *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV4_ADDRS, - f->key); - struct flow_dissector_key_ipv4_addrs *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV4_ADDRS, - f->mask); - - if (mask->dst) { - if (mask->dst == cpu_to_be32(0xffffffff)) { + struct flow_match_ipv4_addrs match; + + flow_rule_match_ipv4_addrs(rule, &match); + if (match.mask->dst) { + if (match.mask->dst == cpu_to_be32(0xffffffff)) { field_flags |= IAVF_CLOUD_FIELD_IIP; } else { dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n", - be32_to_cpu(mask->dst)); + be32_to_cpu(match.mask->dst)); return I40E_ERR_CONFIG; } } - if (mask->src) { - if (mask->src == cpu_to_be32(0xffffffff)) { + if (match.mask->src) { + if (match.mask->src == cpu_to_be32(0xffffffff)) { field_flags |= IAVF_CLOUD_FIELD_IIP; } else { dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n", - be32_to_cpu(mask->dst)); + be32_to_cpu(match.mask->dst)); return I40E_ERR_CONFIG; } } @@ -2620,28 +2595,23 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n"); return I40E_ERR_CONFIG; } - if (key->dst) { + if (match.key->dst) { vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff); - vf->data.tcp_spec.dst_ip[0] = key->dst; + vf->data.tcp_spec.dst_ip[0] = match.key->dst; } - if (key->src) { + if (match.key->src) { vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff); - vf->data.tcp_spec.src_ip[0] = key->src; + vf->data.tcp_spec.src_ip[0] = match.key->src; } } if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { - struct flow_dissector_key_ipv6_addrs *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV6_ADDRS, - f->key); - struct flow_dissector_key_ipv6_addrs *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV6_ADDRS, - f->mask); + struct flow_match_ipv6_addrs match; + + flow_rule_match_ipv6_addrs(rule, &match); /* validate mask, make sure it is not IPV6_ADDR_ANY */ - if (ipv6_addr_any(&mask->dst)) { + if (ipv6_addr_any(&match.mask->dst)) { dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n", IPV6_ADDR_ANY); return I40E_ERR_CONFIG; @@ -2650,61 +2620,56 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, /* src and dest IPv6 address should not be LOOPBACK * (0:0:0:0:0:0:0:1) which can be represented as ::1 */ - if (ipv6_addr_loopback(&key->dst) || - ipv6_addr_loopback(&key->src)) { + if (ipv6_addr_loopback(&match.key->dst) || + ipv6_addr_loopback(&match.key->src)) { dev_err(&adapter->pdev->dev, "ipv6 addr should not be loopback\n"); return I40E_ERR_CONFIG; } - if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src)) + if (!ipv6_addr_any(&match.mask->dst) || + !ipv6_addr_any(&match.mask->src)) field_flags |= IAVF_CLOUD_FIELD_IIP; for (i = 0; i < 4; i++) vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff); - memcpy(&vf->data.tcp_spec.dst_ip, &key->dst.s6_addr32, + memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32, sizeof(vf->data.tcp_spec.dst_ip)); for (i = 0; i < 4; i++) vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff); - memcpy(&vf->data.tcp_spec.src_ip, &key->src.s6_addr32, + memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32, sizeof(vf->data.tcp_spec.src_ip)); } - if (dissector_uses_key(f->dissector, 
FLOW_DISSECTOR_KEY_PORTS)) { - struct flow_dissector_key_ports *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_PORTS, - f->key); - struct flow_dissector_key_ports *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_PORTS, - f->mask); - - if (mask->src) { - if (mask->src == cpu_to_be16(0xffff)) { + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports match; + + flow_rule_match_ports(rule, &match); + if (match.mask->src) { + if (match.mask->src == cpu_to_be16(0xffff)) { field_flags |= IAVF_CLOUD_FIELD_IIP; } else { dev_err(&adapter->pdev->dev, "Bad src port mask %u\n", - be16_to_cpu(mask->src)); + be16_to_cpu(match.mask->src)); return I40E_ERR_CONFIG; } } - if (mask->dst) { - if (mask->dst == cpu_to_be16(0xffff)) { + if (match.mask->dst) { + if (match.mask->dst == cpu_to_be16(0xffff)) { field_flags |= IAVF_CLOUD_FIELD_IIP; } else { dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n", - be16_to_cpu(mask->dst)); + be16_to_cpu(match.mask->dst)); return I40E_ERR_CONFIG; } } - if (key->dst) { + if (match.key->dst) { vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff); - vf->data.tcp_spec.dst_port = key->dst; + vf->data.tcp_spec.dst_port = match.key->dst; } - if (key->src) { + if (match.key->src) { vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff); - vf->data.tcp_spec.src_port = key->src; + vf->data.tcp_spec.src_port = match.key->src; } } vf->field_flags = field_flags; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index dfa357b1a9d6..69b230c53fed 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -39,7 +39,7 @@ #include "igb.h" #define MAJ 5 -#define MIN 4 +#define MIN 6 #define BUILD 0 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." 
\ __stringify(BUILD) "-k" @@ -1189,15 +1189,15 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter, { struct igb_q_vector *q_vector; struct igb_ring *ring; - int ring_count, size; + int ring_count; + size_t size; /* igb only supports 1 Tx and/or 1 Rx queue per vector */ if (txr_count > 1 || rxr_count > 1) return -ENOMEM; ring_count = txr_count + rxr_count; - size = sizeof(struct igb_q_vector) + - (sizeof(struct igb_ring) * ring_count); + size = struct_size(q_vector, ring, ring_count); /* allocate q_vector and rings */ q_vector = adapter->q_vector[v_idx]; @@ -2581,9 +2581,11 @@ static int igb_parse_cls_flower(struct igb_adapter *adapter, int traffic_class, struct igb_nfc_filter *input) { + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f); + struct flow_dissector *dissector = rule->match.dissector; struct netlink_ext_ack *extack = f->common.extack; - if (f->dissector->used_keys & + if (dissector->used_keys & ~(BIT(FLOW_DISSECTOR_KEY_BASIC) | BIT(FLOW_DISSECTOR_KEY_CONTROL) | BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | @@ -2593,78 +2595,60 @@ static int igb_parse_cls_flower(struct igb_adapter *adapter, return -EOPNOTSUPP; } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { - struct flow_dissector_key_eth_addrs *key, *mask; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; - key = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ETH_ADDRS, - f->key); - mask = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ETH_ADDRS, - f->mask); - - if (!is_zero_ether_addr(mask->dst)) { - if (!is_broadcast_ether_addr(mask->dst)) { + flow_rule_match_eth_addrs(rule, &match); + if (!is_zero_ether_addr(match.mask->dst)) { + if (!is_broadcast_ether_addr(match.mask->dst)) { NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for destination MAC address"); return -EINVAL; } input->filter.match_flags |= IGB_FILTER_FLAG_DST_MAC_ADDR; - ether_addr_copy(input->filter.dst_addr, key->dst); + ether_addr_copy(input->filter.dst_addr, match.key->dst); } - if (!is_zero_ether_addr(mask->src)) { - if (!is_broadcast_ether_addr(mask->src)) { + if (!is_zero_ether_addr(match.mask->src)) { + if (!is_broadcast_ether_addr(match.mask->src)) { NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for source MAC address"); return -EINVAL; } input->filter.match_flags |= IGB_FILTER_FLAG_SRC_MAC_ADDR; - ether_addr_copy(input->filter.src_addr, key->src); + ether_addr_copy(input->filter.src_addr, match.key->src); } } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { - struct flow_dissector_key_basic *key, *mask; - - key = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_BASIC, - f->key); - mask = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_BASIC, - f->mask); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; - if (mask->n_proto) { - if (mask->n_proto != ETHER_TYPE_FULL_MASK) { + flow_rule_match_basic(rule, &match); + if (match.mask->n_proto) { + if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) { NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for EtherType filter"); return -EINVAL; } input->filter.match_flags |= IGB_FILTER_FLAG_ETHER_TYPE; - input->filter.etype = key->n_proto; + input->filter.etype = match.key->n_proto; } } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) { - struct flow_dissector_key_vlan *key, *mask; - - key = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_VLAN, - f->key); - mask = 
skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_VLAN, - f->mask); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; - if (mask->vlan_priority) { - if (mask->vlan_priority != VLAN_PRIO_FULL_MASK) { + flow_rule_match_vlan(rule, &match); + if (match.mask->vlan_priority) { + if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) { NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority"); return -EINVAL; } input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI; - input->filter.vlan_tci = key->vlan_priority; + input->filter.vlan_tci = match.key->vlan_priority; } } diff --git a/drivers/net/ethernet/intel/igc/Makefile b/drivers/net/ethernet/intel/igc/Makefile index 4387f6ba8e67..88c6f88baac5 100644 --- a/drivers/net/ethernet/intel/igc/Makefile +++ b/drivers/net/ethernet/intel/igc/Makefile @@ -7,4 +7,5 @@ obj-$(CONFIG_IGC) += igc.o -igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o +igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \ +igc_ethtool.o diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index b1039dd3dd13..80faccc34cda 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -13,19 +13,43 @@ #include "igc_hw.h" -/* main */ +/* forward declaration */ +void igc_set_ethtool_ops(struct net_device *); + +struct igc_adapter; +struct igc_ring; + +void igc_up(struct igc_adapter *adapter); +void igc_down(struct igc_adapter *adapter); +int igc_setup_tx_resources(struct igc_ring *ring); +int igc_setup_rx_resources(struct igc_ring *ring); +void igc_free_tx_resources(struct igc_ring *ring); +void igc_free_rx_resources(struct igc_ring *ring); +unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter); +void igc_set_flag_queue_pairs(struct igc_adapter *adapter, + const u32 max_rss_queues); +int igc_reinit_queues(struct igc_adapter *adapter); +bool igc_has_link(struct igc_adapter *adapter); +void igc_reset(struct igc_adapter *adapter); +int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx); + extern char igc_driver_name[]; extern char igc_driver_version[]; +#define IGC_REGS_LEN 740 +#define IGC_RETA_SIZE 128 + /* Interrupt defines */ #define IGC_START_ITR 648 /* ~6000 ints/sec */ #define IGC_FLAG_HAS_MSI BIT(0) -#define IGC_FLAG_QUEUE_PAIRS BIT(4) +#define IGC_FLAG_QUEUE_PAIRS BIT(3) +#define IGC_FLAG_DMAC BIT(4) #define IGC_FLAG_NEED_LINK_UPDATE BIT(9) #define IGC_FLAG_MEDIA_RESET BIT(10) #define IGC_FLAG_MAS_ENABLE BIT(12) #define IGC_FLAG_HAS_MSIX BIT(13) #define IGC_FLAG_VLAN_PROMISC BIT(15) +#define IGC_FLAG_RX_LEGACY BIT(16) #define IGC_START_ITR 648 /* ~6000 ints/sec */ #define IGC_4K_ITR 980 @@ -60,6 +84,7 @@ extern char igc_driver_version[]; #define IGC_RXBUFFER_2048 2048 #define IGC_RXBUFFER_3072 3072 +#define AUTO_ALL_MODES 0 #define IGC_RX_HDR_LEN IGC_RXBUFFER_256 /* RX and TX descriptor control thresholds. 
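The iavf and igb cls_flower hunks above all apply one mechanical conversion: probe for a key with flow_rule_match_key(), then let a flow_rule_match_*() helper fill a stack structure that holds both key and mask. A minimal sketch of that pattern follows, written against this tree's <net/flow_offload.h> and tc_cls_flower_offload; the function name and its -EOPNOTSUPP policy are illustrative only, not taken from either driver.

/* Sketch of the match API adopted above; API calls are the ones the
 * patch itself uses, everything else is illustrative.
 */
#include <linux/etherdevice.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>

static int example_eth_match(struct tc_cls_flower_offload *f)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
	struct flow_match_eth_addrs match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS))
		return 0;	/* rule does not match on MAC addresses */

	/* one call fills both key and mask, replacing the two
	 * skb_flow_dissector_target() lookups of the old code
	 */
	flow_rule_match_eth_addrs(rule, &match);

	if (!is_zero_ether_addr(match.mask->dst) &&
	    !is_broadcast_ether_addr(match.mask->dst))
		return -EOPNOTSUPP;	/* only all-or-nothing masks */

	return 0;
}

The old key/mask pointer pairs survive only as match.key and match.mask, which is why the driver bodies change little beyond the spelling of those accesses.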
@@ -340,6 +365,8 @@ struct igc_adapter { struct igc_mac_addr *mac_table; + u8 rss_indir_tbl[IGC_RETA_SIZE]; + unsigned long link_check_timeout; struct igc_info ei; }; @@ -418,6 +445,9 @@ static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data) return 0; } +/* forward declaration */ +void igc_reinit_locked(struct igc_adapter *); + #define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring)) #define IGC_TXD_DCMD (IGC_ADVTXD_DCMD_EOP | IGC_ADVTXD_DCMD_RS) diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c index df40af759542..51a8b8769c67 100644 --- a/drivers/net/ethernet/intel/igc/igc_base.c +++ b/drivers/net/ethernet/intel/igc/igc_base.c @@ -54,22 +54,6 @@ out: } /** - * igc_check_for_link_base - Check for link - * @hw: pointer to the HW structure - * - * If sgmii is enabled, then use the pcs register to determine link, otherwise - * use the generic interface for determining link. - */ -static s32 igc_check_for_link_base(struct igc_hw *hw) -{ - s32 ret_val = 0; - - ret_val = igc_check_for_copper_link(hw); - - return ret_val; -} - -/** * igc_reset_hw_base - Reset hardware * @hw: pointer to the HW structure * @@ -124,22 +108,6 @@ static s32 igc_reset_hw_base(struct igc_hw *hw) } /** - * igc_get_phy_id_base - Retrieve PHY addr and id - * @hw: pointer to the HW structure - * - * Retrieves the PHY address and ID for both PHY's which do and do not use - * sgmi interface. - */ -static s32 igc_get_phy_id_base(struct igc_hw *hw) -{ - s32 ret_val = 0; - - ret_val = igc_get_phy_id(hw); - - return ret_val; -} - -/** * igc_init_nvm_params_base - Init NVM func ptrs. * @hw: pointer to the HW structure */ @@ -163,6 +131,7 @@ static s32 igc_init_nvm_params_base(struct igc_hw *hw) if (size > 15) size = 15; + nvm->type = igc_nvm_eeprom_spi; nvm->word_size = BIT(size); nvm->opcode_bits = 8; nvm->delay_usec = 1; @@ -261,11 +230,11 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw) goto out; } - ret_val = igc_get_phy_id_base(hw); + ret_val = igc_get_phy_id(hw); if (ret_val) return ret_val; - igc_check_for_link_base(hw); + igc_check_for_copper_link(hw); /* Verify phy id and set remaining function pointers */ switch (phy->id) { @@ -350,26 +319,6 @@ static void igc_release_phy_base(struct igc_hw *hw) } /** - * igc_get_link_up_info_base - Get link speed/duplex info - * @hw: pointer to the HW structure - * @speed: stores the current speed - * @duplex: stores the current duplex - * - * This is a wrapper function, if using the serial gigabit media independent - * interface, use PCS to retrieve the link speed and duplex information. - * Otherwise, use the generic function to get the link speed and duplex info. 
- */ -static s32 igc_get_link_up_info_base(struct igc_hw *hw, u16 *speed, - u16 *duplex) -{ - s32 ret_val; - - ret_val = igc_get_speed_and_duplex_copper(hw, speed, duplex); - - return ret_val; -} - -/** * igc_init_hw_base - Initialize hardware * @hw: pointer to the HW structure * @@ -408,19 +357,6 @@ static s32 igc_init_hw_base(struct igc_hw *hw) } /** - * igc_read_mac_addr_base - Read device MAC address - * @hw: pointer to the HW structure - */ -static s32 igc_read_mac_addr_base(struct igc_hw *hw) -{ - s32 ret_val = 0; - - ret_val = igc_read_mac_addr(hw); - - return ret_val; -} - -/** * igc_power_down_phy_copper_base - Remove link during PHY power down * @hw: pointer to the HW structure * @@ -512,10 +448,10 @@ void igc_rx_fifo_flush_base(struct igc_hw *hw) static struct igc_mac_operations igc_mac_ops_base = { .init_hw = igc_init_hw_base, - .check_for_link = igc_check_for_link_base, + .check_for_link = igc_check_for_copper_link, .rar_set = igc_rar_set, - .read_mac_addr = igc_read_mac_addr_base, - .get_speed_and_duplex = igc_get_link_up_info_base, + .read_mac_addr = igc_read_mac_addr, + .get_speed_and_duplex = igc_get_speed_and_duplex_copper, }; static const struct igc_phy_operations igc_phy_ops_base = { diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h index 35588fa7b8c5..76d4991d7284 100644 --- a/drivers/net/ethernet/intel/igc/igc_base.h +++ b/drivers/net/ethernet/intel/igc/igc_base.h @@ -36,28 +36,6 @@ union igc_adv_tx_desc { #define IGC_RAR_ENTRIES 16 -struct igc_adv_data_desc { - __le64 buffer_addr; /* Address of the descriptor's data buffer */ - union { - u32 data; - struct { - u32 datalen:16; /* Data buffer length */ - u32 rsvd:4; - u32 dtyp:4; /* Descriptor type */ - u32 dcmd:8; /* Descriptor command */ - } config; - } lower; - union { - u32 data; - struct { - u32 status:4; /* Descriptor status */ - u32 idx:4; - u32 popts:6; /* Packet Options */ - u32 paylen:18; /* Payload length */ - } options; - } upper; -}; - /* Receive Descriptor - Advanced */ union igc_adv_rx_desc { struct { @@ -90,9 +68,6 @@ union igc_adv_rx_desc { } wb; /* writeback */ }; -/* Adv Transmit Descriptor Config Masks */ -#define IGC_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ - /* Additional Transmit Descriptor Control definitions */ #define IGC_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */ diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index 8740754ea1fd..7d1bdcd1225a 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -4,6 +4,10 @@ #ifndef _IGC_DEFINES_H_ #define _IGC_DEFINES_H_ +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + #define IGC_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */ /* PCI Bus Info */ diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c new file mode 100644 index 000000000000..eff37a6c0afa --- /dev/null +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -0,0 +1,1032 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +/* ethtool support for igc */ +#include <linux/pm_runtime.h> + +#include "igc.h" + +static const char igc_priv_flags_strings[][ETH_GSTRING_LEN] = { +#define IGC_PRIV_FLAGS_LEGACY_RX BIT(0) + "legacy-rx", +}; + +#define IGC_PRIV_FLAGS_STR_LEN ARRAY_SIZE(igc_priv_flags_strings) + +static void 
igc_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, igc_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, igc_driver_version, sizeof(drvinfo->version)); + + /* add fw_version here */ + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + + drvinfo->n_priv_flags = IGC_PRIV_FLAGS_STR_LEN; +} + +static int igc_get_regs_len(struct net_device *netdev) +{ + return IGC_REGS_LEN * sizeof(u32); +} + +static void igc_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u8 i; + + memset(p, 0, IGC_REGS_LEN * sizeof(u32)); + + regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id; + + /* General Registers */ + regs_buff[0] = rd32(IGC_CTRL); + regs_buff[1] = rd32(IGC_STATUS); + regs_buff[2] = rd32(IGC_CTRL_EXT); + regs_buff[3] = rd32(IGC_MDIC); + regs_buff[4] = rd32(IGC_CONNSW); + + /* NVM Register */ + regs_buff[5] = rd32(IGC_EECD); + + /* Interrupt */ + /* Reading EICS for EICR because they read the + * same but EICS does not clear on read + */ + regs_buff[6] = rd32(IGC_EICS); + regs_buff[7] = rd32(IGC_EICS); + regs_buff[8] = rd32(IGC_EIMS); + regs_buff[9] = rd32(IGC_EIMC); + regs_buff[10] = rd32(IGC_EIAC); + regs_buff[11] = rd32(IGC_EIAM); + /* Reading ICS for ICR because they read the + * same but ICS does not clear on read + */ + regs_buff[12] = rd32(IGC_ICS); + regs_buff[13] = rd32(IGC_ICS); + regs_buff[14] = rd32(IGC_IMS); + regs_buff[15] = rd32(IGC_IMC); + regs_buff[16] = rd32(IGC_IAC); + regs_buff[17] = rd32(IGC_IAM); + + /* Flow Control */ + regs_buff[18] = rd32(IGC_FCAL); + regs_buff[19] = rd32(IGC_FCAH); + regs_buff[20] = rd32(IGC_FCTTV); + regs_buff[21] = rd32(IGC_FCRTL); + regs_buff[22] = rd32(IGC_FCRTH); + regs_buff[23] = rd32(IGC_FCRTV); + + /* Receive */ + regs_buff[24] = rd32(IGC_RCTL); + regs_buff[25] = rd32(IGC_RXCSUM); + regs_buff[26] = rd32(IGC_RLPML); + regs_buff[27] = rd32(IGC_RFCTL); + + /* Transmit */ + regs_buff[28] = rd32(IGC_TCTL); + regs_buff[29] = rd32(IGC_TIPG); + + /* Wake Up */ + + /* MAC */ + + /* Statistics */ + regs_buff[30] = adapter->stats.crcerrs; + regs_buff[31] = adapter->stats.algnerrc; + regs_buff[32] = adapter->stats.symerrs; + regs_buff[33] = adapter->stats.rxerrc; + regs_buff[34] = adapter->stats.mpc; + regs_buff[35] = adapter->stats.scc; + regs_buff[36] = adapter->stats.ecol; + regs_buff[37] = adapter->stats.mcc; + regs_buff[38] = adapter->stats.latecol; + regs_buff[39] = adapter->stats.colc; + regs_buff[40] = adapter->stats.dc; + regs_buff[41] = adapter->stats.tncrs; + regs_buff[42] = adapter->stats.sec; + regs_buff[43] = adapter->stats.htdpmc; + regs_buff[44] = adapter->stats.rlec; + regs_buff[45] = adapter->stats.xonrxc; + regs_buff[46] = adapter->stats.xontxc; + regs_buff[47] = adapter->stats.xoffrxc; + regs_buff[48] = adapter->stats.xofftxc; + regs_buff[49] = adapter->stats.fcruc; + regs_buff[50] = adapter->stats.prc64; + regs_buff[51] = adapter->stats.prc127; + regs_buff[52] = adapter->stats.prc255; + regs_buff[53] = adapter->stats.prc511; + regs_buff[54] = adapter->stats.prc1023; + regs_buff[55] = adapter->stats.prc1522; + regs_buff[56] = adapter->stats.gprc; + regs_buff[57] = adapter->stats.bprc; + regs_buff[58] = adapter->stats.mprc; + regs_buff[59] = adapter->stats.gptc; + regs_buff[60] = adapter->stats.gorc; + regs_buff[61] = 
adapter->stats.gotc; + regs_buff[62] = adapter->stats.rnbc; + regs_buff[63] = adapter->stats.ruc; + regs_buff[64] = adapter->stats.rfc; + regs_buff[65] = adapter->stats.roc; + regs_buff[66] = adapter->stats.rjc; + regs_buff[67] = adapter->stats.mgprc; + regs_buff[68] = adapter->stats.mgpdc; + regs_buff[69] = adapter->stats.mgptc; + regs_buff[70] = adapter->stats.tor; + regs_buff[71] = adapter->stats.tot; + regs_buff[72] = adapter->stats.tpr; + regs_buff[73] = adapter->stats.tpt; + regs_buff[74] = adapter->stats.ptc64; + regs_buff[75] = adapter->stats.ptc127; + regs_buff[76] = adapter->stats.ptc255; + regs_buff[77] = adapter->stats.ptc511; + regs_buff[78] = adapter->stats.ptc1023; + regs_buff[79] = adapter->stats.ptc1522; + regs_buff[80] = adapter->stats.mptc; + regs_buff[81] = adapter->stats.bptc; + regs_buff[82] = adapter->stats.tsctc; + regs_buff[83] = adapter->stats.iac; + regs_buff[84] = adapter->stats.rpthc; + regs_buff[85] = adapter->stats.hgptc; + regs_buff[86] = adapter->stats.hgorc; + regs_buff[87] = adapter->stats.hgotc; + regs_buff[88] = adapter->stats.lenerrs; + regs_buff[89] = adapter->stats.scvpc; + regs_buff[90] = adapter->stats.hrmpc; + + for (i = 0; i < 4; i++) + regs_buff[91 + i] = rd32(IGC_SRRCTL(i)); + for (i = 0; i < 4; i++) + regs_buff[95 + i] = rd32(IGC_PSRTYPE(i)); + for (i = 0; i < 4; i++) + regs_buff[99 + i] = rd32(IGC_RDBAL(i)); + for (i = 0; i < 4; i++) + regs_buff[103 + i] = rd32(IGC_RDBAH(i)); + for (i = 0; i < 4; i++) + regs_buff[107 + i] = rd32(IGC_RDLEN(i)); + for (i = 0; i < 4; i++) + regs_buff[111 + i] = rd32(IGC_RDH(i)); + for (i = 0; i < 4; i++) + regs_buff[115 + i] = rd32(IGC_RDT(i)); + for (i = 0; i < 4; i++) + regs_buff[119 + i] = rd32(IGC_RXDCTL(i)); + + for (i = 0; i < 10; i++) + regs_buff[123 + i] = rd32(IGC_EITR(i)); + for (i = 0; i < 16; i++) + regs_buff[139 + i] = rd32(IGC_RAL(i)); + for (i = 0; i < 16; i++) + regs_buff[145 + i] = rd32(IGC_RAH(i)); + + for (i = 0; i < 4; i++) + regs_buff[149 + i] = rd32(IGC_TDBAL(i)); + for (i = 0; i < 4; i++) + regs_buff[152 + i] = rd32(IGC_TDBAH(i)); + for (i = 0; i < 4; i++) + regs_buff[156 + i] = rd32(IGC_TDLEN(i)); + for (i = 0; i < 4; i++) + regs_buff[160 + i] = rd32(IGC_TDH(i)); + for (i = 0; i < 4; i++) + regs_buff[164 + i] = rd32(IGC_TDT(i)); + for (i = 0; i < 4; i++) + regs_buff[168 + i] = rd32(IGC_TXDCTL(i)); +} + +static u32 igc_get_msglevel(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +static void igc_set_msglevel(struct net_device *netdev, u32 data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + adapter->msg_enable = data; +} + +static int igc_nway_reset(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + igc_reinit_locked(adapter); + return 0; +} + +static u32 igc_get_link(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_mac_info *mac = &adapter->hw.mac; + + /* If the link is not reported up to netdev, interrupts are disabled, + * and so the physical link state may have changed since we last + * looked. Set get_link_status to make sure that the true link + * state is interrogated, rather than pulling a cached and possibly + * stale link state from the driver. 
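+ * Forcing get_link_status makes the igc_has_link() call below query the hardware instead of returning that cached value.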
+ */ + if (!netif_carrier_ok(netdev)) + mac->get_link_status = 1; + + return igc_has_link(adapter); +} + +static int igc_get_eeprom_len(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + return adapter->hw.nvm.word_size * 2; +} + +static int igc_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + int first_word, last_word; + u16 *eeprom_buff; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + + eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16), + GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + if (hw->nvm.type == igc_nvm_eeprom_spi) { + ret_val = hw->nvm.ops.read(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + } else { + for (i = 0; i < last_word - first_word + 1; i++) { + ret_val = hw->nvm.ops.read(hw, first_word + i, 1, + &eeprom_buff[i]); + if (ret_val) + break; + } + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), + eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int igc_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + int max_len, first_word, last_word, ret_val = 0; + u16 *eeprom_buff; + void *ptr; + u16 i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if (hw->mac.type >= igc_i225 && + !igc_get_flash_presence_i225(hw)) { + return -EOPNOTSUPP; + } + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EFAULT; + + max_len = hw->nvm.word_size * 2; + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = (void *)eeprom_buff; + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word + * only the second byte of the word is being modified + */ + ret_val = hw->nvm.ops.read(hw, first_word, 1, + &eeprom_buff[0]); + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && ret_val == 0) { + /* need read/modify/write of last changed EEPROM word + * only the first byte of the word is being modified + */ + ret_val = hw->nvm.ops.read(hw, last_word, 1, + &eeprom_buff[last_word - first_word]); + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]); + + ret_val = hw->nvm.ops.write(hw, first_word, + last_word - first_word + 1, eeprom_buff); + + /* Update the checksum if nvm write succeeded */ + if (ret_val == 0) + hw->nvm.ops.update(hw); + + /* check if need: igc_set_fw_version(adapter); */ + kfree(eeprom_buff); + return ret_val; +} + +static void igc_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = IGC_MAX_RXD; + ring->tx_max_pending = IGC_MAX_TXD; + ring->rx_pending = adapter->rx_ring_count; + 
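+ /* pending counts are descriptors per ring; igc_set_ringparam() below clamps them to IGC_MIN/MAX and rounds to a multiple of 8 */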
ring->tx_pending = adapter->tx_ring_count; +} + +static int igc_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_ring *temp_ring; + u16 new_rx_count, new_tx_count; + int i, err = 0; + + if (ring->rx_mini_pending || ring->rx_jumbo_pending) + return -EINVAL; + + new_rx_count = min_t(u32, ring->rx_pending, IGC_MAX_RXD); + new_rx_count = max_t(u16, new_rx_count, IGC_MIN_RXD); + new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); + + new_tx_count = min_t(u32, ring->tx_pending, IGC_MAX_TXD); + new_tx_count = max_t(u16, new_tx_count, IGC_MIN_TXD); + new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); + + if (new_tx_count == adapter->tx_ring_count && + new_rx_count == adapter->rx_ring_count) { + /* nothing to do */ + return 0; + } + + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (!netif_running(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->count = new_rx_count; + adapter->tx_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + goto clear_reset; + } + + if (adapter->num_tx_queues > adapter->num_rx_queues) + temp_ring = vmalloc(array_size(sizeof(struct igc_ring), + adapter->num_tx_queues)); + else + temp_ring = vmalloc(array_size(sizeof(struct igc_ring), + adapter->num_rx_queues)); + + if (!temp_ring) { + err = -ENOMEM; + goto clear_reset; + } + + igc_down(adapter); + + /* We can't just free everything and then setup again, + * because the ISRs in MSI-X mode get passed pointers + * to the Tx and Rx ring structs. + */ + if (new_tx_count != adapter->tx_ring_count) { + for (i = 0; i < adapter->num_tx_queues; i++) { + memcpy(&temp_ring[i], adapter->tx_ring[i], + sizeof(struct igc_ring)); + + temp_ring[i].count = new_tx_count; + err = igc_setup_tx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + igc_free_tx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + igc_free_tx_resources(adapter->tx_ring[i]); + + memcpy(adapter->tx_ring[i], &temp_ring[i], + sizeof(struct igc_ring)); + } + + adapter->tx_ring_count = new_tx_count; + } + + if (new_rx_count != adapter->rx_ring_count) { + for (i = 0; i < adapter->num_rx_queues; i++) { + memcpy(&temp_ring[i], adapter->rx_ring[i], + sizeof(struct igc_ring)); + + temp_ring[i].count = new_rx_count; + err = igc_setup_rx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + igc_free_rx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + igc_free_rx_resources(adapter->rx_ring[i]); + + memcpy(adapter->rx_ring[i], &temp_ring[i], + sizeof(struct igc_ring)); + } + + adapter->rx_ring_count = new_rx_count; + } +err_setup: + igc_up(adapter); + vfree(temp_ring); +clear_reset: + clear_bit(__IGC_RESETTING, &adapter->state); + return err; +} + +static void igc_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + + pause->autoneg = + (adapter->fc_autoneg ? 
AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (hw->fc.current_mode == igc_fc_rx_pause) { + pause->rx_pause = 1; + } else if (hw->fc.current_mode == igc_fc_tx_pause) { + pause->tx_pause = 1; + } else if (hw->fc.current_mode == igc_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int igc_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + int retval = 0; + + adapter->fc_autoneg = pause->autoneg; + + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (adapter->fc_autoneg == AUTONEG_ENABLE) { + hw->fc.requested_mode = igc_fc_default; + if (netif_running(adapter->netdev)) { + igc_down(adapter); + igc_up(adapter); + } else { + igc_reset(adapter); + } + } else { + if (pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = igc_fc_full; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = igc_fc_rx_pause; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = igc_fc_tx_pause; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = igc_fc_none; + + hw->fc.current_mode = hw->fc.requested_mode; + + retval = ((hw->phy.media_type == igc_media_type_copper) ? + igc_force_mac_fc(hw) : igc_setup_link(hw)); + } + + clear_bit(__IGC_RESETTING, &adapter->state); + return retval; +} + +static int igc_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + if (adapter->rx_itr_setting <= 3) + ec->rx_coalesce_usecs = adapter->rx_itr_setting; + else + ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; + + if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) { + if (adapter->tx_itr_setting <= 3) + ec->tx_coalesce_usecs = adapter->tx_itr_setting; + else + ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; + } + + return 0; +} + +static int igc_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + int i; + + if (ec->rx_max_coalesced_frames || + ec->rx_coalesce_usecs_irq || + ec->rx_max_coalesced_frames_irq || + ec->tx_max_coalesced_frames || + ec->tx_coalesce_usecs_irq || + ec->stats_block_coalesce_usecs || + ec->use_adaptive_rx_coalesce || + ec->use_adaptive_tx_coalesce || + ec->pkt_rate_low || + ec->rx_coalesce_usecs_low || + ec->rx_max_coalesced_frames_low || + ec->tx_coalesce_usecs_low || + ec->tx_max_coalesced_frames_low || + ec->pkt_rate_high || + ec->rx_coalesce_usecs_high || + ec->rx_max_coalesced_frames_high || + ec->tx_coalesce_usecs_high || + ec->tx_max_coalesced_frames_high || + ec->rate_sample_interval) + return -ENOTSUPP; + + if (ec->rx_coalesce_usecs > IGC_MAX_ITR_USECS || + (ec->rx_coalesce_usecs > 3 && + ec->rx_coalesce_usecs < IGC_MIN_ITR_USECS) || + ec->rx_coalesce_usecs == 2) + return -EINVAL; + + if (ec->tx_coalesce_usecs > IGC_MAX_ITR_USECS || + (ec->tx_coalesce_usecs > 3 && + ec->tx_coalesce_usecs < IGC_MIN_ITR_USECS) || + ec->tx_coalesce_usecs == 2) + return -EINVAL; + + if ((adapter->flags & IGC_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs) + return -EINVAL; + + /* If ITR is disabled, disable DMAC */ + if (ec->rx_coalesce_usecs == 0) { + if (adapter->flags & IGC_FLAG_DMAC) + adapter->flags &= ~IGC_FLAG_DMAC; + } + + /* convert to rate of irq's per second */ + if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) + adapter->rx_itr_setting = ec->rx_coalesce_usecs; + else + adapter->rx_itr_setting = 
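/* settings of 3 and below are mode flags kept verbatim; larger values are stored in 0.25 usec register units, hence the << 2 */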
ec->rx_coalesce_usecs << 2; + + /* convert to rate of irq's per second */ + if (adapter->flags & IGC_FLAG_QUEUE_PAIRS) + adapter->tx_itr_setting = adapter->rx_itr_setting; + else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3) + adapter->tx_itr_setting = ec->tx_coalesce_usecs; + else + adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igc_q_vector *q_vector = adapter->q_vector[i]; + + q_vector->tx.work_limit = adapter->tx_work_limit; + if (q_vector->rx.ring) + q_vector->itr_val = adapter->rx_itr_setting; + else + q_vector->itr_val = adapter->tx_itr_setting; + if (q_vector->itr_val && q_vector->itr_val <= 3) + q_vector->itr_val = IGC_START_ITR; + q_vector->set_itr = 1; + } + + return 0; +} + +void igc_write_rss_indir_tbl(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 reg = IGC_RETA(0); + u32 shift = 0; + int i = 0; + + while (i < IGC_RETA_SIZE) { + u32 val = 0; + int j; + + for (j = 3; j >= 0; j--) { + val <<= 8; + val |= adapter->rss_indir_tbl[i + j]; + } + + wr32(reg, val << shift); + reg += 4; + i += 4; + } +} + +static u32 igc_get_rxfh_indir_size(struct net_device *netdev) +{ + return IGC_RETA_SIZE; +} + +static int igc_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + int i; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!indir) + return 0; + for (i = 0; i < IGC_RETA_SIZE; i++) + indir[i] = adapter->rss_indir_tbl[i]; + + return 0; +} + +static int igc_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + u32 num_queues; + int i; + + /* We do not allow change in unsupported parameters */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + if (!indir) + return 0; + + num_queues = adapter->rss_queues; + + /* Verify user input. 
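+ * Each redirection entry must reference one of the rss_queues currently in use.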
*/ + for (i = 0; i < IGC_RETA_SIZE; i++) + if (indir[i] >= num_queues) + return -EINVAL; + + for (i = 0; i < IGC_RETA_SIZE; i++) + adapter->rss_indir_tbl[i] = indir[i]; + + igc_write_rss_indir_tbl(adapter); + + return 0; +} + +static unsigned int igc_max_channels(struct igc_adapter *adapter) +{ + return igc_get_max_rss_queues(adapter); +} + +static void igc_get_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + /* Report maximum channels */ + ch->max_combined = igc_max_channels(adapter); + + /* Report info for other vector */ + if (adapter->flags & IGC_FLAG_HAS_MSIX) { + ch->max_other = NON_Q_VECTORS; + ch->other_count = NON_Q_VECTORS; + } + + ch->combined_count = adapter->rss_queues; +} + +static int igc_set_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + unsigned int count = ch->combined_count; + unsigned int max_combined = 0; + + /* Verify they are not requesting separate vectors */ + if (!count || ch->rx_count || ch->tx_count) + return -EINVAL; + + /* Verify other_count is valid and has not been changed */ + if (ch->other_count != NON_Q_VECTORS) + return -EINVAL; + + /* Verify the number of channels doesn't exceed hw limits */ + max_combined = igc_max_channels(adapter); + if (count > max_combined) + return -EINVAL; + + if (count != adapter->rss_queues) { + adapter->rss_queues = count; + igc_set_flag_queue_pairs(adapter, max_combined); + + /* Hardware has to reinitialize queues and interrupts to + * match the new configuration. + */ + return igc_reinit_queues(adapter); + } + + return 0; +} + +static u32 igc_get_priv_flags(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + u32 priv_flags = 0; + + if (adapter->flags & IGC_FLAG_RX_LEGACY) + priv_flags |= IGC_PRIV_FLAGS_LEGACY_RX; + + return priv_flags; +} + +static int igc_set_priv_flags(struct net_device *netdev, u32 priv_flags) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + unsigned int flags = adapter->flags; + + flags &= ~IGC_FLAG_RX_LEGACY; + if (priv_flags & IGC_PRIV_FLAGS_LEGACY_RX) + flags |= IGC_FLAG_RX_LEGACY; + + if (flags != adapter->flags) { + adapter->flags = flags; + + /* reset interface to repopulate queues */ + if (netif_running(netdev)) + igc_reinit_locked(adapter); + } + + return 0; +} + +static int igc_ethtool_begin(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + pm_runtime_get_sync(&adapter->pdev->dev); + return 0; +} + +static void igc_ethtool_complete(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + pm_runtime_put(&adapter->pdev->dev); +} + +static int igc_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 status; + u32 speed; + + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + + /* supported link modes */ + ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 2500baseT_Full); + + /* twisted pair */ + 
cmd->base.port = PORT_TP; + cmd->base.phy_address = hw->phy.addr; + + /* advertising link modes */ + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full); + + /* set autoneg settings */ + if (hw->mac.autoneg == 1) { + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Autoneg); + } + + switch (hw->fc.requested_mode) { + case igc_fc_full: + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + break; + case igc_fc_rx_pause: + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); + break; + case igc_fc_tx_pause: + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); + break; + default: + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); + } + + status = rd32(IGC_STATUS); + + if (status & IGC_STATUS_LU) { + if (status & IGC_STATUS_SPEED_1000) { + /* For I225, STATUS will indicate 1G speed in both + * 1 Gbps and 2.5 Gbps link modes. + * An additional bit is used + * to differentiate between 1 Gbps and 2.5 Gbps. + */ + if (hw->mac.type == igc_i225 && + (status & IGC_STATUS_SPEED_2500)) { + speed = SPEED_2500; + hw_dbg("2500 Mbs, "); + } else { + speed = SPEED_1000; + hw_dbg("1000 Mbs, "); + } + } else if (status & IGC_STATUS_SPEED_100) { + speed = SPEED_100; + hw_dbg("100 Mbs, "); + } else { + speed = SPEED_10; + hw_dbg("10 Mbs, "); + } + if ((status & IGC_STATUS_FD) || + hw->phy.media_type != igc_media_type_copper) + cmd->base.duplex = DUPLEX_FULL; + else + cmd->base.duplex = DUPLEX_HALF; + } else { + speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + cmd->base.speed = speed; + if (hw->mac.autoneg) + cmd->base.autoneg = AUTONEG_ENABLE; + else + cmd->base.autoneg = AUTONEG_DISABLE; + + /* MDI-X => 2; MDI =>1; Invalid =>0 */ + if (hw->phy.media_type == igc_media_type_copper) + cmd->base.eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : + ETH_TP_MDI; + else + cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; + + if (hw->phy.mdix == AUTO_ALL_MODES) + cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + cmd->base.eth_tp_mdix_ctrl = hw->phy.mdix; + + return 0; +} + +static int igc_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 advertising; + + /* When adapter in resetting mode, autoneg/speed/duplex + * cannot be changed + */ + if (igc_check_reset_block(hw)) { + dev_err(&adapter->pdev->dev, + "Cannot change link characteristics when reset is active.\n"); + return -EINVAL; + } + + /* MDI setting is only allowed when autoneg enabled because + * some hardware doesn't allow MDI setting when speed or + * duplex is forced. 
+ */ + if (cmd->base.eth_tp_mdix_ctrl) { + if (cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO && + cmd->base.autoneg != AUTONEG_ENABLE) { + dev_err(&adapter->pdev->dev, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); + return -EINVAL; + } + } + + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + + if (cmd->base.autoneg == AUTONEG_ENABLE) { + hw->mac.autoneg = 1; + hw->phy.autoneg_advertised = advertising; + if (adapter->fc_autoneg) + hw->fc.requested_mode = igc_fc_default; + } else { + /* calling this overrides forced MDI setting */ + dev_info(&adapter->pdev->dev, + "Force mode currently not supported\n"); + } + + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (cmd->base.eth_tp_mdix_ctrl) { + /* fix up the value for auto (3 => 0) as zero is mapped + * internally to auto + */ + if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + hw->phy.mdix = AUTO_ALL_MODES; + else + hw->phy.mdix = cmd->base.eth_tp_mdix_ctrl; + } + + /* reset the link */ + if (netif_running(adapter->netdev)) { + igc_down(adapter); + igc_up(adapter); + } else { + igc_reset(adapter); + } + + clear_bit(__IGC_RESETTING, &adapter->state); + + return 0; +} + +static const struct ethtool_ops igc_ethtool_ops = { + .get_drvinfo = igc_get_drvinfo, + .get_regs_len = igc_get_regs_len, + .get_regs = igc_get_regs, + .get_msglevel = igc_get_msglevel, + .set_msglevel = igc_set_msglevel, + .nway_reset = igc_nway_reset, + .get_link = igc_get_link, + .get_eeprom_len = igc_get_eeprom_len, + .get_eeprom = igc_get_eeprom, + .set_eeprom = igc_set_eeprom, + .get_ringparam = igc_get_ringparam, + .set_ringparam = igc_set_ringparam, + .get_pauseparam = igc_get_pauseparam, + .set_pauseparam = igc_set_pauseparam, + .get_coalesce = igc_get_coalesce, + .set_coalesce = igc_set_coalesce, + .get_rxfh_indir_size = igc_get_rxfh_indir_size, + .get_rxfh = igc_get_rxfh, + .set_rxfh = igc_set_rxfh, + .get_channels = igc_get_channels, + .set_channels = igc_set_channels, + .get_priv_flags = igc_get_priv_flags, + .set_priv_flags = igc_set_priv_flags, + .begin = igc_ethtool_begin, + .complete = igc_ethtool_complete, + .get_link_ksettings = igc_get_link_ksettings, + .set_link_ksettings = igc_set_link_ksettings, +}; + +void igc_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &igc_ethtool_ops; +} diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h index c50414f48f0d..7c88b7bd4799 100644 --- a/drivers/net/ethernet/intel/igc/igc_hw.h +++ b/drivers/net/ethernet/intel/igc/igc_hw.h @@ -55,6 +55,7 @@ enum igc_media_type { enum igc_nvm_type { igc_nvm_unknown = 0, + igc_nvm_eeprom_spi, igc_nvm_flash_hw, igc_nvm_invm, }; diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index f20183037fb2..87a11879bf2d 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -12,6 +12,8 @@ #define DRV_VERSION "0.0.1-k" #define DRV_SUMMARY "Intel(R) 2.5G Ethernet Linux Driver" +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) + static int debug = -1; MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); @@ -66,7 +68,7 @@ enum latency_range { latency_invalid = 255 }; -static void igc_reset(struct igc_adapter *adapter) +void igc_reset(struct igc_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; struct igc_hw *hw = &adapter->hw; @@ 
-150,7 +152,7 @@ static void igc_get_hw_control(struct igc_adapter *adapter) * * Free all transmit software resources */ -static void igc_free_tx_resources(struct igc_ring *tx_ring) +void igc_free_tx_resources(struct igc_ring *tx_ring) { igc_clean_tx_ring(tx_ring); @@ -261,7 +263,7 @@ static void igc_clean_all_tx_rings(struct igc_adapter *adapter) * * Return 0 on success, negative on failure */ -static int igc_setup_tx_resources(struct igc_ring *tx_ring) +int igc_setup_tx_resources(struct igc_ring *tx_ring) { struct device *dev = tx_ring->dev; int size = 0; @@ -381,7 +383,7 @@ static void igc_clean_all_rx_rings(struct igc_adapter *adapter) * * Free all receive software resources */ -static void igc_free_rx_resources(struct igc_ring *rx_ring) +void igc_free_rx_resources(struct igc_ring *rx_ring) { igc_clean_rx_ring(rx_ring); @@ -418,7 +420,7 @@ static void igc_free_all_rx_resources(struct igc_adapter *adapter) * * Returns 0 on success, negative on failure */ -static int igc_setup_rx_resources(struct igc_ring *rx_ring) +int igc_setup_rx_resources(struct igc_ring *rx_ring) { struct device *dev = rx_ring->dev; int size, desc_len; @@ -1703,7 +1705,7 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) * igc_up - Open the interface and prepare it to handle traffic * @adapter: board private structure */ -static void igc_up(struct igc_adapter *adapter) +void igc_up(struct igc_adapter *adapter) { struct igc_hw *hw = &adapter->hw; int i = 0; @@ -1748,7 +1750,7 @@ static void igc_nfc_filter_exit(struct igc_adapter *adapter) * igc_down - Close the interface * @adapter: board private structure */ -static void igc_down(struct igc_adapter *adapter) +void igc_down(struct igc_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct igc_hw *hw = &adapter->hw; @@ -1810,7 +1812,7 @@ static void igc_down(struct igc_adapter *adapter) igc_clean_all_rx_rings(adapter); } -static void igc_reinit_locked(struct igc_adapter *adapter) +void igc_reinit_locked(struct igc_adapter *adapter) { WARN_ON(in_interrupt()); while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) @@ -1922,7 +1924,7 @@ static void igc_configure(struct igc_adapter *adapter) /** * igc_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table - * @adapter: Pointer to adapter structure + * @adapter: address of board private structure * @index: Index of the RAR entry which need to be synced with MAC table */ static void igc_rar_set_index(struct igc_adapter *adapter, u32 index) @@ -2298,7 +2300,7 @@ static void igc_update_phy_info(struct timer_list *t) * igc_has_link - check shared code for link and determine up/down * @adapter: pointer to driver private info */ -static bool igc_has_link(struct igc_adapter *adapter) +bool igc_has_link(struct igc_adapter *adapter) { struct igc_hw *hw = &adapter->hw; bool link_active = false; @@ -2956,22 +2958,21 @@ static int igc_alloc_q_vector(struct igc_adapter *adapter, { struct igc_q_vector *q_vector; struct igc_ring *ring; - int ring_count, size; + int ring_count; /* igc only supports 1 Tx and/or 1 Rx queue per vector */ if (txr_count > 1 || rxr_count > 1) return -ENOMEM; ring_count = txr_count + rxr_count; - size = sizeof(struct igc_q_vector) + - (sizeof(struct igc_ring) * ring_count); /* allocate q_vector and rings */ q_vector = adapter->q_vector[v_idx]; if (!q_vector) - q_vector = kzalloc(size, GFP_KERNEL); + q_vector = kzalloc(struct_size(q_vector, ring, ring_count), + GFP_KERNEL); else - memset(q_vector, 0, size); + memset(q_vector, 0, 
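/* struct_size(p, member, n), from <linux/overflow.h>, computes
 * sizeof(*p) + n * sizeof(*p->member) and saturates to SIZE_MAX on
 * overflow, so an oversized ring_count fails the allocation instead
 * of wrapping into an undersized buffer
 */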
struct_size(q_vector, ring, ring_count)); if (!q_vector) return -ENOMEM; @@ -3501,6 +3502,57 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg) return value; } +int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx) +{ + struct pci_dev *pdev = adapter->pdev; + struct igc_mac_info *mac = &adapter->hw.mac; + + mac->autoneg = 0; + + /* Make sure dplx is at most 1 bit and lsb of speed is not set + * for the switch() below to work + */ + if ((spd & 1) || (dplx & ~1)) + goto err_inval; + + switch (spd + dplx) { + case SPEED_10 + DUPLEX_HALF: + mac->forced_speed_duplex = ADVERTISE_10_HALF; + break; + case SPEED_10 + DUPLEX_FULL: + mac->forced_speed_duplex = ADVERTISE_10_FULL; + break; + case SPEED_100 + DUPLEX_HALF: + mac->forced_speed_duplex = ADVERTISE_100_HALF; + break; + case SPEED_100 + DUPLEX_FULL: + mac->forced_speed_duplex = ADVERTISE_100_FULL; + break; + case SPEED_1000 + DUPLEX_FULL: + mac->autoneg = 1; + adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; + break; + case SPEED_1000 + DUPLEX_HALF: /* not supported */ + goto err_inval; + case SPEED_2500 + DUPLEX_FULL: + mac->autoneg = 1; + adapter->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL; + break; + case SPEED_2500 + DUPLEX_HALF: /* not supported */ + default: + goto err_inval; + } + + /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ + adapter->hw.phy.mdix = AUTO_ALL_MODES; + + return 0; + +err_inval: + dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n"); + return -EINVAL; +} + /** * igc_probe - Device Initialization Routine * @pdev: PCI device information struct @@ -3568,7 +3620,7 @@ static int igc_probe(struct pci_dev *pdev, hw = &adapter->hw; hw->back = adapter; adapter->port_num = hw->bus.func; - adapter->msg_enable = GENMASK(debug - 1, 0); + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); err = pci_save_state(pdev); if (err) @@ -3584,7 +3636,7 @@ static int igc_probe(struct pci_dev *pdev, hw->hw_addr = adapter->io_addr; netdev->netdev_ops = &igc_netdev_ops; - + igc_set_ethtool_ops(netdev); netdev->watchdog_timeo = 5 * HZ; netdev->mem_start = pci_resource_start(pdev, 0); @@ -3744,8 +3796,8 @@ static struct pci_driver igc_driver = { .remove = igc_remove, }; -static void igc_set_flag_queue_pairs(struct igc_adapter *adapter, - const u32 max_rss_queues) +void igc_set_flag_queue_pairs(struct igc_adapter *adapter, + const u32 max_rss_queues) { /* Determine if we need to pair queues. 
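* A paired queue shares one q_vector, and with it one interrupt, between a Tx ring and an Rx ring.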
*/ /* If rss_queues > half of max_rss_queues, pair the queues in @@ -3757,7 +3809,7 @@ static void igc_set_flag_queue_pairs(struct igc_adapter *adapter, adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS; } -static unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter) +unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter) { unsigned int max_rss_queues; @@ -3837,6 +3889,32 @@ static int igc_sw_init(struct igc_adapter *adapter) } /** + * igc_reinit_queues - reinitialize the queues after a configuration change + * @adapter: pointer to adapter structure + */ +int igc_reinit_queues(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + int err = 0; + + if (netif_running(netdev)) + igc_close(netdev); + + igc_reset_interrupt_capability(adapter); + + if (igc_init_interrupt_scheme(adapter, true)) { + dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + if (netif_running(netdev)) + err = igc_open(netdev); + + return err; +} + +/** * igc_get_hw_dev - return device + * @hw: pointer to hardware structure * diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c index 38e43e6fc1c7..4c8f96a9a148 100644 --- a/drivers/net/ethernet/intel/igc/igc_phy.c +++ b/drivers/net/ethernet/intel/igc/igc_phy.c @@ -152,7 +152,6 @@ void igc_power_down_phy_copper(struct igc_hw *hw) s32 igc_check_downshift(struct igc_hw *hw) { struct igc_phy_info *phy = &hw->phy; - u16 phy_data, offset, mask; s32 ret_val; switch (phy->type) { @@ -161,15 +160,8 @@ s32 igc_check_downshift(struct igc_hw *hw) /* speed downshift not supported */ phy->speed_downgraded = false; ret_val = 0; - goto out; } - ret_val = phy->ops.read_reg(hw, offset, &phy_data); - - if (!ret_val) - phy->speed_downgraded = (phy_data & mask) ? true : false; - -out: return ret_val; } diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h index a1bd3216c906..5afe7a8d3faf 100644 --- a/drivers/net/ethernet/intel/igc/igc_regs.h +++ b/drivers/net/ethernet/intel/igc/igc_regs.h @@ -80,6 +80,9 @@ /* MSI-X Table Register Descriptions */ #define IGC_PBACL 0x05B68 /* MSIx PBA Clear - R/W 1 to clear */ +/* Redirection Table - RW Array */ +#define IGC_RETA(_i) (0x05C00 + ((_i) * 4)) + /* Receive Register Descriptions */ #define IGC_RCTL 0x00100 /* Rx Control - RW */ #define IGC_SRRCTL(_n) (0x0C00C + ((_n) * 0x40)) @@ -188,7 +191,6 @@ #define IGC_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ #define IGC_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ #define IGC_LENERRS 0x04138 /* Length Errors Count */ -#define IGC_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */ #define IGC_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */ /* Management registers */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 1e49716f52bc..109f8de5a1c2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -1048,7 +1048,7 @@ mac_reset_top: * clear the multicast table. Also reset num_rar_entries to 128, * since we modify this value when programming the SAN MAC address.
*/ - hw->mac.num_rar_entries = 128; + hw->mac.num_rar_entries = IXGBE_82599_RAR_ENTRIES; hw->mac.ops.init_rx_addrs(hw); /* Store the permanent SAN mac address */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 62e6499e4146..cc3196ae5aea 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -836,12 +836,10 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, struct ixgbe_ring *ring; int node = NUMA_NO_NODE; int cpu = -1; - int ring_count, size; + int ring_count; u8 tcs = adapter->hw_tcs; ring_count = txr_count + rxr_count + xdp_count; - size = sizeof(struct ixgbe_q_vector) + - (sizeof(struct ixgbe_ring) * ring_count); /* customize cpu for Flow Director mapping */ if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) { @@ -855,9 +853,11 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, } /* allocate q_vector and rings */ - q_vector = kzalloc_node(size, GFP_KERNEL, node); + q_vector = kzalloc_node(struct_size(q_vector, ring, ring_count), + GFP_KERNEL, node); if (!q_vector) - q_vector = kzalloc(size, GFP_KERNEL); + q_vector = kzalloc(struct_size(q_vector, ring, ring_count), + GFP_KERNEL); if (!q_vector) return -ENOMEM; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index b53087a980ef..38c430b94ae3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -10280,9 +10280,6 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp) xdp->prog_id = adapter->xdp_prog ? adapter->xdp_prog->aux->id : 0; return 0; - case XDP_QUERY_XSK_UMEM: - return ixgbe_xsk_umem_query(adapter, &xdp->xsk.umem, - xdp->xsk.queue_id); case XDP_SETUP_XSK_UMEM: return ixgbe_xsk_umem_setup(adapter, xdp->xsk.umem, xdp->xsk.queue_id); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h index 53d4089f5644..d93a690aff74 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h @@ -30,8 +30,6 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring); struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter, struct ixgbe_ring *ring); -int ixgbe_xsk_umem_query(struct ixgbe_adapter *adapter, struct xdp_umem **umem, - u16 qid); int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem, u16 qid); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index 65c3e2c979d4..98870707b51a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -174,23 +174,6 @@ static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid) return 0; } -int ixgbe_xsk_umem_query(struct ixgbe_adapter *adapter, struct xdp_umem **umem, - u16 qid) -{ - if (qid >= adapter->num_rx_queues) - return -EINVAL; - - if (adapter->xsk_umems) { - if (qid >= adapter->num_xsk_umems) - return -EINVAL; - *umem = adapter->xsk_umems[qid]; - return 0; - } - - *umem = NULL; - return 0; -} - int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem, u16 qid) { diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 32ac9045cdae..f9bb890733b5 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -112,10 +112,12 @@ struct 
ltq_etop_priv { static int ltq_etop_alloc_skb(struct ltq_etop_chan *ch) { + struct ltq_etop_priv *priv = netdev_priv(ch->netdev); + ch->skb[ch->dma.desc] = netdev_alloc_skb(ch->netdev, MAX_DMA_DATA_LEN); if (!ch->skb[ch->dma.desc]) return -ENOMEM; - ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(NULL, + ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(&priv->pdev->dev, ch->skb[ch->dma.desc]->data, MAX_DMA_DATA_LEN, DMA_FROM_DEVICE); ch->dma.desc_base[ch->dma.desc].addr = @@ -487,7 +489,7 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev) netif_trans_update(dev); spin_lock_irqsave(&priv->lock, flags); - desc->addr = ((unsigned int) dma_map_single(NULL, skb->data, len, + desc->addr = ((unsigned int) dma_map_single(&priv->pdev->dev, skb->data, len, DMA_TO_DEVICE)) - byte_offset; wmb(); desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP | diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 2f427271a793..292a668ce88e 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2879,7 +2879,7 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev) ret = mv643xx_eth_shared_of_probe(pdev); if (ret) - return ret; + goto err_put_clk; pd = dev_get_platdata(&pdev->dev); msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ? @@ -2887,6 +2887,11 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev) infer_hw_params(msp); return 0; + +err_put_clk: + if (!IS_ERR(msp->clk)) + clk_disable_unprepare(msp->clk); + return ret; } static int mv643xx_eth_shared_remove(struct platform_device *pdev) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 9d4568eb2297..d134d3538f9b 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -27,6 +27,7 @@ #include <linux/of_irq.h> #include <linux/of_mdio.h> #include <linux/of_net.h> +#include <linux/phy/phy.h> #include <linux/phy.h> #include <linux/phylink.h> #include <linux/platform_device.h> @@ -436,6 +437,7 @@ struct mvneta_port { struct device_node *dn; unsigned int tx_csum_limit; struct phylink *phylink; + struct phy *comphy; struct mvneta_bm *bm_priv; struct mvneta_bm_pool *pool_long; @@ -3151,6 +3153,8 @@ static void mvneta_start_dev(struct mvneta_port *pp) { int cpu; + WARN_ON(phy_power_on(pp->comphy)); + mvneta_max_rx_size_set(pp, pp->pkt_size); mvneta_txq_max_tx_size_set(pp, pp->pkt_size); @@ -3213,6 +3217,8 @@ static void mvneta_stop_dev(struct mvneta_port *pp) mvneta_tx_reset(pp); mvneta_rx_reset(pp); + + WARN_ON(phy_power_off(pp->comphy)); } static void mvneta_percpu_enable(void *arg) @@ -3338,6 +3344,7 @@ static int mvneta_set_mac_addr(struct net_device *dev, void *addr) static void mvneta_validate(struct net_device *ndev, unsigned long *supported, struct phylink_link_state *state) { + struct mvneta_port *pp = netdev_priv(ndev); __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; /* We only support QSGMII, SGMII, 802.3z and RGMII modes */ @@ -3358,8 +3365,13 @@ static void mvneta_validate(struct net_device *ndev, unsigned long *supported, phylink_set(mask, Pause); /* Half-duplex at speeds higher than 100Mbit is unsupported */ - phylink_set(mask, 1000baseT_Full); - phylink_set(mask, 1000baseX_Full); + if (pp->comphy || state->interface != PHY_INTERFACE_MODE_2500BASEX) { + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseX_Full); + } + if (pp->comphy || state->interface == PHY_INTERFACE_MODE_2500BASEX) { + 
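The lantiq_etop hunks above (and the pxa168_eth ones later in this diff) stop passing a NULL struct device to dma_map_single(): the DMA core needs the real bus device to resolve the mapping ops and track the mapping state. A minimal sketch of the corrected pattern; my_priv, my_chan and my_rx_refill are hypothetical names, and the dma_mapping_error() check is general good practice rather than something these hunks add:

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>

struct my_chan {
        struct sk_buff *skb;
        dma_addr_t addr;
};

struct my_priv {
        struct net_device *ndev;
        struct platform_device *pdev;   /* saved at probe time, as this patch does */
};

static int my_rx_refill(struct my_priv *priv, struct my_chan *ch, int len)
{
        ch->skb = netdev_alloc_skb(priv->ndev, len);
        if (!ch->skb)
                return -ENOMEM;

        /* Map against the device that performs the DMA, never against NULL. */
        ch->addr = dma_map_single(&priv->pdev->dev, ch->skb->data, len,
                                  DMA_FROM_DEVICE);
        if (dma_mapping_error(&priv->pdev->dev, ch->addr)) {
                dev_kfree_skb(ch->skb);
                return -ENOMEM;
        }
        return 0;
}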
phylink_set(mask, 2500baseX_Full); + } if (!phy_interface_mode_is_8023z(state->interface)) { /* 10M and 100M are only supported in non-802.3z mode */ @@ -3373,6 +3385,11 @@ static void mvneta_validate(struct net_device *ndev, unsigned long *supported, __ETHTOOL_LINK_MODE_MASK_NBITS); bitmap_and(state->advertising, state->advertising, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); + + /* We can only operate at 2500BaseX or 1000BaseX. If requested + * to advertise both, only report advertising at 2500BaseX. + */ + phylink_helper_basex_speed(state); } static int mvneta_mac_link_state(struct net_device *ndev, @@ -3384,7 +3401,9 @@ static int mvneta_mac_link_state(struct net_device *ndev, gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS); if (gmac_stat & MVNETA_GMAC_SPEED_1000) - state->speed = SPEED_1000; + state->speed = + state->interface == PHY_INTERFACE_MODE_2500BASEX ? + SPEED_2500 : SPEED_1000; else if (gmac_stat & MVNETA_GMAC_SPEED_100) state->speed = SPEED_100; else @@ -3499,12 +3518,20 @@ static void mvneta_mac_config(struct net_device *ndev, unsigned int mode, MVNETA_GMAC_FORCE_LINK_DOWN); } + /* When at 2.5G, the link partner can send frames with shortened * preambles. */ if (state->speed == SPEED_2500) new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE; + if (pp->comphy && + (state->interface == PHY_INTERFACE_MODE_SGMII || + state->interface == PHY_INTERFACE_MODE_1000BASEX || + state->interface == PHY_INTERFACE_MODE_2500BASEX)) + WARN_ON(phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, + state->interface)); + if (new_ctrl0 != gmac_ctrl0) mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0); if (new_ctrl2 != gmac_ctrl2) @@ -4404,7 +4431,7 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) if (phy_mode == PHY_INTERFACE_MODE_QSGMII) mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO); else if (phy_mode == PHY_INTERFACE_MODE_SGMII || - phy_mode == PHY_INTERFACE_MODE_1000BASEX) + phy_interface_mode_is_8023z(phy_mode)) mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO); else if (!phy_interface_mode_is_rgmii(phy_mode)) return -EINVAL; @@ -4421,6 +4448,7 @@ static int mvneta_probe(struct platform_device *pdev) struct mvneta_port *pp; struct net_device *dev; struct phylink *phylink; + struct phy *comphy; const char *dt_mac_addr; char hw_mac_addr[ETH_ALEN]; const char *mac_from; @@ -4446,6 +4474,14 @@ static int mvneta_probe(struct platform_device *pdev) goto err_free_irq; } + comphy = devm_of_phy_get(&pdev->dev, dn, NULL); + if (comphy == ERR_PTR(-EPROBE_DEFER)) { + err = -EPROBE_DEFER; + goto err_free_irq; + } else if (IS_ERR(comphy)) { + comphy = NULL; + } + phylink = phylink_create(dev, pdev->dev.fwnode, phy_mode, &mvneta_phylink_ops); if (IS_ERR(phylink)) { @@ -4462,6 +4498,7 @@ static int mvneta_probe(struct platform_device *pdev) pp = netdev_priv(dev); spin_lock_init(&pp->lock); pp->phylink = phylink; + pp->comphy = comphy; pp->phy_interface = phy_mode; pp->dn = dn; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index 398328f10743..96e3f0669032 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -402,8 +402,8 @@ #define MVPP2_GMAC_STATUS0_GMII_SPEED BIT(1) #define MVPP2_GMAC_STATUS0_MII_SPEED BIT(2) #define MVPP2_GMAC_STATUS0_FULL_DUPLEX BIT(3) -#define MVPP2_GMAC_STATUS0_RX_PAUSE BIT(6) -#define MVPP2_GMAC_STATUS0_TX_PAUSE BIT(7) +#define MVPP2_GMAC_STATUS0_RX_PAUSE BIT(4) +#define MVPP2_GMAC_STATUS0_TX_PAUSE BIT(5) #define 
MVPP2_GMAC_STATUS0_AN_COMPLETE BIT(11) #define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c #define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6 diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 5c0c26fd8363..191d9ce85b7e 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -965,6 +965,11 @@ mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask) } /* Port configuration routines */ +static bool mvpp2_is_xlg(phy_interface_t interface) +{ + return interface == PHY_INTERFACE_MODE_10GKR || + interface == PHY_INTERFACE_MODE_XAUI; +} static void mvpp22_gop_init_rgmii(struct mvpp2_port *port) { @@ -1101,7 +1106,7 @@ static void mvpp22_gop_unmask_irq(struct mvpp2_port *port) if (port->gop_id == 0) { /* Enable the XLG/GIG irqs for this port */ val = readl(port->base + MVPP22_XLG_EXT_INT_MASK); - if (port->phy_interface == PHY_INTERFACE_MODE_10GKR) + if (mvpp2_is_xlg(port->phy_interface)) val |= MVPP22_XLG_EXT_INT_MASK_XLG; else val |= MVPP22_XLG_EXT_INT_MASK_GIG; @@ -1181,9 +1186,7 @@ static void mvpp2_port_enable(struct mvpp2_port *port) u32 val; /* Only GOP port 0 has an XLG MAC */ - if (port->gop_id == 0 && - (port->phy_interface == PHY_INTERFACE_MODE_XAUI || - port->phy_interface == PHY_INTERFACE_MODE_10GKR)) { + if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) { val = readl(port->base + MVPP22_XLG_CTRL0_REG); val |= MVPP22_XLG_CTRL0_PORT_EN | MVPP22_XLG_CTRL0_MAC_RESET_DIS; @@ -1202,9 +1205,7 @@ static void mvpp2_port_disable(struct mvpp2_port *port) u32 val; /* Only GOP port 0 has an XLG MAC */ - if (port->gop_id == 0 && - (port->phy_interface == PHY_INTERFACE_MODE_XAUI || - port->phy_interface == PHY_INTERFACE_MODE_10GKR)) { + if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) { val = readl(port->base + MVPP22_XLG_CTRL0_REG); val &= ~MVPP22_XLG_CTRL0_PORT_EN; writel(val, port->base + MVPP22_XLG_CTRL0_REG); @@ -1377,13 +1378,9 @@ static void mvpp2_port_reset(struct mvpp2_port *port) for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++) mvpp2_read_count(port, &mvpp2_ethtool_regs[i]); - val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) & - ~MVPP2_GMAC_PORT_RESET_MASK; + val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) | + MVPP2_GMAC_PORT_RESET_MASK; writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); - - while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) & - MVPP2_GMAC_PORT_RESET_MASK) - continue; } /* Change maximum receive size of the port */ @@ -2459,8 +2456,7 @@ static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id) mvpp22_gop_mask_irq(port); - if (port->gop_id == 0 && - port->phy_interface == PHY_INTERFACE_MODE_10GKR) { + if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) { val = readl(port->base + MVPP22_XLG_INT_STAT); if (val & MVPP22_XLG_INT_STAT_LINK) { event = true; @@ -3150,8 +3146,7 @@ static void mvpp22_mode_reconfigure(struct mvpp2_port *port) ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG); ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK; - if (port->phy_interface == PHY_INTERFACE_MODE_XAUI || - port->phy_interface == PHY_INTERFACE_MODE_10GKR) + if (mvpp2_is_xlg(port->phy_interface)) ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G; else ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC; @@ -3159,9 +3154,7 @@ static void mvpp22_mode_reconfigure(struct mvpp2_port *port) writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG); } - if (port->gop_id == 0 && - (port->phy_interface == PHY_INTERFACE_MODE_XAUI || - port->phy_interface == 
PHY_INTERFACE_MODE_10GKR)) + if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) mvpp2_xlg_max_rx_size_set(port); else mvpp2_gmac_max_rx_size_set(port); @@ -4501,17 +4494,12 @@ static int mvpp2_phylink_mac_link_state(struct net_device *dev, static void mvpp2_mac_an_restart(struct net_device *dev) { struct mvpp2_port *port = netdev_priv(dev); - u32 val; + u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); - if (port->phy_interface != PHY_INTERFACE_MODE_SGMII) - return; - - val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); - /* The RESTART_AN bit is cleared by the h/w after restarting the AN - * process. - */ - val |= MVPP2_GMAC_IN_BAND_RESTART_AN | MVPP2_GMAC_IN_BAND_AUTONEG; - writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); + writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN, + port->base + MVPP2_GMAC_AUTONEG_CONFIG); + writel(val & ~MVPP2_GMAC_IN_BAND_RESTART_AN, + port->base + MVPP2_GMAC_AUTONEG_CONFIG); } static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode, @@ -4524,8 +4512,13 @@ static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode, if (state->pause & MLO_PAUSE_TX) ctrl0 |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN; + else + ctrl0 &= ~MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN; + if (state->pause & MLO_PAUSE_RX) ctrl0 |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN; + else + ctrl0 &= ~MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN; ctrl4 &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC; ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC | @@ -4538,83 +4531,130 @@ static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode, static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode, const struct phylink_link_state *state) { - u32 an, ctrl0, ctrl2, ctrl4; - - an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); - ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG); - ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG); - ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG); - - /* Force link down */ - an &= ~MVPP2_GMAC_FORCE_LINK_PASS; - an |= MVPP2_GMAC_FORCE_LINK_DOWN; - writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG); + u32 old_an, an; + u32 old_ctrl0, ctrl0; + u32 old_ctrl2, ctrl2; + u32 old_ctrl4, ctrl4; - /* Set the GMAC in a reset state */ - ctrl2 |= MVPP2_GMAC_PORT_RESET_MASK; - writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG); + old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); + old_ctrl0 = ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG); + old_ctrl2 = ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG); + old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG); an &= ~(MVPP2_GMAC_CONFIG_MII_SPEED | MVPP2_GMAC_CONFIG_GMII_SPEED | MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG | MVPP2_GMAC_CONFIG_FULL_DUPLEX | MVPP2_GMAC_AN_DUPLEX_EN | - MVPP2_GMAC_FORCE_LINK_DOWN); + MVPP2_GMAC_IN_BAND_AUTONEG | MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS); ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK; - ctrl2 &= ~(MVPP2_GMAC_PORT_RESET_MASK | MVPP2_GMAC_PCS_ENABLE_MASK); + ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PORT_RESET_MASK | + MVPP2_GMAC_PCS_ENABLE_MASK); + ctrl4 &= ~(MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN); + /* Configure port type */ if (phy_interface_mode_is_8023z(state->interface)) { - /* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can - * they negotiate duplex: they are always operating with a fixed - * speed of 1000/2500Mbps in full duplex, so force 1000/2500 - * speed and full duplex here. 
- */ - ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK; - an |= MVPP2_GMAC_CONFIG_GMII_SPEED | - MVPP2_GMAC_CONFIG_FULL_DUPLEX; - } else if (!phy_interface_mode_is_rgmii(state->interface)) { - an |= MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG; + ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK; + ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL; + ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS | + MVPP22_CTRL4_DP_CLK_SEL | + MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; + } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { + ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_INBAND_AN_MASK; + ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL; + ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS | + MVPP22_CTRL4_DP_CLK_SEL | + MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; + } else if (phy_interface_mode_is_rgmii(state->interface)) { + ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL; + ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL | + MVPP22_CTRL4_SYNC_BYPASS_DIS | + MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; } - if (state->duplex) - an |= MVPP2_GMAC_CONFIG_FULL_DUPLEX; + /* Configure advertisement bits */ if (phylink_test(state->advertising, Pause)) an |= MVPP2_GMAC_FC_ADV_EN; if (phylink_test(state->advertising, Asym_Pause)) an |= MVPP2_GMAC_FC_ADV_ASM_EN; - if (phy_interface_mode_is_8023z(state->interface) || - state->interface == PHY_INTERFACE_MODE_SGMII) { - an |= MVPP2_GMAC_IN_BAND_AUTONEG; - ctrl2 |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK; + /* Configure negotiation style */ + if (!phylink_autoneg_inband(mode)) { + /* Phy or fixed speed - no in-band AN */ + if (state->duplex) + an |= MVPP2_GMAC_CONFIG_FULL_DUPLEX; - ctrl4 &= ~(MVPP22_CTRL4_EXT_PIN_GMII_SEL | - MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN); - ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS | - MVPP22_CTRL4_DP_CLK_SEL | - MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; + if (state->speed == SPEED_1000 || state->speed == SPEED_2500) + an |= MVPP2_GMAC_CONFIG_GMII_SPEED; + else if (state->speed == SPEED_100) + an |= MVPP2_GMAC_CONFIG_MII_SPEED; if (state->pause & MLO_PAUSE_TX) ctrl4 |= MVPP22_CTRL4_TX_FC_EN; if (state->pause & MLO_PAUSE_RX) ctrl4 |= MVPP22_CTRL4_RX_FC_EN; - } else if (phy_interface_mode_is_rgmii(state->interface)) { - an |= MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS; + } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { + /* SGMII in-band mode receives the speed and duplex from + * the PHY. Flow control information is not received. */ + an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN | MVPP2_GMAC_FORCE_LINK_PASS); + an |= MVPP2_GMAC_IN_BAND_AUTONEG | + MVPP2_GMAC_AN_SPEED_EN | + MVPP2_GMAC_AN_DUPLEX_EN; - if (state->speed == SPEED_1000) - an |= MVPP2_GMAC_CONFIG_GMII_SPEED; - else if (state->speed == SPEED_100) - an |= MVPP2_GMAC_CONFIG_MII_SPEED; + if (state->pause & MLO_PAUSE_TX) + ctrl4 |= MVPP22_CTRL4_TX_FC_EN; + if (state->pause & MLO_PAUSE_RX) + ctrl4 |= MVPP22_CTRL4_RX_FC_EN; + } else if (phy_interface_mode_is_8023z(state->interface)) { + /* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can + * they negotiate duplex: they are always operating with a fixed + * speed of 1000/2500Mbps in full duplex, so force 1000/2500 + * speed and full duplex here. 
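Condensing the rewritten mvpp2_gmac_config() above: the AN register bits now follow phylink's negotiation style rather than the interface type alone. An illustrative restatement of that three-way decision; the AN_* names are placeholders standing in for the MVPP2_GMAC_* masks, not the real register layout:

#include <linux/bits.h>
#include <linux/phy.h>
#include <linux/phylink.h>

#define AN_INBAND       BIT(0)  /* placeholder for MVPP2_GMAC_IN_BAND_AUTONEG */
#define AN_SPEED_EN     BIT(1)
#define AN_DUPLEX_EN    BIT(2)
#define AN_GMII_SPEED   BIT(3)
#define AN_MII_SPEED    BIT(4)
#define AN_FULL_DUPLEX  BIT(5)

static u32 pick_an_bits(unsigned int mode,
                        const struct phylink_link_state *state)
{
        u32 an = 0;

        if (!phylink_autoneg_inband(mode)) {
                /* PHY or fixed link: force speed/duplex from 'state'. */
                if (state->duplex)
                        an |= AN_FULL_DUPLEX;
                if (state->speed == SPEED_1000 || state->speed == SPEED_2500)
                        an |= AN_GMII_SPEED;
                else if (state->speed == SPEED_100)
                        an |= AN_MII_SPEED;
        } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
                /* In-band SGMII: speed and duplex come from the PHY. */
                an |= AN_INBAND | AN_SPEED_EN | AN_DUPLEX_EN;
        } else if (phy_interface_mode_is_8023z(state->interface)) {
                /* 802.3z: fixed 1000/2500 full duplex, only pause may vary. */
                an |= AN_INBAND | AN_GMII_SPEED | AN_FULL_DUPLEX;
        }
        return an;
}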
+ */ + ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK; + an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN | MVPP2_GMAC_FORCE_LINK_PASS); + an |= MVPP2_GMAC_IN_BAND_AUTONEG | + MVPP2_GMAC_CONFIG_GMII_SPEED | + MVPP2_GMAC_CONFIG_FULL_DUPLEX; - ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL; - ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL | - MVPP22_CTRL4_SYNC_BYPASS_DIS | - MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; + if (state->pause & MLO_PAUSE_AN && state->an_enabled) { + an |= MVPP2_GMAC_FLOW_CTRL_AUTONEG; + } else { + if (state->pause & MLO_PAUSE_TX) + ctrl4 |= MVPP22_CTRL4_TX_FC_EN; + if (state->pause & MLO_PAUSE_RX) + ctrl4 |= MVPP22_CTRL4_RX_FC_EN; + } } - writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG); - writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG); - writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG); - writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG); + if ((old_ctrl0 ^ ctrl0) & MVPP2_GMAC_PORT_TYPE_MASK || + (old_ctrl2 ^ ctrl2) & MVPP2_GMAC_INBAND_AN_MASK || + (old_an ^ an) & MVPP2_GMAC_IN_BAND_AUTONEG) { + /* Force link down */ + old_an &= ~MVPP2_GMAC_FORCE_LINK_PASS; + old_an |= MVPP2_GMAC_FORCE_LINK_DOWN; + writel(old_an, port->base + MVPP2_GMAC_AUTONEG_CONFIG); + + /* Set the GMAC in a reset state - do this in a way that + * ensures we clear it below. + */ + old_ctrl2 |= MVPP2_GMAC_PORT_RESET_MASK; + writel(old_ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG); + } + + if (old_ctrl0 != ctrl0) + writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG); + if (old_ctrl2 != ctrl2) + writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG); + if (old_ctrl4 != ctrl4) + writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG); + if (old_an != an) + writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG); + + if (old_ctrl2 & MVPP2_GMAC_PORT_RESET_MASK) { + while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) & + MVPP2_GMAC_PORT_RESET_MASK) + continue; + } } static void mvpp2_mac_config(struct net_device *dev, unsigned int mode, @@ -4624,7 +4664,7 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode, bool change_interface = port->phy_interface != state->interface; /* Check for invalid configuration */ - if (state->interface == PHY_INTERFACE_MODE_10GKR && port->gop_id != 0) { + if (mvpp2_is_xlg(state->interface) && port->gop_id != 0) { netdev_err(dev, "Invalid mode on %s\n", dev->name); return; } @@ -4644,7 +4684,7 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode, } /* mac (re)configuration */ - if (state->interface == PHY_INTERFACE_MODE_10GKR) + if (mvpp2_is_xlg(state->interface)) mvpp2_xlg_config(port, mode, state); else if (phy_interface_mode_is_rgmii(state->interface) || phy_interface_mode_is_8023z(state->interface) || @@ -4666,12 +4706,10 @@ static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode, struct mvpp2_port *port = netdev_priv(dev); u32 val; - if (!phylink_autoneg_inband(mode) && - interface != PHY_INTERFACE_MODE_10GKR) { + if (!phylink_autoneg_inband(mode) && !mvpp2_is_xlg(interface)) { val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); val &= ~MVPP2_GMAC_FORCE_LINK_DOWN; - if (phy_interface_mode_is_rgmii(interface)) - val |= MVPP2_GMAC_FORCE_LINK_PASS; + val |= MVPP2_GMAC_FORCE_LINK_PASS; writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); } @@ -4688,8 +4726,7 @@ static void mvpp2_mac_link_down(struct net_device *dev, unsigned int mode, struct mvpp2_port *port = netdev_priv(dev); u32 val; - if (!phylink_autoneg_inband(mode) && - interface != PHY_INTERFACE_MODE_10GKR) { + if (!phylink_autoneg_inband(mode) && !mvpp2_is_xlg(interface)) { val = readl(port->base + 
MVPP2_GMAC_AUTONEG_CONFIG); val &= ~MVPP2_GMAC_FORCE_LINK_PASS; val |= MVPP2_GMAC_FORCE_LINK_DOWN; diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index f8a6d6e3cb7a..35f2142aac5e 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -201,6 +201,7 @@ struct tx_desc { }; struct pxa168_eth_private { + struct platform_device *pdev; int port_num; /* User Ethernet port number */ int phy_addr; int phy_speed; @@ -331,7 +332,7 @@ static void rxq_refill(struct net_device *dev) used_rx_desc = pep->rx_used_desc_q; p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc]; size = skb_end_pointer(skb) - skb->data; - p_used_rx_desc->buf_ptr = dma_map_single(NULL, + p_used_rx_desc->buf_ptr = dma_map_single(&pep->pdev->dev, skb->data, size, DMA_FROM_DEVICE); @@ -743,7 +744,7 @@ static int txq_reclaim(struct net_device *dev, int force) netdev_err(dev, "Error in TX\n"); dev->stats.tx_errors++; } - dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE); + dma_unmap_single(&pep->pdev->dev, addr, count, DMA_TO_DEVICE); if (skb) dev_kfree_skb_irq(skb); released++; @@ -805,7 +806,7 @@ static int rxq_process(struct net_device *dev, int budget) if (rx_next_curr_desc == rx_used_desc) pep->rx_resource_err = 1; pep->rx_desc_count--; - dma_unmap_single(NULL, rx_desc->buf_ptr, + dma_unmap_single(&pep->pdev->dev, rx_desc->buf_ptr, rx_desc->buf_size, DMA_FROM_DEVICE); received_packets++; @@ -1274,7 +1275,8 @@ pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) length = skb->len; pep->tx_skb[tx_index] = skb; desc->byte_cnt = length; - desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE); + desc->buf_ptr = dma_map_single(&pep->pdev->dev, skb->data, length, + DMA_TO_DEVICE); skb_tx_timestamp(skb); @@ -1528,6 +1530,7 @@ static int pxa168_eth_probe(struct platform_device *pdev) if (err) goto err_free_mdio; + pep->pdev = pdev; SET_NETDEV_DEV(dev, &pdev->dev); pxa168_init_hw(pep); err = register_netdev(dev); diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 04fd1f135011..654ac534b10e 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -152,8 +152,10 @@ static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs, memset(p, 0, regs->len); memcpy_fromio(p, io, B3_RAM_ADDR); - memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1, - regs->len - B3_RI_WTO_R1); + if (regs->len > B3_RI_WTO_R1) { + memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1, + regs->len - B3_RI_WTO_R1); + } } /* Wake on Lan only supported on Yukon chips with rev 1 or above */ diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index f3a5fa84860f..57727fe1501e 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -5073,7 +5073,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&hw->restart_work, sky2_restart); pci_set_drvdata(pdev, hw); - pdev->d3_delay = 200; + pdev->d3_delay = 300; return 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 6b88881b8e35..c1438ae52a11 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -3360,7 +3360,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, dev->addr_len = ETH_ALEN; mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]); if 
(!is_valid_ether_addr(dev->dev_addr)) { - en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n", + en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n", priv->port, dev->dev_addr); err = -EINVAL; goto out; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 9a0881cb7f51..6c01314e87b0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -617,6 +617,8 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb, } #endif +#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN) + /* We reach this function only after checking that any of * the (IPv4 | IPv6) bits are set in cqe->status. */ @@ -624,9 +626,20 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va, netdev_features_t dev_features) { __wsum hw_checksum = 0; + void *hdr; + + /* CQE csum doesn't cover padding octets in short ethernet + * frames. And the pad field is appended prior to calculating + * and appending the FCS field. + * + * Detecting these padded frames requires to verify and parse + * IP headers, so we simply force all those small frames to skip + * checksum complete. + */ + if (short_frame(skb->len)) + return -EINVAL; - void *hdr = (u8 *)va + sizeof(struct ethhdr); - + hdr = (u8 *)va + sizeof(struct ethhdr); hw_checksum = csum_unfold((__force __sum16)cqe->checksum); if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) && @@ -819,6 +832,11 @@ xdp_drop_no_cnt: skb_record_rx_queue(skb, cq_ring); if (likely(dev->features & NETIF_F_RXCSUM)) { + /* TODO: For IP non TCP/UDP packets when csum complete is + * not an option (not supported or any other reason) we can + * actually check cqe IPOK status bit and report + * CHECKSUM_UNNECESSARY rather than CHECKSUM_NONE + */ if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP | MLX4_CQE_STATUS_UDP)) && (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) && diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index bdb8dd161923..1f6e16d5ea6b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -3981,6 +3981,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) if (ret) goto err_params_unregister; + devlink_params_publish(devlink); pci_save_state(pdev); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 9de9abacf7f6..82d636baaa4e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -13,7 +13,7 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o # mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ health.o mcg.o cq.o alloc.o qp.o port.o mr.o pd.o \ - mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \ + transobj.o vport.o sriov.o fs_cmd.o fs_core.o \ fs_counters.o rl.o lag.o dev.o events.o wq.o lib/gid.o \ lib/devcom.o diag/fs_tracepoint.o diag/fw_tracer.o @@ -22,7 +22,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ # mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \ - en_selftest.o en/port.o en/monitor_stats.o + en_selftest.o en/port.o en/monitor_stats.o en/reporter_tx.o # # Netdev extra @@ -35,7 +35,7 @@ mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o # # Core extra # -mlx5_core-$(CONFIG_MLX5_ESWITCH) += 
eswitch.o eswitch_offloads.o +mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o ecpf.o mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 3e0fa8a8077b..be48c6440251 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -316,6 +316,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT: case MLX5_CMD_OP_DEALLOC_MEMIC: case MLX5_CMD_OP_PAGE_FAULT_RESUME: + case MLX5_CMD_OP_QUERY_HOST_PARAMS: return MLX5_CMD_STAT_OK; case MLX5_CMD_OP_QUERY_HCA_CAP: @@ -627,6 +628,7 @@ const char *mlx5_command_str(int command) MLX5_COMMAND_STR_CASE(QUERY_MODIFY_HEADER_CONTEXT); MLX5_COMMAND_STR_CASE(ALLOC_MEMIC); MLX5_COMMAND_STR_CASE(DEALLOC_MEMIC); + MLX5_COMMAND_STR_CASE(QUERY_HOST_PARAMS); default: return "unknown command opcode"; } } @@ -1583,6 +1585,24 @@ no_trig: spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags); } +void mlx5_cmd_flush(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd *cmd = &dev->cmd; + int i; + + for (i = 0; i < cmd->max_reg_cmds; i++) + while (down_trylock(&cmd->sem)) + mlx5_cmd_trigger_completions(dev); + + while (down_trylock(&cmd->pages_sem)) + mlx5_cmd_trigger_completions(dev); + + /* Unlock cmdif */ + up(&cmd->pages_sem); + for (i = 0; i < cmd->max_reg_cmds; i++) + up(&cmd->sem); +} + static int status_to_err(u8 status) { return status ? -1 : 0; /* TBD more meaningful codes */ @@ -1711,12 +1731,57 @@ int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, } EXPORT_SYMBOL(mlx5_cmd_exec); -int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, - void *out, int out_size, mlx5_cmd_cbk_t callback, - void *context) +void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev, + struct mlx5_async_ctx *ctx) +{ + ctx->dev = dev; + /* Starts at 1 to avoid doing wake_up if we are not cleaning up */ + atomic_set(&ctx->num_inflight, 1); + init_waitqueue_head(&ctx->wait); +} +EXPORT_SYMBOL(mlx5_cmd_init_async_ctx); + +/** + * mlx5_cmd_cleanup_async_ctx - Clean up an async_ctx + * @ctx: The ctx to clean + * + * Upon return all callbacks given to mlx5_cmd_exec_cb() have been called. The + * caller must ensure that mlx5_cmd_exec_cb() is not called during or after + * the call mlx5_cleanup_async_ctx(). 
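The async-context API introduced in cmd.c above exists so a user can tear down knowing no callback is still in flight: the context carries a num_inflight count that mlx5_cmd_cleanup_async_ctx() waits on. A hedged usage sketch; the embedding struct and function names are hypothetical, and the callback signature is inferred from the work->user_callback(status, work) call in the handler:

#include <linux/mlx5/driver.h>

struct my_user {
        struct mlx5_async_ctx async_ctx;
        struct mlx5_async_work work;
        u32 out[16];                    /* placeholder outbox */
};

static void my_done(int status, struct mlx5_async_work *work)
{
        struct my_user *u = container_of(work, struct my_user, work);

        /* Runs on command completion; must not re-arm once cleanup starts. */
        (void)u; (void)status;
}

static int my_start(struct mlx5_core_dev *dev, struct my_user *u,
                    void *in, int in_sz)
{
        mlx5_cmd_init_async_ctx(dev, &u->async_ctx);
        return mlx5_cmd_exec_cb(&u->async_ctx, in, in_sz, u->out,
                                sizeof(u->out), my_done, &u->work);
}

static void my_stop(struct my_user *u)
{
        /* Blocks until every callback issued on this ctx has returned. */
        mlx5_cmd_cleanup_async_ctx(&u->async_ctx);
}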
+ */ +void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx) +{ + atomic_dec(&ctx->num_inflight); + wait_event(ctx->wait, atomic_read(&ctx->num_inflight) == 0); +} +EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx); + +static void mlx5_cmd_exec_cb_handler(int status, void *_work) +{ + struct mlx5_async_work *work = _work; + struct mlx5_async_ctx *ctx = work->ctx; + + work->user_callback(status, work); + if (atomic_dec_and_test(&ctx->num_inflight)) + wake_up(&ctx->wait); +} + +int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, + void *out, int out_size, mlx5_async_cbk_t callback, + struct mlx5_async_work *work) { - return cmd_exec(dev, in, in_size, out, out_size, callback, context, - false); + int ret; + + work->ctx = ctx; + work->user_callback = callback; + if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight))) + return -EIO; + ret = cmd_exec(ctx->dev, in, in_size, out, out_size, + mlx5_cmd_exec_cb_handler, work, false); + if (ret && atomic_dec_and_test(&ctx->num_inflight)) + wake_up(&ctx->wait); + + return ret; } EXPORT_SYMBOL(mlx5_cmd_exec_cb); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c new file mode 100644 index 000000000000..4746f2d28fb6 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2019 Mellanox Technologies. */ + +#include "ecpf.h" + +bool mlx5_read_embedded_cpu(struct mlx5_core_dev *dev) +{ + return (ioread32be(&dev->iseg->initializing) >> MLX5_ECPU_BIT_NUM) & 1; +} + +static int mlx5_peer_pf_enable_hca(struct mlx5_core_dev *dev) +{ + u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {}; + u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {}; + + MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA); + MLX5_SET(enable_hca_in, in, function_id, 0); + MLX5_SET(enable_hca_in, in, embedded_cpu_function, 0); + return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); +} + +static int mlx5_peer_pf_disable_hca(struct mlx5_core_dev *dev) +{ + u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {}; + u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {}; + + MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA); + MLX5_SET(disable_hca_in, in, function_id, 0); + MLX5_SET(enable_hca_in, in, embedded_cpu_function, 0); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} + +static int mlx5_peer_pf_init(struct mlx5_core_dev *dev) +{ + int err; + + err = mlx5_peer_pf_enable_hca(dev); + if (err) + mlx5_core_err(dev, "Failed to enable peer PF HCA err(%d)\n", + err); + + return err; +} + +static void mlx5_peer_pf_cleanup(struct mlx5_core_dev *dev) +{ + int err; + + err = mlx5_peer_pf_disable_hca(dev); + if (err) { + mlx5_core_err(dev, "Failed to disable peer PF HCA err(%d)\n", + err); + return; + } + + err = mlx5_wait_for_pages(dev, &dev->priv.peer_pf_pages); + if (err) + mlx5_core_warn(dev, "Timeout reclaiming peer PF pages err(%d)\n", + err); +} + +int mlx5_ec_init(struct mlx5_core_dev *dev) +{ + int err = 0; + + if (!mlx5_core_is_ecpf(dev)) + return 0; + + /* ECPF shall enable HCA for peer PF in the same way a PF + * does this for its VFs. 
+ */ + err = mlx5_peer_pf_init(dev); + if (err) + return err; + + return 0; +} + +void mlx5_ec_cleanup(struct mlx5_core_dev *dev) +{ + if (!mlx5_core_is_ecpf(dev)) + return; + + mlx5_peer_pf_cleanup(dev); +} + +static int mlx5_query_host_params_context(struct mlx5_core_dev *dev, + u32 *out, int outlen) +{ + u32 in[MLX5_ST_SZ_DW(query_host_params_in)] = {}; + + MLX5_SET(query_host_params_in, in, opcode, + MLX5_CMD_OP_QUERY_HOST_PARAMS); + + return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); +} + +int mlx5_query_host_params_num_vfs(struct mlx5_core_dev *dev, int *num_vf) +{ + u32 out[MLX5_ST_SZ_DW(query_host_params_out)] = {}; + int err; + + err = mlx5_query_host_params_context(dev, out, sizeof(out)); + if (err) + return err; + + *num_vf = MLX5_GET(query_host_params_out, out, + host_params_context.host_num_of_vfs); + mlx5_core_dbg(dev, "host_num_of_vfs %d\n", *num_vf); + + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.h b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.h new file mode 100644 index 000000000000..346372df218f --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2019 Mellanox Technologies. */ + +#ifndef __MLX5_ECPF_H__ +#define __MLX5_ECPF_H__ + +#include <linux/mlx5/driver.h> +#include "mlx5_core.h" + +#ifdef CONFIG_MLX5_ESWITCH + +enum { + MLX5_ECPU_BIT_NUM = 23, +}; + +bool mlx5_read_embedded_cpu(struct mlx5_core_dev *dev); +int mlx5_ec_init(struct mlx5_core_dev *dev); +void mlx5_ec_cleanup(struct mlx5_core_dev *dev); +int mlx5_query_host_params_num_vfs(struct mlx5_core_dev *dev, int *num_vf); + +#else /* CONFIG_MLX5_ESWITCH */ + +static inline bool +mlx5_read_embedded_cpu(struct mlx5_core_dev *dev) { return false; } +static inline int mlx5_ec_init(struct mlx5_core_dev *dev) { return 0; } +static inline void mlx5_ec_cleanup(struct mlx5_core_dev *dev) {} +static inline int +mlx5_query_host_params_num_vfs(struct mlx5_core_dev *dev, int *num_vf) +{ return -EOPNOTSUPP; } + +#endif /* CONFIG_MLX5_ESWITCH */ + +#endif /* __MLX5_ECPF_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 6dd74ef69389..71c65cc17904 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -387,10 +387,7 @@ struct mlx5e_txqsq { struct mlx5e_channel *channel; int txq_ix; u32 rate_limit; - struct mlx5e_txqsq_recover { - struct work_struct recover_work; - u64 last_recover; - } recover; + struct work_struct recover_work; } ____cacheline_aligned_in_smp; struct mlx5e_dma_info { @@ -658,6 +655,7 @@ struct mlx5e_channel_stats { enum { MLX5E_STATE_OPENED, MLX5E_STATE_DESTROYING, + MLX5E_STATE_XDP_TX_ENABLED, }; struct mlx5e_rqt { @@ -683,6 +681,13 @@ struct mlx5e_rss_params { u8 hfunc; }; +struct mlx5e_modify_sq_param { + int curr_state; + int next_state; + int rl_update; + int rl_index; +}; + struct mlx5e_priv { /* priv data path fields - start */ struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC]; @@ -738,6 +743,7 @@ struct mlx5e_priv { #ifdef CONFIG_MLX5_EN_TLS struct mlx5e_tls *tls; #endif + struct devlink_health_reporter *tx_reporter; }; struct mlx5e_profile { @@ -852,9 +858,9 @@ void mlx5e_close_channels(struct mlx5e_channels *chs); * switching channels */ typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv); -void mlx5e_switch_priv_channels(struct mlx5e_priv *priv, - struct mlx5e_channels *new_chs, - mlx5e_fp_hw_modify hw_modify); 
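The en.h hunk continuing below renames this helper to mlx5e_safe_switch_channels() and folds the mlx5e_open_channels() step into it, so callers make a single fallible call instead of pairing open with switch (the en_dcbnl.c hunk further down in this diff is one converted caller). A sketch of the resulting caller pattern, assuming the driver's en.h context; my_set_param is a hypothetical ethtool-style setter:

static int my_set_param(struct mlx5e_priv *priv)
{
        struct mlx5e_channels new_channels = {};
        int err = 0;

        mutex_lock(&priv->state_lock);
        new_channels.params = priv->channels.params;
        /* ...modify new_channels.params here... */

        if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
                priv->channels.params = new_channels.params;
                goto unlock;
        }

        /* Opens the new channel set, swaps it in, closes the old one. */
        err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
unlock:
        mutex_unlock(&priv->state_lock);
        return err;
}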
+int mlx5e_safe_switch_channels(struct mlx5e_priv *priv, + struct mlx5e_channels *new_chs, + mlx5e_fp_hw_modify hw_modify); void mlx5e_activate_priv_channels(struct mlx5e_priv *priv); void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv); @@ -868,6 +874,11 @@ void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params); void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params); +int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn, + struct mlx5e_modify_sq_param *p); +void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq); +void mlx5e_tx_disable_queue(struct netdev_queue *txq); + static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev) { return (MLX5_CAP_ETH(mdev, tunnel_stateless_gre) && diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c index 2ce420851e77..7cd5b02e0f10 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c @@ -66,7 +66,7 @@ static int mlx5e_monitor_event_handler(struct notifier_block *nb, return NOTIFY_OK; } -void mlx5e_monitor_counter_start(struct mlx5e_priv *priv) +static void mlx5e_monitor_counter_start(struct mlx5e_priv *priv) { MLX5_NB_INIT(&priv->monitor_counters_nb, mlx5e_monitor_event_handler, MONITOR_COUNTER); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c index 4a37713023be..122927f3a600 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c @@ -63,66 +63,168 @@ static const u32 mlx5e_link_speed[MLX5E_LINK_MODES_NUMBER] = { [MLX5E_50GBASE_KR2] = 50000, }; -u32 mlx5e_port_ptys2speed(u32 eth_proto_oper) +static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = { + [MLX5E_SGMII_100M] = 100, + [MLX5E_1000BASE_X_SGMII] = 1000, + [MLX5E_5GBASE_R] = 5000, + [MLX5E_10GBASE_XFI_XAUI_1] = 10000, + [MLX5E_40GBASE_XLAUI_4_XLPPI_4] = 40000, + [MLX5E_25GAUI_1_25GBASE_CR_KR] = 25000, + [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2] = 50000, + [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR] = 50000, + [MLX5E_CAUI_4_100GBASE_CR4_KR4] = 100000, + [MLX5E_200GAUI_4_200GBASE_CR4_KR4] = 200000, + [MLX5E_400GAUI_8] = 400000, +}; + +static void mlx5e_port_get_speed_arr(struct mlx5_core_dev *mdev, + const u32 **arr, u32 *size) +{ + bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + + *size = ext ? ARRAY_SIZE(mlx5e_ext_link_speed) : + ARRAY_SIZE(mlx5e_link_speed); + *arr = ext ? 
mlx5e_ext_link_speed : mlx5e_link_speed; +} + +int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext, + struct mlx5e_port_eth_proto *eproto) +{ + u32 out[MLX5_ST_SZ_DW(ptys_reg)]; + int err; + + if (!eproto) + return -EINVAL; + + if (ext != MLX5_CAP_PCAM_FEATURE(dev, ptys_extended_ethernet)) + return -EOPNOTSUPP; + + err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, port); + if (err) + return err; + + eproto->cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, + eth_proto_capability); + eproto->admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_admin); + eproto->oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper); + return 0; +} + +void mlx5_port_query_eth_autoneg(struct mlx5_core_dev *dev, u8 *an_status, + u8 *an_disable_cap, u8 *an_disable_admin) +{ + u32 out[MLX5_ST_SZ_DW(ptys_reg)]; + + *an_status = 0; + *an_disable_cap = 0; + *an_disable_admin = 0; + + if (mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, 1)) + return; + + *an_status = MLX5_GET(ptys_reg, out, an_status); + *an_disable_cap = MLX5_GET(ptys_reg, out, an_disable_cap); + *an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin); +} + +int mlx5_port_set_eth_ptys(struct mlx5_core_dev *dev, bool an_disable, + u32 proto_admin, bool ext) +{ + u32 out[MLX5_ST_SZ_DW(ptys_reg)]; + u32 in[MLX5_ST_SZ_DW(ptys_reg)]; + u8 an_disable_admin; + u8 an_disable_cap; + u8 an_status; + + mlx5_port_query_eth_autoneg(dev, &an_status, &an_disable_cap, + &an_disable_admin); + if (!an_disable_cap && an_disable) + return -EPERM; + + memset(in, 0, sizeof(in)); + + MLX5_SET(ptys_reg, in, local_port, 1); + MLX5_SET(ptys_reg, in, an_disable_admin, an_disable); + MLX5_SET(ptys_reg, in, proto_mask, MLX5_PTYS_EN); + if (ext) + MLX5_SET(ptys_reg, in, ext_eth_proto_admin, proto_admin); + else + MLX5_SET(ptys_reg, in, eth_proto_admin, proto_admin); + + return mlx5_core_access_reg(dev, in, sizeof(in), out, + sizeof(out), MLX5_REG_PTYS, 0, 1); +} + +u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper) { unsigned long temp = eth_proto_oper; + const u32 *table; u32 speed = 0; + u32 max_size; int i; - i = find_first_bit(&temp, MLX5E_LINK_MODES_NUMBER); - if (i < MLX5E_LINK_MODES_NUMBER) - speed = mlx5e_link_speed[i]; - + mlx5e_port_get_speed_arr(mdev, &table, &max_size); + i = find_first_bit(&temp, max_size); + if (i < max_size) + speed = table[i]; return speed; } int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) { - u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {}; - u32 eth_proto_oper; + struct mlx5e_port_eth_proto eproto; + bool ext; int err; - err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1); + ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto); if (err) - return err; + goto out; - eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); - *speed = mlx5e_port_ptys2speed(eth_proto_oper); + *speed = mlx5e_port_ptys2speed(mdev, eproto.oper); if (!(*speed)) err = -EINVAL; +out: return err; } int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) { + struct mlx5e_port_eth_proto eproto; u32 max_speed = 0; - u32 proto_cap; + const u32 *table; + u32 max_size; + bool ext; int err; int i; - err = mlx5_query_port_proto_cap(mdev, &proto_cap, MLX5_PTYS_EN); + ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto); if (err) return err; - for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) - if (proto_cap & MLX5E_PROT_MASK(i)) - max_speed = 
max(max_speed, mlx5e_link_speed[i]); + mlx5e_port_get_speed_arr(mdev, &table, &max_size); + for (i = 0; i < max_size; ++i) + if (eproto.cap & MLX5E_PROT_MASK(i)) + max_speed = max(max_speed, table[i]); *speed = max_speed; return 0; } -u32 mlx5e_port_speed2linkmodes(u32 speed) +u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed) { u32 link_modes = 0; + const u32 *table; + u32 max_size; int i; - for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { - if (mlx5e_link_speed[i] == speed) + mlx5e_port_get_speed_arr(mdev, &table, &max_size); + for (i = 0; i < max_size; ++i) { + if (table[i] == speed) link_modes |= MLX5E_PROT_MASK(i); } - return link_modes; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h index cd2160b8c9bf..70f536ec51c4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h @@ -36,10 +36,22 @@ #include <linux/mlx5/driver.h> #include "en.h" -u32 mlx5e_port_ptys2speed(u32 eth_proto_oper); +struct mlx5e_port_eth_proto { + u32 cap; + u32 admin; + u32 oper; +}; + +int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext, + struct mlx5e_port_eth_proto *eproto); +void mlx5_port_query_eth_autoneg(struct mlx5_core_dev *dev, u8 *an_status, + u8 *an_disable_cap, u8 *an_disable_admin); +int mlx5_port_set_eth_ptys(struct mlx5_core_dev *dev, bool an_disable, + u32 proto_admin, bool ext); +u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper); int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); -u32 mlx5e_port_speed2linkmodes(u32 speed); +u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed); int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out); int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter.h b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter.h new file mode 100644 index 000000000000..e78e92753d73 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2019 Mellanox Technologies. */ + +#ifndef __MLX5E_EN_REPORTER_H +#define __MLX5E_EN_REPORTER_H + +#include <linux/mlx5/driver.h> +#include "en.h" + +int mlx5e_tx_reporter_create(struct mlx5e_priv *priv); +void mlx5e_tx_reporter_destroy(struct mlx5e_priv *priv); +void mlx5e_tx_reporter_err_cqe(struct mlx5e_txqsq *sq); +int mlx5e_tx_reporter_timeout(struct mlx5e_txqsq *sq); + +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c new file mode 100644 index 000000000000..0aebfb377cf0 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c @@ -0,0 +1,297 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2019 Mellanox Technologies. 
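The en/port.c speed helpers above are all table lookups keyed by PTYS protocol bit; only the table (legacy vs. extended) changes with the ptys_extended_ethernet capability. A standalone illustration of the two lookup directions with a toy table, compilable as plain C and not the driver's code:

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for mlx5e_link_speed[]: index = protocol bit, value = Mb/s. */
static const uint32_t speed_tbl[] = { 1000, 10000, 25000, 40000, 50000, 50000 };
#define TBL_SZ (sizeof(speed_tbl) / sizeof(speed_tbl[0]))

/* ptys2speed: speed of the lowest set protocol bit, 0 if none is set. */
static uint32_t ptys2speed(uint32_t proto_oper)
{
        for (unsigned int i = 0; i < TBL_SZ; i++)
                if (proto_oper & (1u << i))
                        return speed_tbl[i];
        return 0;
}

/* speed2linkmodes: mask of every protocol bit that runs at 'speed'. */
static uint32_t speed2linkmodes(uint32_t speed)
{
        uint32_t modes = 0;

        for (unsigned int i = 0; i < TBL_SZ; i++)
                if (speed_tbl[i] == speed)
                        modes |= 1u << i;
        return modes;
}

int main(void)
{
        printf("%u\n", ptys2speed(0x28));        /* bits 3,5 set -> 40000 */
        printf("%#x\n", speed2linkmodes(50000)); /* bits 4,5 -> 0x30 */
        return 0;
}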
*/ + +#include <net/devlink.h> +#include "reporter.h" +#include "lib/eq.h" + +#define MLX5E_TX_REPORTER_PER_SQ_MAX_LEN 256 + +struct mlx5e_tx_err_ctx { + int (*recover)(struct mlx5e_txqsq *sq); + struct mlx5e_txqsq *sq; +}; + +static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq) +{ + unsigned long exp_time = jiffies + msecs_to_jiffies(2000); + + while (time_before(jiffies, exp_time)) { + if (sq->cc == sq->pc) + return 0; + + msleep(20); + } + + netdev_err(sq->channel->netdev, + "Wait for SQ 0x%x flush timeout (sq cc = 0x%x, sq pc = 0x%x)\n", + sq->sqn, sq->cc, sq->pc); + + return -ETIMEDOUT; +} + +static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq) +{ + WARN_ONCE(sq->cc != sq->pc, + "SQ 0x%x: cc (0x%x) != pc (0x%x)\n", + sq->sqn, sq->cc, sq->pc); + sq->cc = 0; + sq->dma_fifo_cc = 0; + sq->pc = 0; +} + +static int mlx5e_sq_to_ready(struct mlx5e_txqsq *sq, int curr_state) +{ + struct mlx5_core_dev *mdev = sq->channel->mdev; + struct net_device *dev = sq->channel->netdev; + struct mlx5e_modify_sq_param msp = {0}; + int err; + + msp.curr_state = curr_state; + msp.next_state = MLX5_SQC_STATE_RST; + + err = mlx5e_modify_sq(mdev, sq->sqn, &msp); + if (err) { + netdev_err(dev, "Failed to move sq 0x%x to reset\n", sq->sqn); + return err; + } + + memset(&msp, 0, sizeof(msp)); + msp.curr_state = MLX5_SQC_STATE_RST; + msp.next_state = MLX5_SQC_STATE_RDY; + + err = mlx5e_modify_sq(mdev, sq->sqn, &msp); + if (err) { + netdev_err(dev, "Failed to move sq 0x%x to ready\n", sq->sqn); + return err; + } + + return 0; +} + +static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq) +{ + struct mlx5_core_dev *mdev = sq->channel->mdev; + struct net_device *dev = sq->channel->netdev; + u8 state; + int err; + + if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) + return 0; + + err = mlx5_core_query_sq_state(mdev, sq->sqn, &state); + if (err) { + netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n", + sq->sqn, err); + return err; + } + + if (state != MLX5_SQC_STATE_ERR) { + netdev_err(dev, "SQ 0x%x not in ERROR state\n", sq->sqn); + return -EINVAL; + } + + mlx5e_tx_disable_queue(sq->txq); + + err = mlx5e_wait_for_sq_flush(sq); + if (err) + return err; + + /* At this point, no new packets will arrive from the stack as TXQ is + * marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI cleared all + * pending WQEs. SQ can safely reset the SQ. + */ + + err = mlx5e_sq_to_ready(sq, state); + if (err) + return err; + + mlx5e_reset_txqsq_cc_pc(sq); + sq->stats->recover++; + mlx5e_activate_txqsq(sq); + + return 0; +} + +void mlx5e_tx_reporter_err_cqe(struct mlx5e_txqsq *sq) +{ + char err_str[MLX5E_TX_REPORTER_PER_SQ_MAX_LEN]; + struct mlx5e_tx_err_ctx err_ctx = {0}; + + err_ctx.sq = sq; + err_ctx.recover = mlx5e_tx_reporter_err_cqe_recover; + sprintf(err_str, "ERR CQE on SQ: 0x%x", sq->sqn); + + devlink_health_report(sq->channel->priv->tx_reporter, err_str, + &err_ctx); +} + +static int mlx5e_tx_reporter_timeout_recover(struct mlx5e_txqsq *sq) +{ + struct mlx5_eq_comp *eq = sq->cq.mcq.eq; + u32 eqe_count; + int ret; + + netdev_err(sq->channel->netdev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n", + eq->core.eqn, eq->core.cons_index, eq->core.irqn); + + eqe_count = mlx5_eq_poll_irq_disabled(eq); + ret = eqe_count ? 
true : false; + if (!eqe_count) { + clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); + return ret; + } + + netdev_err(sq->channel->netdev, "Recover %d eqes on EQ 0x%x\n", + eqe_count, eq->core.eqn); + sq->channel->stats->eq_rearm++; + return ret; +} + +int mlx5e_tx_reporter_timeout(struct mlx5e_txqsq *sq) +{ + char err_str[MLX5E_TX_REPORTER_PER_SQ_MAX_LEN]; + struct mlx5e_tx_err_ctx err_ctx; + + err_ctx.sq = sq; + err_ctx.recover = mlx5e_tx_reporter_timeout_recover; + sprintf(err_str, + "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n", + sq->channel->ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc, + jiffies_to_usecs(jiffies - sq->txq->trans_start)); + + return devlink_health_report(sq->channel->priv->tx_reporter, err_str, + &err_ctx); +} + +/* state lock cannot be grabbed within this function. + * It can cause a dead lock or a read-after-free. + */ +int mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_tx_err_ctx *err_ctx) +{ + return err_ctx->recover(err_ctx->sq); +} + +static int mlx5e_tx_reporter_recover_all(struct mlx5e_priv *priv) +{ + int err; + + rtnl_lock(); + mutex_lock(&priv->state_lock); + mlx5e_close_locked(priv->netdev); + err = mlx5e_open_locked(priv->netdev); + mutex_unlock(&priv->state_lock); + rtnl_unlock(); + + return err; +} + +static int mlx5e_tx_reporter_recover(struct devlink_health_reporter *reporter, + void *context) +{ + struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter); + struct mlx5e_tx_err_ctx *err_ctx = context; + + return err_ctx ? mlx5e_tx_reporter_recover_from_ctx(err_ctx) : + mlx5e_tx_reporter_recover_all(priv); +} + +static int +mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg, + u32 sqn, u8 state, bool stopped) +{ + int err; + + err = devlink_fmsg_obj_nest_start(fmsg); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "sqn", sqn); + if (err) + return err; + + err = devlink_fmsg_u8_pair_put(fmsg, "HW state", state); + if (err) + return err; + + err = devlink_fmsg_bool_pair_put(fmsg, "stopped", stopped); + if (err) + return err; + + err = devlink_fmsg_obj_nest_end(fmsg); + if (err) + return err; + + return 0; +} + +static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg) +{ + struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter); + int i, err = 0; + + mutex_lock(&priv->state_lock); + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + goto unlock; + + err = devlink_fmsg_arr_pair_nest_start(fmsg, "SQs"); + if (err) + goto unlock; + + for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; + i++) { + struct mlx5e_txqsq *sq = priv->txq2sq[i]; + u8 state; + + err = mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state); + if (err) + break; + + err = mlx5e_tx_reporter_build_diagnose_output(fmsg, sq->sqn, + state, + netif_xmit_stopped(sq->txq)); + if (err) + break; + } + err = devlink_fmsg_arr_pair_nest_end(fmsg); + if (err) + goto unlock; + +unlock: + mutex_unlock(&priv->state_lock); + return err; +} + +static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = { + .name = "tx", + .recover = mlx5e_tx_reporter_recover, + .diagnose = mlx5e_tx_reporter_diagnose, +}; + +#define MLX5_REPORTER_TX_GRACEFUL_PERIOD 500 + +int mlx5e_tx_reporter_create(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + struct devlink *devlink = priv_to_devlink(mdev); + + priv->tx_reporter = + devlink_health_reporter_create(devlink, &mlx5_tx_reporter_ops, + 
MLX5_REPORTER_TX_GRACEFUL_PERIOD, + true, priv); + if (IS_ERR_OR_NULL(priv->tx_reporter)) + netdev_warn(priv->netdev, + "Failed to create tx reporter, err = %ld\n", + PTR_ERR(priv->tx_reporter)); + return PTR_ERR_OR_ZERO(priv->tx_reporter); +} + +void mlx5e_tx_reporter_destroy(struct mlx5e_priv *priv) +{ + if (IS_ERR_OR_NULL(priv->tx_reporter)) + return; + + devlink_health_reporter_destroy(priv->tx_reporter); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index 046948ead152..bdcc5e79328d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c @@ -25,7 +25,7 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv, /* if the egress device isn't on the same HW e-switch or * it's a LAG device, use the uplink */ - if (!switchdev_port_same_parent_id(priv->netdev, dev) || + if (!netdev_port_same_parent_id(priv->netdev, dev) || dst_is_lag_dev) { *route_dev = uplink_dev; *out_dev = *route_dev; @@ -256,6 +256,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv, e->m_neigh.family = n->ops->family; memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); e->out_dev = out_dev; + e->route_dev = route_dev; /* It's important to add the neigh to the hash table before checking * the neigh validity state. So if we'll get a notification, in case the @@ -369,6 +370,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, e->m_neigh.family = n->ops->family; memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); e->out_dev = out_dev; + e->route_dev = route_dev; /* It's important to add the neigh to the hash table before checking * the neigh validity state. So if we'll get a notification, in case the @@ -496,25 +498,21 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv, void *headers_c, void *headers_v) { + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f); struct netlink_ext_ack *extack = f->common.extack; - struct flow_dissector_key_ports *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ENC_PORTS, - f->key); - struct flow_dissector_key_ports *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ENC_PORTS, - f->mask); void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); + struct flow_match_ports enc_ports; + + flow_rule_match_enc_ports(rule, &enc_ports); /* Full udp dst port must be given */ - if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS) || - memchr_inv(&mask->dst, 0xff, sizeof(mask->dst))) { + if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) || + memchr_inv(&enc_ports.mask->dst, 0xff, sizeof(enc_ports.mask->dst))) { NL_SET_ERR_MSG_MOD(extack, "VXLAN decap filter must include enc_dst_port condition"); netdev_warn(priv->netdev, @@ -523,12 +521,12 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv, } /* udp dst port must be known as a VXLAN port */ - if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->dst))) { + if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(enc_ports.key->dst))) { NL_SET_ERR_MSG_MOD(extack, "Matched UDP port is not registered as a VXLAN port"); netdev_warn(priv->netdev, "UDP port %d is not registered as a VXLAN port\n", - be16_to_cpu(key->dst)); + be16_to_cpu(enc_ports.key->dst)); return -EOPNOTSUPP; } @@ -536,26 +534,26 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv 
*priv, MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP); - MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport, ntohs(mask->dst)); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, ntohs(key->dst)); + MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport, + ntohs(enc_ports.mask->dst)); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, + ntohs(enc_ports.key->dst)); - MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport, ntohs(mask->src)); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport, ntohs(key->src)); + MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport, + ntohs(enc_ports.mask->src)); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport, + ntohs(enc_ports.key->src)); /* match on VNI */ - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) { - struct flow_dissector_key_keyid *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ENC_KEYID, - f->key); - struct flow_dissector_key_keyid *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ENC_KEYID, - f->mask); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { + struct flow_match_enc_keyid enc_keyid; + + flow_rule_match_enc_keyid(rule, &enc_keyid); + MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni, - be32_to_cpu(mask->keyid)); + be32_to_cpu(enc_keyid.mask->keyid)); MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni, - be32_to_cpu(key->keyid)); + be32_to_cpu(enc_keyid.key->keyid)); } return 0; } @@ -570,6 +568,7 @@ static int mlx5e_tc_tun_parse_gretap(struct mlx5e_priv *priv, misc_parameters); void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f); if (!MLX5_CAP_ESW(priv->mdev, nvgre_encap_decap)) { NL_SET_ERR_MSG_MOD(f->common.extack, @@ -587,21 +586,14 @@ static int mlx5e_tc_tun_parse_gretap(struct mlx5e_priv *priv, MLX5_SET(fte_match_set_misc, misc_v, gre_protocol, ETH_P_TEB); /* gre key */ - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) { - struct flow_dissector_key_keyid *mask = NULL; - struct flow_dissector_key_keyid *key = NULL; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { + struct flow_match_enc_keyid enc_keyid; - mask = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ENC_KEYID, - f->mask); + flow_rule_match_enc_keyid(rule, &enc_keyid); MLX5_SET(fte_match_set_misc, misc_c, - gre_key.key, be32_to_cpu(mask->keyid)); - - key = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ENC_KEYID, - f->key); + gre_key.key, be32_to_cpu(enc_keyid.mask->keyid)); MLX5_SET(fte_match_set_misc, misc_v, - gre_key.key, be32_to_cpu(key->keyid)); + gre_key.key, be32_to_cpu(enc_keyid.key->keyid)); } return 0; @@ -612,16 +604,18 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev, struct mlx5_flow_spec *spec, struct tc_cls_flower_offload *f, void *headers_c, - void *headers_v) + void *headers_v, u8 *match_level) { int tunnel_type; int err = 0; tunnel_type = mlx5e_tc_tun_get_type(filter_dev); if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) { + *match_level = MLX5_MATCH_L4; err = mlx5e_tc_tun_parse_vxlan(priv, spec, f, headers_c, headers_v); } else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) { + *match_level = MLX5_MATCH_L3; err = mlx5e_tc_tun_parse_gretap(priv, spec, f, headers_c, headers_v); } else { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h index 
706ce7bf15e7..b63f15de899d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h @@ -39,6 +39,6 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev, struct mlx5_flow_spec *spec, struct tc_cls_flower_offload *f, void *headers_c, - void *headers_v); + void *headers_v, u8 *match_level); #endif //__MLX5_EN_TC_TUNNEL_H__ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index 3740177eed09..03b2a9f9c589 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -365,7 +365,8 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, int sq_num; int i; - if (unlikely(!test_bit(MLX5E_STATE_OPENED, &priv->state))) + /* this flag is sufficient, no need to test internal sq state */ + if (unlikely(!mlx5e_xdp_tx_is_enabled(priv))) return -ENETDOWN; if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) @@ -378,9 +379,6 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, sq = &priv->channels.c[sq_num]->xdpsq; - if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) - return -ENETDOWN; - for (i = 0; i < n; i++) { struct xdp_frame *xdpf = frames[i]; struct mlx5e_xdp_info xdpi; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h index 3a67cb3cd179..ee27a7c8cd87 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h @@ -50,6 +50,23 @@ void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq); int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags); +static inline void mlx5e_xdp_tx_enable(struct mlx5e_priv *priv) +{ + set_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state); +} + +static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv) +{ + clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state); + /* let other device's napi(s) see our new state */ + synchronize_rcu(); +} + +static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv) +{ + return test_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state); +} + static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq) { if (sq->doorbell_cseg) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index 722998d68564..554672edf8c3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -1126,9 +1126,7 @@ static void mlx5e_trust_update_sq_inline_mode(struct mlx5e_priv *priv) priv->channels.params.tx_min_inline_mode) goto out; - if (mlx5e_open_channels(priv, &new_channels)) - goto out; - mlx5e_switch_priv_channels(priv, &new_channels, NULL); + mlx5e_safe_switch_channels(priv, &new_channels, NULL); out: mutex_unlock(&priv->state_lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 3bbccead2f63..0804b478ad19 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -63,76 +63,147 @@ struct ptys2ethtool_config { __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised); }; -static struct ptys2ethtool_config ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER]; +static +struct ptys2ethtool_config ptys2legacy_ethtool_table[MLX5E_LINK_MODES_NUMBER]; +static +struct ptys2ethtool_config 
ptys2ext_ethtool_table[MLX5E_EXT_LINK_MODES_NUMBER]; -#define MLX5_BUILD_PTYS2ETHTOOL_CONFIG(reg_, ...) \ +#define MLX5_BUILD_PTYS2ETHTOOL_CONFIG(reg_, table, ...) \ ({ \ struct ptys2ethtool_config *cfg; \ const unsigned int modes[] = { __VA_ARGS__ }; \ - unsigned int i; \ - cfg = &ptys2ethtool_table[reg_]; \ + unsigned int i, bit, idx; \ + cfg = &ptys2##table##_ethtool_table[reg_]; \ bitmap_zero(cfg->supported, \ __ETHTOOL_LINK_MODE_MASK_NBITS); \ bitmap_zero(cfg->advertised, \ __ETHTOOL_LINK_MODE_MASK_NBITS); \ for (i = 0 ; i < ARRAY_SIZE(modes) ; ++i) { \ - __set_bit(modes[i], cfg->supported); \ - __set_bit(modes[i], cfg->advertised); \ + bit = modes[i] % 64; \ + idx = modes[i] / 64; \ + __set_bit(bit, &cfg->supported[idx]); \ + __set_bit(bit, &cfg->advertised[idx]); \ } \ }) void mlx5e_build_ptys2ethtool_map(void) { - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_CX_SGMII, + memset(ptys2legacy_ethtool_table, 0, sizeof(ptys2legacy_ethtool_table)); + memset(ptys2ext_ethtool_table, 0, sizeof(ptys2ext_ethtool_table)); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_CX_SGMII, legacy, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_KX, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_KX, legacy, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CX4, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CX4, legacy, ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KX4, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KX4, legacy, ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KR, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KR, legacy, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_20GBASE_KR2, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_20GBASE_KR2, legacy, ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_CR4, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_CR4, legacy, ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_KR4, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_KR4, legacy, ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_56GBASE_R4, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_56GBASE_R4, legacy, ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CR, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CR, legacy, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_SR, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_SR, legacy, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_ER, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_ER, legacy, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_SR4, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_SR4, legacy, ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_LR4, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_LR4, legacy, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_SR2, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_SR2, legacy, ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_CR4, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_CR4, legacy, ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_SR4, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_SR4, legacy, 
ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_KR4, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_KR4, legacy, ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_LR4, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_LR4, legacy, ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_T, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_T, legacy, ETHTOOL_LINK_MODE_10000baseT_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_CR, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_CR, legacy, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_KR, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_KR, legacy, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_SR, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_SR, legacy, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_CR2, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_CR2, legacy, ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_KR2, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_KR2, legacy, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_SGMII_100M, ext, + ETHTOOL_LINK_MODE_100baseT_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_X_SGMII, ext, + ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + ETHTOOL_LINK_MODE_1000baseX_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_5GBASE_R, ext, + ETHTOOL_LINK_MODE_5000baseT_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_XFI_XAUI_1, ext, + ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, + ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseER_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_XLAUI_4_XLPPI_4, ext, + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GAUI_1_25GBASE_CR_KR, ext, + ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_25000baseSR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2, + ext, + ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, + ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, + ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR, ext, + ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseDR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_CAUI_4_100GBASE_CR4_KR4, ext, + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GAUI_2_100GBASE_CR2_KR2, ext, + ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT); + 
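
A note on the macro rework above: extended link-mode bit numbers can exceed the first word of the __ETHTOOL_DECLARE_LINK_MODE_MASK bitmap, so each mode is split into a word index and a bit offset before setting. A standalone sketch of that indexing, assuming 64-bit words exactly as the /64 arithmetic in the macro does:

#include <stdint.h>

/* Set link-mode bit `mode` in a multi-word mask, mirroring the
 * idx = mode / 64, bit = mode % 64 split in the macro above. */
static void set_link_mode(uint64_t *mask, unsigned int mode)
{
	mask[mode / 64] |= 1ULL << (mode % 64);
}

At query time the driver never mixes the two namespaces: mlx5e_ethtool_get_speed_arr() below selects the legacy or extended table from the ptys_extended_ethernet capability bit.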
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_200GAUI_4_200GBASE_CR4_KR4, ext, + ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT, + ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT, + ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT); +} + +static void mlx5e_ethtool_get_speed_arr(struct mlx5_core_dev *mdev, + struct ptys2ethtool_config **arr, + u32 *size) +{ + bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + + *arr = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table; + *size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) : + ARRAY_SIZE(ptys2legacy_ethtool_table); } typedef int (*mlx5e_pflag_handler)(struct net_device *netdev, bool enable); @@ -298,11 +369,7 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv, goto unlock; } - err = mlx5e_open_channels(priv, &new_channels); - if (err) - goto unlock; - - mlx5e_switch_priv_channels(priv, &new_channels, NULL); + err = mlx5e_safe_switch_channels(priv, &new_channels, NULL); unlock: mutex_unlock(&priv->state_lock); @@ -354,32 +421,29 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, new_channels.params = priv->channels.params; new_channels.params.num_channels = count; - if (!netif_is_rxfh_configured(priv->netdev)) - mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt, - MLX5E_INDIR_RQT_SIZE, count); if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { priv->channels.params = new_channels.params; goto out; } - /* Create fresh channels with new parameters */ - err = mlx5e_open_channels(priv, &new_channels); - if (err) - goto out; - arfs_enabled = priv->netdev->features & NETIF_F_NTUPLE; if (arfs_enabled) mlx5e_arfs_disable(priv); + if (!netif_is_rxfh_configured(priv->netdev)) + mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt, + MLX5E_INDIR_RQT_SIZE, count); + /* Switch to new channels, set new parameters and close old ones */ - mlx5e_switch_priv_channels(priv, &new_channels, NULL); + err = mlx5e_safe_switch_channels(priv, &new_channels, NULL); if (arfs_enabled) { - err = mlx5e_arfs_enable(priv); - if (err) + int err2 = mlx5e_arfs_enable(priv); + + if (err2) netdev_err(priv->netdev, "%s: mlx5e_arfs_enable failed: %d\n", - __func__, err); + __func__, err2); } out: @@ -505,12 +569,7 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, goto out; } - /* open fresh channels with new coal parameters */ - err = mlx5e_open_channels(priv, &new_channels); - if (err) - goto out; - - mlx5e_switch_priv_channels(priv, &new_channels, NULL); + err = mlx5e_safe_switch_channels(priv, &new_channels, NULL); out: mutex_unlock(&priv->state_lock); @@ -525,27 +584,35 @@ static int mlx5e_set_coalesce(struct net_device *netdev, return mlx5e_ethtool_set_coalesce(priv, coal); } -static void ptys2ethtool_supported_link(unsigned long *supported_modes, +static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev, + unsigned long *supported_modes, u32 eth_proto_cap) { unsigned long proto_cap = eth_proto_cap; + struct ptys2ethtool_config *table; + u32 max_size; int proto; - for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER) + mlx5e_ethtool_get_speed_arr(mdev, &table, &max_size); + for_each_set_bit(proto, &proto_cap, max_size) bitmap_or(supported_modes, supported_modes, - ptys2ethtool_table[proto].supported, + table[proto].supported, __ETHTOOL_LINK_MODE_MASK_NBITS); } -static void ptys2ethtool_adver_link(unsigned long *advertising_modes, +static void ptys2ethtool_adver_link(struct mlx5_core_dev *mdev, + unsigned long *advertising_modes, 
u32 eth_proto_cap) { unsigned long proto_cap = eth_proto_cap; + struct ptys2ethtool_config *table; + u32 max_size; int proto; - for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER) + mlx5e_ethtool_get_speed_arr(mdev, &table, &max_size); + for_each_set_bit(proto, &proto_cap, max_size) bitmap_or(advertising_modes, advertising_modes, - ptys2ethtool_table[proto].advertised, + table[proto].advertised, __ETHTOOL_LINK_MODE_MASK_NBITS); } @@ -695,13 +762,14 @@ static void get_speed_duplex(struct net_device *netdev, u32 eth_proto_oper, struct ethtool_link_ksettings *link_ksettings) { + struct mlx5e_priv *priv = netdev_priv(netdev); u32 speed = SPEED_UNKNOWN; u8 duplex = DUPLEX_UNKNOWN; if (!netif_carrier_ok(netdev)) goto out; - speed = mlx5e_port_ptys2speed(eth_proto_oper); + speed = mlx5e_port_ptys2speed(priv->mdev, eth_proto_oper); if (!speed) { speed = SPEED_UNKNOWN; goto out; @@ -714,22 +782,22 @@ out: link_ksettings->base.duplex = duplex; } -static void get_supported(u32 eth_proto_cap, +static void get_supported(struct mlx5_core_dev *mdev, u32 eth_proto_cap, struct ethtool_link_ksettings *link_ksettings) { unsigned long *supported = link_ksettings->link_modes.supported; + ptys2ethtool_supported_link(mdev, supported, eth_proto_cap); - ptys2ethtool_supported_link(supported, eth_proto_cap); ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause); } -static void get_advertising(u32 eth_proto_cap, u8 tx_pause, - u8 rx_pause, +static void get_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_cap, + u8 tx_pause, u8 rx_pause, struct ethtool_link_ksettings *link_ksettings) { unsigned long *advertising = link_ksettings->link_modes.advertising; + ptys2ethtool_adver_link(mdev, advertising, eth_proto_cap); - ptys2ethtool_adver_link(advertising, eth_proto_cap); if (rx_pause) ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause); if (tx_pause ^ rx_pause) @@ -779,12 +847,12 @@ static u8 get_connector_port(u32 eth_proto, u8 connector_type) return PORT_OTHER; } -static void get_lp_advertising(u32 eth_proto_lp, +static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp, struct ethtool_link_ksettings *link_ksettings) { unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising; - ptys2ethtool_adver_link(lp_advertising, eth_proto_lp); + ptys2ethtool_adver_link(mdev, lp_advertising, eth_proto_lp); } int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, @@ -801,6 +869,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, u8 an_disable_admin; u8 an_status; u8 connector_type; + bool ext; int err; err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1); @@ -809,22 +878,25 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, __func__, err); goto err_query_regs; } - - eth_proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability); - eth_proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin); - eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); - eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise); - an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin); - an_status = MLX5_GET(ptys_reg, out, an_status); - connector_type = MLX5_GET(ptys_reg, out, connector_type); + ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + eth_proto_cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, + eth_proto_capability); + eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, + eth_proto_admin); + eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, + eth_proto_oper); + eth_proto_lp = 
MLX5_GET(ptys_reg, out, eth_proto_lp_advertise); + an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin); + an_status = MLX5_GET(ptys_reg, out, an_status); + connector_type = MLX5_GET(ptys_reg, out, connector_type); mlx5_query_port_pause(mdev, &rx_pause, &tx_pause); ethtool_link_ksettings_zero_link_mode(link_ksettings, supported); ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); - get_supported(eth_proto_cap, link_ksettings); - get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings); + get_supported(mdev, eth_proto_cap, link_ksettings); + get_advertising(mdev, eth_proto_admin, tx_pause, rx_pause, link_ksettings); get_speed_duplex(priv->netdev, eth_proto_oper, link_ksettings); eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap; @@ -833,7 +905,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, connector_type); ptys2ethtool_supported_advertised_port(link_ksettings, eth_proto_admin, connector_type); - get_lp_advertising(eth_proto_lp, link_ksettings); + get_lp_advertising(mdev, eth_proto_lp, link_ksettings); if (an_status == MLX5_AN_COMPLETE) ethtool_link_ksettings_add_link_mode(link_ksettings, @@ -872,7 +944,9 @@ static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes) u32 i, ptys_modes = 0; for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { - if (bitmap_intersects(ptys2ethtool_table[i].advertised, + if (*ptys2legacy_ethtool_table[i].advertised == 0) + continue; + if (bitmap_intersects(ptys2legacy_ethtool_table[i].advertised, link_modes, __ETHTOOL_LINK_MODE_MASK_NBITS)) ptys_modes |= MLX5E_PROT_MASK(i); @@ -881,13 +955,34 @@ static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes) return ptys_modes; } +static u32 mlx5e_ethtool2ptys_ext_adver_link(const unsigned long *link_modes) +{ + u32 i, ptys_modes = 0; + unsigned long modes[2]; + + for (i = 0; i < MLX5E_EXT_LINK_MODES_NUMBER; ++i) { + if (*ptys2ext_ethtool_table[i].advertised == 0) + continue; + memset(modes, 0, sizeof(modes)); + bitmap_and(modes, ptys2ext_ethtool_table[i].advertised, + link_modes, __ETHTOOL_LINK_MODE_MASK_NBITS); + + if (modes[0] == ptys2ext_ethtool_table[i].advertised[0] && + modes[1] == ptys2ext_ethtool_table[i].advertised[1]) + ptys_modes |= MLX5E_PROT_MASK(i); + } + return ptys_modes; +} + int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, const struct ethtool_link_ksettings *link_ksettings) { struct mlx5_core_dev *mdev = priv->mdev; - u32 eth_proto_cap, eth_proto_admin; + struct mlx5e_port_eth_proto eproto; bool an_changes = false; u8 an_disable_admin; + bool ext_supported; + bool ext_requested; u8 an_disable_cap; bool an_disable; u32 link_modes; @@ -895,20 +990,33 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, u32 speed; int err; - speed = link_ksettings->base.speed; + u32 (*ethtool2ptys_adver_func)(const unsigned long *adver); - link_modes = link_ksettings->base.autoneg == AUTONEG_ENABLE ? 
- mlx5e_ethtool2ptys_adver_link(link_ksettings->link_modes.advertising) : - mlx5e_port_speed2linkmodes(speed); +#define MLX5E_PTYS_EXT ((1ULL << ETHTOOL_LINK_MODE_50000baseKR_Full_BIT) - 1) - err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN); + ext_requested = (link_ksettings->link_modes.advertising[0] > + MLX5E_PTYS_EXT); + ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + + /* when ptys_extended_ethernet is set, legacy link modes are deprecated */ + if (ext_requested != ext_supported) + return -EPROTONOSUPPORT; + + speed = link_ksettings->base.speed; + ethtool2ptys_adver_func = ext_requested ? + mlx5e_ethtool2ptys_ext_adver_link : + mlx5e_ethtool2ptys_adver_link; + err = mlx5_port_query_eth_proto(mdev, 1, ext_supported, &eproto); if (err) { - netdev_err(priv->netdev, "%s: query port eth proto cap failed: %d\n", + netdev_err(priv->netdev, "%s: query port eth proto failed: %d\n", __func__, err); goto out; } + link_modes = link_ksettings->base.autoneg == AUTONEG_ENABLE ? + ethtool2ptys_adver_func(link_ksettings->link_modes.advertising) : + mlx5e_port_speed2linkmodes(mdev, speed); - link_modes = link_modes & eth_proto_cap; + link_modes = link_modes & eproto.cap; if (!link_modes) { netdev_err(priv->netdev, "%s: Not supported link mode(s) requested", __func__); @@ -916,24 +1024,17 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, goto out; } - err = mlx5_query_port_proto_admin(mdev, &eth_proto_admin, MLX5_PTYS_EN); - if (err) { - netdev_err(priv->netdev, "%s: query port eth proto admin failed: %d\n", - __func__, err); - goto out; - } - - mlx5_query_port_autoneg(mdev, MLX5_PTYS_EN, &an_status, - &an_disable_cap, &an_disable_admin); + mlx5_port_query_eth_autoneg(mdev, &an_status, &an_disable_cap, + &an_disable_admin); an_disable = link_ksettings->base.autoneg == AUTONEG_DISABLE; an_changes = ((!an_disable && an_disable_admin) || (an_disable && !an_disable_admin)); - if (!an_changes && link_modes == eth_proto_admin) + if (!an_changes && link_modes == eproto.admin) goto out; - mlx5_set_port_ptys(mdev, an_disable, link_modes, MLX5_PTYS_EN); + mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_supported); mlx5_toggle_port_link(mdev); out:
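
The MLX5E_PTYS_EXT test above encodes a compact heuristic: every legacy advertising bit lives below ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, so any higher bit set in the first advertising word means the user requested extended modes. A userspace sketch of the same check; the bit value 52 is an assumption taken from the uapi ethtool.h of this era:

#include <stdbool.h>
#include <stdint.h>

#define EXT_MODES_START 52	/* ETHTOOL_LINK_MODE_50000baseKR_Full_BIT */

/* True if any advertising bit at or above EXT_MODES_START is set. */
static bool ext_modes_requested(uint64_t advertising0)
{
	const uint64_t legacy_mask = (1ULL << EXT_MODES_START) - 1;

	return advertising0 > legacy_mask;
}

@@ -1521,7 +1622,6 @@ static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable, struct mlx5e_channels new_channels = {}; bool mode_changed; u8 cq_period_mode, current_cq_period_mode; - int err = 0; cq_period_mode = enable ? MLX5_CQ_PERIOD_MODE_START_FROM_CQE : @@ -1549,12 +1649,7 @@ static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable, return 0; } - err = mlx5e_open_channels(priv, &new_channels); - if (err) - return err; - - mlx5e_switch_priv_channels(priv, &new_channels, NULL); - return 0; + return mlx5e_safe_switch_channels(priv, &new_channels, NULL); } static int set_pflag_tx_cqe_based_moder(struct net_device *netdev, bool enable) @@ -1587,11 +1682,10 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val return 0; } - err = mlx5e_open_channels(priv, &new_channels); + err = mlx5e_safe_switch_channels(priv, &new_channels, NULL); if (err) return err; - mlx5e_switch_priv_channels(priv, &new_channels, NULL); mlx5e_dbg(DRV, priv, "MLX5E: RxCqeCmprss was turned %s\n", MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS) ?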
"ON" : "OFF"); @@ -1624,7 +1718,6 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable) struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_channels new_channels = {}; - int err; if (enable) { if (!mlx5e_check_fragmented_striding_rq_cap(mdev)) @@ -1646,12 +1739,7 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable) return 0; } - err = mlx5e_open_channels(priv, &new_channels); - if (err) - return err; - - mlx5e_switch_priv_channels(priv, &new_channels, NULL); - return 0; + return mlx5e_safe_switch_channels(priv, &new_channels, NULL); } static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable) @@ -1694,12 +1782,8 @@ static int set_pflag_xdp_tx_mpwqe(struct net_device *netdev, bool enable) return 0; } - err = mlx5e_open_channels(priv, &new_channels); - if (err) - return err; - - mlx5e_switch_priv_channels(priv, &new_channels, NULL); - return 0; + err = mlx5e_safe_switch_channels(priv, &new_channels, NULL); + return err; } static const struct pflag_desc mlx5e_priv_flags[MLX5E_NUM_PFLAGS] = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 099d307e6f25..878b3467e459 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -51,6 +51,7 @@ #include "en/xdp.h" #include "lib/eq.h" #include "en/monitor_stats.h" +#include "en/reporter.h" struct mlx5e_rq_param { u32 rqc[MLX5_ST_SZ_DW(rqc)]; @@ -1159,7 +1160,7 @@ static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa) return 0; } -static void mlx5e_sq_recover(struct work_struct *work); +static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work); static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, int txq_ix, struct mlx5e_params *params, @@ -1181,7 +1182,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, sq->uar_map = mdev->mlx5e_res.bfreg.map; sq->min_inline_mode = params->tx_min_inline_mode; sq->stats = &c->priv->channel_stats[c->ix].sq[tc]; - INIT_WORK(&sq->recover.recover_work, mlx5e_sq_recover); + INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work); if (MLX5_IPSEC_DEV(c->priv->mdev)) set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state); if (mlx5_accel_is_tls_device(c->priv->mdev)) @@ -1269,15 +1270,8 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev, return err; } -struct mlx5e_modify_sq_param { - int curr_state; - int next_state; - bool rl_update; - int rl_index; -}; - -static int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn, - struct mlx5e_modify_sq_param *p) +int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn, + struct mlx5e_modify_sq_param *p) { void *in; void *sqc; @@ -1375,17 +1369,7 @@ err_free_txqsq: return err; } -static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq) -{ - WARN_ONCE(sq->cc != sq->pc, - "SQ 0x%x: cc (0x%x) != pc (0x%x)\n", - sq->sqn, sq->cc, sq->pc); - sq->cc = 0; - sq->dma_fifo_cc = 0; - sq->pc = 0; -} - -static void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq) +void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq) { sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix); clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state); @@ -1394,7 +1378,7 @@ static void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq) netif_tx_start_queue(sq->txq); } -static inline void netif_tx_disable_queue(struct netdev_queue *txq) +void mlx5e_tx_disable_queue(struct netdev_queue *txq) { __netif_tx_lock_bh(txq); netif_tx_stop_queue(txq); @@ -1410,7 +1394,7 
@@ static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq) /* prevent netif_tx_wake_queue */ napi_synchronize(&c->napi); - netif_tx_disable_queue(sq->txq); + mlx5e_tx_disable_queue(sq->txq); /* last doorbell out, godspeed .. */ if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) { @@ -1430,6 +1414,7 @@ static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq) struct mlx5_rate_limit rl = {0}; cancel_work_sync(&sq->dim.work); + cancel_work_sync(&sq->recover_work); mlx5e_destroy_sq(mdev, sq->sqn); if (sq->rate_limit) { rl.rate = sq->rate_limit; @@ -1439,105 +1424,12 @@ static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq) mlx5e_free_txqsq(sq); } -static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq) +static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work) { - unsigned long exp_time = jiffies + msecs_to_jiffies(2000); - - while (time_before(jiffies, exp_time)) { - if (sq->cc == sq->pc) - return 0; - - msleep(20); - } - - netdev_err(sq->channel->netdev, - "Wait for SQ 0x%x flush timeout (sq cc = 0x%x, sq pc = 0x%x)\n", - sq->sqn, sq->cc, sq->pc); + struct mlx5e_txqsq *sq = container_of(recover_work, struct mlx5e_txqsq, + recover_work); - return -ETIMEDOUT; -} - -static int mlx5e_sq_to_ready(struct mlx5e_txqsq *sq, int curr_state) -{ - struct mlx5_core_dev *mdev = sq->channel->mdev; - struct net_device *dev = sq->channel->netdev; - struct mlx5e_modify_sq_param msp = {0}; - int err; - - msp.curr_state = curr_state; - msp.next_state = MLX5_SQC_STATE_RST; - - err = mlx5e_modify_sq(mdev, sq->sqn, &msp); - if (err) { - netdev_err(dev, "Failed to move sq 0x%x to reset\n", sq->sqn); - return err; - } - - memset(&msp, 0, sizeof(msp)); - msp.curr_state = MLX5_SQC_STATE_RST; - msp.next_state = MLX5_SQC_STATE_RDY; - - err = mlx5e_modify_sq(mdev, sq->sqn, &msp); - if (err) { - netdev_err(dev, "Failed to move sq 0x%x to ready\n", sq->sqn); - return err; - } - - return 0; -} - -static void mlx5e_sq_recover(struct work_struct *work) -{ - struct mlx5e_txqsq_recover *recover = - container_of(work, struct mlx5e_txqsq_recover, - recover_work); - struct mlx5e_txqsq *sq = container_of(recover, struct mlx5e_txqsq, - recover); - struct mlx5_core_dev *mdev = sq->channel->mdev; - struct net_device *dev = sq->channel->netdev; - u8 state; - int err; - - err = mlx5_core_query_sq_state(mdev, sq->sqn, &state); - if (err) { - netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n", - sq->sqn, err); - return; - } - - if (state != MLX5_RQC_STATE_ERR) { - netdev_err(dev, "SQ 0x%x not in ERROR state\n", sq->sqn); - return; - } - - netif_tx_disable_queue(sq->txq); - - if (mlx5e_wait_for_sq_flush(sq)) - return; - - /* If the interval between two consecutive recovers per SQ is too - * short, don't recover to avoid infinite loop of ERR_CQE -> recover. - * If we reached this state, there is probably a bug that needs to be - * fixed. let's keep the queue close and let tx timeout cleanup. - */ - if (jiffies_to_msecs(jiffies - recover->last_recover) < - MLX5E_SQ_RECOVER_MIN_INTERVAL) { - netdev_err(dev, "Recover SQ 0x%x canceled, too many error CQEs\n", - sq->sqn); - return; - } - - /* At this point, no new packets will arrive from the stack as TXQ is - * marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI cleared all - * pending WQEs. SQ can safely reset the SQ. 
- */ - if (mlx5e_sq_to_ready(sq, state)) - return; - - mlx5e_reset_txqsq_cc_pc(sq); - sq->stats->recover++; - recover->last_recover = jiffies; - mlx5e_activate_txqsq(sq); + mlx5e_tx_reporter_err_cqe(sq); } static int mlx5e_open_icosq(struct mlx5e_channel *c, @@ -2967,6 +2859,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) mlx5e_build_tx2sq_maps(priv); mlx5e_activate_channels(&priv->channels); + mlx5e_xdp_tx_enable(priv); netif_tx_start_all_queues(priv->netdev); if (mlx5e_is_vport_rep(priv)) @@ -2988,16 +2881,18 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv) */ netif_tx_stop_all_queues(priv->netdev); netif_tx_disable(priv->netdev); + mlx5e_xdp_tx_disable(priv); mlx5e_deactivate_channels(&priv->channels); } -void mlx5e_switch_priv_channels(struct mlx5e_priv *priv, - struct mlx5e_channels *new_chs, - mlx5e_fp_hw_modify hw_modify) +static void mlx5e_switch_priv_channels(struct mlx5e_priv *priv, + struct mlx5e_channels *new_chs, + mlx5e_fp_hw_modify hw_modify) { struct net_device *netdev = priv->netdev; int new_num_txqs; int carrier_ok; + new_num_txqs = new_chs->num * new_chs->params.num_tc; carrier_ok = netif_carrier_ok(netdev); @@ -3023,6 +2918,20 @@ void mlx5e_switch_priv_channels(struct mlx5e_priv *priv, netif_carrier_on(netdev); } +int mlx5e_safe_switch_channels(struct mlx5e_priv *priv, + struct mlx5e_channels *new_chs, + mlx5e_fp_hw_modify hw_modify) +{ + int err; + + err = mlx5e_open_channels(priv, new_chs); + if (err) + return err; + + mlx5e_switch_priv_channels(priv, new_chs, hw_modify); + return 0; +} + void mlx5e_timestamp_init(struct mlx5e_priv *priv) { priv->tstamp.tx_type = HWTSTAMP_TX_OFF; @@ -3236,6 +3145,7 @@ static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv) { int tc; + mlx5e_tx_reporter_destroy(priv); for (tc = 0; tc < priv->profile->max_tc; tc++) mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]); } @@ -3438,13 +3348,12 @@ static int mlx5e_setup_tc_mqprio(struct net_device *netdev, goto out; } - err = mlx5e_open_channels(priv, &new_channels); + err = mlx5e_safe_switch_channels(priv, &new_channels, NULL); if (err) goto out; priv->max_opened_tc = max_t(u8, priv->max_opened_tc, new_channels.params.num_tc); - mlx5e_switch_priv_channels(priv, &new_channels, NULL); out: mutex_unlock(&priv->state_lock); return err; @@ -3654,11 +3563,7 @@ static int set_feature_lro(struct net_device *netdev, bool enable) goto out; } - err = mlx5e_open_channels(priv, &new_channels); - if (err) - goto out; - - mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_modify_tirs_lro); + err = mlx5e_safe_switch_channels(priv, &new_channels, mlx5e_modify_tirs_lro); out: mutex_unlock(&priv->state_lock); return err; @@ -3876,11 +3781,10 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, goto out; } - err = mlx5e_open_channels(priv, &new_channels); + err = mlx5e_safe_switch_channels(priv, &new_channels, set_mtu_cb); if (err) goto out; - mlx5e_switch_priv_channels(priv, &new_channels, set_mtu_cb); netdev->mtu = new_channels.params.sw_mtu; out: @@ -4223,31 +4127,13 @@ netdev_features_t mlx5e_features_check(struct sk_buff *skb, return features; } -static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev, - struct mlx5e_txqsq *sq) -{ - struct mlx5_eq_comp *eq = sq->cq.mcq.eq; - u32 eqe_count; - - netdev_err(dev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n", - eq->core.eqn, eq->core.cons_index, eq->core.irqn); - - eqe_count = mlx5_eq_poll_irq_disabled(eq); - if (!eqe_count) - return false; - - netdev_err(dev, "Recover %d eqes on EQ 0x%x\n", eqe_count, eq->core.eqn); - 
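
mlx5e_safe_switch_channels(), introduced above, names a make-before-break idiom this series applies to every reconfiguration path: open the replacement channels first, and only if that succeeds tear down the running ones, so a failed ethtool or MTU change leaves traffic flowing on the old configuration. A generic sketch of the shape, with all names hypothetical:

struct resource;

/* Swap *live for a freshly opened resource; on error *live survives. */
static int safe_swap(struct resource **live,
		     int (*open_new)(struct resource **out),
		     void (*close_old)(struct resource *old))
{
	struct resource *next;
	int err = open_new(&next);

	if (err)
		return err;	/* old resource still intact and serving */

	close_old(*live);
	*live = next;
	return 0;
}

The mlx5e_xdp_tx_enable()/mlx5e_xdp_tx_disable() calls added to the activate/deactivate paths follow a related discipline: the disable side clears the state bit and then waits out concurrent NAPI readers before teardown continues.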
sq->channel->stats->eq_rearm++; - return true; -} - static void mlx5e_tx_timeout_work(struct work_struct *work) { struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, tx_timeout_work); - struct net_device *dev = priv->netdev; - bool reopen_channels = false; - int i, err; + bool report_failed = false; + int err; + int i; rtnl_lock(); mutex_lock(&priv->state_lock); @@ -4256,31 +4142,22 @@ static void mlx5e_tx_timeout_work(struct work_struct *work) goto unlock; for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) { - struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, i); + struct netdev_queue *dev_queue = + netdev_get_tx_queue(priv->netdev, i); struct mlx5e_txqsq *sq = priv->txq2sq[i]; if (!netif_xmit_stopped(dev_queue)) continue; - netdev_err(dev, - "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n", - i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc, - jiffies_to_usecs(jiffies - dev_queue->trans_start)); - - /* If we recover a lost interrupt, most likely TX timeout will - * be resolved, skip reopening channels - */ - if (!mlx5e_tx_timeout_eq_recover(dev, sq)) { - clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); - reopen_channels = true; - } + if (mlx5e_tx_reporter_timeout(sq)) + report_failed = true; } - if (!reopen_channels) + if (!report_failed) goto unlock; - mlx5e_close_locked(dev); - err = mlx5e_open_locked(dev); + mlx5e_close_locked(priv->netdev); + err = mlx5e_open_locked(priv->netdev); if (err) netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n", @@ -4296,6 +4173,12 @@ static void mlx5e_tx_timeout(struct net_device *dev) struct mlx5e_priv *priv = netdev_priv(dev); netdev_err(dev, "TX timeout detected\n"); + + if (IS_ERR_OR_NULL(priv->tx_reporter)) { + netdev_err_once(priv->netdev, "tx timeout will not be handled, no valid tx reporter\n"); + return; + } + queue_work(priv->wq, &priv->tx_timeout_work); } @@ -4953,6 +4836,7 @@ static int mlx5e_init_nic_tx(struct mlx5e_priv *priv) #ifdef CONFIG_MLX5_CORE_EN_DCB mlx5e_dcbnl_initialize(priv); #endif + mlx5e_tx_reporter_create(priv); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 5d2e0c2f6624..287d48e5b073 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -153,7 +153,7 @@ static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv) struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_eswitch_rep *rep = rpriv->rep; - if (rep->vport == FDB_UPLINK_VPORT) + if (rep->vport == MLX5_VPORT_UPLINK) mlx5e_uplink_rep_update_hw_counters(priv); else mlx5e_vf_rep_update_hw_counters(priv); @@ -381,7 +381,8 @@ static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = { .set_pauseparam = mlx5e_uplink_rep_set_pauseparam, }; -static int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr) +static int mlx5e_rep_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) { struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; @@ -398,20 +399,14 @@ static int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr) uplink_priv = netdev_priv(uplink_dev); } - switch (attr->id) { - case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: - attr->u.ppid.id_len = ETH_ALEN; - if (uplink_upper && mlx5_lag_is_sriov(uplink_priv->mdev)) { - ether_addr_copy(attr->u.ppid.id, uplink_upper->dev_addr); - } else { - struct 
mlx5e_rep_priv *rpriv = priv->ppriv; - struct mlx5_eswitch_rep *rep = rpriv->rep; + ppid->id_len = ETH_ALEN; + if (uplink_upper && mlx5_lag_is_sriov(uplink_priv->mdev)) { + ether_addr_copy(ppid->id, uplink_upper->dev_addr); + } else { + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5_eswitch_rep *rep = rpriv->rep; - ether_addr_copy(attr->u.ppid.id, rep->hw_id); - } - break; - default: - return -EOPNOTSUPP; + ether_addr_copy(ppid->id, rep->hw_id); } return 0; @@ -584,6 +579,10 @@ static void mlx5e_rep_update_flows(struct mlx5e_priv *priv, if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) { ether_addr_copy(e->h_dest, ha); ether_addr_copy(eth->h_dest, ha); + /* Update the encap source mac, in case that we delete + * the flows when encap source mac changed. + */ + ether_addr_copy(eth->h_source, e->route_dev->dev_addr); mlx5e_tc_encap_flows_add(priv, e); } @@ -1084,7 +1083,8 @@ static int mlx5e_vf_rep_open(struct net_device *dev) if (!mlx5_modify_vport_admin_state(priv->mdev, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT, - rep->vport, MLX5_VPORT_ADMIN_STATE_UP)) + rep->vport, 1, + MLX5_VPORT_ADMIN_STATE_UP)) netif_carrier_on(dev); unlock: @@ -1102,7 +1102,8 @@ static int mlx5e_vf_rep_close(struct net_device *dev) mutex_lock(&priv->state_lock); mlx5_modify_vport_admin_state(priv->mdev, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT, - rep->vport, MLX5_VPORT_ADMIN_STATE_DOWN); + rep->vport, 1, + MLX5_VPORT_ADMIN_STATE_DOWN); ret = mlx5e_close_locked(dev); mutex_unlock(&priv->state_lock); return ret; @@ -1120,7 +1121,7 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev, if (ret) return ret; - if (rep->vport == FDB_UPLINK_VPORT) + if (rep->vport == MLX5_VPORT_UPLINK) ret = snprintf(buf, len, "p%d", pf_num); else ret = snprintf(buf, len, "pf%dvf%d", pf_num, rep->vport - 1); @@ -1207,7 +1208,7 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) return false; rep = rpriv->rep; - return (rep->vport == FDB_UPLINK_VPORT); + return (rep->vport == MLX5_VPORT_UPLINK); } static bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id) @@ -1284,10 +1285,6 @@ static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan return 0; } -static const struct switchdev_ops mlx5e_rep_switchdev_ops = { - .switchdev_port_attr_get = mlx5e_attr_get, -}; - static const struct net_device_ops mlx5e_netdev_ops_vf_rep = { .ndo_open = mlx5e_vf_rep_open, .ndo_stop = mlx5e_vf_rep_close, @@ -1298,6 +1295,7 @@ static const struct net_device_ops mlx5e_netdev_ops_vf_rep = { .ndo_has_offload_stats = mlx5e_rep_has_offload_stats, .ndo_get_offload_stats = mlx5e_rep_get_offload_stats, .ndo_change_mtu = mlx5e_vf_rep_change_mtu, + .ndo_get_port_parent_id = mlx5e_rep_get_port_parent_id, }; static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = { @@ -1319,6 +1317,7 @@ static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = { .ndo_get_vf_config = mlx5e_get_vf_config, .ndo_get_vf_stats = mlx5e_get_vf_stats, .ndo_set_vf_vlan = mlx5e_uplink_rep_set_vf_vlan, + .ndo_get_port_parent_id = mlx5e_rep_get_port_parent_id, }; bool mlx5e_eswitch_rep(struct net_device *netdev) @@ -1347,7 +1346,7 @@ static void mlx5e_build_rep_params(struct net_device *netdev) params->sw_mtu = netdev->mtu; /* SQ */ - if (rep->vport == FDB_UPLINK_VPORT) + if (rep->vport == MLX5_VPORT_UPLINK) params->log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; else params->log_sq_size = MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE; @@ -1374,7 +1373,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev) struct 
mlx5_eswitch_rep *rep = rpriv->rep; struct mlx5_core_dev *mdev = priv->mdev; - if (rep->vport == FDB_UPLINK_VPORT) { + if (rep->vport == MLX5_VPORT_UPLINK) { SET_NETDEV_DEV(netdev, &priv->mdev->pdev->dev); netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep; /* we want a persistent mac for the uplink rep */ @@ -1393,8 +1392,6 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev) netdev->watchdog_timeo = 15 * HZ; - netdev->switchdev_ops = &mlx5e_rep_switchdev_ops; - netdev->features |= NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL; netdev->hw_features |= NETIF_F_HW_TC; @@ -1406,7 +1403,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev) netdev->hw_features |= NETIF_F_TSO6; netdev->hw_features |= NETIF_F_RXCSUM; - if (rep->vport != FDB_UPLINK_VPORT) + if (rep->vport != MLX5_VPORT_UPLINK) netdev->features |= NETIF_F_VLAN_CHALLENGED; netdev->features |= netdev->hw_features; @@ -1559,7 +1556,7 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv) return err; } - if (rpriv->rep->vport == FDB_UPLINK_VPORT) { + if (rpriv->rep->vport == MLX5_VPORT_UPLINK) { uplink_priv = &rpriv->uplink_priv; /* init shared tc flow table */ @@ -1595,7 +1592,7 @@ static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv) for (tc = 0; tc < priv->profile->max_tc; tc++) mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]); - if (rpriv->rep->vport == FDB_UPLINK_VPORT) { + if (rpriv->rep->vport == MLX5_VPORT_UPLINK) { /* clean indirect TC block notifications */ unregister_netdevice_notifier(&rpriv->uplink_priv.netdevice_nb); mlx5e_rep_indr_clean_block_privs(rpriv); @@ -1714,7 +1711,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) rpriv->rep = rep; nch = mlx5e_get_max_num_channels(dev); - profile = (rep->vport == FDB_UPLINK_VPORT) ? &mlx5e_uplink_rep_profile : &mlx5e_vf_rep_profile; + profile = (rep->vport == MLX5_VPORT_UPLINK) ? 
&mlx5e_uplink_rep_profile : &mlx5e_vf_rep_profile; netdev = mlx5e_create_netdev(dev, profile, nch, rpriv); if (!netdev) { pr_warn("Failed to create representor netdev for vport %d\n", @@ -1727,7 +1724,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) rep->rep_if[REP_ETH].priv = rpriv; INIT_LIST_HEAD(&rpriv->vport_sqs_list); - if (rep->vport == FDB_UPLINK_VPORT) { + if (rep->vport == MLX5_VPORT_UPLINK) { err = mlx5e_create_mdev_resources(dev); if (err) goto err_destroy_netdev; @@ -1763,7 +1760,7 @@ err_detach_netdev: mlx5e_detach_netdev(netdev_priv(netdev)); err_destroy_mdev_resources: - if (rep->vport == FDB_UPLINK_VPORT) + if (rep->vport == MLX5_VPORT_UPLINK) mlx5e_destroy_mdev_resources(dev); err_destroy_netdev: @@ -1783,7 +1780,7 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep) unregister_netdev(netdev); mlx5e_rep_neigh_cleanup(rpriv); mlx5e_detach_netdev(priv); - if (rep->vport == FDB_UPLINK_VPORT) + if (rep->vport == MLX5_VPORT_UPLINK) mlx5e_destroy_mdev_resources(priv->mdev); mlx5e_destroy_netdev(priv); kfree(ppriv); /* mlx5e_rep_priv */ @@ -1801,25 +1798,18 @@ static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep) void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev) { struct mlx5_eswitch *esw = mdev->priv.eswitch; - int total_vfs = MLX5_TOTAL_VPORTS(mdev); - int vport; + struct mlx5_eswitch_rep_if rep_if = {}; - for (vport = 0; vport < total_vfs; vport++) { - struct mlx5_eswitch_rep_if rep_if = {}; + rep_if.load = mlx5e_vport_rep_load; + rep_if.unload = mlx5e_vport_rep_unload; + rep_if.get_proto_dev = mlx5e_vport_rep_get_proto_dev; - rep_if.load = mlx5e_vport_rep_load; - rep_if.unload = mlx5e_vport_rep_unload; - rep_if.get_proto_dev = mlx5e_vport_rep_get_proto_dev; - mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_ETH); - } + mlx5_eswitch_register_vport_reps(esw, &rep_if, REP_ETH); } void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev) { struct mlx5_eswitch *esw = mdev->priv.eswitch; - int total_vfs = MLX5_TOTAL_VPORTS(mdev); - int vport; - for (vport = total_vfs - 1; vport >= 0; vport--) - mlx5_eswitch_unregister_vport_rep(esw, vport, REP_ETH); + mlx5_eswitch_unregister_vport_reps(esw, REP_ETH); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h index edd722824697..36eafc877e6b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h @@ -148,6 +148,7 @@ struct mlx5e_encap_entry { unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ struct net_device *out_dev; + struct net_device *route_dev; int tunnel_type; int tunnel_hlen; int reformat_type; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 74159d39dd66..b38986e18dd7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -38,7 +38,6 @@ #include <linux/mlx5/fs.h> #include <linux/mlx5/device.h> #include <linux/rhashtable.h> -#include <net/switchdev.h> #include <net/tc_act/tc_mirred.h> #include <net/tc_act/tc_vlan.h> #include <net/tc_act/tc_tunnel_key.h> @@ -128,6 +127,7 @@ struct mlx5e_tc_flow_parse_attr { struct net_device *filter_dev; struct mlx5_flow_spec spec; int num_mod_hdr_actions; + int max_mod_hdr_actions; void *mod_hdr_actions; int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS]; }; @@ -929,13 +929,13 @@ mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw, static int 
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, - struct mlx5e_tc_flow_parse_attr *parse_attr, struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; u32 max_chain = mlx5_eswitch_get_chain_range(esw); struct mlx5_esw_flow_attr *attr = flow->esw_attr; + struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr; u16 max_prio = mlx5_eswitch_get_prio_range(esw); struct net_device *out_dev, *encap_dev = NULL; struct mlx5_fc *counter = NULL; @@ -967,7 +967,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, if (!(attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)) continue; - mirred_ifindex = attr->parse_attr->mirred_ifindex[out_index]; + mirred_ifindex = parse_attr->mirred_ifindex[out_index]; out_dev = __dev_get_by_index(dev_net(priv->netdev), mirred_ifindex); err = mlx5e_attach_encap(priv, @@ -1302,101 +1302,89 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, static int parse_tunnel_attr(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, struct tc_cls_flower_offload *f, - struct net_device *filter_dev) + struct net_device *filter_dev, u8 *match_level) { struct netlink_ext_ack *extack = f->common.extack; void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers); void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers); - - struct flow_dissector_key_control *enc_control = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ENC_CONTROL, - f->key); - int err = 0; + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f); + struct flow_match_control enc_control; + int err; err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f, - headers_c, headers_v); + headers_c, headers_v, match_level); if (err) { NL_SET_ERR_MSG_MOD(extack, "failed to parse tunnel attributes"); return err; } - if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { - struct flow_dissector_key_ipv4_addrs *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, - f->key); - struct flow_dissector_key_ipv4_addrs *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, - f->mask); + flow_rule_match_enc_control(rule, &enc_control); + + if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { + struct flow_match_ipv4_addrs match; + + flow_rule_match_enc_ipv4_addrs(rule, &match); MLX5_SET(fte_match_set_lyr_2_4, headers_c, src_ipv4_src_ipv6.ipv4_layout.ipv4, - ntohl(mask->src)); + ntohl(match.mask->src)); MLX5_SET(fte_match_set_lyr_2_4, headers_v, src_ipv4_src_ipv6.ipv4_layout.ipv4, - ntohl(key->src)); + ntohl(match.key->src)); MLX5_SET(fte_match_set_lyr_2_4, headers_c, dst_ipv4_dst_ipv6.ipv4_layout.ipv4, - ntohl(mask->dst)); + ntohl(match.mask->dst)); MLX5_SET(fte_match_set_lyr_2_4, headers_v, dst_ipv4_dst_ipv6.ipv4_layout.ipv4, - ntohl(key->dst)); + ntohl(match.key->dst)); MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP); - } else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { - struct flow_dissector_key_ipv6_addrs *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, - f->key); - struct flow_dissector_key_ipv6_addrs *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, - f->mask); + } else if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { + struct flow_match_ipv6_addrs match; + flow_rule_match_enc_ipv6_addrs(rule, &match); 
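
Every MLX5_SET(headers_c)/MLX5_SET(headers_v) couple in these parsers writes a mask into the match criteria and a value into the match value; hardware then compares packet fields only where criteria bits are set. A toy userspace illustration of that pairing (layout and names invented, not the real fte_match_param format):

#include <stdbool.h>
#include <stdint.h>

/* One field of a toy match entry: criteria (mask) plus expected value. */
struct toy_field_match {
	uint32_t mask;	/* what headers_c carries: which bits must match */
	uint32_t value;	/* what headers_v carries: the bits to equal */
};

static bool toy_match(const struct toy_field_match *m, uint32_t field)
{
	return (field & m->mask) == (m->value & m->mask);
}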
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, src_ipv4_src_ipv6.ipv6_layout.ipv6), - &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); + &match.mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, src_ipv4_src_ipv6.ipv6_layout.ipv6), - &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); + &match.key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, dst_ipv4_dst_ipv6.ipv6_layout.ipv6), - &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); + &match.mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dst_ipv4_dst_ipv6.ipv6_layout.ipv6), - &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); + &match.key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6); } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IP)) { - struct flow_dissector_key_ip *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ENC_IP, - f->key); - struct flow_dissector_key_ip *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ENC_IP, - f->mask); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) { + struct flow_match_ip match; - MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3); + flow_rule_match_enc_ip(rule, &match); + MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, + match.mask->tos & 0x3); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, + match.key->tos & 0x3); - MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos >> 2); + MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, + match.mask->tos >> 2); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, + match.key->tos >> 2); - MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl); + MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, + match.mask->ttl); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, + match.key->ttl); - if (mask->ttl && + if (match.mask->ttl && !MLX5_CAP_ESW_FLOWTABLE_FDB (priv->mdev, ft_field_support.outer_ipv4_ttl)) { @@ -1426,7 +1414,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, struct tc_cls_flower_offload *f, struct net_device *filter_dev, - u8 *match_level) + u8 *match_level, u8 *tunnel_match_level) { struct netlink_ext_ack *extack = f->common.extack; void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, @@ -1437,12 +1425,14 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, misc_parameters); void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f); + struct flow_dissector *dissector = rule->match.dissector; u16 addr_type = 0; u8 ip_proto = 0; *match_level = MLX5_MATCH_NONE; - if (f->dissector->used_keys & + if (dissector->used_keys & ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | BIT(FLOW_DISSECTOR_KEY_BASIC) | BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | @@ -1461,23 +1451,21 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, BIT(FLOW_DISSECTOR_KEY_ENC_IP))) { NL_SET_ERR_MSG_MOD(extack, "Unsupported key"); netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n", - f->dissector->used_keys); + 
dissector->used_keys); return -EOPNOTSUPP; } - if ((dissector_uses_key(f->dissector, - FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) || - dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) || - dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) && - dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) { - struct flow_dissector_key_control *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ENC_CONTROL, - f->key); - switch (key->addr_type) { + if ((flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) || + flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID) || + flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) && + flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) { + struct flow_match_control match; + + flow_rule_match_enc_control(rule, &match); + switch (match.key->addr_type) { case FLOW_DISSECTOR_KEY_IPV4_ADDRS: case FLOW_DISSECTOR_KEY_IPV6_ADDRS: - if (parse_tunnel_attr(priv, spec, f, filter_dev)) + if (parse_tunnel_attr(priv, spec, f, filter_dev, tunnel_match_level)) return -EOPNOTSUPP; break; default: @@ -1493,35 +1481,27 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, inner_headers); } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { - struct flow_dissector_key_basic *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_BASIC, - f->key); - struct flow_dissector_key_basic *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_BASIC, - f->mask); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; + + flow_rule_match_basic(rule, &match); MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype, - ntohs(mask->n_proto)); + ntohs(match.mask->n_proto)); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, - ntohs(key->n_proto)); + ntohs(match.key->n_proto)); - if (mask->n_proto) + if (match.mask->n_proto) *match_level = MLX5_MATCH_L2; } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) { - struct flow_dissector_key_vlan *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_VLAN, - f->key); - struct flow_dissector_key_vlan *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_VLAN, - f->mask); - if (mask->vlan_id || mask->vlan_priority || mask->vlan_tpid) { - if (key->vlan_tpid == htons(ETH_P_8021AD)) { + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; + + flow_rule_match_vlan(rule, &match); + if (match.mask->vlan_id || + match.mask->vlan_priority || + match.mask->vlan_tpid) { + if (match.key->vlan_tpid == htons(ETH_P_8021AD)) { MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1); MLX5_SET(fte_match_set_lyr_2_4, headers_v, @@ -1533,11 +1513,15 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, cvlan_tag, 1); } - MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id); + MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, + match.mask->vlan_id); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, + match.key->vlan_id); - MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority); + MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, + match.mask->vlan_priority); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, + match.key->vlan_priority); *match_level = MLX5_MATCH_L2; } @@ -1547,17 +1531,14 @@ static int __parse_cls_flower(struct 
mlx5e_priv *priv, *match_level = MLX5_MATCH_L2; } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) { - struct flow_dissector_key_vlan *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_CVLAN, - f->key); - struct flow_dissector_key_vlan *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_CVLAN, - f->mask); - if (mask->vlan_id || mask->vlan_priority || mask->vlan_tpid) { - if (key->vlan_tpid == htons(ETH_P_8021AD)) { + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) { + struct flow_match_vlan match; + + flow_rule_match_vlan(rule, &match); + if (match.mask->vlan_id || + match.mask->vlan_priority || + match.mask->vlan_tpid) { + if (match.key->vlan_tpid == htons(ETH_P_8021AD)) { MLX5_SET(fte_match_set_misc, misc_c, outer_second_svlan_tag, 1); MLX5_SET(fte_match_set_misc, misc_v, @@ -1570,69 +1551,58 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, } MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid, - mask->vlan_id); + match.mask->vlan_id); MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid, - key->vlan_id); + match.key->vlan_id); MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio, - mask->vlan_priority); + match.mask->vlan_priority); MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio, - key->vlan_priority); + match.key->vlan_priority); *match_level = MLX5_MATCH_L2; } } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { - struct flow_dissector_key_eth_addrs *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ETH_ADDRS, - f->key); - struct flow_dissector_key_eth_addrs *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ETH_ADDRS, - f->mask); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; + flow_rule_match_eth_addrs(rule, &match); ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, dmac_47_16), - mask->dst); + match.mask->dst); ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16), - key->dst); + match.key->dst); ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, smac_47_16), - mask->src); + match.mask->src); ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16), - key->src); + match.key->src); - if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst)) + if (!is_zero_ether_addr(match.mask->src) || + !is_zero_ether_addr(match.mask->dst)) *match_level = MLX5_MATCH_L2; } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { - struct flow_dissector_key_control *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_CONTROL, - f->key); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_match_control match; - struct flow_dissector_key_control *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_CONTROL, - f->mask); - addr_type = key->addr_type; + flow_rule_match_control(rule, &match); + addr_type = match.key->addr_type; /* the HW doesn't support frag first/later */ - if (mask->flags & FLOW_DIS_FIRST_FRAG) + if (match.mask->flags & FLOW_DIS_FIRST_FRAG) return -EOPNOTSUPP; - if (mask->flags & FLOW_DIS_IS_FRAGMENT) { + if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) { MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1); MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, - key->flags & FLOW_DIS_IS_FRAGMENT); + match.key->flags & FLOW_DIS_IS_FRAGMENT); /* the HW doesn't need L3 inline to match on frag=no */ - if (!(key->flags & FLOW_DIS_IS_FRAGMENT)) + if 
(!(match.key->flags & FLOW_DIS_IS_FRAGMENT)) *match_level = MLX5_MATCH_L2; /* *** L2 attributes parsing up to here *** */ else @@ -1640,102 +1610,85 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, } } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { - struct flow_dissector_key_basic *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_BASIC, - f->key); - struct flow_dissector_key_basic *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_BASIC, - f->mask); - ip_proto = key->ip_proto; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; + + flow_rule_match_basic(rule, &match); + ip_proto = match.key->ip_proto; MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol, - mask->ip_proto); + match.mask->ip_proto); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, - key->ip_proto); + match.key->ip_proto); - if (mask->ip_proto) + if (match.mask->ip_proto) *match_level = MLX5_MATCH_L3; } if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { - struct flow_dissector_key_ipv4_addrs *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV4_ADDRS, - f->key); - struct flow_dissector_key_ipv4_addrs *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV4_ADDRS, - f->mask); + struct flow_match_ipv4_addrs match; + flow_rule_match_ipv4_addrs(rule, &match); memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, src_ipv4_src_ipv6.ipv4_layout.ipv4), - &mask->src, sizeof(mask->src)); + &match.mask->src, sizeof(match.mask->src)); memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, src_ipv4_src_ipv6.ipv4_layout.ipv4), - &key->src, sizeof(key->src)); + &match.key->src, sizeof(match.key->src)); memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, dst_ipv4_dst_ipv6.ipv4_layout.ipv4), - &mask->dst, sizeof(mask->dst)); + &match.mask->dst, sizeof(match.mask->dst)); memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dst_ipv4_dst_ipv6.ipv4_layout.ipv4), - &key->dst, sizeof(key->dst)); + &match.key->dst, sizeof(match.key->dst)); - if (mask->src || mask->dst) + if (match.mask->src || match.mask->dst) *match_level = MLX5_MATCH_L3; } if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { - struct flow_dissector_key_ipv6_addrs *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV6_ADDRS, - f->key); - struct flow_dissector_key_ipv6_addrs *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV6_ADDRS, - f->mask); + struct flow_match_ipv6_addrs match; + flow_rule_match_ipv6_addrs(rule, &match); memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, src_ipv4_src_ipv6.ipv6_layout.ipv6), - &mask->src, sizeof(mask->src)); + &match.mask->src, sizeof(match.mask->src)); memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, src_ipv4_src_ipv6.ipv6_layout.ipv6), - &key->src, sizeof(key->src)); + &match.key->src, sizeof(match.key->src)); memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, dst_ipv4_dst_ipv6.ipv6_layout.ipv6), - &mask->dst, sizeof(mask->dst)); + &match.mask->dst, sizeof(match.mask->dst)); memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dst_ipv4_dst_ipv6.ipv6_layout.ipv6), - &key->dst, sizeof(key->dst)); + &match.key->dst, sizeof(match.key->dst)); - if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY || - ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY) + if (ipv6_addr_type(&match.mask->src) != IPV6_ADDR_ANY || + ipv6_addr_type(&match.mask->dst) != IPV6_ADDR_ANY) *match_level = MLX5_MATCH_L3; } - if (dissector_uses_key(f->dissector, 
FLOW_DISSECTOR_KEY_IP)) { - struct flow_dissector_key_ip *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IP, - f->key); - struct flow_dissector_key_ip *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IP, - f->mask); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { + struct flow_match_ip match; - MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3); + flow_rule_match_ip(rule, &match); + MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, + match.mask->tos & 0x3); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, + match.key->tos & 0x3); - MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos >> 2); + MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, + match.mask->tos >> 2); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, + match.key->tos >> 2); - MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl); + MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, + match.mask->ttl); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, + match.key->ttl); - if (mask->ttl && + if (match.mask->ttl && !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, ft_field_support.outer_ipv4_ttl)) { NL_SET_ERR_MSG_MOD(extack, @@ -1743,44 +1696,39 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, return -EOPNOTSUPP; } - if (mask->tos || mask->ttl) + if (match.mask->tos || match.mask->ttl) *match_level = MLX5_MATCH_L3; } /* *** L3 attributes parsing up to here *** */ - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) { - struct flow_dissector_key_ports *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_PORTS, - f->key); - struct flow_dissector_key_ports *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_PORTS, - f->mask); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports match; + + flow_rule_match_ports(rule, &match); switch (ip_proto) { case IPPROTO_TCP: MLX5_SET(fte_match_set_lyr_2_4, headers_c, - tcp_sport, ntohs(mask->src)); + tcp_sport, ntohs(match.mask->src)); MLX5_SET(fte_match_set_lyr_2_4, headers_v, - tcp_sport, ntohs(key->src)); + tcp_sport, ntohs(match.key->src)); MLX5_SET(fte_match_set_lyr_2_4, headers_c, - tcp_dport, ntohs(mask->dst)); + tcp_dport, ntohs(match.mask->dst)); MLX5_SET(fte_match_set_lyr_2_4, headers_v, - tcp_dport, ntohs(key->dst)); + tcp_dport, ntohs(match.key->dst)); break; case IPPROTO_UDP: MLX5_SET(fte_match_set_lyr_2_4, headers_c, - udp_sport, ntohs(mask->src)); + udp_sport, ntohs(match.mask->src)); MLX5_SET(fte_match_set_lyr_2_4, headers_v, - udp_sport, ntohs(key->src)); + udp_sport, ntohs(match.key->src)); MLX5_SET(fte_match_set_lyr_2_4, headers_c, - udp_dport, ntohs(mask->dst)); + udp_dport, ntohs(match.mask->dst)); MLX5_SET(fte_match_set_lyr_2_4, headers_v, - udp_dport, ntohs(key->dst)); + udp_dport, ntohs(match.key->dst)); break; default: NL_SET_ERR_MSG_MOD(extack, @@ -1790,26 +1738,20 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, return -EINVAL; } - if (mask->src || mask->dst) + if (match.mask->src || match.mask->dst) *match_level = MLX5_MATCH_L4; } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP)) { - struct flow_dissector_key_tcp *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_TCP, - f->key); - struct flow_dissector_key_tcp *mask = - 
skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_TCP, - f->mask); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) { + struct flow_match_tcp match; + flow_rule_match_tcp(rule, &match); MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags, - ntohs(mask->flags)); + ntohs(match.mask->flags)); MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags, - ntohs(key->flags)); + ntohs(match.key->flags)); - if (mask->flags) + if (match.mask->flags) *match_level = MLX5_MATCH_L4; } @@ -1826,15 +1768,15 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_core_dev *dev = priv->mdev; struct mlx5_eswitch *esw = dev->priv.eswitch; struct mlx5e_rep_priv *rpriv = priv->ppriv; + u8 match_level, tunnel_match_level = MLX5_MATCH_NONE; struct mlx5_eswitch_rep *rep; - u8 match_level; int err; - err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level); + err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level, &tunnel_match_level); if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) { rep = rpriv->rep; - if (rep->vport != FDB_UPLINK_VPORT && + if (rep->vport != MLX5_VPORT_UPLINK && (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE && esw->offloads.inline_mode < match_level)) { NL_SET_ERR_MSG_MOD(extack, @@ -1846,10 +1788,12 @@ static int parse_cls_flower(struct mlx5e_priv *priv, } } - if (flow->flags & MLX5E_TC_FLOW_ESWITCH) + if (flow->flags & MLX5E_TC_FLOW_ESWITCH) { flow->esw_attr->match_level = match_level; - else + flow->esw_attr->tunnel_match_level = tunnel_match_level; + } else { flow->nic_attr->match_level = match_level; + } return err; } @@ -1862,27 +1806,29 @@ struct pedit_headers { struct udphdr udp; }; +struct pedit_headers_action { + struct pedit_headers vals; + struct pedit_headers masks; + u32 pedits; +}; + static int pedit_header_offsets[] = { - [TCA_PEDIT_KEY_EX_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth), - [TCA_PEDIT_KEY_EX_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4), - [TCA_PEDIT_KEY_EX_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6), - [TCA_PEDIT_KEY_EX_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp), - [TCA_PEDIT_KEY_EX_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp), + [FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth), + [FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4), + [FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6), + [FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp), + [FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp), }; #define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype]) static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset, - struct pedit_headers *masks, - struct pedit_headers *vals) + struct pedit_headers_action *hdrs) { u32 *curr_pmask, *curr_pval; - if (hdr_type >= __PEDIT_HDR_TYPE_MAX) - goto out_err; - - curr_pmask = (u32 *)(pedit_header(masks, hdr_type) + offset); - curr_pval = (u32 *)(pedit_header(vals, hdr_type) + offset); + curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset); + curr_pval = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset); if (*curr_pmask & mask) /* disallow acting twice on the same location */ goto out_err; @@ -1934,12 +1880,11 @@ static struct mlx5_fields fields[] = { OFFLOAD(UDP_DPORT, 2, udp.dest, 0), }; -/* On input attr->num_mod_hdr_actions tells how many HW actions can be parsed at - * max from the SW pedit action. On success, it says how many HW actions were - * actually parsed. 
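/*
 * A minimal illustrative sketch, not taken from this patch: every key
 * conversion above follows the same shape — probe with
 * flow_rule_match_key(), then fetch a typed key/mask pair through the
 * matching flow_rule_match_*() helper, replacing the paired
 * skb_flow_dissector_target() calls. The function name below is
 * invented for illustration.
 */
static void sketch_parse_tcp(struct flow_rule *rule, u8 *match_level)
{
	struct flow_match_tcp match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP))
		return;

	flow_rule_match_tcp(rule, &match);
	/* match.key holds the values, match.mask the bits being matched */
	if (match.mask->flags)
		*match_level = MLX5_MATCH_L4;
}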
+/* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at + * max from the SW pedit action. On success, attr->num_mod_hdr_actions + * says how many HW actions were actually parsed. */ -static int offload_pedit_fields(struct pedit_headers *masks, - struct pedit_headers *vals, +static int offload_pedit_fields(struct pedit_headers_action *hdrs, struct mlx5e_tc_flow_parse_attr *parse_attr, struct netlink_ext_ack *extack) { @@ -1954,15 +1899,17 @@ static int offload_pedit_fields(struct pedit_headers *masks, __be16 mask_be16; void *action; - set_masks = &masks[TCA_PEDIT_KEY_EX_CMD_SET]; - add_masks = &masks[TCA_PEDIT_KEY_EX_CMD_ADD]; - set_vals = &vals[TCA_PEDIT_KEY_EX_CMD_SET]; - add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD]; + set_masks = &hdrs[0].masks; + add_masks = &hdrs[1].masks; + set_vals = &hdrs[0].vals; + add_vals = &hdrs[1].vals; action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto); - action = parse_attr->mod_hdr_actions; - max_actions = parse_attr->num_mod_hdr_actions; - nactions = 0; + action = parse_attr->mod_hdr_actions + + parse_attr->num_mod_hdr_actions * action_size; + + max_actions = parse_attr->max_mod_hdr_actions; + nactions = parse_attr->num_mod_hdr_actions; for (i = 0; i < ARRAY_SIZE(fields); i++) { f = &fields[i]; @@ -2053,12 +2000,14 @@ static int offload_pedit_fields(struct pedit_headers *masks, } static int alloc_mod_hdr_actions(struct mlx5e_priv *priv, - const struct tc_action *a, int namespace, + struct pedit_headers_action *hdrs, + int namespace, struct mlx5e_tc_flow_parse_attr *parse_attr) { int nkeys, action_size, max_actions; - nkeys = tcf_pedit_nkeys(a); + nkeys = hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits + + hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits; action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto); if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */ @@ -2073,62 +2022,67 @@ static int alloc_mod_hdr_actions(struct mlx5e_priv *priv, if (!parse_attr->mod_hdr_actions) return -ENOMEM; - parse_attr->num_mod_hdr_actions = max_actions; + parse_attr->max_mod_hdr_actions = max_actions; return 0; } static const struct pedit_headers zero_masks = {}; static int parse_tc_pedit_action(struct mlx5e_priv *priv, - const struct tc_action *a, int namespace, + const struct flow_action_entry *act, int namespace, struct mlx5e_tc_flow_parse_attr *parse_attr, + struct pedit_headers_action *hdrs, struct netlink_ext_ack *extack) { - struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks; - int nkeys, i, err = -EOPNOTSUPP; + u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 
0 : 1; + int err = -EOPNOTSUPP; u32 mask, val, offset; - u8 cmd, htype; + u8 htype; - nkeys = tcf_pedit_nkeys(a); + htype = act->mangle.htype; + err = -EOPNOTSUPP; /* can't be all optimistic */ - memset(masks, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX); - memset(vals, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX); + if (htype == FLOW_ACT_MANGLE_UNSPEC) { + NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded"); + goto out_err; + } - for (i = 0; i < nkeys; i++) { - htype = tcf_pedit_htype(a, i); - cmd = tcf_pedit_cmd(a, i); - err = -EOPNOTSUPP; /* can't be all optimistic */ + mask = act->mangle.mask; + val = act->mangle.val; + offset = act->mangle.offset; - if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) { - NL_SET_ERR_MSG_MOD(extack, - "legacy pedit isn't offloaded"); - goto out_err; - } + err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd]); + if (err) + goto out_err; - if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) { - NL_SET_ERR_MSG_MOD(extack, "pedit cmd isn't offloaded"); - goto out_err; - } + hdrs[cmd].pedits++; - mask = tcf_pedit_mask(a, i); - val = tcf_pedit_val(a, i); - offset = tcf_pedit_offset(a, i); + return 0; +out_err: + return err; +} - err = set_pedit_val(htype, ~mask, val, offset, &masks[cmd], &vals[cmd]); +static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace, + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct pedit_headers_action *hdrs, + struct netlink_ext_ack *extack) +{ + struct pedit_headers *cmd_masks; + int err; + u8 cmd; + + if (!parse_attr->mod_hdr_actions) { + err = alloc_mod_hdr_actions(priv, hdrs, namespace, parse_attr); if (err) goto out_err; } - err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr); - if (err) - goto out_err; - - err = offload_pedit_fields(masks, vals, parse_attr, extack); + err = offload_pedit_fields(hdrs, parse_attr, extack); if (err < 0) goto out_dealloc_parsed_actions; for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) { - cmd_masks = &masks[cmd]; + cmd_masks = &hdrs[cmd].masks; if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) { NL_SET_ERR_MSG_MOD(extack, "attempt to offload an unsupported field"); @@ -2178,17 +2132,22 @@ static bool csum_offload_supported(struct mlx5e_priv *priv, } static bool modify_header_match_supported(struct mlx5_flow_spec *spec, - struct tcf_exts *exts, + struct flow_action *flow_action, + u32 actions, struct netlink_ext_ack *extack) { - const struct tc_action *a; + const struct flow_action_entry *act; bool modify_ip_header; u8 htype, ip_proto; void *headers_v; u16 ethertype; - int nkeys, i; + int i; + + if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) + headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers); + else + headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers); - headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers); ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype); /* for non-IP we only re-write MACs, so we're okay */ @@ -2196,20 +2155,16 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec, goto out_ok; modify_ip_header = false; - tcf_exts_for_each_action(i, a, exts) { - int k; - - if (!is_tcf_pedit(a)) + flow_action_for_each(i, act, flow_action) { + if (act->id != FLOW_ACTION_MANGLE && + act->id != FLOW_ACTION_ADD) continue; - nkeys = tcf_pedit_nkeys(a); - for (k = 0; k < nkeys; k++) { - htype = tcf_pedit_htype(a, k); - if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 || - htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) { - modify_ip_header 
= true; - break; - } + htype = act->mangle.htype; + if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4 || + htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) { + modify_ip_header = true; + break; } } @@ -2227,7 +2182,7 @@ out_ok: } static bool actions_match_supported(struct mlx5e_priv *priv, - struct tcf_exts *exts, + struct flow_action *flow_action, struct mlx5e_tc_flow_parse_attr *parse_attr, struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack) @@ -2244,7 +2199,8 @@ static bool actions_match_supported(struct mlx5e_priv *priv, return false; if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) - return modify_header_match_supported(&parse_attr->spec, exts, + return modify_header_match_supported(&parse_attr->spec, + flow_action, actions, extack); return true; @@ -2264,52 +2220,50 @@ static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv) return (fsystem_guid == psystem_guid); } -static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, +static int parse_tc_nic_actions(struct mlx5e_priv *priv, + struct flow_action *flow_action, struct mlx5e_tc_flow_parse_attr *parse_attr, struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack) { struct mlx5_nic_flow_attr *attr = flow->nic_attr; - const struct tc_action *a; + struct pedit_headers_action hdrs[2] = {}; + const struct flow_action_entry *act; u32 action = 0; int err, i; - if (!tcf_exts_has_actions(exts)) + if (!flow_action_has_entries(flow_action)) return -EINVAL; attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; - tcf_exts_for_each_action(i, a, exts) { - if (is_tcf_gact_shot(a)) { + flow_action_for_each(i, act, flow_action) { + switch (act->id) { + case FLOW_ACTION_DROP: action |= MLX5_FLOW_CONTEXT_ACTION_DROP; if (MLX5_CAP_FLOWTABLE(priv->mdev, flow_table_properties_nic_receive.flow_counter)) action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; - continue; - } - - if (is_tcf_pedit(a)) { - err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL, - parse_attr, extack); + break; + case FLOW_ACTION_MANGLE: + case FLOW_ACTION_ADD: + err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL, + parse_attr, hdrs, extack); if (err) return err; action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - continue; - } - - if (is_tcf_csum(a)) { + break; + case FLOW_ACTION_CSUM: if (csum_offload_supported(priv, action, - tcf_csum_update_flags(a), + act->csum_flags, extack)) - continue; + break; return -EOPNOTSUPP; - } - - if (is_tcf_mirred_egress_redirect(a)) { - struct net_device *peer_dev = tcf_mirred_dev(a); + case FLOW_ACTION_REDIRECT: { + struct net_device *peer_dev = act->dev; if (priv->netdev->netdev_ops == peer_dev->netdev_ops && same_hw_devs(priv, netdev_priv(peer_dev))) { @@ -2324,11 +2278,10 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, peer_dev->name); return -EINVAL; } - continue; - } - - if (is_tcf_skbedit_mark(a)) { - u32 mark = tcf_skbedit_mark(a); + } + break; + case FLOW_ACTION_MARK: { + u32 mark = act->mark; if (mark & ~MLX5E_TC_FLOW_ID_MASK) { NL_SET_ERR_MSG_MOD(extack, @@ -2338,14 +2291,23 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, attr->flow_tag = mark; action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - continue; + } + break; + default: + return -EINVAL; } + } - return -EINVAL; + if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits || + hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) { + err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL, + parse_attr, hdrs, extack); + if (err) + return err; } attr->action = action; - if 
(!actions_match_supported(priv, exts, parse_attr, flow, extack)) + if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack)) return -EOPNOTSUPP; return 0; @@ -2450,7 +2412,7 @@ out_err: } static int parse_tc_vlan_action(struct mlx5e_priv *priv, - const struct tc_action *a, + const struct flow_action_entry *act, struct mlx5_esw_flow_attr *attr, u32 *action) { @@ -2459,7 +2421,8 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv, if (vlan_idx >= MLX5_FS_VLAN_DEPTH) return -EOPNOTSUPP; - if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) { + switch (act->id) { + case FLOW_ACTION_VLAN_POP: if (vlan_idx) { if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, MLX5_FS_VLAN_DEPTH)) @@ -2469,10 +2432,11 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv, } else { *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; } - } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) { - attr->vlan_vid[vlan_idx] = tcf_vlan_push_vid(a); - attr->vlan_prio[vlan_idx] = tcf_vlan_push_prio(a); - attr->vlan_proto[vlan_idx] = tcf_vlan_push_proto(a); + break; + case FLOW_ACTION_VLAN_PUSH: + attr->vlan_vid[vlan_idx] = act->vlan.vid; + attr->vlan_prio[vlan_idx] = act->vlan.prio; + attr->vlan_proto[vlan_idx] = act->vlan.proto; if (!attr->vlan_proto[vlan_idx]) attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q); @@ -2484,13 +2448,15 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv, *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2; } else { if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) && - (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) || - tcf_vlan_push_prio(a))) + (act->vlan.proto != htons(ETH_P_8021Q) || + act->vlan.prio)) return -EOPNOTSUPP; *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; } - } else { /* action is TCA_VLAN_ACT_MODIFY */ + break; + default: + /* action is FLOW_ACT_VLAN_MANGLE */ return -EOPNOTSUPP; } @@ -2499,58 +2465,56 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv, return 0; } -static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, +static int parse_tc_fdb_actions(struct mlx5e_priv *priv, + struct flow_action *flow_action, struct mlx5e_tc_flow_parse_attr *parse_attr, struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack) { + struct pedit_headers_action hdrs[2] = {}; struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5_esw_flow_attr *attr = flow->esw_attr; struct mlx5e_rep_priv *rpriv = priv->ppriv; - struct ip_tunnel_info *info = NULL; - const struct tc_action *a; + const struct ip_tunnel_info *info = NULL; + const struct flow_action_entry *act; bool encap = false; u32 action = 0; int err, i; - if (!tcf_exts_has_actions(exts)) + if (!flow_action_has_entries(flow_action)) return -EINVAL; attr->in_rep = rpriv->rep; attr->in_mdev = priv->mdev; - tcf_exts_for_each_action(i, a, exts) { - if (is_tcf_gact_shot(a)) { + flow_action_for_each(i, act, flow_action) { + switch (act->id) { + case FLOW_ACTION_DROP: action |= MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; - continue; - } - - if (is_tcf_pedit(a)) { - err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB, - parse_attr, extack); + break; + case FLOW_ACTION_MANGLE: + case FLOW_ACTION_ADD: + err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB, + parse_attr, hdrs, extack); if (err) return err; action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; attr->split_count = attr->out_count; - continue; - } - - if (is_tcf_csum(a)) { + break; + case FLOW_ACTION_CSUM: if (csum_offload_supported(priv, action, - tcf_csum_update_flags(a), - extack)) - continue; + 
act->csum_flags, extack)) + break; return -EOPNOTSUPP; - } - - if (is_tcf_mirred_egress_redirect(a) || is_tcf_mirred_egress_mirror(a)) { + case FLOW_ACTION_REDIRECT: + case FLOW_ACTION_MIRRED: { struct mlx5e_priv *out_priv; struct net_device *out_dev; - out_dev = tcf_mirred_dev(a); + out_dev = act->dev; if (!out_dev) { /* out_dev is NULL when filters with * non-existing mirred device are replayed to @@ -2569,8 +2533,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT; - if (switchdev_port_same_parent_id(priv->netdev, - out_dev) || + if (netdev_port_same_parent_id(priv->netdev, + out_dev) || is_merged_eswitch_dev(priv, out_dev)) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH); @@ -2615,35 +2579,29 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, priv->netdev->name, out_dev->name); return -EINVAL; } - continue; - } - - if (is_tcf_tunnel_set(a)) { - info = tcf_tunnel_info(a); + } + break; + case FLOW_ACTION_TUNNEL_ENCAP: + info = act->tunnel; if (info) encap = true; else return -EOPNOTSUPP; - continue; - } - - if (is_tcf_vlan(a)) { - err = parse_tc_vlan_action(priv, a, attr, &action); + break; + case FLOW_ACTION_VLAN_PUSH: + case FLOW_ACTION_VLAN_POP: + err = parse_tc_vlan_action(priv, act, attr, &action); if (err) return err; attr->split_count = attr->out_count; - continue; - } - - if (is_tcf_tunnel_release(a)) { + break; + case FLOW_ACTION_TUNNEL_DECAP: action |= MLX5_FLOW_CONTEXT_ACTION_DECAP; - continue; - } - - if (is_tcf_gact_goto_chain(a)) { - u32 dest_chain = tcf_gact_goto_chain_index(a); + break; + case FLOW_ACTION_GOTO: { + u32 dest_chain = act->chain_index; u32 max_chain = mlx5_eswitch_get_chain_range(esw); if (dest_chain <= attr->chain) { @@ -2656,15 +2614,23 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, } action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; attr->dest_chain = dest_chain; - - continue; + break; + } + default: + return -EINVAL; } + } - return -EINVAL; + if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits || + hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) { + err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL, + parse_attr, hdrs, extack); + if (err) + return err; } attr->action = action; - if (!actions_match_supported(priv, exts, parse_attr, flow, extack)) + if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack)) return -EOPNOTSUPP; if (attr->dest_chain) { @@ -2724,7 +2690,7 @@ static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv, int flags) static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow) { struct mlx5_esw_flow_attr *attr = flow->esw_attr; - bool is_rep_ingress = attr->in_rep->vport != FDB_UPLINK_VPORT && + bool is_rep_ingress = attr->in_rep->vport != MLX5_VPORT_UPLINK && flow->flags & MLX5E_TC_FLOW_INGRESS; bool act_is_encap = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT); @@ -2767,6 +2733,30 @@ err_free: return err; } +static void +mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr, + struct mlx5e_priv *priv, + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct tc_cls_flower_offload *f, + struct mlx5_eswitch_rep *in_rep, + struct mlx5_core_dev *in_mdev) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + + esw_attr->parse_attr = parse_attr; + esw_attr->chain = f->common.chain_index; + esw_attr->prio = TC_H_MAJ(f->common.prio) >> 16; + + 
esw_attr->in_rep = in_rep; + esw_attr->in_mdev = in_mdev; + + if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) == + MLX5_COUNTER_SOURCE_ESWITCH) + esw_attr->counter_dev = in_mdev; + else + esw_attr->counter_dev = priv->mdev; +} + static struct mlx5e_tc_flow * __mlx5e_add_fdb_flow(struct mlx5e_priv *priv, struct tc_cls_flower_offload *f, @@ -2775,8 +2765,8 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv, struct mlx5_eswitch_rep *in_rep, struct mlx5_core_dev *in_mdev) { + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f); struct netlink_ext_ack *extack = f->common.extack; - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5e_tc_flow_parse_attr *parse_attr; struct mlx5e_tc_flow *flow; int attr_size, err; @@ -2787,29 +2777,22 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv, &parse_attr, &flow); if (err) goto out; + parse_attr->filter_dev = filter_dev; - flow->esw_attr->parse_attr = parse_attr; + mlx5e_flow_esw_attr_init(flow->esw_attr, + priv, parse_attr, + f, in_rep, in_mdev); + err = parse_cls_flower(flow->priv, flow, &parse_attr->spec, f, filter_dev); if (err) goto err_free; - flow->esw_attr->chain = f->common.chain_index; - flow->esw_attr->prio = TC_H_MAJ(f->common.prio) >> 16; - err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow, extack); + err = parse_tc_fdb_actions(priv, &rule->action, parse_attr, flow, extack); if (err) goto err_free; - flow->esw_attr->in_rep = in_rep; - flow->esw_attr->in_mdev = in_mdev; - - if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) == - MLX5_COUNTER_SOURCE_ESWITCH) - flow->esw_attr->counter_dev = in_mdev; - else - flow->esw_attr->counter_dev = priv->mdev; - - err = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow, extack); + err = mlx5e_tc_add_fdb_flow(priv, flow, extack); if (err) goto err_free; @@ -2846,7 +2829,7 @@ static int mlx5e_tc_add_fdb_peer_flow(struct tc_cls_flower_offload *f, * original flow and packets redirected from uplink use the * peer mdev. 
*/ - if (flow->esw_attr->in_rep->vport == FDB_UPLINK_VPORT) + if (flow->esw_attr->in_rep->vport == MLX5_VPORT_UPLINK) in_mdev = peer_priv->mdev; else in_mdev = priv->mdev; @@ -2912,6 +2895,7 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv, struct net_device *filter_dev, struct mlx5e_tc_flow **__flow) { + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f); struct netlink_ext_ack *extack = f->common.extack; struct mlx5e_tc_flow_parse_attr *parse_attr; struct mlx5e_tc_flow *flow; @@ -2934,7 +2918,7 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv, if (err) goto err_free; - err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow, extack); + err = parse_tc_nic_actions(priv, &rule->action, parse_attr, flow, extack); if (err) goto err_free; @@ -3092,7 +3076,7 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv, mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS); out: - tcf_exts_stats_update(f->exts, bytes, packets, lastuse); + flow_stats_update(&f->stats, bytes, packets, lastuse); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 598ad7e4d5c9..c1334a8ac8f3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -387,8 +387,14 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi); if (unlikely(contig_wqebbs_room < num_wqebbs)) { +#ifdef CONFIG_MLX5_EN_IPSEC + struct mlx5_wqe_eth_seg cur_eth = wqe->eth; +#endif mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room); mlx5e_sq_fetch_wqe(sq, &wqe, &pi); +#ifdef CONFIG_MLX5_EN_IPSEC + wqe->eth = cur_eth; +#endif } /* fill wqe */ @@ -513,8 +519,9 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) &sq->state)) { mlx5e_dump_error_cqe(sq, (struct mlx5_err_cqe *)cqe); - queue_work(cq->channel->priv->wq, - &sq->recover.recover_work); + if (!IS_ERR_OR_NULL(cq->channel->priv->tx_reporter)) + queue_work(cq->channel->priv->wq, + &sq->recover_work); } stats->cqe_err++; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index ee04aab65a9f..bb6e5b5d9681 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -34,6 +34,7 @@ #include <linux/notifier.h> #include <linux/module.h> #include <linux/mlx5/driver.h> +#include <linux/mlx5/vport.h> #include <linux/mlx5/eq.h> #include <linux/mlx5/cmd.h> #ifdef CONFIG_RFS_ACCEL @@ -114,11 +115,11 @@ static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn) struct mlx5_cq_table *table = &eq->cq_table; struct mlx5_core_cq *cq = NULL; - spin_lock(&table->lock); + rcu_read_lock(); cq = radix_tree_lookup(&table->tree, cqn); if (likely(cq)) mlx5_cq_hold(cq); - spin_unlock(&table->lock); + rcu_read_unlock(); return cq; } @@ -371,9 +372,9 @@ int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq) struct mlx5_cq_table *table = &eq->cq_table; int err; - spin_lock_irq(&table->lock); + spin_lock(&table->lock); err = radix_tree_insert(&table->tree, cq->cqn, cq); - spin_unlock_irq(&table->lock); + spin_unlock(&table->lock); return err; } @@ -383,9 +384,9 @@ int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq) struct mlx5_cq_table *table = &eq->cq_table; struct mlx5_core_cq *tmp; - spin_lock_irq(&table->lock); + spin_lock(&table->lock); tmp = radix_tree_delete(&table->tree, 
cq->cqn); - spin_unlock_irq(&table->lock); + spin_unlock(&table->lock); if (!tmp) { mlx5_core_warn(eq->dev, "cq 0x%x not found in eq 0x%x tree\n", eq->eqn, cq->cqn); @@ -530,6 +531,9 @@ static u64 gather_async_events_mask(struct mlx5_core_dev *dev) if (MLX5_CAP_GEN(dev, max_num_of_monitor_counters)) async_event_mask |= (1ull << MLX5_EVENT_TYPE_MONITOR_COUNTER); + if (mlx5_core_is_ecpf_esw_manager(dev)) + async_event_mask |= (1ull << MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE); + return async_event_mask; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 5b492b67f4e1..6cb9710f76d5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -39,8 +39,7 @@ #include "lib/eq.h" #include "eswitch.h" #include "fs_core.h" - -#define UPLINK_VPORT 0xFFFF +#include "ecpf.h" enum { MLX5_ACTION_NONE = 0, @@ -52,7 +51,7 @@ enum { struct vport_addr { struct l2addr_node node; u8 action; - u32 vport; + u16 vport; struct mlx5_flow_handle *flow_rule; bool mpfs; /* UC MAC was added to MPFs */ /* A flag indicating that mac was added due to mc promiscuous vport */ @@ -70,6 +69,28 @@ enum { MC_ADDR_CHANGE | \ PROMISC_CHANGE) +/* The vport getter/iterator are only valid after esw->total_vports + * and vport->vport are initialized in mlx5_eswitch_init. + */ +#define mlx5_esw_for_all_vports(esw, i, vport) \ + for ((i) = MLX5_VPORT_PF; \ + (vport) = &(esw)->vports[i], \ + (i) < (esw)->total_vports; (i)++) + +#define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs) \ + for ((i) = MLX5_VPORT_FIRST_VF; \ + (vport) = &(esw)->vports[i], \ + (i) <= (nvfs); (i)++) + +static struct mlx5_vport *mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, + u16 vport_num) +{ + u16 idx = mlx5_eswitch_vport_num_to_index(esw, vport_num); + + WARN_ON(idx > esw->total_vports - 1); + return &esw->vports[idx]; +} + static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport, u32 events_mask) { @@ -115,7 +136,7 @@ static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport, return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); } -static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport, +static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport, u16 vlan, u8 qos, u8 set_flags) { u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0}; @@ -152,7 +173,7 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport, /* E-Switch FDB */ static struct mlx5_flow_handle * -__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, +__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u16 vport, bool rx_rule, u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN]) { int match_header = (is_zero_ether_addr(mac_c) ? 
0 : @@ -188,7 +209,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, misc_parameters); mc_misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); - MLX5_SET(fte_match_set_misc, mv_misc, source_port, UPLINK_VPORT); + MLX5_SET(fte_match_set_misc, mv_misc, source_port, MLX5_VPORT_UPLINK); MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port); } @@ -215,7 +236,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, } static struct mlx5_flow_handle * -esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport) +esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u16 vport) { u8 mac_c[ETH_ALEN]; @@ -224,7 +245,7 @@ esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport) } static struct mlx5_flow_handle * -esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport) +esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u16 vport) { u8 mac_c[ETH_ALEN]; u8 mac_v[ETH_ALEN]; @@ -237,7 +258,7 @@ esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport) } static struct mlx5_flow_handle * -esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport) +esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u16 vport) { u8 mac_c[ETH_ALEN]; u8 mac_v[ETH_ALEN]; @@ -377,19 +398,19 @@ typedef int (*vport_addr_action)(struct mlx5_eswitch *esw, static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) { u8 *mac = vaddr->node.addr; - u32 vport = vaddr->vport; + u16 vport = vaddr->vport; int err; - /* Skip mlx5_mpfs_add_mac for PFs, - * it is already done by the PF netdev in mlx5e_execute_l2_action + /* Skip mlx5_mpfs_add_mac for eswitch managers, + * it is already done by its netdev in mlx5e_execute_l2_action */ - if (!vport) + if (esw->manager_vport == vport) goto fdb_add; err = mlx5_mpfs_add_mac(esw->dev, mac); if (err) { esw_warn(esw->dev, - "Failed to add L2 table mac(%pM) for vport(%d), err(%d)\n", + "Failed to add L2 table mac(%pM) for vport(0x%x), err(%d)\n", mac, vport, err); return err; } @@ -409,13 +430,13 @@ fdb_add: static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) { u8 *mac = vaddr->node.addr; - u32 vport = vaddr->vport; + u16 vport = vaddr->vport; int err = 0; - /* Skip mlx5_mpfs_del_mac for PFs, - * it is already done by the PF netdev in mlx5e_execute_l2_action + /* Skip mlx5_mpfs_del_mac for eswitch managers, + * it is already done by its netdev in mlx5e_execute_l2_action */ - if (!vport || !vaddr->mpfs) + if (!vaddr->mpfs || esw->manager_vport == vport) goto fdb_del; err = mlx5_mpfs_del_mac(esw->dev, mac); @@ -438,17 +459,18 @@ static void update_allmulti_vports(struct mlx5_eswitch *esw, struct esw_mc_addr *esw_mc) { u8 *mac = vaddr->node.addr; - u32 vport_idx = 0; + struct mlx5_vport *vport; + u16 i, vport_num; - for (vport_idx = 0; vport_idx < esw->total_vports; vport_idx++) { - struct mlx5_vport *vport = &esw->vports[vport_idx]; + mlx5_esw_for_all_vports(esw, i, vport) { struct hlist_head *vport_hash = vport->mc_list; struct vport_addr *iter_vaddr = l2addr_hash_find(vport_hash, mac, struct vport_addr); + vport_num = vport->vport; if (IS_ERR_OR_NULL(vport->allmulti_rule) || - vaddr->vport == vport_idx) + vaddr->vport == vport_num) continue; switch (vaddr->action) { case MLX5_ACTION_ADD: @@ -460,14 +482,14 @@ static void update_allmulti_vports(struct mlx5_eswitch *esw, if (!iter_vaddr) { esw_warn(esw->dev, "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n", - mac, vport_idx);
+ mac, vport_num); continue; } - iter_vaddr->vport = vport_idx; + iter_vaddr->vport = vport_num; iter_vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, - vport_idx); + vport_num); iter_vaddr->mc_promisc = true; break; case MLX5_ACTION_DEL: @@ -485,7 +507,7 @@ static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) struct hlist_head *hash = esw->mc_table; struct esw_mc_addr *esw_mc; u8 *mac = vaddr->node.addr; - u32 vport = vaddr->vport; + u16 vport = vaddr->vport; if (!esw->fdb_table.legacy.fdb) return 0; @@ -499,7 +521,7 @@ static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) return -ENOMEM; esw_mc->uplink_rule = /* Forward MC MAC to Uplink */ - esw_fdb_set_vport_rule(esw, mac, UPLINK_VPORT); + esw_fdb_set_vport_rule(esw, mac, MLX5_VPORT_UPLINK); /* Add this multicast mac to all the mc promiscuous vports */ update_allmulti_vports(esw, vaddr, esw_mc); @@ -525,7 +547,7 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) struct hlist_head *hash = esw->mc_table; struct esw_mc_addr *esw_mc; u8 *mac = vaddr->node.addr; - u32 vport = vaddr->vport; + u16 vport = vaddr->vport; if (!esw->fdb_table.legacy.fdb) return 0; @@ -564,9 +586,9 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) /* Apply vport UC/MC list to HW l2 table and FDB table */ static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw, - u32 vport_num, int list_type) + u16 vport_num, int list_type) { - struct mlx5_vport *vport = &esw->vports[vport_num]; + struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC; vport_addr_action vport_addr_add; vport_addr_action vport_addr_del; @@ -599,9 +621,9 @@ static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw, /* Sync vport UC/MC list from vport context */ static void esw_update_vport_addr_list(struct mlx5_eswitch *esw, - u32 vport_num, int list_type) + u16 vport_num, int list_type) { - struct mlx5_vport *vport = &esw->vports[vport_num]; + struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC; u8 (*mac_list)[ETH_ALEN]; struct l2addr_node *node; @@ -686,9 +708,9 @@ out: /* Sync vport UC/MC list from vport context * Must be called after esw_update_vport_addr_list */ -static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u32 vport_num) +static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u16 vport_num) { - struct mlx5_vport *vport = &esw->vports[vport_num]; + struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); struct l2addr_node *node; struct vport_addr *addr; struct hlist_head *hash; @@ -721,11 +743,11 @@ static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u32 vport_num) } /* Apply vport rx mode to HW FDB table */ -static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num, +static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u16 vport_num, bool promisc, bool mc_promisc) { + struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); struct esw_mc_addr *allmulti_addr = &esw->mc_promisc; - struct mlx5_vport *vport = &esw->vports[vport_num]; if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc) goto promisc; @@ -736,7 +758,7 @@ static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num, if (!allmulti_addr->uplink_rule) allmulti_addr->uplink_rule = esw_fdb_set_vport_allmulti_rule(esw, - UPLINK_VPORT); + MLX5_VPORT_UPLINK); 
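/*
 * Illustrative sketch (the helper name is invented): the
 * mlx5_esw_for_all_vports() iterator used above walks esw->vports[] by
 * array index and yields the vport struct, so callers take the vport
 * number from vport->vport instead of reusing the loop index — the two
 * are no longer interchangeable once the ECPF and uplink vports live
 * at the tail of the array.
 */
static int sketch_count_enabled_vports(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int enabled = 0;
	u16 i;

	mlx5_esw_for_all_vports(esw, i, vport) {
		if (vport->enabled)
			enabled++;
	}
	return enabled;
}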
allmulti_addr->refcnt++; } else if (vport->allmulti_rule) { mlx5_del_flow_rules(vport->allmulti_rule); @@ -764,9 +786,9 @@ promisc: } /* Sync vport rx mode from vport context */ -static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num) +static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u16 vport_num) { - struct mlx5_vport *vport = &esw->vports[vport_num]; + struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); int promisc_all = 0; int promisc_uc = 0; int promisc_mc = 0; @@ -1343,8 +1365,8 @@ static void esw_destroy_tsar(struct mlx5_eswitch *esw) static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num, u32 initial_max_rate, u32 initial_bw_share) { + struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0}; - struct mlx5_vport *vport = &esw->vports[vport_num]; struct mlx5_core_dev *dev = esw->dev; void *vport_elem; int err = 0; @@ -1383,7 +1405,7 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num, static void esw_vport_disable_qos(struct mlx5_eswitch *esw, int vport_num) { - struct mlx5_vport *vport = &esw->vports[vport_num]; + struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); int err = 0; if (!vport->qos.enabled) @@ -1402,8 +1424,8 @@ static void esw_vport_disable_qos(struct mlx5_eswitch *esw, int vport_num) static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num, u32 max_rate, u32 bw_share) { + struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0}; - struct mlx5_vport *vport = &esw->vports[vport_num]; struct mlx5_core_dev *dev = esw->dev; void *vport_elem; u32 bitmask = 0; @@ -1459,15 +1481,22 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw, { int vport_num = vport->vport; - if (!vport_num) + if (esw->manager_vport == vport_num) return; mlx5_modify_vport_admin_state(esw->dev, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT, - vport_num, + vport_num, 1, vport->info.link_state); - mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, vport->info.mac); - mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, vport->info.node_guid); + + /* Host PF has its own mac/guid. */ + if (vport_num) { + mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, + vport->info.mac); + mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, + vport->info.node_guid); + } + modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos, (vport->info.vlan || vport->info.qos)); @@ -1513,10 +1542,10 @@ static void esw_vport_destroy_drop_counters(struct mlx5_vport *vport) mlx5_fc_destroy(dev, vport->egress.drop_counter); } -static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, +static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport, int enable_events) { - struct mlx5_vport *vport = &esw->vports[vport_num]; + u16 vport_num = vport->vport; mutex_lock(&esw->state_lock); WARN_ON(vport->enabled); @@ -1539,8 +1568,11 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, vport->enabled_events = enable_events; vport->enabled = true; - /* only PF is trusted by default */ - if (!vport_num) + /* Esw manager is trusted by default. Host PF (vport 0) is trusted as well + * in smartNIC as it's a vport group manager. 
+ */ + if (esw->manager_vport == vport_num || + (!vport_num && mlx5_core_is_ecpf(esw->dev))) vport->info.trusted = true; esw_vport_change_handle_locked(vport); @@ -1550,9 +1582,10 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, mutex_unlock(&esw->state_lock); } -static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num) +static void esw_disable_vport(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) { - struct mlx5_vport *vport = &esw->vports[vport_num]; + u16 vport_num = vport->vport; if (!vport->enabled) return; @@ -1573,10 +1606,11 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num) esw_vport_change_handle_locked(vport); vport->enabled_events = 0; esw_vport_disable_qos(esw, vport_num); - if (vport_num && esw->mode == SRIOV_LEGACY) { + if (esw->manager_vport != vport_num && + esw->mode == SRIOV_LEGACY) { mlx5_modify_vport_admin_state(esw->dev, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT, - vport_num, + vport_num, 1, MLX5_VPORT_ADMIN_STATE_DOWN); esw_vport_disable_egress_acl(esw, vport); esw_vport_disable_ingress_acl(esw, vport); @@ -1595,7 +1629,7 @@ static int eswitch_vport_event(struct notifier_block *nb, u16 vport_num; vport_num = be16_to_cpu(eqe->data.vport_change.vport_num); - vport = &esw->vports[vport_num]; + vport = mlx5_eswitch_get_vport(esw, vport_num); if (vport->enabled) queue_work(esw->work_queue, &vport->vport_change_handler); @@ -1607,6 +1641,8 @@ static int eswitch_vport_event(struct notifier_block *nb, int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) { + int vf_nvports = 0, total_nvports = 0; + struct mlx5_vport *vport; int err; int i, enabled_events; @@ -1624,6 +1660,18 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode); + if (mode == SRIOV_OFFLOADS) { + if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { + err = mlx5_query_host_params_num_vfs(esw->dev, &vf_nvports); + if (err) + return err; + total_nvports = esw->total_vports; + } else { + vf_nvports = nvfs; + total_nvports = nvfs + MLX5_SPECIAL_VPORTS(esw->dev); + } + } + esw->mode = mode; mlx5_lag_update(esw->dev); @@ -1633,7 +1681,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) } else { mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH); mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); - err = esw_offloads_init(esw, nvfs + 1); + err = esw_offloads_init(esw, vf_nvports, total_nvports); } if (err) @@ -1648,8 +1696,20 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) * 2. FDB/Eswitch is programmed by user space tools */ enabled_events = (mode == SRIOV_LEGACY) ? 
SRIOV_VPORT_EVENTS : 0; - for (i = 0; i <= nvfs; i++) - esw_enable_vport(esw, i, enabled_events); + + /* Enable PF vport */ + vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF); + esw_enable_vport(esw, vport, enabled_events); + + /* Enable ECPF vports */ + if (mlx5_ecpf_vport_exists(esw->dev)) { + vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF); + esw_enable_vport(esw, vport, enabled_events); + } + + /* Enable VF vports */ + mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs) + esw_enable_vport(esw, vport, enabled_events); if (mode == SRIOV_LEGACY) { MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE); @@ -1674,8 +1734,8 @@ abort: void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) { struct esw_mc_addr *mc_promisc; + struct mlx5_vport *vport; int old_mode; - int nvports; int i; if (!ESW_ALLOWED(esw) || esw->mode == SRIOV_NONE) @@ -1685,13 +1745,12 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) esw->enabled_vports, esw->mode); mc_promisc = &esw->mc_promisc; - nvports = esw->enabled_vports; if (esw->mode == SRIOV_LEGACY) mlx5_eq_notifier_unregister(esw->dev, &esw->nb); - for (i = 0; i < esw->total_vports; i++) - esw_disable_vport(esw, i); + mlx5_esw_for_all_vports(esw, i, vport) + esw_disable_vport(esw, vport); if (mc_promisc && mc_promisc->uplink_rule) mlx5_del_flow_rules(mc_promisc->uplink_rule); @@ -1701,7 +1760,7 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) if (esw->mode == SRIOV_LEGACY) esw_destroy_legacy_fdb_table(esw); else if (esw->mode == SRIOV_OFFLOADS) - esw_offloads_cleanup(esw, nvports); + esw_offloads_cleanup(esw); old_mode = esw->mode; esw->mode = SRIOV_NONE; @@ -1718,8 +1777,8 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) { int total_vports = MLX5_TOTAL_VPORTS(dev); struct mlx5_eswitch *esw; - int vport_num; - int err; + struct mlx5_vport *vport; + int err, i; if (!MLX5_VPORT_MANAGER(dev)) return 0; @@ -1735,6 +1794,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) return -ENOMEM; esw->dev = dev; + esw->manager_vport = mlx5_eswitch_manager_vport(dev); esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq"); if (!esw->work_queue) { @@ -1749,6 +1809,8 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) goto abort; } + esw->total_vports = total_vports; + err = esw_offloads_init_reps(esw); if (err) goto abort; @@ -1757,17 +1819,14 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) hash_init(esw->offloads.mod_hdr_tbl); mutex_init(&esw->state_lock); - for (vport_num = 0; vport_num < total_vports; vport_num++) { - struct mlx5_vport *vport = &esw->vports[vport_num]; - - vport->vport = vport_num; + mlx5_esw_for_all_vports(esw, i, vport) { + vport->vport = mlx5_eswitch_index_to_vport_num(esw, i); vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO; vport->dev = dev; INIT_WORK(&vport->vport_change_handler, esw_vport_change_handler); } - esw->total_vports = total_vports; esw->enabled_vports = 0; esw->mode = SRIOV_NONE; esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE; @@ -1866,7 +1925,7 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, err = mlx5_modify_vport_admin_state(esw->dev, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT, - vport, link_state); + vport, 1, link_state); if (err) { mlx5_core_warn(esw->dev, "Failed to set vport %d link state, err = %d", @@ -2009,8 +2068,7 @@ static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw) u32 max_guarantee = 0; int i; - for (i = 0; i < esw->total_vports; i++) { - evport = &esw->vports[i]; + mlx5_esw_for_all_vports(esw, i, evport) { if (!evport->enabled || 
evport->info.min_rate < max_guarantee) continue; max_guarantee = evport->info.min_rate; @@ -2029,8 +2087,7 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider) int err; int i; - for (i = 0; i < esw->total_vports; i++) { - evport = &esw->vports[i]; + mlx5_esw_for_all_vports(esw, i, evport) { if (!evport->enabled) continue; vport_min_rate = evport->info.min_rate; @@ -2045,7 +2102,7 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider) if (bw_share == evport->qos.bw_share) continue; - err = esw_vport_qos_config(esw, i, vport_max_rate, + err = esw_vport_qos_config(esw, evport->vport, vport_max_rate, bw_share); if (!err) evport->qos.bw_share = bw_share; @@ -2128,7 +2185,7 @@ static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev, !MLX5_CAP_GEN(dev, transmit_discard_vport_down)) return 0; - err = mlx5_query_vport_down_stats(dev, vport_idx, + err = mlx5_query_vport_down_stats(dev, vport_idx, 1, &rx_discard_vport_down, &tx_discard_vport_down); if (err) @@ -2165,8 +2222,7 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, MLX5_CMD_OP_QUERY_VPORT_COUNTER); MLX5_SET(query_vport_counter_in, in, op_mod, 0); MLX5_SET(query_vport_counter_in, in, vport_number, vport); - if (vport) - MLX5_SET(query_vport_counter_in, in, other_vport, 1); + MLX5_SET(query_vport_counter_in, in, other_vport, 1); memset(out, 0, outlen); err = mlx5_cmd_exec(esw->dev, in, sizeof(in), out, outlen); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 9c89eea9b2c3..af5581a57e56 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -38,6 +38,7 @@ #include <net/devlink.h> #include <linux/mlx5/device.h> #include <linux/mlx5/eswitch.h> +#include <linux/mlx5/vport.h> #include <linux/mlx5/fs.h> #include "lib/mpfs.h" @@ -49,8 +50,6 @@ #define MLX5_MAX_MC_PER_VPORT(dev) \ (1 << MLX5_CAP_GEN(dev, log_max_current_mc_list)) -#define FDB_UPLINK_VPORT 0xffff - #define MLX5_MIN_BW_SHARE 1 #define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \ @@ -183,6 +182,16 @@ struct esw_mc_addr { /* SRIOV only */ u32 refcnt; }; +struct mlx5_host_work { + struct work_struct work; + struct mlx5_eswitch *esw; +}; + +struct mlx5_host_info { + struct mlx5_nb nb; + u16 num_vfs; +}; + struct mlx5_eswitch { struct mlx5_core_dev *dev; struct mlx5_nb nb; @@ -206,10 +215,13 @@ struct mlx5_eswitch { struct mlx5_esw_offload offloads; int mode; int nvports; + u16 manager_vport; + struct mlx5_host_info host_info; }; -void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports); -int esw_offloads_init(struct mlx5_eswitch *esw, int nvports); +void esw_offloads_cleanup(struct mlx5_eswitch *esw); +int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports, + int total_nvports); void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw); int esw_offloads_init_reps(struct mlx5_eswitch *esw); @@ -312,6 +324,7 @@ struct mlx5_esw_flow_attr { } dests[MLX5_MAX_FLOW_FWD_VPORTS]; u32 mod_hdr_id; u8 match_level; + u8 tunnel_match_level; struct mlx5_fc *counter; u32 chain; u16 prio; @@ -364,6 +377,53 @@ bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, #define esw_debug(dev, format, ...) \ mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__) + +/* The returned number is valid only when the dev is eswitch manager. */ +static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev) +{ + return mlx5_core_is_ecpf_esw_manager(dev) ? 
+ MLX5_VPORT_ECPF : MLX5_VPORT_PF; +} + +static inline int mlx5_eswitch_uplink_idx(struct mlx5_eswitch *esw) +{ + /* Uplink always locate at the last element of the array.*/ + return esw->total_vports - 1; +} + +static inline int mlx5_eswitch_ecpf_idx(struct mlx5_eswitch *esw) +{ + return esw->total_vports - 2; +} + +static inline int mlx5_eswitch_vport_num_to_index(struct mlx5_eswitch *esw, + u16 vport_num) +{ + if (vport_num == MLX5_VPORT_ECPF) { + if (!mlx5_ecpf_vport_exists(esw->dev)) + esw_warn(esw->dev, "ECPF vport doesn't exist!\n"); + return mlx5_eswitch_ecpf_idx(esw); + } + + if (vport_num == MLX5_VPORT_UPLINK) + return mlx5_eswitch_uplink_idx(esw); + + return vport_num; +} + +static inline int mlx5_eswitch_index_to_vport_num(struct mlx5_eswitch *esw, + int index) +{ + if (index == mlx5_eswitch_ecpf_idx(esw) && + mlx5_ecpf_vport_exists(esw->dev)) + return MLX5_VPORT_ECPF; + + if (index == mlx5_eswitch_uplink_idx(esw)) + return MLX5_VPORT_UPLINK; + + return index; +} + #else /* CONFIG_MLX5_ESWITCH */ /* eswitch API stubs */ static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 53065b6ae593..f2260391be5b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -40,15 +40,59 @@ #include "en.h" #include "fs_core.h" #include "lib/devcom.h" +#include "ecpf.h" +#include "lib/eq.h" enum { FDB_FAST_PATH = 0, FDB_SLOW_PATH }; +/* There are two match-all miss flows, one for unicast dst mac and + * one for multicast. + */ +#define MLX5_ESW_MISS_FLOWS (2) + #define fdb_prio_table(esw, chain, prio, level) \ (esw)->fdb_table.offloads.fdb_prio[(chain)][(prio)][(level)] +#define UPLINK_REP_INDEX 0 + +/* The rep getter/iterator are only valid after esw->total_vports + * and vport->vport are initialized in mlx5_eswitch_init. 
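For illustration, the index scheme the helpers above encode: PF and VF vport numbers map to themselves, the ECPF (when it exists) takes the second-to-last slot of the vports array, and the uplink always takes the last slot, so the two helpers invert each other for every valid vport. The check function below is a sketch of that layout, not code from the driver:

static void esw_vport_indexing_example(struct mlx5_eswitch *esw)
{
	/* uplink always maps to the last element of the vports array */
	WARN_ON(mlx5_eswitch_vport_num_to_index(esw, MLX5_VPORT_UPLINK) !=
		esw->total_vports - 1);

	/* ECPF, when present, sits just below the uplink */
	if (mlx5_ecpf_vport_exists(esw->dev))
		WARN_ON(mlx5_eswitch_vport_num_to_index(esw, MLX5_VPORT_ECPF) !=
			esw->total_vports - 2);

	/* PF and VF numbers are identity-mapped */
	WARN_ON(mlx5_eswitch_index_to_vport_num(esw, MLX5_VPORT_PF) !=
		MLX5_VPORT_PF);
}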
+ */ +#define mlx5_esw_for_all_reps(esw, i, rep) \ + for ((i) = MLX5_VPORT_PF; \ + (rep) = &(esw)->offloads.vport_reps[i], \ + (i) < (esw)->total_vports; (i)++) + +#define mlx5_esw_for_each_vf_rep(esw, i, rep, nvfs) \ + for ((i) = MLX5_VPORT_FIRST_VF; \ + (rep) = &(esw)->offloads.vport_reps[i], \ + (i) <= (nvfs); (i)++) + +#define mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvfs) \ + for ((i) = (nvfs); \ + (rep) = &(esw)->offloads.vport_reps[i], \ + (i) >= MLX5_VPORT_FIRST_VF; (i)--) + +#define mlx5_esw_for_each_vf_vport(esw, vport, nvfs) \ + for ((vport) = MLX5_VPORT_FIRST_VF; \ + (vport) <= (nvfs); (vport)++) + +#define mlx5_esw_for_each_vf_vport_reverse(esw, vport, nvfs) \ + for ((vport) = (nvfs); \ + (vport) >= MLX5_VPORT_FIRST_VF; (vport)--) + +static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw, + u16 vport_num) +{ + u16 idx = mlx5_eswitch_vport_num_to_index(esw, vport_num); + + WARN_ON(idx > esw->total_vports - 1); + return &esw->offloads.vport_reps[idx]; +} + static struct mlx5_flow_table * esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level); static void @@ -160,14 +204,15 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_eswitch_owner_vhca_id); - if (attr->match_level == MLX5_MATCH_NONE) - spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; - else - spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | - MLX5_MATCH_MISC_PARAMETERS; - - if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) - spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS; + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; + if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) { + if (attr->tunnel_match_level != MLX5_MATCH_NONE) + spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; + if (attr->match_level != MLX5_MATCH_NONE) + spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS; + } else if (attr->match_level != MLX5_MATCH_NONE) { + spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; + } if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) flow_act.modify_id = attr->mod_hdr_id; @@ -318,7 +363,7 @@ static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val) esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? 
"pop" : "none"); for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) { rep = &esw->offloads.vport_reps[vf_vport]; - if (!rep->rep_if[REP_ETH].valid) + if (rep->rep_if[REP_ETH].state != REP_LOADED) continue; err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val); @@ -359,15 +404,15 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr, in_rep = attr->in_rep; out_rep = attr->dests[0].rep; - if (push && in_rep->vport == FDB_UPLINK_VPORT) + if (push && in_rep->vport == MLX5_VPORT_UPLINK) goto out_notsupp; - if (pop && out_rep->vport == FDB_UPLINK_VPORT) + if (pop && out_rep->vport == MLX5_VPORT_UPLINK) goto out_notsupp; /* vport has vlan push configured, can't offload VF --> wire rules w.o it */ if (!push && !pop && fwd) - if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT) + if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK) goto out_notsupp; /* protects against (1) setting rules with different vlans to push and @@ -409,7 +454,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw, if (!push && !pop && fwd) { /* tracks VF --> wire rules without vlan push action */ - if (attr->dests[0].rep->vport == FDB_UPLINK_VPORT) { + if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) { vport->vlan_refcount++; attr->vlan_handled = true; } @@ -469,7 +514,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw, if (!push && !pop && fwd) { /* tracks VF --> wire rules without vlan push action */ - if (attr->dests[0].rep->vport == FDB_UPLINK_VPORT) + if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) vport->vlan_refcount--; return 0; @@ -516,7 +561,8 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn); - MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */ + /* source vport is the esw manager */ + MLX5_SET(fte_match_set_misc, misc, source_port, esw->manager_vport); misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn); @@ -561,7 +607,7 @@ static void peer_miss_rules_setup(struct mlx5_core_dev *peer_dev, source_eswitch_owner_vhca_id); dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT; - dest->vport.num = 0; + dest->vport.num = peer_dev->priv.eswitch->manager_vport; dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id); dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; } @@ -595,14 +641,35 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); - for (i = 1; i < nvports; i++) { + if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { + MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_PF); + flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, + spec, &flow_act, &dest, 1); + if (IS_ERR(flow)) { + err = PTR_ERR(flow); + goto add_pf_flow_err; + } + flows[MLX5_VPORT_PF] = flow; + } + + if (mlx5_ecpf_vport_exists(esw->dev)) { + MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF); + flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, + spec, &flow_act, &dest, 1); + if (IS_ERR(flow)) { + err = PTR_ERR(flow); + goto add_ecpf_flow_err; + } + flows[mlx5_eswitch_ecpf_idx(esw)] = flow; + } + + mlx5_esw_for_each_vf_vport(esw, i, mlx5_core_max_vfs(esw->dev)) { MLX5_SET(fte_match_set_misc, misc, source_port, i); flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec, &flow_act, &dest, 
1); if (IS_ERR(flow)) { err = PTR_ERR(flow); - esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err); - goto add_flow_err; + goto add_vf_flow_err; } flows[i] = flow; } @@ -612,9 +679,18 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, kvfree(spec); return 0; -add_flow_err: - for (i--; i > 0; i--) +add_vf_flow_err: + nvports = --i; + mlx5_esw_for_each_vf_vport_reverse(esw, i, nvports) mlx5_del_flow_rules(flows[i]); + + if (mlx5_ecpf_vport_exists(esw->dev)) + mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]); +add_ecpf_flow_err: + if (mlx5_core_is_ecpf_esw_manager(esw->dev)) + mlx5_del_flow_rules(flows[MLX5_VPORT_PF]); +add_pf_flow_err: + esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err); kvfree(flows); alloc_flows_err: kvfree(spec); @@ -628,9 +704,15 @@ static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw) flows = esw->fdb_table.offloads.peer_miss_rules; - for (i = 1; i < esw->total_vports; i++) + mlx5_esw_for_each_vf_vport_reverse(esw, i, mlx5_core_max_vfs(esw->dev)) mlx5_del_flow_rules(flows[i]); + if (mlx5_ecpf_vport_exists(esw->dev)) + mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]); + + if (mlx5_core_is_ecpf_esw_manager(esw->dev)) + mlx5_del_flow_rules(flows[MLX5_VPORT_PF]); + kvfree(flows); } @@ -660,7 +742,7 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) dmac_c[0] = 0x01; dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; - dest.vport.num = 0; + dest.vport.num = esw->manager_vport; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec, @@ -904,8 +986,8 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) esw->fdb_table.offloads.fdb_left[i] = ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0; - table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 2 + - esw->total_vports; + table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + + MLX5_ESW_MISS_FLOWS + esw->total_vports; /* create the slow path fdb with encap set, so further table instances * can be created at run time while VFs are probed if the FW allows that. 
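The hunk above replaces a bare 2 in the slow-path FDB sizing with the named MLX5_ESW_MISS_FLOWS constant and adds one peer-miss entry per vport. Spelling the formula out with illustrative numbers (the real MAX_SQ_NVPORTS and MAX_PF_SQ values are defined earlier in this file and may differ):

	/* table_size = nvports * MAX_SQ_NVPORTS  send-to-vport rules, VF SQs
	 *            + MAX_PF_SQ                send-to-vport rules, PF SQs
	 *            + MLX5_ESW_MISS_FLOWS (2)  unicast + multicast miss rules
	 *            + esw->total_vports        one peer-miss rule per vport
	 *
	 * e.g. nvports = 4, MAX_SQ_NVPORTS = 32, MAX_PF_SQ = 256,
	 *      total_vports = 7:
	 *      4 * 32 + 256 + 2 + 7 = 393 flow table entries
	 */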
@@ -999,7 +1081,8 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) dmac[0] = 0x01; MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix); - MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 2); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, + ix + MLX5_ESW_MISS_FLOWS); g = mlx5_create_flow_group(fdb, flow_group_in); if (IS_ERR(g)) { @@ -1048,7 +1131,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw) esw_destroy_offloads_fast_fdb_tables(esw); } -static int esw_create_offloads_table(struct mlx5_eswitch *esw) +static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports) { struct mlx5_flow_table_attr ft_attr = {}; struct mlx5_core_dev *dev = esw->dev; @@ -1062,7 +1145,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw) return -EOPNOTSUPP; } - ft_attr.max_fte = dev->priv.sriov.num_vfs + 2; + ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS; ft_offloads = mlx5_create_flow_table(ns, &ft_attr); if (IS_ERR(ft_offloads)) { @@ -1082,16 +1165,15 @@ static void esw_destroy_offloads_table(struct mlx5_eswitch *esw) mlx5_destroy_flow_table(offloads->ft_offloads); } -static int esw_create_vport_rx_group(struct mlx5_eswitch *esw) +static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports) { int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); struct mlx5_flow_group *g; - struct mlx5_priv *priv = &esw->dev->priv; u32 *flow_group_in; void *match_criteria, *misc; int err = 0; - int nvports = priv->sriov.num_vfs + 2; + nvports = nvports + MLX5_ESW_MISS_FLOWS; flow_group_in = kvzalloc(inlen, GFP_KERNEL); if (!flow_group_in) return -ENOMEM; @@ -1168,7 +1250,8 @@ static int esw_offloads_start(struct mlx5_eswitch *esw, { int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs; - if (esw->mode != SRIOV_LEGACY) { + if (esw->mode != SRIOV_LEGACY && + !mlx5_core_is_ecpf_esw_manager(esw->dev)) { NL_SET_ERR_MSG_MOD(extack, "Can't set offloads mode, SRIOV legacy not enabled"); return -EINVAL; @@ -1206,9 +1289,8 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw) { int total_vfs = MLX5_TOTAL_VPORTS(esw->dev); struct mlx5_core_dev *dev = esw->dev; - struct mlx5_esw_offload *offloads; struct mlx5_eswitch_rep *rep; - u8 hw_id[ETH_ALEN]; + u8 hw_id[ETH_ALEN], rep_type; int vport; esw->offloads.vport_reps = kcalloc(total_vfs, @@ -1217,75 +1299,203 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw) if (!esw->offloads.vport_reps) return -ENOMEM; - offloads = &esw->offloads; mlx5_query_nic_vport_mac_address(dev, 0, hw_id); - for (vport = 0; vport < total_vfs; vport++) { - rep = &offloads->vport_reps[vport]; - - rep->vport = vport; + mlx5_esw_for_all_reps(esw, vport, rep) { + rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport); ether_addr_copy(rep->hw_id, hw_id); - } - offloads->vport_reps[0].vport = FDB_UPLINK_VPORT; + for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) + rep->rep_if[rep_type].state = REP_UNREGISTERED; + } return 0; } -static void esw_offloads_unload_reps_type(struct mlx5_eswitch *esw, int nvports, - u8 rep_type) +static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep, u8 rep_type) +{ + if (rep->rep_if[rep_type].state != REP_LOADED) + return; + + rep->rep_if[rep_type].unload(rep); + rep->rep_if[rep_type].state = REP_REGISTERED; +} + +static void __unload_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type) { struct mlx5_eswitch_rep *rep; - int vport; - for (vport = nvports - 1; vport >= 0; vport--) { - 
rep = &esw->offloads.vport_reps[vport]; - if (!rep->rep_if[rep_type].valid) - continue; + if (mlx5_ecpf_vport_exists(esw->dev)) { + rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF); + __esw_offloads_unload_rep(esw, rep, rep_type); + } - rep->rep_if[rep_type].unload(rep); + if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { + rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF); + __esw_offloads_unload_rep(esw, rep, rep_type); } + + rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK); + __esw_offloads_unload_rep(esw, rep, rep_type); } -static void esw_offloads_unload_reps(struct mlx5_eswitch *esw, int nvports) +static void __unload_reps_vf_vport(struct mlx5_eswitch *esw, int nvports, + u8 rep_type) +{ + struct mlx5_eswitch_rep *rep; + int i; + + mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvports) + __esw_offloads_unload_rep(esw, rep, rep_type); +} + +static void esw_offloads_unload_vf_reps(struct mlx5_eswitch *esw, int nvports) +{ + u8 rep_type = NUM_REP_TYPES; + + while (rep_type-- > 0) + __unload_reps_vf_vport(esw, nvports, rep_type); +} + +static void __unload_reps_all_vport(struct mlx5_eswitch *esw, int nvports, + u8 rep_type) +{ + __unload_reps_vf_vport(esw, nvports, rep_type); + + /* Special vports must be the last to unload. */ + __unload_reps_special_vport(esw, rep_type); +} + +static void esw_offloads_unload_all_reps(struct mlx5_eswitch *esw, int nvports) { u8 rep_type = NUM_REP_TYPES; while (rep_type-- > 0) - esw_offloads_unload_reps_type(esw, nvports, rep_type); + __unload_reps_all_vport(esw, nvports, rep_type); +} + +static int __esw_offloads_load_rep(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep, u8 rep_type) +{ + int err = 0; + + if (rep->rep_if[rep_type].state != REP_REGISTERED) + return 0; + + err = rep->rep_if[rep_type].load(esw->dev, rep); + if (err) + return err; + + rep->rep_if[rep_type].state = REP_LOADED; + + return 0; } -static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports, - u8 rep_type) +static int __load_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type) { struct mlx5_eswitch_rep *rep; - int vport; int err; - for (vport = 0; vport < nvports; vport++) { - rep = &esw->offloads.vport_reps[vport]; - if (!rep->rep_if[rep_type].valid) - continue; + rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK); + err = __esw_offloads_load_rep(esw, rep, rep_type); + if (err) + return err; - err = rep->rep_if[rep_type].load(esw->dev, rep); + if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { + rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF); + err = __esw_offloads_load_rep(esw, rep, rep_type); if (err) - goto err_reps; + goto err_pf; + } + + if (mlx5_ecpf_vport_exists(esw->dev)) { + rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF); + err = __esw_offloads_load_rep(esw, rep, rep_type); + if (err) + goto err_ecpf; + } + + return 0; + +err_ecpf: + if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { + rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF); + __esw_offloads_unload_rep(esw, rep, rep_type); + } + +err_pf: + rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK); + __esw_offloads_unload_rep(esw, rep, rep_type); + return err; +} + +static int __load_reps_vf_vport(struct mlx5_eswitch *esw, int nvports, + u8 rep_type) +{ + struct mlx5_eswitch_rep *rep; + int err, i; + + mlx5_esw_for_each_vf_rep(esw, i, rep, nvports) { + err = __esw_offloads_load_rep(esw, rep, rep_type); + if (err) + goto err_vf; } return 0; +err_vf: + __unload_reps_vf_vport(esw, --i, rep_type); + return err; +} + +static int esw_offloads_load_vf_reps(struct mlx5_eswitch *esw, int nvports) +{ + u8 
rep_type = 0; + int err; + + for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) { + err = __load_reps_vf_vport(esw, nvports, rep_type); + if (err) + goto err_reps; + } + + return err; + err_reps: - esw_offloads_unload_reps_type(esw, vport, rep_type); + while (rep_type-- > 0) + __unload_reps_vf_vport(esw, nvports, rep_type); + return err; +} + +static int __load_reps_all_vport(struct mlx5_eswitch *esw, int nvports, + u8 rep_type) +{ + int err; + + /* Special vports must be loaded first. */ + err = __load_reps_special_vport(esw, rep_type); + if (err) + return err; + + err = __load_reps_vf_vport(esw, nvports, rep_type); + if (err) + goto err_vfs; + + return 0; + +err_vfs: + __unload_reps_special_vport(esw, rep_type); return err; } -static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports) +static int esw_offloads_load_all_reps(struct mlx5_eswitch *esw, int nvports) { u8 rep_type = 0; int err; for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) { - err = esw_offloads_load_reps_type(esw, nvports, rep_type); + err = __load_reps_all_vport(esw, nvports, rep_type); if (err) goto err_reps; } @@ -1294,7 +1504,7 @@ static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports) err_reps: while (rep_type-- > 0) - esw_offloads_unload_reps_type(esw, nvports, rep_type); + __unload_reps_all_vport(esw, nvports, rep_type); return err; } @@ -1397,7 +1607,7 @@ static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS); } -int esw_offloads_init(struct mlx5_eswitch *esw, int nvports) +static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports) { int err; @@ -1407,24 +1617,16 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports) if (err) return err; - err = esw_create_offloads_table(esw); + err = esw_create_offloads_table(esw, nvports); if (err) goto create_ft_err; - err = esw_create_vport_rx_group(esw); + err = esw_create_vport_rx_group(esw, nvports); if (err) goto create_fg_err; - err = esw_offloads_load_reps(esw, nvports); - if (err) - goto err_reps; - - esw_offloads_devcom_init(esw); return 0; -err_reps: - esw_destroy_vport_rx_group(esw); - create_fg_err: esw_destroy_offloads_table(esw); @@ -1434,6 +1636,95 @@ create_ft_err: return err; } +static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw) +{ + esw_destroy_vport_rx_group(esw); + esw_destroy_offloads_table(esw); + esw_destroy_offloads_fdb_tables(esw); +} + +static void esw_host_params_event_handler(struct work_struct *work) +{ + struct mlx5_host_work *host_work; + struct mlx5_eswitch *esw; + int err, num_vf = 0; + + host_work = container_of(work, struct mlx5_host_work, work); + esw = host_work->esw; + + err = mlx5_query_host_params_num_vfs(esw->dev, &num_vf); + if (err || num_vf == esw->host_info.num_vfs) + goto out; + + /* Number of VFs can only change from "0 to x" or "x to 0". 
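The load/unload helpers above encode a strict ordering between the special vports (uplink, PF, ECPF) and the VF vports; summarized here as a sketch of the invariant rather than new code:

	/* load:    uplink -> PF -> ECPF -> VF 1 .. VF n
	 * unload:  VF n .. VF 1 -> ECPF -> PF -> uplink
	 *
	 * Special-vport reps are loaded first and unloaded last, and every
	 * error path above unwinds in exactly the reverse order of loading.
	 */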
*/ + if (esw->host_info.num_vfs > 0) { + esw_offloads_unload_vf_reps(esw, esw->host_info.num_vfs); + } else { + err = esw_offloads_load_vf_reps(esw, num_vf); + + if (err) + goto out; + } + + esw->host_info.num_vfs = num_vf; + +out: + kfree(host_work); +} + +static int esw_host_params_event(struct notifier_block *nb, + unsigned long type, void *data) +{ + struct mlx5_host_work *host_work; + struct mlx5_host_info *host_info; + struct mlx5_eswitch *esw; + + host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC); + if (!host_work) + return NOTIFY_DONE; + + host_info = mlx5_nb_cof(nb, struct mlx5_host_info, nb); + esw = container_of(host_info, struct mlx5_eswitch, host_info); + + host_work->esw = esw; + + INIT_WORK(&host_work->work, esw_host_params_event_handler); + queue_work(esw->work_queue, &host_work->work); + + return NOTIFY_OK; +} + +int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports, + int total_nvports) +{ + int err; + + mutex_init(&esw->fdb_table.offloads.fdb_prio_lock); + + err = esw_offloads_steering_init(esw, total_nvports); + if (err) + return err; + + err = esw_offloads_load_all_reps(esw, vf_nvports); + if (err) + goto err_reps; + + esw_offloads_devcom_init(esw); + + if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { + MLX5_NB_INIT(&esw->host_info.nb, esw_host_params_event, + HOST_PARAMS_CHANGE); + mlx5_eq_notifier_register(esw->dev, &esw->host_info.nb); + esw->host_info.num_vfs = vf_nvports; + } + + return 0; + +err_reps: + esw_offloads_steering_cleanup(esw); + return err; +} + static int esw_offloads_stop(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack) { @@ -1453,13 +1744,21 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw, return err; } -void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports) +void esw_offloads_cleanup(struct mlx5_eswitch *esw) { + u16 num_vfs; + + if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { + mlx5_eq_notifier_unregister(esw->dev, &esw->host_info.nb); + flush_workqueue(esw->work_queue); + num_vfs = esw->host_info.num_vfs; + } else { + num_vfs = esw->dev->priv.sriov.num_vfs; + } + esw_offloads_devcom_cleanup(esw); - esw_offloads_unload_reps(esw, nvports); - esw_destroy_vport_rx_group(esw); - esw_destroy_offloads_table(esw); - esw_destroy_offloads_fdb_tables(esw); + esw_offloads_unload_all_reps(esw, num_vfs); + esw_offloads_steering_cleanup(esw); } static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode) @@ -1548,7 +1847,8 @@ static int mlx5_devlink_eswitch_check(struct devlink *devlink) if(!MLX5_ESWITCH_MANAGER(dev)) return -EPERM; - if (dev->priv.eswitch->mode == SRIOV_NONE) + if (dev->priv.eswitch->mode == SRIOV_NONE && + !mlx5_core_is_ecpf_esw_manager(dev)) return -EOPNOTSUPP; return 0; @@ -1760,47 +2060,45 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap) return 0; } -void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw, - int vport_index, - struct mlx5_eswitch_rep_if *__rep_if, - u8 rep_type) +void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep_if *__rep_if, + u8 rep_type) { - struct mlx5_esw_offload *offloads = &esw->offloads; struct mlx5_eswitch_rep_if *rep_if; + struct mlx5_eswitch_rep *rep; + int i; - rep_if = &offloads->vport_reps[vport_index].rep_if[rep_type]; - - rep_if->load = __rep_if->load; - rep_if->unload = __rep_if->unload; - rep_if->get_proto_dev = __rep_if->get_proto_dev; - rep_if->priv = __rep_if->priv; + mlx5_esw_for_all_reps(esw, i, rep) { + rep_if = &rep->rep_if[rep_type]; + rep_if->load = __rep_if->load; + rep_if->unload = 
__rep_if->unload; + rep_if->get_proto_dev = __rep_if->get_proto_dev; + rep_if->priv = __rep_if->priv; - rep_if->valid = true; + rep_if->state = REP_REGISTERED; + } } -EXPORT_SYMBOL(mlx5_eswitch_register_vport_rep); +EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps); -void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw, - int vport_index, u8 rep_type) +void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type) { - struct mlx5_esw_offload *offloads = &esw->offloads; + u16 max_vf = mlx5_core_max_vfs(esw->dev); struct mlx5_eswitch_rep *rep; + int i; - rep = &offloads->vport_reps[vport_index]; - - if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled) - rep->rep_if[rep_type].unload(rep); + if (esw->mode == SRIOV_OFFLOADS) + __unload_reps_all_vport(esw, max_vf, rep_type); - rep->rep_if[rep_type].valid = false; + mlx5_esw_for_all_reps(esw, i, rep) + rep->rep_if[rep_type].state = REP_UNREGISTERED; } -EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_rep); +EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps); void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type) { -#define UPLINK_REP_INDEX 0 - struct mlx5_esw_offload *offloads = &esw->offloads; struct mlx5_eswitch_rep *rep; - rep = &offloads->vport_reps[UPLINK_REP_INDEX]; + rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK); return rep->rep_if[rep_type].priv; } @@ -1808,15 +2106,11 @@ void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw, int vport, u8 rep_type) { - struct mlx5_esw_offload *offloads = &esw->offloads; struct mlx5_eswitch_rep *rep; - if (vport == FDB_UPLINK_VPORT) - vport = UPLINK_REP_INDEX; - - rep = &offloads->vport_reps[vport]; + rep = mlx5_eswitch_get_rep(esw, vport); - if (rep->rep_if[rep_type].valid && + if (rep->rep_if[rep_type].state == REP_LOADED && rep->rep_if[rep_type].get_proto_dev) return rep->rep_if[rep_type].get_proto_dev(rep); return NULL; @@ -1825,13 +2119,13 @@ EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev); void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type) { - return mlx5_eswitch_get_proto_dev(esw, UPLINK_REP_INDEX, rep_type); + return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type); } EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev); struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw, int vport) { - return &esw->offloads.vport_reps[vport]; + return mlx5_eswitch_get_rep(esw, vport); } EXPORT_SYMBOL(mlx5_eswitch_vport_rep); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c index fbc42b7252a9..5d5864e8df3c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/events.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c @@ -103,6 +103,8 @@ static const char *eqe_type_str(u8 type) return "MLX5_EVENT_TYPE_STALL_EVENT"; case MLX5_EVENT_TYPE_CMD: return "MLX5_EVENT_TYPE_CMD"; + case MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE: + return "MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE"; case MLX5_EVENT_TYPE_PAGE_REQUEST: return "MLX5_EVENT_TYPE_PAGE_REQUEST"; case MLX5_EVENT_TYPE_PAGE_FAULT: @@ -211,11 +213,10 @@ static int port_module(struct notifier_block *nb, unsigned long type, void *data enum port_module_event_status_type module_status; enum port_module_event_error_type error_type; struct mlx5_eqe_port_module *module_event_eqe; - const char *status_str, *error_str; + const char *status_str; u8 module_num; module_event_eqe = &eqe->data.port_module; - module_num = module_event_eqe->module; module_status = module_event_eqe->module_status & 
PORT_MODULE_EVENT_MODULE_STATUS_MASK; error_type = module_event_eqe->error_type & @@ -223,25 +224,27 @@ static int port_module(struct notifier_block *nb, unsigned long type, void *data if (module_status < MLX5_MODULE_STATUS_NUM) events->pme_stats.status_counters[module_status]++; - status_str = mlx5_pme_status_to_string(module_status); - if (module_status == MLX5_MODULE_STATUS_ERROR) { + if (module_status == MLX5_MODULE_STATUS_ERROR) if (error_type < MLX5_MODULE_EVENT_ERROR_NUM) events->pme_stats.error_counters[error_type]++; - error_str = mlx5_pme_error_to_string(error_type); - } if (!printk_ratelimit()) return NOTIFY_OK; - if (module_status == MLX5_MODULE_STATUS_ERROR) + module_num = module_event_eqe->module; + status_str = mlx5_pme_status_to_string(module_status); + if (module_status == MLX5_MODULE_STATUS_ERROR) { + const char *error_str = mlx5_pme_error_to_string(error_type); + mlx5_core_err(events->dev, "Port module event[error]: module %u, %s, %s\n", module_num, status_str, error_str); - else + } else { mlx5_core_info(events->dev, "Port module event: module %u, %s\n", module_num, status_str); + } return NOTIFY_OK; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c index 27c5f6c7d36a..d046d1ec2a86 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c @@ -317,7 +317,6 @@ static int mlx5_fpga_event(struct mlx5_fpga_device *fdev, const char *event_name; bool teardown = false; unsigned long flags; - u32 fpga_qpn; u8 syndrome; switch (event) { @@ -328,7 +327,6 @@ static int mlx5_fpga_event(struct mlx5_fpga_device *fdev, case MLX5_EVENT_TYPE_FPGA_QP_ERROR: syndrome = MLX5_GET(fpga_qp_error_event, data, syndrome); event_name = mlx5_fpga_qp_syndrome_to_string(syndrome); - fpga_qpn = MLX5_GET(fpga_qp_error_event, data, fpga_qpn); break; default: return NOTIFY_DONE; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index b354f3ee94c5..f2cfa012315e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -32,6 +32,7 @@ #include <linux/mutex.h> #include <linux/mlx5/driver.h> +#include <linux/mlx5/vport.h> #include <linux/mlx5/eswitch.h> #include "mlx5_core.h" diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 196c07383082..cb9fa3430c53 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -103,7 +103,7 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force) mlx5_core_err(dev, "start\n"); if (pci_channel_offline(dev->pdev) || in_fatal(dev) || force) { dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; - mlx5_cmd_trigger_completions(dev); + mlx5_cmd_flush(dev); } mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index bfc0f6581729..4eac42555c7d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -446,11 +446,11 @@ static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu) new_channels.params = *params; new_channels.params.sw_mtu = new_mtu; - err = mlx5e_open_channels(priv, &new_channels); + + err = mlx5e_safe_switch_channels(priv, &new_channels, NULL); if 
(err) goto out; - mlx5e_switch_priv_channels(priv, &new_channels, NULL); netdev->mtu = new_channels.params.sw_mtu; out: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c index 2d223385dc81..04c5aca7f8c5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c @@ -343,6 +343,11 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) roce_lag = !mlx5_sriov_is_enabled(dev0) && !mlx5_sriov_is_enabled(dev1); +#ifdef CONFIG_MLX5_ESWITCH + roce_lag &= dev0->priv.eswitch->mode == SRIOV_NONE && + dev1->priv.eswitch->mode == SRIOV_NONE; +#endif + if (roce_lag) mlx5_lag_remove_ib_devices(ldev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mad.c b/drivers/net/ethernet/mellanox/mlx5/core/mad.c deleted file mode 100644 index 3a3b0005fd2b..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/mad.c +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
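Returning to the lag.c hunk above: RoCE LAG is now permitted only when SR-IOV is disabled on both ports and, when eswitch support is compiled in, both eswitches are in SRIOV_NONE mode. Condensed into a single predicate (the helper name is illustrative, not from the driver):

static bool roce_lag_allowed(struct mlx5_core_dev *dev0,
			     struct mlx5_core_dev *dev1)
{
	bool roce_lag = !mlx5_sriov_is_enabled(dev0) &&
			!mlx5_sriov_is_enabled(dev1);

#ifdef CONFIG_MLX5_ESWITCH
	/* any eswitch mode other than SRIOV_NONE disqualifies the pair */
	roce_lag &= dev0->priv.eswitch->mode == SRIOV_NONE &&
		    dev1->priv.eswitch->mode == SRIOV_NONE;
#endif
	return roce_lag;
}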
- */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/mlx5/driver.h> -#include <linux/mlx5/cmd.h> -#include "mlx5_core.h" - -int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb, - u16 opmod, u8 port) -{ - int outlen = MLX5_ST_SZ_BYTES(mad_ifc_out); - int inlen = MLX5_ST_SZ_BYTES(mad_ifc_in); - int err = -ENOMEM; - void *data; - void *resp; - u32 *out; - u32 *in; - - in = kzalloc(inlen, GFP_KERNEL); - out = kzalloc(outlen, GFP_KERNEL); - if (!in || !out) - goto out; - - MLX5_SET(mad_ifc_in, in, opcode, MLX5_CMD_OP_MAD_IFC); - MLX5_SET(mad_ifc_in, in, op_mod, opmod); - MLX5_SET(mad_ifc_in, in, port, port); - - data = MLX5_ADDR_OF(mad_ifc_in, in, mad); - memcpy(data, inb, MLX5_FLD_SZ_BYTES(mad_ifc_in, mad)); - - err = mlx5_cmd_exec(dev, in, inlen, out, outlen); - if (err) - goto out; - - resp = MLX5_ADDR_OF(mad_ifc_out, out, response_mad_packet); - memcpy(outb, resp, - MLX5_FLD_SZ_BYTES(mad_ifc_out, response_mad_packet)); - -out: - kfree(out); - kfree(in); - return err; -} -EXPORT_SYMBOL_GPL(mlx5_core_mad_ifc); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index be81b319b0dc..40d591c8e76c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -65,6 +65,7 @@ #include "lib/vxlan.h" #include "lib/devcom.h" #include "diag/fw_tracer.h" +#include "ecpf.h" MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>"); MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver"); @@ -459,6 +460,50 @@ static int handle_hca_cap_atomic(struct mlx5_core_dev *dev) return err; } +static int handle_hca_cap_odp(struct mlx5_core_dev *dev) +{ + void *set_hca_cap; + void *set_ctx; + int set_sz; + int err; + + if (!MLX5_CAP_GEN(dev, pg)) + return 0; + + err = mlx5_core_get_caps(dev, MLX5_CAP_ODP); + if (err) + return err; + + if (!(MLX5_CAP_ODP_MAX(dev, ud_odp_caps.srq_receive) || + MLX5_CAP_ODP_MAX(dev, rc_odp_caps.srq_receive) || + MLX5_CAP_ODP_MAX(dev, xrc_odp_caps.srq_receive))) + return 0; + + set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in); + set_ctx = kzalloc(set_sz, GFP_KERNEL); + if (!set_ctx) + return -ENOMEM; + + set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability); + memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_ODP], + MLX5_ST_SZ_BYTES(odp_cap)); + + /* set ODP SRQ support for RC/UD and XRC transports */ + MLX5_SET(odp_cap, set_hca_cap, ud_odp_caps.srq_receive, + MLX5_CAP_ODP_MAX(dev, ud_odp_caps.srq_receive)); + + MLX5_SET(odp_cap, set_hca_cap, rc_odp_caps.srq_receive, + MLX5_CAP_ODP_MAX(dev, rc_odp_caps.srq_receive)); + + MLX5_SET(odp_cap, set_hca_cap, xrc_odp_caps.srq_receive, + MLX5_CAP_ODP_MAX(dev, xrc_odp_caps.srq_receive)); + + err = set_caps(dev, set_ctx, set_sz, MLX5_SET_HCA_CAP_OP_MOD_ODP); + + kfree(set_ctx); + return err; +} + static int handle_hca_cap(struct mlx5_core_dev *dev) { void *set_ctx = NULL; @@ -567,6 +612,8 @@ int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id) MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA); MLX5_SET(enable_hca_in, in, function_id, func_id); + MLX5_SET(enable_hca_in, in, embedded_cpu_function, + dev->caps.embedded_cpu); return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); } @@ -577,6 +624,8 @@ int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id) MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA); MLX5_SET(disable_hca_in, in, function_id, func_id); + MLX5_SET(enable_hca_in, in, embedded_cpu_function, + 
dev->caps.embedded_cpu); return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } @@ -693,6 +742,11 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv) goto err_clr_master; } + if (pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP32) && + pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP64) && + pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP128)) + mlx5_core_dbg(dev, "Enabling pci atomics failed\n"); + dev->iseg_base = pci_resource_start(dev->pdev, 0); dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg)); if (!dev->iseg) { @@ -849,6 +903,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, struct pci_dev *pdev = dev->pdev; int err; + dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev); mutex_lock(&dev->intf_state_mutex); if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) { dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n", @@ -926,6 +981,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, goto reclaim_boot_pages; } + err = handle_hca_cap_odp(dev); + if (err) { + dev_err(&pdev->dev, "handle_hca_cap_odp failed\n"); + goto reclaim_boot_pages; + } + err = mlx5_satisfy_startup_pages(dev, 0); if (err) { dev_err(&pdev->dev, "failed to allocate init pages\n"); @@ -1014,6 +1075,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, goto err_sriov; } + err = mlx5_ec_init(dev); + if (err) { + dev_err(&pdev->dev, "Failed to init embedded CPU\n"); + goto err_ec; + } + if (mlx5_device_registered(dev)) { mlx5_attach_device(dev); } else { @@ -1031,6 +1098,9 @@ out: return 0; err_reg_dev: + mlx5_ec_cleanup(dev); + +err_ec: mlx5_sriov_detach(dev); err_sriov: @@ -1105,6 +1175,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, if (mlx5_device_registered(dev)) mlx5_detach_device(dev); + mlx5_ec_cleanup(dev); mlx5_sriov_detach(dev); mlx5_cleanup_fs(dev); mlx5_accel_ipsec_cleanup(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 5300b0b6d836..9529cf9623e3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -121,11 +121,12 @@ int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy, u32 modify_bitmask); int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy, u32 element_id); -int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev); +int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages); u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev, struct ptp_system_timestamp *sts); void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev); +void mlx5_cmd_flush(struct mlx5_core_dev *dev); int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev); void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c index 0670165afd5f..ea744d8466ea 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c @@ -51,9 +51,10 @@ void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev) int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey, - u32 *in, int inlen, - u32 *out, int outlen, - mlx5_cmd_cbk_t callback, void *context) + struct mlx5_async_ctx *async_ctx, u32 *in, + int inlen, u32 *out, int outlen, + mlx5_async_cbk_t callback, + struct 
mlx5_async_work *context) { struct mlx5_mkey_table *table = &dev->priv.mkey_table; u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {0}; @@ -71,7 +72,7 @@ int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev, MLX5_SET(mkc, mkc, mkey_7_0, key); if (callback) - return mlx5_cmd_exec_cb(dev, in, inlen, out, outlen, + return mlx5_cmd_exec_cb(async_ctx, in, inlen, out, outlen, callback, context); err = mlx5_cmd_exec(dev, in, inlen, lout, sizeof(lout)); @@ -105,7 +106,7 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey, u32 *in, int inlen) { - return mlx5_core_create_mkey_cb(dev, mkey, in, inlen, + return mlx5_core_create_mkey_cb(dev, mkey, NULL, in, inlen, NULL, 0, NULL, NULL); } EXPORT_SYMBOL(mlx5_core_create_mkey); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index a83b517b0714..41025387ff2c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -48,6 +48,7 @@ enum { struct mlx5_pages_req { struct mlx5_core_dev *dev; u16 func_id; + u8 ec_function; s32 npages; struct work_struct work; }; @@ -143,6 +144,7 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, MLX5_SET(query_pages_in, in, op_mod, boot ? MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES : MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES); + MLX5_SET(query_pages_in, in, embedded_cpu_function, mlx5_core_is_ecpf(dev)); err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); if (err) @@ -253,7 +255,8 @@ err_mapping: return err; } -static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id) +static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id, + bool ec_function) { u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0}; u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0}; @@ -262,6 +265,7 @@ static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id) MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES); MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE); MLX5_SET(manage_pages_in, in, function_id, func_id); + MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function); err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); if (err) @@ -270,7 +274,7 @@ static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id) } static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, - int notify_fail) + int notify_fail, bool ec_function) { u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0}; int inlen = MLX5_ST_SZ_BYTES(manage_pages_in); @@ -305,6 +309,7 @@ retry: MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE); MLX5_SET(manage_pages_in, in, function_id, func_id); MLX5_SET(manage_pages_in, in, input_num_entries, npages); + MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function); err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); if (err) { @@ -316,8 +321,11 @@ retry: dev->priv.fw_pages += npages; if (func_id) dev->priv.vfs_pages += npages; + else if (mlx5_core_is_ecpf(dev) && !ec_function) + dev->priv.peer_pf_pages += npages; - mlx5_core_dbg(dev, "err %d\n", err); + mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n", + npages, ec_function, func_id, err); kvfree(in); return 0; @@ -328,7 +336,7 @@ out_4k: out_free: kvfree(in); if (notify_fail) - page_notify_fail(dev, func_id); + page_notify_fail(dev, func_id, ec_function); return err; } @@ -364,7 +372,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev, } static int reclaim_pages(struct mlx5_core_dev *dev, u32 
func_id, int npages, - int *nclaimed) + int *nclaimed, bool ec_function) { int outlen = MLX5_ST_SZ_BYTES(manage_pages_out); u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0}; @@ -385,6 +393,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE); MLX5_SET(manage_pages_in, in, function_id, func_id); MLX5_SET(manage_pages_in, in, input_num_entries, npages); + MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function); mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen); @@ -410,6 +419,8 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, dev->priv.fw_pages -= num_claimed; if (func_id) dev->priv.vfs_pages -= num_claimed; + else if (mlx5_core_is_ecpf(dev) && !ec_function) + dev->priv.peer_pf_pages -= num_claimed; out_free: kvfree(out); @@ -423,9 +434,10 @@ static void pages_work_handler(struct work_struct *work) int err = 0; if (req->npages < 0) - err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL); + err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL, + req->ec_function); else if (req->npages > 0) - err = give_pages(dev, req->func_id, req->npages, 1); + err = give_pages(dev, req->func_id, req->npages, 1, req->ec_function); if (err) mlx5_core_warn(dev, "%s fail %d\n", @@ -434,6 +446,10 @@ static void pages_work_handler(struct work_struct *work) kfree(req); } +enum { + EC_FUNCTION_MASK = 0x8000, +}; + static int req_pages_handler(struct notifier_block *nb, unsigned long type, void *data) { @@ -441,6 +457,7 @@ static int req_pages_handler(struct notifier_block *nb, struct mlx5_core_dev *dev; struct mlx5_priv *priv; struct mlx5_eqe *eqe; + bool ec_function; u16 func_id; s32 npages; @@ -450,6 +467,7 @@ static int req_pages_handler(struct notifier_block *nb, func_id = be16_to_cpu(eqe->data.req_pages.func_id); npages = be32_to_cpu(eqe->data.req_pages.num_pages); + ec_function = be16_to_cpu(eqe->data.req_pages.ec_function) & EC_FUNCTION_MASK; mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n", func_id, npages); req = kzalloc(sizeof(*req), GFP_ATOMIC); @@ -461,6 +479,7 @@ static int req_pages_handler(struct notifier_block *nb, req->dev = dev; req->func_id = func_id; req->npages = npages; + req->ec_function = ec_function; INIT_WORK(&req->work, pages_work_handler); queue_work(dev->priv.pg_wq, &req->work); return NOTIFY_OK; @@ -479,7 +498,7 @@ int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot) mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n", npages, boot ? 
"boot" : "init", func_id); - return give_pages(dev, func_id, npages, 0); + return give_pages(dev, func_id, npages, 0, mlx5_core_is_ecpf(dev)); } enum { @@ -513,7 +532,7 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) fwp = rb_entry(p, struct fw_page, rb_node); err = reclaim_pages(dev, fwp->func_id, optimal_reclaimed_pages(), - &nclaimed); + &nclaimed, mlx5_core_is_ecpf(dev)); if (err) { mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", @@ -535,6 +554,9 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) WARN(dev->priv.vfs_pages, "VFs FW pages counter is %d after reclaiming all pages\n", dev->priv.vfs_pages); + WARN(dev->priv.peer_pf_pages, + "Peer PF FW pages counter is %d after reclaiming all pages\n", + dev->priv.peer_pf_pages); return 0; } @@ -567,10 +589,10 @@ void mlx5_pagealloc_stop(struct mlx5_core_dev *dev) flush_workqueue(dev->priv.pg_wq); } -int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev) +int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages) { unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS); - int prev_vfs_pages = dev->priv.vfs_pages; + int prev_pages = *pages; /* In case of internal error we will free the pages manually later */ if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { @@ -578,16 +600,16 @@ int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev) return 0; } - mlx5_core_dbg(dev, "Waiting for %d pages from %s\n", prev_vfs_pages, + mlx5_core_dbg(dev, "Waiting for %d pages from %s\n", prev_pages, dev->priv.name); - while (dev->priv.vfs_pages) { + while (*pages) { if (time_after(jiffies, end)) { - mlx5_core_warn(dev, "aborting while there are %d pending pages\n", dev->priv.vfs_pages); + mlx5_core_warn(dev, "aborting while there are %d pending pages\n", *pages); return -ETIMEDOUT; } - if (dev->priv.vfs_pages < prev_vfs_pages) { + if (*pages < prev_pages) { end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS); - prev_vfs_pages = dev->priv.vfs_pages; + prev_pages = *pages; } msleep(50); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 2b82f35f4c35..b81542820528 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -30,10 +30,7 @@ * SOFTWARE. 
*/ -#include <linux/module.h> -#include <linux/mlx5/driver.h> #include <linux/mlx5/port.h> -#include <linux/mlx5/cmd.h> #include "mlx5_core.h" int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, @@ -157,44 +154,6 @@ int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration) sizeof(out), MLX5_REG_MLCR, 0, 1); } -int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev, - u32 *proto_cap, int proto_mask) -{ - u32 out[MLX5_ST_SZ_DW(ptys_reg)]; - int err; - - err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1); - if (err) - return err; - - if (proto_mask == MLX5_PTYS_EN) - *proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability); - else - *proto_cap = MLX5_GET(ptys_reg, out, ib_proto_capability); - - return 0; -} -EXPORT_SYMBOL_GPL(mlx5_query_port_proto_cap); - -int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev, - u32 *proto_admin, int proto_mask) -{ - u32 out[MLX5_ST_SZ_DW(ptys_reg)]; - int err; - - err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1); - if (err) - return err; - - if (proto_mask == MLX5_PTYS_EN) - *proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin); - else - *proto_admin = MLX5_GET(ptys_reg, out, ib_proto_admin); - - return 0; -} -EXPORT_SYMBOL_GPL(mlx5_query_port_proto_admin); - int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev, u8 *link_width_oper, u8 local_port) { @@ -211,23 +170,6 @@ int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev, } EXPORT_SYMBOL_GPL(mlx5_query_port_link_width_oper); -int mlx5_query_port_eth_proto_oper(struct mlx5_core_dev *dev, - u32 *proto_oper, u8 local_port) -{ - u32 out[MLX5_ST_SZ_DW(ptys_reg)]; - int err; - - err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, - local_port); - if (err) - return err; - - *proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); - - return 0; -} -EXPORT_SYMBOL(mlx5_query_port_eth_proto_oper); - int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev, u8 *proto_oper, u8 local_port) { @@ -245,35 +187,6 @@ int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev, } EXPORT_SYMBOL(mlx5_query_port_ib_proto_oper); -int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable, - u32 proto_admin, int proto_mask) -{ - u32 out[MLX5_ST_SZ_DW(ptys_reg)]; - u32 in[MLX5_ST_SZ_DW(ptys_reg)]; - u8 an_disable_admin; - u8 an_disable_cap; - u8 an_status; - - mlx5_query_port_autoneg(dev, proto_mask, &an_status, - &an_disable_cap, &an_disable_admin); - if (!an_disable_cap && an_disable) - return -EPERM; - - memset(in, 0, sizeof(in)); - - MLX5_SET(ptys_reg, in, local_port, 1); - MLX5_SET(ptys_reg, in, an_disable_admin, an_disable); - MLX5_SET(ptys_reg, in, proto_mask, proto_mask); - if (proto_mask == MLX5_PTYS_EN) - MLX5_SET(ptys_reg, in, eth_proto_admin, proto_admin); - else - MLX5_SET(ptys_reg, in, ib_proto_admin, proto_admin); - - return mlx5_core_access_reg(dev, in, sizeof(in), out, - sizeof(out), MLX5_REG_PTYS, 0, 1); -} -EXPORT_SYMBOL_GPL(mlx5_set_port_ptys); - /* This function should be used after setting a port register only */ void mlx5_toggle_port_link(struct mlx5_core_dev *dev) { @@ -606,25 +519,6 @@ int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx) } EXPORT_SYMBOL_GPL(mlx5_query_port_pfc); -void mlx5_query_port_autoneg(struct mlx5_core_dev *dev, int proto_mask, - u8 *an_status, - u8 *an_disable_cap, u8 *an_disable_admin) -{ - u32 out[MLX5_ST_SZ_DW(ptys_reg)]; - - *an_status = 0; - *an_disable_cap = 0; - *an_disable_admin = 0; - - if (mlx5_query_port_ptys(dev, out, sizeof(out), 
proto_mask, 1)) - return; - - *an_status = MLX5_GET(ptys_reg, out, an_status); - *an_disable_cap = MLX5_GET(ptys_reg, out, an_disable_cap); - *an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin); -} -EXPORT_SYMBOL_GPL(mlx5_query_port_autoneg); - int mlx5_max_tc(struct mlx5_core_dev *mdev) { u8 num_tc = MLX5_CAP_GEN(mdev, max_tc) ? : 8; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c index 6e178030d8fb..7b23fa8d2d60 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c @@ -147,7 +147,7 @@ out: if (MLX5_ESWITCH_MANAGER(dev)) mlx5_eswitch_disable_sriov(dev->priv.eswitch); - if (mlx5_wait_for_vf_pages(dev)) + if (mlx5_wait_for_pages(dev, &dev->priv.vfs_pages)) mlx5_core_warn(dev, "timeout reclaiming VFs pages\n"); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index 9b150ce9d315..ef95feca9961 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -64,7 +64,7 @@ u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport) } int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, - u16 vport, u8 state) + u16 vport, u8 other_vport, u8 state) { u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {0}; u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)] = {0}; @@ -73,8 +73,7 @@ int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, MLX5_CMD_OP_MODIFY_VPORT_STATE); MLX5_SET(modify_vport_state_in, in, op_mod, opmod); MLX5_SET(modify_vport_state_in, in, vport_number, vport); - if (vport) - MLX5_SET(modify_vport_state_in, in, other_vport, 1); + MLX5_SET(modify_vport_state_in, in, other_vport, other_vport); MLX5_SET(modify_vport_state_in, in, admin_state, state); return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); @@ -255,7 +254,7 @@ int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu) EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu); int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev, - u32 vport, + u16 vport, enum mlx5_list_type list_type, u8 addr_list[][ETH_ALEN], int *list_size) @@ -373,7 +372,7 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev, EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list); int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev, - u32 vport, + u16 vport, u16 vlans[], int *size) { @@ -526,7 +525,7 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid) EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid); int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev, - u32 vport, u64 node_guid) + u16 vport, u64 node_guid) { int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in); void *nic_vport_context; @@ -827,7 +826,7 @@ int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev, EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid); int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev, - u32 vport, + u16 vport, int *promisc_uc, int *promisc_mc, int *promisc_all) @@ -1057,7 +1056,7 @@ free: EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter); int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport, - u64 *rx_discard_vport_down, + u8 other_vport, u64 *rx_discard_vport_down, u64 *tx_discard_vport_down) { u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {0}; @@ -1068,8 +1067,7 @@ int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport, MLX5_CMD_OP_QUERY_VNIC_ENV); MLX5_SET(query_vnic_env_in, 
in, op_mod, 0); MLX5_SET(query_vnic_env_in, in, vport_number, vport); - if (vport) - MLX5_SET(query_vnic_env_in, in, other_vport, 1); + MLX5_SET(query_vnic_env_in, in, other_vport, other_vport); err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); if (err) diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index bbf45f10c208..a01d15546e37 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_MLXSW_CORE) += mlxsw_core.o mlxsw_core-objs := core.o core_acl_flex_keys.o \ - core_acl_flex_actions.o + core_acl_flex_actions.o core_env.o mlxsw_core-$(CONFIG_MLXSW_CORE_HWMON) += core_hwmon.o mlxsw_core-$(CONFIG_MLXSW_CORE_THERMAL) += core_thermal.o obj-$(CONFIG_MLXSW_PCI) += mlxsw_pci.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index ddedf8ab5b64..b505d3858235 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -1062,6 +1062,9 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, goto err_driver_init; } + if (mlxsw_driver->params_register && !reload) + devlink_params_publish(devlink); + return 0; err_driver_init: @@ -1131,6 +1134,8 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, return; } + if (mlxsw_core->driver->params_unregister && !reload) + devlink_params_unpublish(devlink); if (mlxsw_core->driver->fini) mlxsw_core->driver->fini(mlxsw_core); mlxsw_thermal_fini(mlxsw_core->thermal); @@ -1460,13 +1465,17 @@ static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans) if (trans->retries) dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n", trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid); - if (err) + if (err) { dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n", trans->tid, trans->reg->id, mlxsw_reg_id_str(trans->reg->id), mlxsw_core_reg_access_type_str(trans->type), trans->emad_status, mlxsw_emad_op_tlv_status_str(trans->emad_status)); + trace_devlink_hwerr(priv_to_devlink(mlxsw_core), + trans->emad_status, + mlxsw_emad_op_tlv_status_str(trans->emad_status)); + } list_del(&trans->bulk_list); kfree_rcu(trans, rcu); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 4e114f35ee0d..cd0c6aa0dff9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -344,6 +344,7 @@ struct mlxsw_bus_info { struct mlxsw_fw_rev fw_rev; u8 vsd[MLXSW_CMD_BOARDINFO_VSD_LEN]; u8 psid[MLXSW_CMD_BOARDINFO_PSID_LEN]; + u8 low_frequency; }; struct mlxsw_hwmon; @@ -394,4 +395,9 @@ static inline void mlxsw_thermal_fini(struct mlxsw_thermal *thermal) #endif +enum mlxsw_devlink_param_id { + MLXSW_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, + MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, +}; + #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c new file mode 100644 index 000000000000..160d6cd164f4 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c @@ -0,0 +1,117 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2018 Mellanox Technologies. 
All rights reserved */ + +#include <linux/kernel.h> +#include <linux/err.h> + +#include "core.h" +#include "core_env.h" +#include "item.h" +#include "reg.h" + +static int mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id, + bool *qsfp) +{ + char eeprom_tmp[MLXSW_REG_MCIA_EEPROM_SIZE]; + char mcia_pl[MLXSW_REG_MCIA_LEN]; + u8 ident; + int err; + + mlxsw_reg_mcia_pack(mcia_pl, id, 0, MLXSW_REG_MCIA_PAGE0_LO_OFF, 0, 1, + MLXSW_REG_MCIA_I2C_ADDR_LOW); + err = mlxsw_reg_query(core, MLXSW_REG(mcia), mcia_pl); + if (err) + return err; + mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp); + ident = eeprom_tmp[0]; + switch (ident) { + case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_SFP: + *qsfp = false; + break; + case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP: /* fall-through */ + case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_PLUS: /* fall-through */ + case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28: /* fall-through */ + case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_DD: + *qsfp = true; + break; + default: + return -EINVAL; + } + + return 0; +} + +int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module, + int off, int *temp) +{ + char eeprom_tmp[MLXSW_REG_MCIA_EEPROM_SIZE]; + union { + u8 buf[MLXSW_REG_MCIA_TH_ITEM_SIZE]; + u16 temp; + } temp_thresh; + char mcia_pl[MLXSW_REG_MCIA_LEN] = {0}; + char mtbr_pl[MLXSW_REG_MTBR_LEN] = {0}; + u16 module_temp; + bool qsfp; + int err; + + mlxsw_reg_mtbr_pack(mtbr_pl, MLXSW_REG_MTBR_BASE_MODULE_INDEX + module, + 1); + err = mlxsw_reg_query(core, MLXSW_REG(mtbr), mtbr_pl); + if (err) + return err; + + /* Don't read temperature thresholds for module with no valid info. */ + mlxsw_reg_mtbr_temp_unpack(mtbr_pl, 0, &module_temp, NULL); + switch (module_temp) { + case MLXSW_REG_MTBR_BAD_SENS_INFO: /* fall-through */ + case MLXSW_REG_MTBR_NO_CONN: /* fall-through */ + case MLXSW_REG_MTBR_NO_TEMP_SENS: /* fall-through */ + case MLXSW_REG_MTBR_INDEX_NA: + *temp = 0; + return 0; + default: + /* Do not consider thresholds for zero temperature. */ + if (!MLXSW_REG_MTMP_TEMP_TO_MC(module_temp)) { + *temp = 0; + return 0; + } + break; + } + + /* Read Free Side Device Temperature Thresholds from page 03h + * (MSB at lower byte address). + * Bytes: + * 128-129 - Temp High Alarm (SFP_TEMP_HIGH_ALARM); + * 130-131 - Temp Low Alarm (SFP_TEMP_LOW_ALARM); + * 132-133 - Temp High Warning (SFP_TEMP_HIGH_WARN); + * 134-135 - Temp Low Warning (SFP_TEMP_LOW_WARN); + */ + + /* Validate module identifier value. */ + err = mlxsw_env_validate_cable_ident(core, module, &qsfp); + if (err) + return err; + + if (qsfp) + mlxsw_reg_mcia_pack(mcia_pl, module, 0, + MLXSW_REG_MCIA_TH_PAGE_NUM, + MLXSW_REG_MCIA_TH_PAGE_OFF + off, + MLXSW_REG_MCIA_TH_ITEM_SIZE, + MLXSW_REG_MCIA_I2C_ADDR_LOW); + else + mlxsw_reg_mcia_pack(mcia_pl, module, 0, + MLXSW_REG_MCIA_PAGE0_LO, + off, MLXSW_REG_MCIA_TH_ITEM_SIZE, + MLXSW_REG_MCIA_I2C_ADDR_HIGH); + + err = mlxsw_reg_query(core, MLXSW_REG(mcia), mcia_pl); + if (err) + return err; + + mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp); + memcpy(temp_thresh.buf, eeprom_tmp, MLXSW_REG_MCIA_TH_ITEM_SIZE); + *temp = temp_thresh.temp * 1000; + + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.h b/drivers/net/ethernet/mellanox/mlxsw/core_env.h new file mode 100644 index 000000000000..6dbdf63f3ee1 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2018 Mellanox Technologies. 
All rights reserved */ + +#ifndef _MLXSW_CORE_ENV_H +#define _MLXSW_CORE_ENV_H + +int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module, + int off, int *temp); + +#endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c index e04e8162aa14..6956bbebe2f1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c @@ -7,8 +7,10 @@ #include <linux/sysfs.h> #include <linux/hwmon.h> #include <linux/err.h> +#include <linux/sfp.h> #include "core.h" +#include "core_env.h" #define MLXSW_HWMON_TEMP_SENSOR_MAX_COUNT 127 #define MLXSW_HWMON_ATTR_COUNT (MLXSW_HWMON_TEMP_SENSOR_MAX_COUNT * 4 + \ @@ -30,6 +32,7 @@ struct mlxsw_hwmon { struct attribute *attrs[MLXSW_HWMON_ATTR_COUNT + 1]; struct mlxsw_hwmon_attr hwmon_attrs[MLXSW_HWMON_ATTR_COUNT]; unsigned int attrs_count; + u8 sensor_count; }; static ssize_t mlxsw_hwmon_temp_show(struct device *dev, @@ -121,6 +124,27 @@ static ssize_t mlxsw_hwmon_fan_rpm_show(struct device *dev, return sprintf(buf, "%u\n", mlxsw_reg_mfsm_rpm_get(mfsm_pl)); } +static ssize_t mlxsw_hwmon_fan_fault_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + container_of(attr, struct mlxsw_hwmon_attr, dev_attr); + struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; + char fore_pl[MLXSW_REG_FORE_LEN]; + bool fault; + int err; + + err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(fore), fore_pl); + if (err) { + dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query fan\n"); + return err; + } + mlxsw_reg_fore_unpack(fore_pl, mlwsw_hwmon_attr->type_index, &fault); + + return sprintf(buf, "%u\n", fault); +} + static ssize_t mlxsw_hwmon_pwm_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -167,12 +191,160 @@ static ssize_t mlxsw_hwmon_pwm_store(struct device *dev, return len; } +static ssize_t mlxsw_hwmon_module_temp_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + container_of(attr, struct mlxsw_hwmon_attr, dev_attr); + struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; + char mtbr_pl[MLXSW_REG_MTBR_LEN] = {0}; + u16 temp; + u8 module; + int err; + + module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count; + mlxsw_reg_mtbr_pack(mtbr_pl, MLXSW_REG_MTBR_BASE_MODULE_INDEX + module, + 1); + err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtbr), mtbr_pl); + if (err) { + dev_err(dev, "Failed to query module temperature sensor\n"); + return err; + } + + mlxsw_reg_mtbr_temp_unpack(mtbr_pl, 0, &temp, NULL); + /* Update status and temperature cache. */ + switch (temp) { + case MLXSW_REG_MTBR_NO_CONN: /* fall-through */ + case MLXSW_REG_MTBR_NO_TEMP_SENS: /* fall-through */ + case MLXSW_REG_MTBR_INDEX_NA: + temp = 0; + break; + case MLXSW_REG_MTBR_BAD_SENS_INFO: + /* Untrusted cable is connected. Reading temperature from its + * sensor is faulty. 
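+ * Report 0 in this case so the sysfs read itself still succeeds.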
+ */ + temp = 0; + break; + default: + temp = MLXSW_REG_MTMP_TEMP_TO_MC(temp); + break; + } + + return sprintf(buf, "%u\n", temp); +} + +static ssize_t mlxsw_hwmon_module_temp_fault_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + container_of(attr, struct mlxsw_hwmon_attr, dev_attr); + struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; + char mtbr_pl[MLXSW_REG_MTBR_LEN] = {0}; + u8 module, fault; + u16 temp; + int err; + + module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count; + mlxsw_reg_mtbr_pack(mtbr_pl, MLXSW_REG_MTBR_BASE_MODULE_INDEX + module, + 1); + err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtbr), mtbr_pl); + if (err) { + dev_err(dev, "Failed to query module temperature sensor\n"); + return err; + } + + mlxsw_reg_mtbr_temp_unpack(mtbr_pl, 0, &temp, NULL); + + /* Update status and temperature cache. */ + switch (temp) { + case MLXSW_REG_MTBR_BAD_SENS_INFO: + /* Untrusted cable is connected. Reading temperature from its + * sensor is faulty. + */ + fault = 1; + break; + case MLXSW_REG_MTBR_NO_CONN: /* fall-through */ + case MLXSW_REG_MTBR_NO_TEMP_SENS: /* fall-through */ + case MLXSW_REG_MTBR_INDEX_NA: + default: + fault = 0; + break; + } + + return sprintf(buf, "%u\n", fault); +} + +static ssize_t +mlxsw_hwmon_module_temp_critical_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + container_of(attr, struct mlxsw_hwmon_attr, dev_attr); + struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; + int temp; + u8 module; + int err; + + module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count; + err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, module, + SFP_TEMP_HIGH_WARN, &temp); + if (err) { + dev_err(dev, "Failed to query module temperature thresholds\n"); + return err; + } + + return sprintf(buf, "%u\n", temp); +} + +static ssize_t +mlxsw_hwmon_module_temp_emergency_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + container_of(attr, struct mlxsw_hwmon_attr, dev_attr); + struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; + u8 module; + int temp; + int err; + + module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count; + err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, module, + SFP_TEMP_HIGH_ALARM, &temp); + if (err) { + dev_err(dev, "Failed to query module temperature thresholds\n"); + return err; + } + + return sprintf(buf, "%u\n", temp); +} + +static ssize_t +mlxsw_hwmon_module_temp_label_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + container_of(attr, struct mlxsw_hwmon_attr, dev_attr); + + return sprintf(buf, "front panel %03u\n", + mlwsw_hwmon_attr->type_index); +} + enum mlxsw_hwmon_attr_type { MLXSW_HWMON_ATTR_TYPE_TEMP, MLXSW_HWMON_ATTR_TYPE_TEMP_MAX, MLXSW_HWMON_ATTR_TYPE_TEMP_RST, MLXSW_HWMON_ATTR_TYPE_FAN_RPM, + MLXSW_HWMON_ATTR_TYPE_FAN_FAULT, MLXSW_HWMON_ATTR_TYPE_PWM, + MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE, + MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_FAULT, + MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_CRIT, + MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_EMERG, + MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_LABEL, }; static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon, @@ -209,6 +381,12 @@ static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon, snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name), "fan%u_input", 
num + 1); break; + case MLXSW_HWMON_ATTR_TYPE_FAN_FAULT: + mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_fan_fault_show; + mlxsw_hwmon_attr->dev_attr.attr.mode = 0444; + snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name), + "fan%u_fault", num + 1); + break; case MLXSW_HWMON_ATTR_TYPE_PWM: mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_pwm_show; mlxsw_hwmon_attr->dev_attr.store = mlxsw_hwmon_pwm_store; @@ -216,6 +394,40 @@ static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon, snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name), "pwm%u", num + 1); break; + case MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE: + mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_module_temp_show; + mlxsw_hwmon_attr->dev_attr.attr.mode = 0444; + snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name), + "temp%u_input", num + 1); + break; + case MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_FAULT: + mlxsw_hwmon_attr->dev_attr.show = + mlxsw_hwmon_module_temp_fault_show; + mlxsw_hwmon_attr->dev_attr.attr.mode = 0444; + snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name), + "temp%u_fault", num + 1); + break; + case MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_CRIT: + mlxsw_hwmon_attr->dev_attr.show = + mlxsw_hwmon_module_temp_critical_show; + mlxsw_hwmon_attr->dev_attr.attr.mode = 0444; + snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name), + "temp%u_crit", num + 1); + break; + case MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_EMERG: + mlxsw_hwmon_attr->dev_attr.show = + mlxsw_hwmon_module_temp_emergency_show; + mlxsw_hwmon_attr->dev_attr.attr.mode = 0444; + snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name), + "temp%u_emergency", num + 1); + break; + case MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_LABEL: + mlxsw_hwmon_attr->dev_attr.show = + mlxsw_hwmon_module_temp_label_show; + mlxsw_hwmon_attr->dev_attr.attr.mode = 0444; + snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name), + "temp%u_label", num + 1); + break; default: WARN_ON(1); } @@ -233,7 +445,6 @@ static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon *mlxsw_hwmon) { char mtcap_pl[MLXSW_REG_MTCAP_LEN] = {0}; char mtmp_pl[MLXSW_REG_MTMP_LEN]; - u8 sensor_count; int i; int err; @@ -242,8 +453,8 @@ static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon *mlxsw_hwmon) dev_err(mlxsw_hwmon->bus_info->dev, "Failed to get number of temp sensors\n"); return err; } - sensor_count = mlxsw_reg_mtcap_sensor_count_get(mtcap_pl); - for (i = 0; i < sensor_count; i++) { + mlxsw_hwmon->sensor_count = mlxsw_reg_mtcap_sensor_count_get(mtcap_pl); + for (i = 0; i < mlxsw_hwmon->sensor_count; i++) { mlxsw_reg_mtmp_pack(mtmp_pl, i, true, true); err = mlxsw_reg_write(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl); @@ -280,10 +491,14 @@ static int mlxsw_hwmon_fans_init(struct mlxsw_hwmon *mlxsw_hwmon) mlxsw_reg_mfcr_unpack(mfcr_pl, &freq, &tacho_active, &pwm_active); num = 0; for (type_index = 0; type_index < MLXSW_MFCR_TACHOS_MAX; type_index++) { - if (tacho_active & BIT(type_index)) + if (tacho_active & BIT(type_index)) { mlxsw_hwmon_attr_add(mlxsw_hwmon, MLXSW_HWMON_ATTR_TYPE_FAN_RPM, + type_index, num); + mlxsw_hwmon_attr_add(mlxsw_hwmon, + MLXSW_HWMON_ATTR_TYPE_FAN_FAULT, type_index, num++); + } } num = 0; for (type_index = 0; type_index < MLXSW_MFCR_PWMS_MAX; type_index++) { @@ -295,6 +510,53 @@ static int mlxsw_hwmon_fans_init(struct mlxsw_hwmon *mlxsw_hwmon) return 0; } +static int mlxsw_hwmon_module_init(struct mlxsw_hwmon *mlxsw_hwmon) +{ + unsigned int module_count = mlxsw_core_max_ports(mlxsw_hwmon->core); + char pmlp_pl[MLXSW_REG_PMLP_LEN] = 
{0}; + int i, index; + u8 width; + int err; + + /* Add extra attributes for module temperature. Sensor index is + * assigned to sensor_count value, while all indexed before + * sensor_count are already utilized by the sensors connected through + * mtmp register by mlxsw_hwmon_temp_init(). + */ + index = mlxsw_hwmon->sensor_count; + for (i = 1; i < module_count; i++) { + mlxsw_reg_pmlp_pack(pmlp_pl, i); + err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(pmlp), + pmlp_pl); + if (err) { + dev_err(mlxsw_hwmon->bus_info->dev, "Failed to read module index %d\n", + i); + return err; + } + width = mlxsw_reg_pmlp_width_get(pmlp_pl); + if (!width) + continue; + mlxsw_hwmon_attr_add(mlxsw_hwmon, + MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE, index, + index); + mlxsw_hwmon_attr_add(mlxsw_hwmon, + MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_FAULT, + index, index); + mlxsw_hwmon_attr_add(mlxsw_hwmon, + MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_CRIT, + index, index); + mlxsw_hwmon_attr_add(mlxsw_hwmon, + MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_EMERG, + index, index); + mlxsw_hwmon_attr_add(mlxsw_hwmon, + MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_LABEL, + index, index); + index++; + } + + return 0; +} + int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core, const struct mlxsw_bus_info *mlxsw_bus_info, struct mlxsw_hwmon **p_hwmon) @@ -317,6 +579,10 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core, if (err) goto err_fans_init; + err = mlxsw_hwmon_module_init(mlxsw_hwmon); + if (err) + goto err_temp_module_init; + mlxsw_hwmon->groups[0] = &mlxsw_hwmon->group; mlxsw_hwmon->group.attrs = mlxsw_hwmon->attrs; @@ -333,6 +599,7 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core, return 0; err_hwmon_register: +err_temp_module_init: err_fans_init: err_temp_init: kfree(mlxsw_hwmon); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c index 61f897b40f82..0b85c7252f9e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c @@ -9,11 +9,20 @@ #include <linux/sysfs.h> #include <linux/thermal.h> #include <linux/err.h> +#include <linux/sfp.h> #include "core.h" +#include "core_env.h" #define MLXSW_THERMAL_POLL_INT 1000 /* ms */ -#define MLXSW_THERMAL_MAX_TEMP 110000 /* 110C */ +#define MLXSW_THERMAL_SLOW_POLL_INT 20000 /* ms */ +#define MLXSW_THERMAL_ASIC_TEMP_NORM 75000 /* 75C */ +#define MLXSW_THERMAL_ASIC_TEMP_HIGH 85000 /* 85C */ +#define MLXSW_THERMAL_ASIC_TEMP_HOT 105000 /* 105C */ +#define MLXSW_THERMAL_ASIC_TEMP_CRIT 110000 /* 110C */ +#define MLXSW_THERMAL_HYSTERESIS_TEMP 5000 /* 5C */ +#define MLXSW_THERMAL_MODULE_TEMP_SHIFT (MLXSW_THERMAL_HYSTERESIS_TEMP * 2) +#define MLXSW_THERMAL_ZONE_MAX_NAME 16 #define MLXSW_THERMAL_MAX_STATE 10 #define MLXSW_THERMAL_MAX_DUTY 255 /* Minimum and maximum fan allowed speed in percent: from 20% to 100%. Values @@ -26,9 +35,22 @@ #define MLXSW_THERMAL_SPEED_MAX (MLXSW_THERMAL_MAX_STATE * 2) #define MLXSW_THERMAL_SPEED_MIN_LEVEL 2 /* 20% */ +/* External cooling devices, allowed for binding to mlxsw thermal zones. 
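+ * Matching against this list is done by substring (strnstr), so any
+ * instance of the mlxreg-fan platform cooling device is accepted.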
*/ +static char * const mlxsw_thermal_external_allowed_cdev[] = { + "mlxreg_fan", +}; + +enum mlxsw_thermal_trips { + MLXSW_THERMAL_TEMP_TRIP_NORM, + MLXSW_THERMAL_TEMP_TRIP_HIGH, + MLXSW_THERMAL_TEMP_TRIP_HOT, + MLXSW_THERMAL_TEMP_TRIP_CRIT, +}; + struct mlxsw_thermal_trip { int type; int temp; + int hyst; int min_state; int max_state; }; @@ -36,32 +58,29 @@ struct mlxsw_thermal_trip { static const struct mlxsw_thermal_trip default_thermal_trips[] = { { /* In range - 0-40% PWM */ .type = THERMAL_TRIP_ACTIVE, - .temp = 75000, + .temp = MLXSW_THERMAL_ASIC_TEMP_NORM, + .hyst = MLXSW_THERMAL_HYSTERESIS_TEMP, .min_state = 0, .max_state = (4 * MLXSW_THERMAL_MAX_STATE) / 10, }, - { /* High - 40-100% PWM */ - .type = THERMAL_TRIP_ACTIVE, - .temp = 80000, - .min_state = (4 * MLXSW_THERMAL_MAX_STATE) / 10, - .max_state = MLXSW_THERMAL_MAX_STATE, - }, { - /* Very high - 100% PWM */ + /* In range - 40-100% PWM */ .type = THERMAL_TRIP_ACTIVE, - .temp = 85000, - .min_state = MLXSW_THERMAL_MAX_STATE, + .temp = MLXSW_THERMAL_ASIC_TEMP_HIGH, + .hyst = MLXSW_THERMAL_HYSTERESIS_TEMP, + .min_state = (4 * MLXSW_THERMAL_MAX_STATE) / 10, .max_state = MLXSW_THERMAL_MAX_STATE, }, { /* Warning */ .type = THERMAL_TRIP_HOT, - .temp = 105000, + .temp = MLXSW_THERMAL_ASIC_TEMP_HOT, + .hyst = MLXSW_THERMAL_HYSTERESIS_TEMP, .min_state = MLXSW_THERMAL_MAX_STATE, .max_state = MLXSW_THERMAL_MAX_STATE, }, { /* Critical - soft poweroff */ .type = THERMAL_TRIP_CRITICAL, - .temp = MLXSW_THERMAL_MAX_TEMP, + .temp = MLXSW_THERMAL_ASIC_TEMP_CRIT, .min_state = MLXSW_THERMAL_MAX_STATE, .max_state = MLXSW_THERMAL_MAX_STATE, } @@ -72,14 +91,27 @@ static const struct mlxsw_thermal_trip default_thermal_trips[] = { /* Make sure all trips are writable */ #define MLXSW_THERMAL_TRIP_MASK (BIT(MLXSW_THERMAL_NUM_TRIPS) - 1) +struct mlxsw_thermal; + +struct mlxsw_thermal_module { + struct mlxsw_thermal *parent; + struct thermal_zone_device *tzdev; + struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS]; + enum thermal_device_mode mode; + int module; +}; + struct mlxsw_thermal { struct mlxsw_core *core; const struct mlxsw_bus_info *bus_info; struct thermal_zone_device *tzdev; + int polling_delay; struct thermal_cooling_device *cdevs[MLXSW_MFCR_PWMS_MAX]; u8 cooling_levels[MLXSW_THERMAL_MAX_STATE + 1]; struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS]; enum thermal_device_mode mode; + struct mlxsw_thermal_module *tz_module_arr; + unsigned int tz_module_num; }; static inline u8 mlxsw_state_to_duty(int state) @@ -103,9 +135,67 @@ static int mlxsw_get_cooling_device_idx(struct mlxsw_thermal *thermal, if (thermal->cdevs[i] == cdev) return i; + /* Allow mlxsw thermal zone binding to an external cooling device */ + for (i = 0; i < ARRAY_SIZE(mlxsw_thermal_external_allowed_cdev); i++) { + if (strnstr(cdev->type, mlxsw_thermal_external_allowed_cdev[i], + sizeof(cdev->type))) + return 0; + } + return -ENODEV; } +static void +mlxsw_thermal_module_trips_reset(struct mlxsw_thermal_module *tz) +{ + tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = 0; + tz->trips[MLXSW_THERMAL_TEMP_TRIP_HIGH].temp = 0; + tz->trips[MLXSW_THERMAL_TEMP_TRIP_HOT].temp = 0; + tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp = 0; +} + +static int +mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core, + struct mlxsw_thermal_module *tz) +{ + int crit_temp, emerg_temp; + int err; + + err = mlxsw_env_module_temp_thresholds_get(core, tz->module, + SFP_TEMP_HIGH_WARN, + &crit_temp); + if (err) + return err; + + err = mlxsw_env_module_temp_thresholds_get(core, 
tz->module, + SFP_TEMP_HIGH_ALARM, + &emerg_temp); + if (err) + return err; + + /* According to the system thermal requirements, the thermal zones are + * defined with four trip points. The critical and emergency + * temperature thresholds provided by the QSFP module are set as the + * "active" and "hot" trip points; the "normal" and "critical" trip + * points are derived from "active" and "hot" by subtracting or adding + * twice the hysteresis value. + */ + if (crit_temp >= MLXSW_THERMAL_MODULE_TEMP_SHIFT) + tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = crit_temp - + MLXSW_THERMAL_MODULE_TEMP_SHIFT; + else + tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = crit_temp; + tz->trips[MLXSW_THERMAL_TEMP_TRIP_HIGH].temp = crit_temp; + tz->trips[MLXSW_THERMAL_TEMP_TRIP_HOT].temp = emerg_temp; + if (emerg_temp > crit_temp) + tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp = emerg_temp + + MLXSW_THERMAL_MODULE_TEMP_SHIFT; + else + tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp = emerg_temp; + + return 0; +} + static int mlxsw_thermal_bind(struct thermal_zone_device *tzdev, struct thermal_cooling_device *cdev) { @@ -172,7 +262,7 @@ static int mlxsw_thermal_set_mode(struct thermal_zone_device *tzdev, mutex_lock(&tzdev->lock); if (mode == THERMAL_DEVICE_ENABLED) - tzdev->polling_delay = MLXSW_THERMAL_POLL_INT; + tzdev->polling_delay = thermal->polling_delay; else tzdev->polling_delay = 0; @@ -237,13 +327,31 @@ static int mlxsw_thermal_set_trip_temp(struct thermal_zone_device *tzdev, struct mlxsw_thermal *thermal = tzdev->devdata; if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS || - temp > MLXSW_THERMAL_MAX_TEMP) + temp > MLXSW_THERMAL_ASIC_TEMP_CRIT) return -EINVAL; thermal->trips[trip].temp = temp; return 0; } +static int mlxsw_thermal_get_trip_hyst(struct thermal_zone_device *tzdev, + int trip, int *p_hyst) +{ + struct mlxsw_thermal *thermal = tzdev->devdata; + + *p_hyst = thermal->trips[trip].hyst; + return 0; +} + +static int mlxsw_thermal_set_trip_hyst(struct thermal_zone_device *tzdev, + int trip, int hyst) +{ + struct mlxsw_thermal *thermal = tzdev->devdata; + + thermal->trips[trip].hyst = hyst; + return 0; +} + static struct thermal_zone_device_ops mlxsw_thermal_ops = { .bind = mlxsw_thermal_bind, .unbind = mlxsw_thermal_unbind, @@ -253,6 +361,206 @@ static struct thermal_zone_device_ops mlxsw_thermal_ops = { .get_trip_type = mlxsw_thermal_get_trip_type, .get_trip_temp = mlxsw_thermal_get_trip_temp, .set_trip_temp = mlxsw_thermal_set_trip_temp, + .get_trip_hyst = mlxsw_thermal_get_trip_hyst, + .set_trip_hyst = mlxsw_thermal_set_trip_hyst, +}; + +static int mlxsw_thermal_module_bind(struct thermal_zone_device *tzdev, + struct thermal_cooling_device *cdev) +{ + struct mlxsw_thermal_module *tz = tzdev->devdata; + struct mlxsw_thermal *thermal = tz->parent; + int i, j, err; + + /* If the cooling device is one of ours bind it */ + if (mlxsw_get_cooling_device_idx(thermal, cdev) < 0) + return 0; + + for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) { + const struct mlxsw_thermal_trip *trip = &tz->trips[i]; + + err = thermal_zone_bind_cooling_device(tzdev, i, cdev, + trip->max_state, + trip->min_state, + THERMAL_WEIGHT_DEFAULT); + if (err < 0) + goto err_bind_cooling_device; + } + return 0; + +err_bind_cooling_device: + for (j = i - 1; j >= 0; j--) + thermal_zone_unbind_cooling_device(tzdev, j, cdev); + return err; +} + +static int mlxsw_thermal_module_unbind(struct thermal_zone_device *tzdev, + struct thermal_cooling_device *cdev) +{ + struct mlxsw_thermal_module *tz = tzdev->devdata; + struct mlxsw_thermal *thermal = 
tz->parent; + int i; + int err; + + /* If the cooling device is one of ours unbind it */ + if (mlxsw_get_cooling_device_idx(thermal, cdev) < 0) + return 0; + + for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) { + err = thermal_zone_unbind_cooling_device(tzdev, i, cdev); + WARN_ON(err); + } + return err; +} + +static int mlxsw_thermal_module_mode_get(struct thermal_zone_device *tzdev, + enum thermal_device_mode *mode) +{ + struct mlxsw_thermal_module *tz = tzdev->devdata; + + *mode = tz->mode; + + return 0; +} + +static int mlxsw_thermal_module_mode_set(struct thermal_zone_device *tzdev, + enum thermal_device_mode mode) +{ + struct mlxsw_thermal_module *tz = tzdev->devdata; + struct mlxsw_thermal *thermal = tz->parent; + + mutex_lock(&tzdev->lock); + + if (mode == THERMAL_DEVICE_ENABLED) + tzdev->polling_delay = thermal->polling_delay; + else + tzdev->polling_delay = 0; + + mutex_unlock(&tzdev->lock); + + tz->mode = mode; + thermal_zone_device_update(tzdev, THERMAL_EVENT_UNSPECIFIED); + + return 0; +} + +static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev, + int *p_temp) +{ + struct mlxsw_thermal_module *tz = tzdev->devdata; + struct mlxsw_thermal *thermal = tz->parent; + struct device *dev = thermal->bus_info->dev; + char mtbr_pl[MLXSW_REG_MTBR_LEN]; + u16 temp; + int err; + + /* Read module temperature. */ + mlxsw_reg_mtbr_pack(mtbr_pl, MLXSW_REG_MTBR_BASE_MODULE_INDEX + + tz->module, 1); + err = mlxsw_reg_query(thermal->core, MLXSW_REG(mtbr), mtbr_pl); + if (err) + return err; + + mlxsw_reg_mtbr_temp_unpack(mtbr_pl, 0, &temp, NULL); + /* Update temperature. */ + switch (temp) { + case MLXSW_REG_MTBR_NO_CONN: /* fall-through */ + case MLXSW_REG_MTBR_NO_TEMP_SENS: /* fall-through */ + case MLXSW_REG_MTBR_INDEX_NA: /* fall-through */ + case MLXSW_REG_MTBR_BAD_SENS_INFO: + temp = 0; + break; + default: + temp = MLXSW_REG_MTMP_TEMP_TO_MC(temp); + /* Reset all trip points. */ + mlxsw_thermal_module_trips_reset(tz); + /* Update trip points. 
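+ * using the high warning/alarm thresholds read from the module EEPROM.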
*/ + err = mlxsw_thermal_module_trips_update(dev, thermal->core, + tz); + if (err) + return err; + break; + } + + *p_temp = (int) temp; + return 0; +} + +static int +mlxsw_thermal_module_trip_type_get(struct thermal_zone_device *tzdev, int trip, + enum thermal_trip_type *p_type) +{ + struct mlxsw_thermal_module *tz = tzdev->devdata; + + if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS) + return -EINVAL; + + *p_type = tz->trips[trip].type; + return 0; +} + +static int +mlxsw_thermal_module_trip_temp_get(struct thermal_zone_device *tzdev, + int trip, int *p_temp) +{ + struct mlxsw_thermal_module *tz = tzdev->devdata; + + if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS) + return -EINVAL; + + *p_temp = tz->trips[trip].temp; + return 0; +} + +static int +mlxsw_thermal_module_trip_temp_set(struct thermal_zone_device *tzdev, + int trip, int temp) +{ + struct mlxsw_thermal_module *tz = tzdev->devdata; + + if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS || + temp > tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp) + return -EINVAL; + + tz->trips[trip].temp = temp; + return 0; +} + +static int +mlxsw_thermal_module_trip_hyst_get(struct thermal_zone_device *tzdev, int trip, + int *p_hyst) +{ + struct mlxsw_thermal_module *tz = tzdev->devdata; + + *p_hyst = tz->trips[trip].hyst; + return 0; +} + +static int +mlxsw_thermal_module_trip_hyst_set(struct thermal_zone_device *tzdev, int trip, + int hyst) +{ + struct mlxsw_thermal_module *tz = tzdev->devdata; + + tz->trips[trip].hyst = hyst; + return 0; +} + +static struct thermal_zone_params mlxsw_thermal_module_params = { + .governor_name = "user_space", +}; + +static struct thermal_zone_device_ops mlxsw_thermal_module_ops = { + .bind = mlxsw_thermal_module_bind, + .unbind = mlxsw_thermal_module_unbind, + .get_mode = mlxsw_thermal_module_mode_get, + .set_mode = mlxsw_thermal_module_mode_set, + .get_temp = mlxsw_thermal_module_temp_get, + .get_trip_type = mlxsw_thermal_module_trip_type_get, + .get_trip_temp = mlxsw_thermal_module_trip_temp_get, + .set_trip_temp = mlxsw_thermal_module_trip_temp_set, + .get_trip_hyst = mlxsw_thermal_module_trip_hyst_get, + .set_trip_hyst = mlxsw_thermal_module_trip_hyst_set, }; static int mlxsw_thermal_get_max_state(struct thermal_cooling_device *cdev, @@ -355,6 +663,123 @@ static const struct thermal_cooling_device_ops mlxsw_cooling_ops = { .set_cur_state = mlxsw_thermal_set_cur_state, }; +static int +mlxsw_thermal_module_tz_init(struct mlxsw_thermal_module *module_tz) +{ + char tz_name[MLXSW_THERMAL_ZONE_MAX_NAME]; + int err; + + snprintf(tz_name, sizeof(tz_name), "mlxsw-module%d", + module_tz->module + 1); + module_tz->tzdev = thermal_zone_device_register(tz_name, + MLXSW_THERMAL_NUM_TRIPS, + MLXSW_THERMAL_TRIP_MASK, + module_tz, + &mlxsw_thermal_module_ops, + &mlxsw_thermal_module_params, + 0, 0); + if (IS_ERR(module_tz->tzdev)) { + err = PTR_ERR(module_tz->tzdev); + return err; + } + + return 0; +} + +static void mlxsw_thermal_module_tz_fini(struct thermal_zone_device *tzdev) +{ + thermal_zone_device_unregister(tzdev); +} + +static int +mlxsw_thermal_module_init(struct device *dev, struct mlxsw_core *core, + struct mlxsw_thermal *thermal, u8 local_port) +{ + struct mlxsw_thermal_module *module_tz; + char pmlp_pl[MLXSW_REG_PMLP_LEN]; + u8 width, module; + int err; + + mlxsw_reg_pmlp_pack(pmlp_pl, local_port); + err = mlxsw_reg_query(core, MLXSW_REG(pmlp), pmlp_pl); + if (err) + return err; + + width = mlxsw_reg_pmlp_width_get(pmlp_pl); + if (!width) + return 0; + + module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0); + module_tz = 
&thermal->tz_module_arr[module]; + module_tz->module = module; + module_tz->parent = thermal; + memcpy(module_tz->trips, default_thermal_trips, + sizeof(thermal->trips)); + /* Initialize all trip points. */ + mlxsw_thermal_module_trips_reset(module_tz); + /* Update trip points according to the module data. */ + err = mlxsw_thermal_module_trips_update(dev, core, module_tz); + if (err) + return err; + + thermal->tz_module_num++; + + return 0; +} + +static void mlxsw_thermal_module_fini(struct mlxsw_thermal_module *module_tz) +{ + if (module_tz && module_tz->tzdev) { + mlxsw_thermal_module_tz_fini(module_tz->tzdev); + module_tz->tzdev = NULL; + } +} + +static int +mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core, + struct mlxsw_thermal *thermal) +{ + unsigned int module_count = mlxsw_core_max_ports(core); + int i, err; + + thermal->tz_module_arr = kcalloc(module_count, + sizeof(*thermal->tz_module_arr), + GFP_KERNEL); + if (!thermal->tz_module_arr) + return -ENOMEM; + + for (i = 1; i < module_count; i++) { + err = mlxsw_thermal_module_init(dev, core, thermal, i); + if (err) + goto err_unreg_tz_module_arr; + } + + for (i = 0; i < thermal->tz_module_num; i++) { + err = mlxsw_thermal_module_tz_init(&thermal->tz_module_arr[i]); + if (err) + goto err_unreg_tz_module_arr; + } + + return 0; + +err_unreg_tz_module_arr: + for (i = module_count - 1; i >= 0; i--) + mlxsw_thermal_module_fini(&thermal->tz_module_arr[i]); + kfree(thermal->tz_module_arr); + return err; +} + +static void +mlxsw_thermal_modules_fini(struct mlxsw_thermal *thermal) +{ + unsigned int module_count = mlxsw_core_max_ports(thermal->core); + int i; + + for (i = module_count - 1; i >= 0; i--) + mlxsw_thermal_module_fini(&thermal->tz_module_arr[i]); + kfree(thermal->tz_module_arr); +} + int mlxsw_thermal_init(struct mlxsw_core *core, const struct mlxsw_bus_info *bus_info, struct mlxsw_thermal **p_thermal) @@ -407,8 +832,9 @@ int mlxsw_thermal_init(struct mlxsw_core *core, if (pwm_active & BIT(i)) { struct thermal_cooling_device *cdev; - cdev = thermal_cooling_device_register("Fan", thermal, - &mlxsw_cooling_ops); + cdev = thermal_cooling_device_register("mlxsw_fan", + thermal, + &mlxsw_cooling_ops); if (IS_ERR(cdev)) { err = PTR_ERR(cdev); dev_err(dev, "Failed to register cooling device\n"); @@ -423,22 +849,36 @@ int mlxsw_thermal_init(struct mlxsw_core *core, thermal->cooling_levels[i] = max(MLXSW_THERMAL_SPEED_MIN_LEVEL, i); + thermal->polling_delay = bus_info->low_frequency ? 
+ MLXSW_THERMAL_SLOW_POLL_INT : + MLXSW_THERMAL_POLL_INT; + thermal->tzdev = thermal_zone_device_register("mlxsw", MLXSW_THERMAL_NUM_TRIPS, MLXSW_THERMAL_TRIP_MASK, thermal, &mlxsw_thermal_ops, NULL, 0, - MLXSW_THERMAL_POLL_INT); + thermal->polling_delay); if (IS_ERR(thermal->tzdev)) { err = PTR_ERR(thermal->tzdev); dev_err(dev, "Failed to register thermal zone\n"); goto err_unreg_cdevs; } + err = mlxsw_thermal_modules_init(dev, core, thermal); + if (err) + goto err_unreg_tzdev; + thermal->mode = THERMAL_DEVICE_ENABLED; *p_thermal = thermal; return 0; + +err_unreg_tzdev: + if (thermal->tzdev) { + thermal_zone_device_unregister(thermal->tzdev); + thermal->tzdev = NULL; + } err_unreg_cdevs: for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++) if (thermal->cdevs[i]) @@ -452,6 +892,7 @@ void mlxsw_thermal_fini(struct mlxsw_thermal *thermal) { int i; + mlxsw_thermal_modules_fini(thermal); if (thermal->tzdev) { thermal_zone_device_unregister(thermal->tzdev); thermal->tzdev = NULL; diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c index 798bd5aca384..a87ca6b6580d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c +++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c @@ -503,6 +503,7 @@ static int mlxsw_i2c_probe(struct i2c_client *client, mlxsw_i2c->bus_info.device_kind = id->name; mlxsw_i2c->bus_info.device_name = client->name; mlxsw_i2c->bus_info.dev = &client->dev; + mlxsw_i2c->bus_info.low_frequency = true; mlxsw_i2c->dev = &client->dev; err = mlxsw_core_bus_device_register(&mlxsw_i2c->bus_info, diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 5f8066ab7d40..cbd0193ec3f6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -2199,6 +2199,14 @@ MLXSW_ITEM32(reg, pagt, size, 0x00, 0, 8); */ MLXSW_ITEM32(reg, pagt, acl_group_id, 0x08, 0, 16); +/* reg_pagt_multi + * Multi-ACL + * 0 - This ACL is the last ACL in the multi-ACL + * 1 - This ACL is part of a multi-ACL + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, pagt, multi, 0x30, 31, 1, 0x04, 0x00, false); + /* reg_pagt_acl_id * ACL identifier * Access: RW @@ -2212,12 +2220,13 @@ static inline void mlxsw_reg_pagt_pack(char *payload, u16 acl_group_id) } static inline void mlxsw_reg_pagt_acl_id_pack(char *payload, int index, - u16 acl_id) + u16 acl_id, bool multi) { u8 size = mlxsw_reg_pagt_size_get(payload); if (index >= size) mlxsw_reg_pagt_size_set(payload, index + 1); + mlxsw_reg_pagt_multi_set(payload, index, multi); mlxsw_reg_pagt_acl_id_set(payload, index, acl_id); } @@ -7866,6 +7875,35 @@ static inline void mlxsw_reg_mfsl_unpack(char *payload, u8 tacho, *p_tach_max = mlxsw_reg_mfsl_tach_max_get(payload); } +/* FORE - Fan Out of Range Event Register + * -------------------------------------- + * This register reports the status of the controlled fans compared to the + * range defined by the MFSL register. + */ +#define MLXSW_REG_FORE_ID 0x9007 +#define MLXSW_REG_FORE_LEN 0x0C + +MLXSW_REG_DEFINE(fore, MLXSW_REG_FORE_ID, MLXSW_REG_FORE_LEN); + +/* fan_under_limit + * Fan speed is below the low limit defined in MFSL register. Each bit relates + * to a single tachometer and indicates the specific tachometer reading is + * below the threshold. 
+ * Access: RO + */ +MLXSW_ITEM32(reg, fore, fan_under_limit, 0x00, 16, 10); + +static inline void mlxsw_reg_fore_unpack(char *payload, u8 tacho, + bool *fault) +{ + u16 limit; + + if (fault) { + limit = mlxsw_reg_fore_fan_under_limit_get(payload); + *fault = limit & BIT(tacho); + } +} + /* MTCAP - Management Temperature Capabilities * ------------------------------------------- * This register exposes the capabilities of the device and @@ -7992,6 +8030,80 @@ static inline void mlxsw_reg_mtmp_unpack(char *payload, unsigned int *p_temp, mlxsw_reg_mtmp_sensor_name_memcpy_from(payload, sensor_name); } +/* MTBR - Management Temperature Bulk Register + * ------------------------------------------- + * This register is used for bulk temperature reading. + */ +#define MLXSW_REG_MTBR_ID 0x900F +#define MLXSW_REG_MTBR_BASE_LEN 0x10 /* base length, without records */ +#define MLXSW_REG_MTBR_REC_LEN 0x04 /* record length */ +#define MLXSW_REG_MTBR_REC_MAX_COUNT 47 /* firmware limitation */ +#define MLXSW_REG_MTBR_LEN (MLXSW_REG_MTBR_BASE_LEN + \ + MLXSW_REG_MTBR_REC_LEN * \ + MLXSW_REG_MTBR_REC_MAX_COUNT) + +MLXSW_REG_DEFINE(mtbr, MLXSW_REG_MTBR_ID, MLXSW_REG_MTBR_LEN); + +/* reg_mtbr_base_sensor_index + * Base sensor index to access (0 - ASIC sensor, 1-63 - ambient sensors, + * 64-127 are mapped to the SFP+/QSFP modules sequentially). + * Access: Index + */ +MLXSW_ITEM32(reg, mtbr, base_sensor_index, 0x00, 0, 7); + +/* reg_mtbr_num_rec + * Request: Number of records to read + * Response: Number of records read + * See above description for more details. + * Range 1..255 + * Access: RW + */ +MLXSW_ITEM32(reg, mtbr, num_rec, 0x04, 0, 8); + +/* reg_mtbr_rec_max_temp + * The highest measured temperature from the sensor. + * When the bit mte is cleared, the field max_temperature is reserved. + * Access: RO + */ +MLXSW_ITEM32_INDEXED(reg, mtbr, rec_max_temp, MLXSW_REG_MTBR_BASE_LEN, 16, + 16, MLXSW_REG_MTBR_REC_LEN, 0x00, false); + +/* reg_mtbr_rec_temp + * Temperature reading from the sensor. Reading is in 0..125 Celsius + * degrees units. + * Access: RO + */ +MLXSW_ITEM32_INDEXED(reg, mtbr, rec_temp, MLXSW_REG_MTBR_BASE_LEN, 0, 16, + MLXSW_REG_MTBR_REC_LEN, 0x00, false); + +static inline void mlxsw_reg_mtbr_pack(char *payload, u8 base_sensor_index, + u8 num_rec) +{ + MLXSW_REG_ZERO(mtbr, payload); + mlxsw_reg_mtbr_base_sensor_index_set(payload, base_sensor_index); + mlxsw_reg_mtbr_num_rec_set(payload, num_rec); +} + +/* Error codes from temperature reading */ +enum mlxsw_reg_mtbr_temp_status { + MLXSW_REG_MTBR_NO_CONN = 0x8000, + MLXSW_REG_MTBR_NO_TEMP_SENS = 0x8001, + MLXSW_REG_MTBR_INDEX_NA = 0x8002, + MLXSW_REG_MTBR_BAD_SENS_INFO = 0x8003, +}; + +/* Base index for reading module temperatures */ +#define MLXSW_REG_MTBR_BASE_MODULE_INDEX 64 + +static inline void mlxsw_reg_mtbr_temp_unpack(char *payload, int rec_ind, + u16 *p_temp, u16 *p_max_temp) +{ + if (p_temp) + *p_temp = mlxsw_reg_mtbr_rec_temp_get(payload, rec_ind); + if (p_max_temp) + *p_max_temp = mlxsw_reg_mtbr_rec_max_temp_get(payload, rec_ind); +} + /* MCIA - Management Cable Info Access * ----------------------------------- * MCIA register is used to access the SFP+ and QSFP connector's EEPROM. 
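The MTBR status codes above are returned in-band, overloading the temperature field, so every reader must screen the raw value before scaling it. Below is a minimal sketch of that pattern, mirroring the hwmon and thermal callers added elsewhere in this patch; example_mtbr_temp_to_mcelsius() is a hypothetical helper, not part of the patch, and assumes the MLXSW_REG_MTMP_TEMP_TO_MC() conversion already defined for the MTMP register:

static int example_mtbr_temp_to_mcelsius(u16 raw, int *p_mcelsius)
{
	switch (raw) {
	case MLXSW_REG_MTBR_NO_CONN:
	case MLXSW_REG_MTBR_NO_TEMP_SENS:
	case MLXSW_REG_MTBR_INDEX_NA:
	case MLXSW_REG_MTBR_BAD_SENS_INFO:
		/* No usable reading; the callers in this patch report 0. */
		*p_mcelsius = 0;
		return -ENODATA;
	default:
		/* Valid reading; scale to millidegrees Celsius. */
		*p_mcelsius = MLXSW_REG_MTMP_TEMP_TO_MC(raw);
		return 0;
	}
}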
@@ -8046,13 +8158,41 @@ MLXSW_ITEM32(reg, mcia, device_address, 0x04, 0, 16); */ MLXSW_ITEM32(reg, mcia, size, 0x08, 0, 16); -#define MLXSW_SP_REG_MCIA_EEPROM_SIZE 48 +#define MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH 256 +#define MLXSW_REG_MCIA_EEPROM_SIZE 48 +#define MLXSW_REG_MCIA_I2C_ADDR_LOW 0x50 +#define MLXSW_REG_MCIA_I2C_ADDR_HIGH 0x51 +#define MLXSW_REG_MCIA_PAGE0_LO_OFF 0xa0 +#define MLXSW_REG_MCIA_TH_ITEM_SIZE 2 +#define MLXSW_REG_MCIA_TH_PAGE_NUM 3 +#define MLXSW_REG_MCIA_PAGE0_LO 0 +#define MLXSW_REG_MCIA_TH_PAGE_OFF 0x80 + +enum mlxsw_reg_mcia_eeprom_module_info_rev_id { + MLXSW_REG_MCIA_EEPROM_MODULE_INFO_REV_ID_UNSPC = 0x00, + MLXSW_REG_MCIA_EEPROM_MODULE_INFO_REV_ID_8436 = 0x01, + MLXSW_REG_MCIA_EEPROM_MODULE_INFO_REV_ID_8636 = 0x03, +}; + +enum mlxsw_reg_mcia_eeprom_module_info_id { + MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_SFP = 0x03, + MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP = 0x0C, + MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_PLUS = 0x0D, + MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28 = 0x11, + MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_DD = 0x18, +}; + +enum mlxsw_reg_mcia_eeprom_module_info { + MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID, + MLXSW_REG_MCIA_EEPROM_MODULE_INFO_REV_ID, + MLXSW_REG_MCIA_EEPROM_MODULE_INFO_SIZE, +}; /* reg_mcia_eeprom * Bytes to read/write. * Access: RW */ -MLXSW_ITEM_BUF(reg, mcia, eeprom, 0x10, MLXSW_SP_REG_MCIA_EEPROM_SIZE); +MLXSW_ITEM_BUF(reg, mcia, eeprom, 0x10, MLXSW_REG_MCIA_EEPROM_SIZE); static inline void mlxsw_reg_mcia_pack(char *payload, u8 module, u8 lock, u8 page_number, u16 device_addr, @@ -9740,8 +9880,10 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(mfsc), MLXSW_REG(mfsm), MLXSW_REG(mfsl), + MLXSW_REG(fore), MLXSW_REG(mtcap), MLXSW_REG(mtmp), + MLXSW_REG(mtbr), MLXSW_REG(mcia), MLXSW_REG(mpat), MLXSW_REG(mpar), diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h index b8b3a01c2a9e..773ef7fdb285 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/resources.h +++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h @@ -26,6 +26,7 @@ enum mlxsw_res_id { MLXSW_RES_ID_MAX_LAG_MEMBERS, MLXSW_RES_ID_MAX_BUFFER_SIZE, MLXSW_RES_ID_CELL_SIZE, + MLXSW_RES_ID_MAX_HEADROOM_SIZE, MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS, MLXSW_RES_ID_ACL_MAX_TCAM_RULES, MLXSW_RES_ID_ACL_MAX_REGIONS, @@ -79,6 +80,7 @@ static u16 mlxsw_res_ids[] = { [MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521, [MLXSW_RES_ID_MAX_BUFFER_SIZE] = 0x2802, /* Bytes */ [MLXSW_RES_ID_CELL_SIZE] = 0x2803, /* Bytes */ + [MLXSW_RES_ID_MAX_HEADROOM_SIZE] = 0x2811, /* Bytes */ [MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS] = 0x2901, [MLXSW_RES_ID_ACL_MAX_TCAM_RULES] = 0x2902, [MLXSW_RES_ID_ACL_MAX_REGIONS] = 0x2903, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index a88169738b4a..f018b0607dac 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -852,8 +852,12 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0; u16 delay = !!my_pfc ? 
my_pfc->delay : 0; char pbmc_pl[MLXSW_REG_PBMC_LEN]; + u32 taken_headroom_cells = 0; + u32 max_headroom_cells; int i, j, err; + max_headroom_cells = mlxsw_sp_sb_max_headroom_cells(mlxsw_sp); + mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0); err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); if (err) @@ -862,8 +866,10 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { bool configure = false; bool pfc = false; + u16 thres_cells; + u16 delay_cells; + u16 total_cells; bool lossy; - u16 thres; for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) { if (prio_tc[j] == i) { @@ -877,10 +883,17 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, continue; lossy = !(pfc || pause_en); - thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu); - delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc, - pause_en); - mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy); + thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu); + delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, + pfc, pause_en); + total_cells = thres_cells + delay_cells; + + taken_headroom_cells += total_cells; + if (taken_headroom_cells > max_headroom_cells) + return -ENOBUFS; + + mlxsw_sp_pg_buf_pack(pbmc_pl, i, total_cells, + thres_cells, lossy); } return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); @@ -1700,6 +1713,18 @@ static int mlxsw_sp_set_features(struct net_device *dev, mlxsw_sp_feature_hw_tc); } +static int mlxsw_sp_port_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + + ppid->id_len = sizeof(mlxsw_sp->base_mac); + memcpy(&ppid->id, &mlxsw_sp->base_mac, ppid->id_len); + + return 0; +} + static const struct net_device_ops mlxsw_sp_port_netdev_ops = { .ndo_open = mlxsw_sp_port_open, .ndo_stop = mlxsw_sp_port_stop, @@ -1715,6 +1740,7 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = { .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid, .ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name, .ndo_set_features = mlxsw_sp_set_features, + .ndo_get_port_parent_id = mlxsw_sp_port_get_port_parent_id, }; static void mlxsw_sp_port_get_drvinfo(struct net_device *dev, @@ -2710,23 +2736,23 @@ static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port, unsigned int *p_read_size) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - char eeprom_tmp[MLXSW_SP_REG_MCIA_EEPROM_SIZE]; + char eeprom_tmp[MLXSW_REG_MCIA_EEPROM_SIZE]; char mcia_pl[MLXSW_REG_MCIA_LEN]; u16 i2c_addr; int status; int err; - size = min_t(u16, size, MLXSW_SP_REG_MCIA_EEPROM_SIZE); + size = min_t(u16, size, MLXSW_REG_MCIA_EEPROM_SIZE); - if (offset < MLXSW_SP_EEPROM_PAGE_LENGTH && - offset + size > MLXSW_SP_EEPROM_PAGE_LENGTH) + if (offset < MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH && + offset + size > MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH) /* Cross pages read, read until offset 256 in low page */ - size = MLXSW_SP_EEPROM_PAGE_LENGTH - offset; + size = MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH - offset; - i2c_addr = MLXSW_SP_I2C_ADDR_LOW; - if (offset >= MLXSW_SP_EEPROM_PAGE_LENGTH) { - i2c_addr = MLXSW_SP_I2C_ADDR_HIGH; - offset -= MLXSW_SP_EEPROM_PAGE_LENGTH; + i2c_addr = MLXSW_REG_MCIA_I2C_ADDR_LOW; + if (offset >= MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH) { + i2c_addr = MLXSW_REG_MCIA_I2C_ADDR_HIGH; + offset -= MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH; } 
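+	/* At this point offset and size address a single EEPROM page: reads
+	 * that would cross the 256-byte low page were clamped above, and
+	 * offsets past it were rebased onto the high I2C address (0x51).
+	 */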
mlxsw_reg_mcia_pack(mcia_pl, mlxsw_sp_port->mapping.module, @@ -2747,55 +2773,37 @@ static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port, return 0; } -enum mlxsw_sp_eeprom_module_info_rev_id { - MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_UNSPC = 0x00, - MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8436 = 0x01, - MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636 = 0x03, -}; - -enum mlxsw_sp_eeprom_module_info_id { - MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP = 0x03, - MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP = 0x0C, - MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS = 0x0D, - MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 = 0x11, -}; - -enum mlxsw_sp_eeprom_module_info { - MLXSW_SP_EEPROM_MODULE_INFO_ID, - MLXSW_SP_EEPROM_MODULE_INFO_REV_ID, - MLXSW_SP_EEPROM_MODULE_INFO_SIZE, -}; - static int mlxsw_sp_get_module_info(struct net_device *netdev, struct ethtool_modinfo *modinfo) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); - u8 module_info[MLXSW_SP_EEPROM_MODULE_INFO_SIZE]; + u8 module_info[MLXSW_REG_MCIA_EEPROM_MODULE_INFO_SIZE]; + u16 offset = MLXSW_REG_MCIA_EEPROM_MODULE_INFO_SIZE; u8 module_rev_id, module_id; unsigned int read_size; int err; - err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, 0, - MLXSW_SP_EEPROM_MODULE_INFO_SIZE, + err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, 0, offset, module_info, &read_size); if (err) return err; - if (read_size < MLXSW_SP_EEPROM_MODULE_INFO_SIZE) + if (read_size < offset) return -EIO; - module_rev_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_REV_ID]; - module_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_ID]; + module_rev_id = module_info[MLXSW_REG_MCIA_EEPROM_MODULE_INFO_REV_ID]; + module_id = module_info[MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID]; switch (module_id) { - case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP: + case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP: modinfo->type = ETH_MODULE_SFF_8436; modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; break; - case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS: - case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28: - if (module_id == MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 || - module_rev_id >= MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636) { + case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_PLUS: /* fall-through */ + case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28: + if (module_id == MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28 || + module_rev_id >= + MLXSW_REG_MCIA_EEPROM_MODULE_INFO_REV_ID_8636) { modinfo->type = ETH_MODULE_SFF_8636; modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; } else { @@ -2803,7 +2811,7 @@ static int mlxsw_sp_get_module_info(struct net_device *netdev, modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; } break; - case MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP: + case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_SFP: modinfo->type = ETH_MODULE_SFF_8472; modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; break; @@ -3744,8 +3752,8 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) burst_size = 7; break; case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: - rate = 4 * 1024; - burst_size = 4; + rate = 1024; + burst_size = 7; break; default: continue; @@ -4095,6 +4103,7 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr; mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask; mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr; + mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals; return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info); } @@ -4112,6 +4121,7 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; mlxsw_sp->rif_ops_arr = 
mlxsw_sp2_rif_ops_arr; + mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info); } @@ -4400,6 +4410,71 @@ static void mlxsw_sp_params_unregister(struct mlxsw_core *mlxsw_core) ARRAY_SIZE(mlxsw_sp_devlink_params)); } +static int +mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink); + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + + ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp); + return 0; +} + +static int +mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink); + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + + return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32); +} + +static const struct devlink_param mlxsw_sp2_devlink_params[] = { + DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, + "acl_region_rehash_interval", + DEVLINK_PARAM_TYPE_U32, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + mlxsw_sp_params_acl_region_rehash_intrvl_get, + mlxsw_sp_params_acl_region_rehash_intrvl_set, + NULL), +}; + +static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_core); + union devlink_param_value value; + int err; + + err = mlxsw_sp_params_register(mlxsw_core); + if (err) + return err; + + err = devlink_params_register(devlink, mlxsw_sp2_devlink_params, + ARRAY_SIZE(mlxsw_sp2_devlink_params)); + if (err) + goto err_devlink_params_register; + + value.vu32 = 0; + devlink_param_driverinit_value_set(devlink, + MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, + value); + return 0; + +err_devlink_params_register: + mlxsw_sp_params_unregister(mlxsw_core); + return err; +} + +static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core) +{ + devlink_params_unregister(priv_to_devlink(mlxsw_core), + mlxsw_sp2_devlink_params, + ARRAY_SIZE(mlxsw_sp2_devlink_params)); + mlxsw_sp_params_unregister(mlxsw_core); +} + static struct mlxsw_driver mlxsw_sp1_driver = { .kind = mlxsw_sp1_driver_name, .priv_size = sizeof(struct mlxsw_sp), @@ -4448,8 +4523,8 @@ static struct mlxsw_driver mlxsw_sp2_driver = { .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, .txhdr_construct = mlxsw_sp_txhdr_construct, .resources_register = mlxsw_sp2_resources_register, - .params_register = mlxsw_sp_params_register, - .params_unregister = mlxsw_sp_params_unregister, + .params_register = mlxsw_sp2_params_register, + .params_unregister = mlxsw_sp2_params_unregister, .txhdr_len = MLXSW_TXHDR_LEN, .profile = &mlxsw_sp2_config_profile, .res_query_enabled = true, @@ -4693,9 +4768,6 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index); if (err) goto err_col_port_add; - err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id); - if (err) - goto err_col_port_enable; mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index, mlxsw_sp_port->local_port); @@ -4709,8 +4781,6 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, return 0; -err_col_port_enable: - mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); err_col_port_add: if (!lag->ref_count) mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); @@ -4729,7 +4799,6 @@ static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, lag = 
mlxsw_sp_lag_get(mlxsw_sp, lag_id); WARN_ON(lag->ref_count == 0); - mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id); mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); /* Any VLANs configured on the port are no longer valid */ @@ -4774,21 +4843,56 @@ static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); } -static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port, - bool lag_tx_enabled) +static int +mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port) { - if (lag_tx_enabled) - return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, - mlxsw_sp_port->lag_id); - else - return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port, - mlxsw_sp_port->lag_id); + int err; + + err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, + mlxsw_sp_port->lag_id); + if (err) + return err; + + err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); + if (err) + goto err_dist_port_add; + + return 0; + +err_dist_port_add: + mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id); + return err; +} + +static int +mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port) +{ + int err; + + err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port, + mlxsw_sp_port->lag_id); + if (err) + return err; + + err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, + mlxsw_sp_port->lag_id); + if (err) + goto err_col_port_disable; + + return 0; + +err_col_port_disable: + mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); + return err; } static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port, struct netdev_lag_lower_state_info *info) { - return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled); + if (info->tx_enabled) + return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port); + else + return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); } static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, @@ -5011,8 +5115,7 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, err = mlxsw_sp_port_lag_join(mlxsw_sp_port, upper_dev); } else { - mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, - false); + mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); mlxsw_sp_port_lag_leave(mlxsw_sp_port, upper_dev); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 4fe0996c7cdd..8bb83d0facc2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -133,6 +133,7 @@ struct mlxsw_sp_kvdl_ops; struct mlxsw_sp_mr_tcam_ops; struct mlxsw_sp_acl_tcam_ops; struct mlxsw_sp_nve_ops; +struct mlxsw_sp_sb_vals; struct mlxsw_sp { struct mlxsw_sp_port **ports; @@ -167,6 +168,7 @@ struct mlxsw_sp { const struct mlxsw_sp_acl_tcam_ops *acl_tcam_ops; const struct mlxsw_sp_nve_ops **nve_ops_arr; const struct mlxsw_sp_rif_ops **rif_ops_arr; + const struct mlxsw_sp_sb_vals *sb_vals; }; static inline struct mlxsw_sp_upper * @@ -371,6 +373,10 @@ int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port, u32 *p_cur, u32 *p_max); u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells); u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes); +u32 mlxsw_sp_sb_max_headroom_cells(const struct mlxsw_sp *mlxsw_sp); + +extern const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals; +extern const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals; /* spectrum_switchdev.c */ int mlxsw_sp_switchdev_init(struct 
mlxsw_sp *mlxsw_sp); @@ -690,6 +696,8 @@ struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp); int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp); +u32 mlxsw_sp_acl_region_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp); +int mlxsw_sp_acl_region_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp, u32 val); /* spectrum_acl_tcam.c */ struct mlxsw_sp_acl_tcam; @@ -704,10 +712,13 @@ struct mlxsw_sp_acl_tcam_ops { size_t region_priv_size; int (*region_init)(struct mlxsw_sp *mlxsw_sp, void *region_priv, void *tcam_priv, - struct mlxsw_sp_acl_tcam_region *region); + struct mlxsw_sp_acl_tcam_region *region, + void *hints_priv); void (*region_fini)(struct mlxsw_sp *mlxsw_sp, void *region_priv); int (*region_associate)(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_region *region); + void * (*region_rehash_hints_get)(void *region_priv); + void (*region_rehash_hints_put)(void *hints_priv); size_t chunk_priv_size; void (*chunk_init)(void *region_priv, void *chunk_priv, unsigned int priority); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c index 6e444525713f..3a636f753607 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c @@ -112,7 +112,8 @@ mlxsw_sp1_acl_ctcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp1_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv, void *tcam_priv, - struct mlxsw_sp_acl_tcam_region *_region) + struct mlxsw_sp_acl_tcam_region *_region, + void *hints_priv) { struct mlxsw_sp1_acl_tcam_region *region = region_priv; int err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c index d380b3403960..6c66a0f1b79e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c @@ -139,7 +139,8 @@ static void mlxsw_sp2_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv) static int mlxsw_sp2_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv, void *tcam_priv, - struct mlxsw_sp_acl_tcam_region *_region) + struct mlxsw_sp_acl_tcam_region *_region, + void *hints_priv) { struct mlxsw_sp2_acl_tcam_region *region = region_priv; struct mlxsw_sp2_acl_tcam *tcam = tcam_priv; @@ -147,7 +148,8 @@ mlxsw_sp2_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv, region->region = _region; return mlxsw_sp_acl_atcam_region_init(mlxsw_sp, &tcam->atcam, - ®ion->aregion, _region, + ®ion->aregion, + _region, hints_priv, &mlxsw_sp2_acl_ctcam_region_ops); } @@ -166,6 +168,18 @@ mlxsw_sp2_acl_tcam_region_associate(struct mlxsw_sp *mlxsw_sp, return mlxsw_sp_acl_atcam_region_associate(mlxsw_sp, region->id); } +static void *mlxsw_sp2_acl_tcam_region_rehash_hints_get(void *region_priv) +{ + struct mlxsw_sp2_acl_tcam_region *region = region_priv; + + return mlxsw_sp_acl_atcam_rehash_hints_get(®ion->aregion); +} + +static void mlxsw_sp2_acl_tcam_region_rehash_hints_put(void *hints_priv) +{ + mlxsw_sp_acl_atcam_rehash_hints_put(hints_priv); +} + static void mlxsw_sp2_acl_tcam_chunk_init(void *region_priv, void *chunk_priv, unsigned int priority) { @@ -243,6 +257,8 @@ const struct mlxsw_sp_acl_tcam_ops mlxsw_sp2_acl_tcam_ops = { .region_init = mlxsw_sp2_acl_tcam_region_init, .region_fini = mlxsw_sp2_acl_tcam_region_fini, .region_associate = mlxsw_sp2_acl_tcam_region_associate, + 
.region_rehash_hints_get = mlxsw_sp2_acl_tcam_region_rehash_hints_get, + .region_rehash_hints_put = mlxsw_sp2_acl_tcam_region_rehash_hints_put, .chunk_priv_size = sizeof(struct mlxsw_sp2_acl_tcam_chunk), .chunk_init = mlxsw_sp2_acl_tcam_chunk_init, .chunk_fini = mlxsw_sp2_acl_tcam_chunk_fini, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index a69e3462b65e..a146a44634e9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -588,7 +588,7 @@ int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp, { u8 ethertype; - if (action == TCA_VLAN_ACT_MODIFY) { + if (action == FLOW_ACTION_VLAN_MANGLE) { switch (proto) { case ETH_P_8021Q: ethertype = 0; @@ -640,7 +640,7 @@ mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp, int err; mlxsw_sp_acl_ruleset_ref_inc(ruleset); - rule = kzalloc(sizeof(*rule) + ops->rule_priv_size(mlxsw_sp), + rule = kzalloc(sizeof(*rule) + ops->rule_priv_size, GFP_KERNEL); if (!rule) { err = -ENOMEM; @@ -912,3 +912,19 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp) mlxsw_afk_destroy(acl->afk); kfree(acl); } + +u32 mlxsw_sp_acl_region_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp) +{ + struct mlxsw_sp_acl *acl = mlxsw_sp->acl; + + return mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(mlxsw_sp, + &acl->tcam); +} + +int mlxsw_sp_acl_region_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp, u32 val) +{ + struct mlxsw_sp_acl *acl = mlxsw_sp->acl; + + return mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(mlxsw_sp, + &acl->tcam, val); +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c index a74a390901ac..ded4cf658680 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c @@ -318,6 +318,7 @@ mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_atcam *atcam, struct mlxsw_sp_acl_atcam_region *aregion, struct mlxsw_sp_acl_tcam_region *region, + void *hints_priv, const struct mlxsw_sp_acl_ctcam_region_ops *ops) { int err; @@ -334,7 +335,7 @@ mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp, err = aregion->ops->init(aregion); if (err) goto err_ops_init; - err = mlxsw_sp_acl_erp_region_init(aregion); + err = mlxsw_sp_acl_erp_region_init(aregion, hints_priv); if (err) goto err_erp_region_init; err = mlxsw_sp_acl_ctcam_region_init(mlxsw_sp, &aregion->cregion, @@ -634,3 +635,14 @@ void mlxsw_sp_acl_atcam_fini(struct mlxsw_sp *mlxsw_sp, { mlxsw_sp_acl_erps_fini(mlxsw_sp, atcam); } + +void * +mlxsw_sp_acl_atcam_rehash_hints_get(struct mlxsw_sp_acl_atcam_region *aregion) +{ + return mlxsw_sp_acl_erp_rehash_hints_get(aregion); +} + +void mlxsw_sp_acl_atcam_rehash_hints_put(void *hints_priv) +{ + mlxsw_sp_acl_erp_rehash_hints_put(hints_priv); +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c index 2941967e1cc5..e935c36638d9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c @@ -1200,6 +1200,32 @@ mlxsw_sp_acl_erp_delta_fill(const struct mlxsw_sp_acl_erp_key *parent_key, return 0; } +static bool mlxsw_sp_acl_erp_delta_check(void *priv, const void *parent_obj, + const void *obj) +{ + const struct mlxsw_sp_acl_erp_key *parent_key = parent_obj; + const struct mlxsw_sp_acl_erp_key *key = obj; + u16 delta_start; + u8 delta_mask; + 
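+ /* A delta is expressible only when the child mask differs from
+  * the parent mask within a narrow window of bits; when
+  * mlxsw_sp_acl_erp_delta_fill() cannot encode the difference as a
+  * (delta_start, delta_mask) pair, the two keys must not be
+  * aggregated under a single root.
+  */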
int err; + + err = mlxsw_sp_acl_erp_delta_fill(parent_key, key, + &delta_start, &delta_mask); + return err ? false : true; +} + +static int mlxsw_sp_acl_erp_hints_obj_cmp(const void *obj1, const void *obj2) +{ + const struct mlxsw_sp_acl_erp_key *key1 = obj1; + const struct mlxsw_sp_acl_erp_key *key2 = obj2; + + /* For hints purposes, two objects are considered equal + * in case the masks are the same. Does not matter what + * the "ctcam" value is. + */ + return memcmp(key1->mask, key2->mask, sizeof(key1->mask)); +} + static void *mlxsw_sp_acl_erp_delta_create(void *priv, void *parent_obj, void *obj) { @@ -1254,12 +1280,17 @@ static void mlxsw_sp_acl_erp_delta_destroy(void *priv, void *delta_priv) kfree(delta); } -static void *mlxsw_sp_acl_erp_root_create(void *priv, void *obj) +static void *mlxsw_sp_acl_erp_root_create(void *priv, void *obj, + unsigned int root_id) { struct mlxsw_sp_acl_atcam_region *aregion = priv; struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table; struct mlxsw_sp_acl_erp_key *key = obj; + if (!key->ctcam && + root_id != OBJAGG_OBJ_ROOT_ID_INVALID && + root_id >= MLXSW_SP_ACL_ERP_MAX_PER_REGION) + return ERR_PTR(-ENOBUFS); return erp_table->ops->erp_create(erp_table, key); } @@ -1273,6 +1304,8 @@ static void mlxsw_sp_acl_erp_root_destroy(void *priv, void *root_priv) static const struct objagg_ops mlxsw_sp_acl_erp_objagg_ops = { .obj_size = sizeof(struct mlxsw_sp_acl_erp_key), + .delta_check = mlxsw_sp_acl_erp_delta_check, + .hints_obj_cmp = mlxsw_sp_acl_erp_hints_obj_cmp, .delta_create = mlxsw_sp_acl_erp_delta_create, .delta_destroy = mlxsw_sp_acl_erp_delta_destroy, .root_create = mlxsw_sp_acl_erp_root_create, @@ -1280,7 +1313,8 @@ static const struct objagg_ops mlxsw_sp_acl_erp_objagg_ops = { }; static struct mlxsw_sp_acl_erp_table * -mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion) +mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion, + struct objagg_hints *hints) { struct mlxsw_sp_acl_erp_table *erp_table; int err; @@ -1290,7 +1324,7 @@ mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion) return ERR_PTR(-ENOMEM); erp_table->objagg = objagg_create(&mlxsw_sp_acl_erp_objagg_ops, - aregion); + hints, aregion); if (IS_ERR(erp_table->objagg)) { err = PTR_ERR(erp_table->objagg); goto err_objagg_create; @@ -1337,12 +1371,88 @@ mlxsw_sp_acl_erp_region_param_init(struct mlxsw_sp_acl_atcam_region *aregion) return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl); } -int mlxsw_sp_acl_erp_region_init(struct mlxsw_sp_acl_atcam_region *aregion) +static int +mlxsw_sp_acl_erp_hints_check(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam_region *aregion, + struct objagg_hints *hints, bool *p_rehash_needed) +{ + struct objagg *objagg = aregion->erp_table->objagg; + const struct objagg_stats *ostats; + const struct objagg_stats *hstats; + int err; + + *p_rehash_needed = false; + + ostats = objagg_stats_get(objagg); + if (IS_ERR(ostats)) { + dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get ERP stats\n"); + return PTR_ERR(ostats); + } + + hstats = objagg_hints_stats_get(hints); + if (IS_ERR(hstats)) { + dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get ERP hints stats\n"); + err = PTR_ERR(hstats); + goto err_hints_stats_get; + } + + /* Very basic criterion for now. 
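+ * Rehashing pays off only if the hinted aggregation would use
+ * strictly fewer ERP roots than the current table does; roots are
+ * the scarce resource the hints try to reclaim.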
*/ + if (hstats->root_count < ostats->root_count) + *p_rehash_needed = true; + + err = 0; + + objagg_stats_put(hstats); +err_hints_stats_get: + objagg_stats_put(ostats); + return err; +} + +void * +mlxsw_sp_acl_erp_rehash_hints_get(struct mlxsw_sp_acl_atcam_region *aregion) +{ + struct mlxsw_sp *mlxsw_sp = aregion->region->mlxsw_sp; + struct objagg_hints *hints; + bool rehash_needed; + int err; + + hints = objagg_hints_get(aregion->erp_table->objagg, + OBJAGG_OPT_ALGO_SIMPLE_GREEDY); + if (IS_ERR(hints)) { + dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to create ERP hints\n"); + return ERR_CAST(hints); + } + err = mlxsw_sp_acl_erp_hints_check(mlxsw_sp, aregion, hints, + &rehash_needed); + if (err) + goto errout; + + if (!rehash_needed) { + err = -EAGAIN; + goto errout; + } + return hints; + +errout: + objagg_hints_put(hints); + return ERR_PTR(err); +} + +void mlxsw_sp_acl_erp_rehash_hints_put(void *hints_priv) +{ + struct objagg_hints *hints = hints_priv; + + objagg_hints_put(hints); +} + +int mlxsw_sp_acl_erp_region_init(struct mlxsw_sp_acl_atcam_region *aregion, + void *hints_priv) { struct mlxsw_sp_acl_erp_table *erp_table; + struct objagg_hints *hints = hints_priv; int err; - erp_table = mlxsw_sp_acl_erp_table_create(aregion); + erp_table = mlxsw_sp_acl_erp_table_create(aregion, hints); if (IS_ERR(erp_table)) return PTR_ERR(erp_table); aregion->erp_table = erp_table; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c index 11456e1f236f..7e225a86e3a8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -8,6 +8,7 @@ #include <linux/list.h> #include <linux/rhashtable.h> #include <linux/netdevice.h> +#include <trace/events/mlxsw.h> #include "reg.h" #include "core.h" @@ -23,6 +24,9 @@ size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp) return ops->priv_size; } +#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT 5000 /* ms */ +#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN 3000 /* ms */ + int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam *tcam) { @@ -33,6 +37,10 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, size_t alloc_size; int err; + tcam->vregion_rehash_intrvl = + MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT; + INIT_LIST_HEAD(&tcam->vregion_list); + max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_REGIONS); max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS); @@ -153,9 +161,9 @@ struct mlxsw_sp_acl_tcam_pattern { struct mlxsw_sp_acl_tcam_group { struct mlxsw_sp_acl_tcam *tcam; u16 id; - struct list_head region_list; + struct list_head vregion_list; unsigned int region_count; - struct rhashtable chunk_ht; + struct rhashtable vchunk_ht; struct mlxsw_sp_acl_tcam_group_ops *ops; const struct mlxsw_sp_acl_tcam_pattern *patterns; unsigned int patterns_count; @@ -163,40 +171,77 @@ struct mlxsw_sp_acl_tcam_group { struct mlxsw_afk_element_usage tmplt_elusage; }; -struct mlxsw_sp_acl_tcam_chunk { - struct list_head list; /* Member of a TCAM region */ - struct rhash_head ht_node; /* Member of a chunk HT */ - unsigned int priority; /* Priority within the region and group */ +struct mlxsw_sp_acl_tcam_vregion { + struct mlxsw_sp_acl_tcam_region *region; + struct mlxsw_sp_acl_tcam_region *region2; /* Used during migration */ + struct list_head list; /* Member of a TCAM group */ + struct list_head tlist; /* Member of a TCAM */ + struct list_head 
vchunk_list; /* List of vchunks under this vregion */ struct mlxsw_sp_acl_tcam_group *group; + struct mlxsw_afk_key_info *key_info; + struct mlxsw_sp_acl_tcam *tcam; + struct delayed_work rehash_dw; + struct mlxsw_sp *mlxsw_sp; + bool failed_rollback; /* Indicates failed rollback during migration */ +}; + +struct mlxsw_sp_acl_tcam_vchunk; + +struct mlxsw_sp_acl_tcam_chunk { + struct mlxsw_sp_acl_tcam_vchunk *vchunk; struct mlxsw_sp_acl_tcam_region *region; - unsigned int ref_count; unsigned long priv[0]; /* priv has to be always the last item */ }; +struct mlxsw_sp_acl_tcam_vchunk { + struct mlxsw_sp_acl_tcam_chunk *chunk; + struct mlxsw_sp_acl_tcam_chunk *chunk2; /* Used during migration */ + struct list_head list; /* Member of a TCAM vregion */ + struct rhash_head ht_node; /* Member of a chunk HT */ + struct list_head ventry_list; + unsigned int priority; /* Priority within the vregion and group */ + struct mlxsw_sp_acl_tcam_group *group; + struct mlxsw_sp_acl_tcam_vregion *vregion; + unsigned int ref_count; +}; + struct mlxsw_sp_acl_tcam_entry { + struct mlxsw_sp_acl_tcam_ventry *ventry; struct mlxsw_sp_acl_tcam_chunk *chunk; unsigned long priv[0]; /* priv has to be always the last item */ }; -static const struct rhashtable_params mlxsw_sp_acl_tcam_chunk_ht_params = { +struct mlxsw_sp_acl_tcam_ventry { + struct mlxsw_sp_acl_tcam_entry *entry; + struct list_head list; /* Member of a TCAM vchunk */ + struct mlxsw_sp_acl_tcam_vchunk *vchunk; + struct mlxsw_sp_acl_rule_info *rulei; +}; + +static const struct rhashtable_params mlxsw_sp_acl_tcam_vchunk_ht_params = { .key_len = sizeof(unsigned int), - .key_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, priority), - .head_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, ht_node), + .key_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, priority), + .head_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, ht_node), .automatic_shrinking = true, }; static int mlxsw_sp_acl_tcam_group_update(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_group *group) { - struct mlxsw_sp_acl_tcam_region *region; + struct mlxsw_sp_acl_tcam_vregion *vregion; char pagt_pl[MLXSW_REG_PAGT_LEN]; int acl_index = 0; mlxsw_reg_pagt_pack(pagt_pl, group->id); - list_for_each_entry(region, &group->region_list, list) - mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++, region->id); + list_for_each_entry(vregion, &group->vregion_list, list) { + if (vregion->region2) + mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++, + vregion->region2->id, true); + mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++, + vregion->region->id, false); + } mlxsw_reg_pagt_size_set(pagt_pl, acl_index); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pagt), pagt_pl); } @@ -219,13 +264,13 @@ mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp, memcpy(&group->tmplt_elusage, tmplt_elusage, sizeof(group->tmplt_elusage)); } - INIT_LIST_HEAD(&group->region_list); + INIT_LIST_HEAD(&group->vregion_list); err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id); if (err) return err; - err = rhashtable_init(&group->chunk_ht, - &mlxsw_sp_acl_tcam_chunk_ht_params); + err = rhashtable_init(&group->vchunk_ht, + &mlxsw_sp_acl_tcam_vchunk_ht_params); if (err) goto err_rhashtable_init; @@ -241,9 +286,9 @@ static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_acl_tcam *tcam = group->tcam; - rhashtable_destroy(&group->chunk_ht); + rhashtable_destroy(&group->vchunk_ht); mlxsw_sp_acl_tcam_group_id_put(tcam, group->id); - WARN_ON(!list_empty(&group->region_list)); + 
WARN_ON(!list_empty(&group->vregion_list)); } static int @@ -283,140 +328,153 @@ mlxsw_sp_acl_tcam_group_id(struct mlxsw_sp_acl_tcam_group *group) } static unsigned int -mlxsw_sp_acl_tcam_region_prio(struct mlxsw_sp_acl_tcam_region *region) +mlxsw_sp_acl_tcam_vregion_prio(struct mlxsw_sp_acl_tcam_vregion *vregion) { - struct mlxsw_sp_acl_tcam_chunk *chunk; + struct mlxsw_sp_acl_tcam_vchunk *vchunk; - if (list_empty(®ion->chunk_list)) + if (list_empty(&vregion->vchunk_list)) return 0; - /* As a priority of a region, return priority of the first chunk */ - chunk = list_first_entry(®ion->chunk_list, typeof(*chunk), list); - return chunk->priority; + /* As a priority of a vregion, return priority of the first vchunk */ + vchunk = list_first_entry(&vregion->vchunk_list, + typeof(*vchunk), list); + return vchunk->priority; } static unsigned int -mlxsw_sp_acl_tcam_region_max_prio(struct mlxsw_sp_acl_tcam_region *region) +mlxsw_sp_acl_tcam_vregion_max_prio(struct mlxsw_sp_acl_tcam_vregion *vregion) { - struct mlxsw_sp_acl_tcam_chunk *chunk; + struct mlxsw_sp_acl_tcam_vchunk *vchunk; - if (list_empty(®ion->chunk_list)) + if (list_empty(&vregion->vchunk_list)) return 0; - chunk = list_last_entry(®ion->chunk_list, typeof(*chunk), list); - return chunk->priority; + vchunk = list_last_entry(&vregion->vchunk_list, + typeof(*vchunk), list); + return vchunk->priority; } -static void -mlxsw_sp_acl_tcam_group_list_add(struct mlxsw_sp_acl_tcam_group *group, - struct mlxsw_sp_acl_tcam_region *region) +static int +mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region) { - struct mlxsw_sp_acl_tcam_region *region2; - struct list_head *pos; + struct mlxsw_sp_acl_tcam_group *group = region->vregion->group; + int err; + + if (group->region_count == group->tcam->max_group_size) + return -ENOBUFS; + + err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group); + if (err) + return err; - /* Position the region inside the list according to priority */ - list_for_each(pos, &group->region_list) { - region2 = list_entry(pos, typeof(*region2), list); - if (mlxsw_sp_acl_tcam_region_prio(region2) > - mlxsw_sp_acl_tcam_region_prio(region)) - break; - } - list_add_tail(®ion->list, pos); group->region_count++; + return 0; } static void -mlxsw_sp_acl_tcam_group_list_del(struct mlxsw_sp_acl_tcam_group *group, - struct mlxsw_sp_acl_tcam_region *region) +mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region) { + struct mlxsw_sp_acl_tcam_group *group = region->vregion->group; + group->region_count--; - list_del(®ion->list); + mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group); } static int -mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_group *group, - struct mlxsw_sp_acl_tcam_region *region) +mlxsw_sp_acl_tcam_group_vregion_attach(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_group *group, + struct mlxsw_sp_acl_tcam_vregion *vregion) { + struct mlxsw_sp_acl_tcam_vregion *vregion2; + struct list_head *pos; int err; - if (group->region_count == group->tcam->max_group_size) - return -ENOBUFS; - - mlxsw_sp_acl_tcam_group_list_add(group, region); + /* Position the vregion inside the list according to priority */ + list_for_each(pos, &group->vregion_list) { + vregion2 = list_entry(pos, typeof(*vregion2), list); + if (mlxsw_sp_acl_tcam_vregion_prio(vregion2) > + mlxsw_sp_acl_tcam_vregion_prio(vregion)) + break; + } + list_add_tail(&vregion->list, pos); + vregion->group = group; - err = 
mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group); + err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, vregion->region); if (err) - goto err_group_update; - region->group = group; + goto err_region_attach; return 0; -err_group_update: - mlxsw_sp_acl_tcam_group_list_del(group, region); - mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group); +err_region_attach: + list_del(&vregion->list); return err; } static void -mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_region *region) +mlxsw_sp_acl_tcam_group_vregion_detach(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_vregion *vregion) { - struct mlxsw_sp_acl_tcam_group *group = region->group; - - mlxsw_sp_acl_tcam_group_list_del(group, region); - mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group); + list_del(&vregion->list); + if (vregion->region2) + mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, + vregion->region2); + mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, vregion->region); } -static struct mlxsw_sp_acl_tcam_region * -mlxsw_sp_acl_tcam_group_region_find(struct mlxsw_sp_acl_tcam_group *group, - unsigned int priority, - struct mlxsw_afk_element_usage *elusage, - bool *p_need_split) +static struct mlxsw_sp_acl_tcam_vregion * +mlxsw_sp_acl_tcam_group_vregion_find(struct mlxsw_sp_acl_tcam_group *group, + unsigned int priority, + struct mlxsw_afk_element_usage *elusage, + bool *p_need_split) { - struct mlxsw_sp_acl_tcam_region *region, *region2; + struct mlxsw_sp_acl_tcam_vregion *vregion, *vregion2; struct list_head *pos; bool issubset; - list_for_each(pos, &group->region_list) { - region = list_entry(pos, typeof(*region), list); + list_for_each(pos, &group->vregion_list) { + vregion = list_entry(pos, typeof(*vregion), list); /* First, check if the requested priority does not rather belong - * under some of the next regions. + * under some of the next vregions. */ - if (pos->next != &group->region_list) { /* not last */ - region2 = list_entry(pos->next, typeof(*region2), list); - if (priority >= mlxsw_sp_acl_tcam_region_prio(region2)) + if (pos->next != &group->vregion_list) { /* not last */ + vregion2 = list_entry(pos->next, typeof(*vregion2), + list); + if (priority >= + mlxsw_sp_acl_tcam_vregion_prio(vregion2)) continue; } - issubset = mlxsw_afk_key_info_subset(region->key_info, elusage); + issubset = mlxsw_afk_key_info_subset(vregion->key_info, + elusage); /* If requested element usage would not fit and the priority - * is lower than the currently inspected region we cannot - * use this region, so return NULL to indicate new region has + * is lower than the currently inspected vregion we cannot + * use this region, so return NULL to indicate new vregion has * to be created. */ if (!issubset && - priority < mlxsw_sp_acl_tcam_region_prio(region)) + priority < mlxsw_sp_acl_tcam_vregion_prio(vregion)) return NULL; /* If requested element usage would not fit and the priority - * is higher than the currently inspected region we cannot - * use this region. There is still some hope that the next - * region would be the fit. So let it be processed and + * is higher than the currently inspected vregion we cannot + * use this vregion. There is still some hope that the next + * vregion would be the fit. So let it be processed and * eventually break at the check right above this. 
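+ * If the loop finds no candidate at all, NULL is returned and the
+ * caller creates a fresh vregion for the requested priority.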
*/ if (!issubset && - priority > mlxsw_sp_acl_tcam_region_max_prio(region)) + priority > mlxsw_sp_acl_tcam_vregion_max_prio(vregion)) continue; - /* Indicate if the region needs to be split in order to add + /* Indicate if the vregion needs to be split in order to add * the requested priority. Split is needed when requested - * element usage won't fit into the found region. + * element usage won't fit into the found vregion. */ *p_need_split = !issubset; - return region; + return vregion; } - return NULL; /* New region has to be created. */ + return NULL; /* New vregion has to be created. */ } static void @@ -511,24 +569,19 @@ mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp, static struct mlxsw_sp_acl_tcam_region * mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam *tcam, - struct mlxsw_afk_element_usage *elusage) + struct mlxsw_sp_acl_tcam_vregion *vregion, + void *hints_priv) { const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; - struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl); struct mlxsw_sp_acl_tcam_region *region; int err; region = kzalloc(sizeof(*region) + ops->region_priv_size, GFP_KERNEL); if (!region) return ERR_PTR(-ENOMEM); - INIT_LIST_HEAD(®ion->chunk_list); region->mlxsw_sp = mlxsw_sp; - - region->key_info = mlxsw_afk_key_info_get(afk, elusage); - if (IS_ERR(region->key_info)) { - err = PTR_ERR(region->key_info); - goto err_key_info_get; - } + region->vregion = vregion; + region->key_info = vregion->key_info; err = mlxsw_sp_acl_tcam_region_id_get(tcam, ®ion->id); if (err) @@ -547,7 +600,8 @@ mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp, if (err) goto err_tcam_region_enable; - err = ops->region_init(mlxsw_sp, region->priv, tcam->priv, region); + err = ops->region_init(mlxsw_sp, region->priv, tcam->priv, + region, hints_priv); if (err) goto err_tcam_region_init; @@ -561,8 +615,6 @@ err_tcam_region_alloc: err_tcam_region_associate: mlxsw_sp_acl_tcam_region_id_put(tcam, region->id); err_region_id_get: - mlxsw_afk_key_info_put(region->key_info); -err_key_info_get: kfree(region); return ERR_PTR(err); } @@ -576,217 +628,372 @@ mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp, ops->region_fini(mlxsw_sp, region->priv); mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region); mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region); - mlxsw_sp_acl_tcam_region_id_put(region->group->tcam, region->id); - mlxsw_afk_key_info_put(region->key_info); + mlxsw_sp_acl_tcam_region_id_put(region->vregion->group->tcam, + region->id); kfree(region); } +static void +mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(struct mlxsw_sp_acl_tcam_vregion *vregion) +{ + unsigned long interval = vregion->tcam->vregion_rehash_intrvl; + + if (!interval) + return; + mlxsw_core_schedule_dw(&vregion->rehash_dw, + msecs_to_jiffies(interval)); +} + static int -mlxsw_sp_acl_tcam_chunk_assoc(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_group *group, - unsigned int priority, - struct mlxsw_afk_element_usage *elusage, - struct mlxsw_sp_acl_tcam_chunk *chunk) +mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_vregion *vregion); + +static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work) { - struct mlxsw_sp_acl_tcam_region *region; - bool region_created = false; + struct mlxsw_sp_acl_tcam_vregion *vregion = + container_of(work, struct mlxsw_sp_acl_tcam_vregion, + rehash_dw.work); + + /* TODO: Take rtnl lock here as the rest of the code counts on it + * now. 
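+ * Holding rtnl serializes the rehash worker against the rule
+ * insert/remove paths, which currently run under the same lock.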
Later, this should be replaced by per-vregion lock. + */ + rtnl_lock(); + mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion); + rtnl_unlock(); + mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion); +} + +static struct mlxsw_sp_acl_tcam_vregion * +mlxsw_sp_acl_tcam_vregion_create(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam *tcam, + struct mlxsw_afk_element_usage *elusage) +{ + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; + struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl); + struct mlxsw_sp_acl_tcam_vregion *vregion; + int err; + + vregion = kzalloc(sizeof(*vregion), GFP_KERNEL); + if (!vregion) + return ERR_PTR(-ENOMEM); + INIT_LIST_HEAD(&vregion->vchunk_list); + vregion->tcam = tcam; + vregion->mlxsw_sp = mlxsw_sp; + + vregion->key_info = mlxsw_afk_key_info_get(afk, elusage); + if (IS_ERR(vregion->key_info)) { + err = PTR_ERR(vregion->key_info); + goto err_key_info_get; + } + + vregion->region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, tcam, + vregion, NULL); + if (IS_ERR(vregion->region)) { + err = PTR_ERR(vregion->region); + goto err_region_create; + } + + list_add_tail(&vregion->tlist, &tcam->vregion_list); + + if (ops->region_rehash_hints_get) { + /* Create the delayed work for vregion periodic rehash */ + INIT_DELAYED_WORK(&vregion->rehash_dw, + mlxsw_sp_acl_tcam_vregion_rehash_work); + mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion); + } + + return vregion; + +err_region_create: + mlxsw_afk_key_info_put(vregion->key_info); +err_key_info_get: + kfree(vregion); + return ERR_PTR(err); +} + +static void +mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_vregion *vregion) +{ + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; + + if (ops->region_rehash_hints_get) + cancel_delayed_work_sync(&vregion->rehash_dw); + list_del(&vregion->tlist); + if (vregion->region2) + mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region2); + mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region); + mlxsw_afk_key_info_put(vregion->key_info); + kfree(vregion); +} + +u32 mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam *tcam) +{ + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; + u32 vregion_rehash_intrvl; + + if (WARN_ON(!ops->region_rehash_hints_get)) + return 0; + vregion_rehash_intrvl = tcam->vregion_rehash_intrvl; + return vregion_rehash_intrvl; +} + +int mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam *tcam, + u32 val) +{ + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; + struct mlxsw_sp_acl_tcam_vregion *vregion; + + if (val < MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN && val) + return -EINVAL; + if (WARN_ON(!ops->region_rehash_hints_get)) + return -EOPNOTSUPP; + tcam->vregion_rehash_intrvl = val; + rtnl_lock(); + list_for_each_entry(vregion, &tcam->vregion_list, tlist) { + if (val) + mlxsw_core_schedule_dw(&vregion->rehash_dw, 0); + else + cancel_delayed_work_sync(&vregion->rehash_dw); + } + rtnl_unlock(); + return 0; +} + +static int +mlxsw_sp_acl_tcam_vchunk_assoc(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_group *group, + unsigned int priority, + struct mlxsw_afk_element_usage *elusage, + struct mlxsw_sp_acl_tcam_vchunk *vchunk) +{ + struct mlxsw_sp_acl_tcam_vregion *vregion; + bool vregion_created = false; bool need_split; int err; - region = mlxsw_sp_acl_tcam_group_region_find(group, priority, elusage, - &need_split); - if (region 
&& need_split) { - /* According to priority, the chunk should belong to an - * existing region. However, this chunk needs elements - * that region does not contain. We need to split the existing - * region into two and create a new region for this chunk + vregion = mlxsw_sp_acl_tcam_group_vregion_find(group, priority, elusage, + &need_split); + if (vregion && need_split) { + /* According to priority, the vchunk should belong to an + * existing vregion. However, this vchunk needs elements + * that vregion does not contain. We need to split the existing + * vregion into two and create a new vregion for this vchunk * in between. This is not supported now. */ return -EOPNOTSUPP; } - if (!region) { - struct mlxsw_afk_element_usage region_elusage; + if (!vregion) { + struct mlxsw_afk_element_usage vregion_elusage; mlxsw_sp_acl_tcam_group_use_patterns(group, elusage, - ®ion_elusage); - region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, group->tcam, - ®ion_elusage); - if (IS_ERR(region)) - return PTR_ERR(region); - region_created = true; + &vregion_elusage); + vregion = mlxsw_sp_acl_tcam_vregion_create(mlxsw_sp, + group->tcam, + &vregion_elusage); + if (IS_ERR(vregion)) + return PTR_ERR(vregion); + vregion_created = true; } - chunk->region = region; - list_add_tail(&chunk->list, ®ion->chunk_list); + vchunk->vregion = vregion; + list_add_tail(&vchunk->list, &vregion->vchunk_list); - if (!region_created) + if (!vregion_created) return 0; - err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, group, region); + err = mlxsw_sp_acl_tcam_group_vregion_attach(mlxsw_sp, group, vregion); if (err) - goto err_group_region_attach; + goto err_group_vregion_attach; return 0; -err_group_region_attach: - mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region); +err_group_vregion_attach: + mlxsw_sp_acl_tcam_vregion_destroy(mlxsw_sp, vregion); return err; } static void -mlxsw_sp_acl_tcam_chunk_deassoc(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_chunk *chunk) +mlxsw_sp_acl_tcam_vchunk_deassoc(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_vchunk *vchunk) { - struct mlxsw_sp_acl_tcam_region *region = chunk->region; + struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion; - list_del(&chunk->list); - if (list_empty(®ion->chunk_list)) { - mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, region); - mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region); + list_del(&vchunk->list); + if (list_empty(&vregion->vchunk_list)) { + mlxsw_sp_acl_tcam_group_vregion_detach(mlxsw_sp, vregion); + mlxsw_sp_acl_tcam_vregion_destroy(mlxsw_sp, vregion); } } static struct mlxsw_sp_acl_tcam_chunk * mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_group *group, - unsigned int priority, - struct mlxsw_afk_element_usage *elusage) + struct mlxsw_sp_acl_tcam_vchunk *vchunk, + struct mlxsw_sp_acl_tcam_region *region) { const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; struct mlxsw_sp_acl_tcam_chunk *chunk; + + chunk = kzalloc(sizeof(*chunk) + ops->chunk_priv_size, GFP_KERNEL); + if (!chunk) + return ERR_PTR(-ENOMEM); + chunk->vchunk = vchunk; + chunk->region = region; + + ops->chunk_init(region->priv, chunk->priv, vchunk->priority); + return chunk; +} + +static void +mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_chunk *chunk) +{ + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; + + ops->chunk_fini(chunk->priv); + kfree(chunk); +} + +static struct mlxsw_sp_acl_tcam_vchunk * +mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp 
*mlxsw_sp, + struct mlxsw_sp_acl_tcam_group *group, + unsigned int priority, + struct mlxsw_afk_element_usage *elusage) +{ + struct mlxsw_sp_acl_tcam_vchunk *vchunk; int err; if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO) return ERR_PTR(-EINVAL); - chunk = kzalloc(sizeof(*chunk) + ops->chunk_priv_size, GFP_KERNEL); - if (!chunk) + vchunk = kzalloc(sizeof(*vchunk), GFP_KERNEL); + if (!vchunk) return ERR_PTR(-ENOMEM); - chunk->priority = priority; - chunk->group = group; - chunk->ref_count = 1; + INIT_LIST_HEAD(&vchunk->ventry_list); + vchunk->priority = priority; + vchunk->group = group; + vchunk->ref_count = 1; - err = mlxsw_sp_acl_tcam_chunk_assoc(mlxsw_sp, group, priority, - elusage, chunk); + err = mlxsw_sp_acl_tcam_vchunk_assoc(mlxsw_sp, group, priority, + elusage, vchunk); if (err) - goto err_chunk_assoc; + goto err_vchunk_assoc; - ops->chunk_init(chunk->region->priv, chunk->priv, priority); - - err = rhashtable_insert_fast(&group->chunk_ht, &chunk->ht_node, - mlxsw_sp_acl_tcam_chunk_ht_params); + err = rhashtable_insert_fast(&group->vchunk_ht, &vchunk->ht_node, + mlxsw_sp_acl_tcam_vchunk_ht_params); if (err) goto err_rhashtable_insert; - return chunk; + vchunk->chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, + vchunk->vregion->region); + if (IS_ERR(vchunk->chunk)) { + err = PTR_ERR(vchunk->chunk); + goto err_chunk_create; + } + + return vchunk; +err_chunk_create: + rhashtable_remove_fast(&group->vchunk_ht, &vchunk->ht_node, + mlxsw_sp_acl_tcam_vchunk_ht_params); err_rhashtable_insert: - ops->chunk_fini(chunk->priv); - mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk); -err_chunk_assoc: - kfree(chunk); + mlxsw_sp_acl_tcam_vchunk_deassoc(mlxsw_sp, vchunk); +err_vchunk_assoc: + kfree(vchunk); return ERR_PTR(err); } static void -mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_chunk *chunk) +mlxsw_sp_acl_tcam_vchunk_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_vchunk *vchunk) { - const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; - struct mlxsw_sp_acl_tcam_group *group = chunk->group; - - rhashtable_remove_fast(&group->chunk_ht, &chunk->ht_node, - mlxsw_sp_acl_tcam_chunk_ht_params); - ops->chunk_fini(chunk->priv); - mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk); - kfree(chunk); + struct mlxsw_sp_acl_tcam_group *group = vchunk->group; + + if (vchunk->chunk2) + mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2); + mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk); + rhashtable_remove_fast(&group->vchunk_ht, &vchunk->ht_node, + mlxsw_sp_acl_tcam_vchunk_ht_params); + mlxsw_sp_acl_tcam_vchunk_deassoc(mlxsw_sp, vchunk); + kfree(vchunk); } -static struct mlxsw_sp_acl_tcam_chunk * -mlxsw_sp_acl_tcam_chunk_get(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_group *group, - unsigned int priority, - struct mlxsw_afk_element_usage *elusage) +static struct mlxsw_sp_acl_tcam_vchunk * +mlxsw_sp_acl_tcam_vchunk_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_group *group, + unsigned int priority, + struct mlxsw_afk_element_usage *elusage) { - struct mlxsw_sp_acl_tcam_chunk *chunk; + struct mlxsw_sp_acl_tcam_vchunk *vchunk; - chunk = rhashtable_lookup_fast(&group->chunk_ht, &priority, - mlxsw_sp_acl_tcam_chunk_ht_params); - if (chunk) { - if (WARN_ON(!mlxsw_afk_key_info_subset(chunk->region->key_info, + vchunk = rhashtable_lookup_fast(&group->vchunk_ht, &priority, + mlxsw_sp_acl_tcam_vchunk_ht_params); + if (vchunk) { + if (WARN_ON(!mlxsw_afk_key_info_subset(vchunk->vregion->key_info, 
elusage))) return ERR_PTR(-EINVAL); - chunk->ref_count++; - return chunk; + vchunk->ref_count++; + return vchunk; } - return mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, group, - priority, elusage); + return mlxsw_sp_acl_tcam_vchunk_create(mlxsw_sp, group, + priority, elusage); } -static void mlxsw_sp_acl_tcam_chunk_put(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_chunk *chunk) +static void +mlxsw_sp_acl_tcam_vchunk_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_vchunk *vchunk) { - if (--chunk->ref_count) + if (--vchunk->ref_count) return; - mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, chunk); + mlxsw_sp_acl_tcam_vchunk_destroy(mlxsw_sp, vchunk); } -static size_t mlxsw_sp_acl_tcam_entry_priv_size(struct mlxsw_sp *mlxsw_sp) -{ - const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; - - return ops->entry_priv_size; -} - -static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_group *group, - struct mlxsw_sp_acl_tcam_entry *entry, - struct mlxsw_sp_acl_rule_info *rulei) +static struct mlxsw_sp_acl_tcam_entry * +mlxsw_sp_acl_tcam_entry_create(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_ventry *ventry, + struct mlxsw_sp_acl_tcam_chunk *chunk) { const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; - struct mlxsw_sp_acl_tcam_chunk *chunk; - struct mlxsw_sp_acl_tcam_region *region; + struct mlxsw_sp_acl_tcam_entry *entry; int err; - chunk = mlxsw_sp_acl_tcam_chunk_get(mlxsw_sp, group, rulei->priority, - &rulei->values.elusage); - if (IS_ERR(chunk)) - return PTR_ERR(chunk); - - region = chunk->region; + entry = kzalloc(sizeof(*entry) + ops->entry_priv_size, GFP_KERNEL); + if (!entry) + return ERR_PTR(-ENOMEM); + entry->ventry = ventry; + entry->chunk = chunk; - err = ops->entry_add(mlxsw_sp, region->priv, chunk->priv, - entry->priv, rulei); + err = ops->entry_add(mlxsw_sp, chunk->region->priv, chunk->priv, + entry->priv, ventry->rulei); if (err) goto err_entry_add; - entry->chunk = chunk; - return 0; + return entry; err_entry_add: - mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk); - return err; + kfree(entry); + return ERR_PTR(err); } -static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_entry *entry) +static void mlxsw_sp_acl_tcam_entry_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_entry *entry) { const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; - struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk; - struct mlxsw_sp_acl_tcam_region *region = chunk->region; - ops->entry_del(mlxsw_sp, region->priv, chunk->priv, entry->priv); - mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk); + ops->entry_del(mlxsw_sp, entry->chunk->region->priv, + entry->chunk->priv, entry->priv); + kfree(entry); } static int mlxsw_sp_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region, struct mlxsw_sp_acl_tcam_entry *entry, struct mlxsw_sp_acl_rule_info *rulei) { const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; - struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk; - struct mlxsw_sp_acl_tcam_region *region = chunk->region; return ops->entry_action_replace(mlxsw_sp, region->priv, entry->priv, rulei); @@ -798,13 +1005,249 @@ mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp, bool *activity) { const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; - struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk; - struct mlxsw_sp_acl_tcam_region *region = chunk->region; - return 
ops->entry_activity_get(mlxsw_sp, region->priv, + return ops->entry_activity_get(mlxsw_sp, entry->chunk->region->priv, entry->priv, activity); } +static int mlxsw_sp_acl_tcam_ventry_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_group *group, + struct mlxsw_sp_acl_tcam_ventry *ventry, + struct mlxsw_sp_acl_rule_info *rulei) +{ + struct mlxsw_sp_acl_tcam_vchunk *vchunk; + int err; + + vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp, group, rulei->priority, + &rulei->values.elusage); + if (IS_ERR(vchunk)) + return PTR_ERR(vchunk); + + ventry->vchunk = vchunk; + ventry->rulei = rulei; + ventry->entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry, + vchunk->chunk); + if (IS_ERR(ventry->entry)) { + err = PTR_ERR(ventry->entry); + goto err_entry_create; + } + + list_add_tail(&ventry->list, &vchunk->ventry_list); + + return 0; + +err_entry_create: + mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk); + return err; +} + +static void mlxsw_sp_acl_tcam_ventry_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_ventry *ventry) +{ + struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk; + + list_del(&ventry->list); + mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry); + mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk); +} + +static int +mlxsw_sp_acl_tcam_ventry_action_replace(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_ventry *ventry, + struct mlxsw_sp_acl_rule_info *rulei) +{ + struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk; + + return mlxsw_sp_acl_tcam_entry_action_replace(mlxsw_sp, + vchunk->vregion->region, + ventry->entry, rulei); +} + +static int +mlxsw_sp_acl_tcam_ventry_activity_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_ventry *ventry, + bool *activity) +{ + return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, + ventry->entry, activity); +} + +static int +mlxsw_sp_acl_tcam_ventry_migrate(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_ventry *ventry, + struct mlxsw_sp_acl_tcam_chunk *chunk2) +{ + struct mlxsw_sp_acl_tcam_entry *entry2; + + entry2 = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry, chunk2); + if (IS_ERR(entry2)) + return PTR_ERR(entry2); + mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry); + ventry->entry = entry2; + return 0; +} + +static int +mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_vchunk *vchunk, + struct mlxsw_sp_acl_tcam_region *region, + bool this_is_rollback) +{ + struct mlxsw_sp_acl_tcam_ventry *ventry; + struct mlxsw_sp_acl_tcam_chunk *chunk2; + int err; + int err2; + + chunk2 = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region); + if (IS_ERR(chunk2)) { + if (this_is_rollback) + vchunk->vregion->failed_rollback = true; + return PTR_ERR(chunk2); + } + vchunk->chunk2 = chunk2; + list_for_each_entry(ventry, &vchunk->ventry_list, list) { + err = mlxsw_sp_acl_tcam_ventry_migrate(mlxsw_sp, ventry, + vchunk->chunk2); + if (err) { + if (this_is_rollback) { + vchunk->vregion->failed_rollback = true; + return err; + } + goto rollback; + } + } + mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk); + vchunk->chunk = chunk2; + vchunk->chunk2 = NULL; + return 0; + +rollback: + /* Migrate the entries back to the original chunk. If some entry + * migration fails, there's no good way to proceed. Set the + * vregion with "failed_rollback" flag. 
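+ * Once the flag is set, mlxsw_sp_acl_tcam_vregion_rehash() refuses
+ * further attempts with -EBUSY and both regions remain attached to
+ * the group.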
+ */ + list_for_each_entry_continue_reverse(ventry, &vchunk->ventry_list, + list) { + err2 = mlxsw_sp_acl_tcam_ventry_migrate(mlxsw_sp, ventry, + vchunk->chunk); + if (err2) { + vchunk->vregion->failed_rollback = true; + goto err_rollback; + } + } + + mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2); + vchunk->chunk2 = NULL; + +err_rollback: + return err; +} + +static int +mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_vregion *vregion) +{ + struct mlxsw_sp_acl_tcam_vchunk *vchunk; + int err; + + list_for_each_entry(vchunk, &vregion->vchunk_list, list) { + err = mlxsw_sp_acl_tcam_vchunk_migrate_one(mlxsw_sp, vchunk, + vregion->region2, + false); + if (err) + goto rollback; + } + return 0; + +rollback: + list_for_each_entry_continue_reverse(vchunk, &vregion->vchunk_list, + list) { + mlxsw_sp_acl_tcam_vchunk_migrate_one(mlxsw_sp, vchunk, + vregion->region, true); + } + return err; +} + +static int +mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_vregion *vregion, + void *hints_priv) +{ + struct mlxsw_sp_acl_tcam_region *region2, *unused_region; + int err; + + trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion); + + region2 = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, vregion->tcam, + vregion, hints_priv); + if (IS_ERR(region2)) + return PTR_ERR(region2); + + vregion->region2 = region2; + err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, region2); + if (err) + goto err_group_region_attach; + + err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion); + if (!vregion->failed_rollback) { + if (!err) { + /* In case of successful migration, region2 is used and + * the original is unused. + */ + unused_region = vregion->region; + vregion->region = vregion->region2; + } else { + /* In case of failure during migration, the original + * region is still used. 
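+ * The half-populated new region is then the one to detach and
+ * free.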
+ */ + unused_region = vregion->region2; } vregion->region2 = NULL; + mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, unused_region); + mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, unused_region); + } + return err; + +err_group_region_attach: + vregion->region2 = NULL; + mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region2); + return err; +} + +static int +mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_vregion *vregion) +{ + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; + void *hints_priv; + int err; + + trace_mlxsw_sp_acl_tcam_vregion_rehash(mlxsw_sp, vregion); + if (vregion->failed_rollback) + return -EBUSY; + + hints_priv = ops->region_rehash_hints_get(vregion->region->priv); + if (IS_ERR(hints_priv)) { + err = PTR_ERR(hints_priv); + if (err != -EAGAIN) + dev_err(mlxsw_sp->bus_info->dev, "Failed to get rehash hints\n"); + return err; + } + + err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion, hints_priv); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n"); + if (vregion->failed_rollback) { + trace_mlxsw_sp_acl_tcam_vregion_rehash_dis(mlxsw_sp, + vregion); + dev_err(mlxsw_sp->bus_info->dev, "Failed to rollback during vregion migration failure\n"); + } + } + + ops->region_rehash_hints_put(hints_priv); + return err; +} + static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = { MLXSW_AFK_ELEMENT_SRC_SYS_PORT, MLXSW_AFK_ELEMENT_DMAC_32_47, @@ -859,7 +1302,7 @@ struct mlxsw_sp_acl_tcam_flower_ruleset { }; struct mlxsw_sp_acl_tcam_flower_rule { - struct mlxsw_sp_acl_tcam_entry entry; + struct mlxsw_sp_acl_tcam_ventry ventry; }; static int @@ -917,12 +1360,6 @@ mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv) return mlxsw_sp_acl_tcam_group_id(&ruleset->group); } -static size_t mlxsw_sp_acl_tcam_flower_rule_priv_size(struct mlxsw_sp *mlxsw_sp) -{ - return sizeof(struct mlxsw_sp_acl_tcam_flower_rule) + - mlxsw_sp_acl_tcam_entry_priv_size(mlxsw_sp); -} - static int mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv, void *rule_priv, @@ -931,8 +1368,8 @@ mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv; struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv; - return mlxsw_sp_acl_tcam_entry_add(mlxsw_sp, &ruleset->group, - &rule->entry, rulei); + return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->group, + &rule->ventry, rulei); } static void @@ -940,7 +1377,7 @@ mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv) { struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv; - mlxsw_sp_acl_tcam_entry_del(mlxsw_sp, &rule->entry); + mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry); } static int @@ -957,8 +1394,8 @@ mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv; - return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, &rule->entry, - activity); + return mlxsw_sp_acl_tcam_ventry_activity_get(mlxsw_sp, &rule->ventry, + activity); } static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = { @@ -968,7 +1405,7 @@ static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = { .ruleset_bind = mlxsw_sp_acl_tcam_flower_ruleset_bind, .ruleset_unbind = mlxsw_sp_acl_tcam_flower_ruleset_unbind, .ruleset_group_id = mlxsw_sp_acl_tcam_flower_ruleset_group_id, - .rule_priv_size = mlxsw_sp_acl_tcam_flower_rule_priv_size, + .rule_priv_size = sizeof(struct 
mlxsw_sp_acl_tcam_flower_rule), .rule_add = mlxsw_sp_acl_tcam_flower_rule_add, .rule_del = mlxsw_sp_acl_tcam_flower_rule_del, .rule_action_replace = mlxsw_sp_acl_tcam_flower_rule_action_replace, @@ -976,12 +1413,12 @@ static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = { }; struct mlxsw_sp_acl_tcam_mr_ruleset { - struct mlxsw_sp_acl_tcam_chunk *chunk; + struct mlxsw_sp_acl_tcam_vchunk *vchunk; struct mlxsw_sp_acl_tcam_group group; }; struct mlxsw_sp_acl_tcam_mr_rule { - struct mlxsw_sp_acl_tcam_entry entry; + struct mlxsw_sp_acl_tcam_ventry ventry; }; static int @@ -1006,10 +1443,11 @@ mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp, * specific ACL Group ID which must exist in HW before multicast router * is initialized. */ - ruleset->chunk = mlxsw_sp_acl_tcam_chunk_get(mlxsw_sp, &ruleset->group, - 1, tmplt_elusage); - if (IS_ERR(ruleset->chunk)) { - err = PTR_ERR(ruleset->chunk); + ruleset->vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp, + &ruleset->group, 1, + tmplt_elusage); + if (IS_ERR(ruleset->vchunk)) { + err = PTR_ERR(ruleset->vchunk); goto err_chunk_get; } @@ -1025,7 +1463,7 @@ mlxsw_sp_acl_tcam_mr_ruleset_del(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv) { struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv; - mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, ruleset->chunk); + mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, ruleset->vchunk); mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group); } @@ -1054,12 +1492,6 @@ mlxsw_sp_acl_tcam_mr_ruleset_group_id(void *ruleset_priv) return mlxsw_sp_acl_tcam_group_id(&ruleset->group); } -static size_t mlxsw_sp_acl_tcam_mr_rule_priv_size(struct mlxsw_sp *mlxsw_sp) -{ - return sizeof(struct mlxsw_sp_acl_tcam_mr_rule) + - mlxsw_sp_acl_tcam_entry_priv_size(mlxsw_sp); -} - static int mlxsw_sp_acl_tcam_mr_rule_add(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv, void *rule_priv, @@ -1068,8 +1500,8 @@ mlxsw_sp_acl_tcam_mr_rule_add(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv, struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv; struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv; - return mlxsw_sp_acl_tcam_entry_add(mlxsw_sp, &ruleset->group, - &rule->entry, rulei); + return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->group, + &rule->ventry, rulei); } static void @@ -1077,7 +1509,7 @@ mlxsw_sp_acl_tcam_mr_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv) { struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv; - mlxsw_sp_acl_tcam_entry_del(mlxsw_sp, &rule->entry); + mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry); } static int @@ -1087,8 +1519,8 @@ mlxsw_sp_acl_tcam_mr_rule_action_replace(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv; - return mlxsw_sp_acl_tcam_entry_action_replace(mlxsw_sp, &rule->entry, - rulei); + return mlxsw_sp_acl_tcam_ventry_action_replace(mlxsw_sp, &rule->ventry, + rulei); } static int @@ -1097,8 +1529,8 @@ mlxsw_sp_acl_tcam_mr_rule_activity_get(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv; - return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, &rule->entry, - activity); + return mlxsw_sp_acl_tcam_ventry_activity_get(mlxsw_sp, &rule->ventry, + activity); } static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_mr_ops = { @@ -1108,7 +1540,7 @@ static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_mr_ops = { .ruleset_bind = mlxsw_sp_acl_tcam_mr_ruleset_bind, .ruleset_unbind = mlxsw_sp_acl_tcam_mr_ruleset_unbind, .ruleset_group_id = mlxsw_sp_acl_tcam_mr_ruleset_group_id, - .rule_priv_size = 
mlxsw_sp_acl_tcam_mr_rule_priv_size, + .rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_mr_rule), .rule_add = mlxsw_sp_acl_tcam_mr_rule_add, .rule_del = mlxsw_sp_acl_tcam_mr_rule_del, .rule_action_replace = mlxsw_sp_acl_tcam_mr_rule_action_replace, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h index 0858d5b06353..96bd42a9fbc3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h @@ -17,6 +17,8 @@ struct mlxsw_sp_acl_tcam { unsigned long *used_groups; /* bit array */ unsigned int max_groups; unsigned int max_group_size; + struct list_head vregion_list; + u32 vregion_rehash_intrvl; /* ms */ unsigned long priv[0]; /* priv has to be always the last item */ }; @@ -26,6 +28,11 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam *tcam); void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam *tcam); +u32 mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam *tcam); +int mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam *tcam, + u32 val); int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei, u32 *priority, bool fillup_priority); @@ -43,7 +50,7 @@ struct mlxsw_sp_acl_profile_ops { struct mlxsw_sp_port *mlxsw_sp_port, bool ingress); u16 (*ruleset_group_id)(void *ruleset_priv); - size_t (*rule_priv_size)(struct mlxsw_sp *mlxsw_sp); + size_t rule_priv_size; int (*rule_add)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv, void *rule_priv, struct mlxsw_sp_acl_rule_info *rulei); @@ -67,11 +74,10 @@ mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp, (MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN * BITS_PER_BYTE) struct mlxsw_sp_acl_tcam_group; +struct mlxsw_sp_acl_tcam_vregion; struct mlxsw_sp_acl_tcam_region { - struct list_head list; /* Member of a TCAM group */ - struct list_head chunk_list; /* List of chunks under this region */ - struct mlxsw_sp_acl_tcam_group *group; + struct mlxsw_sp_acl_tcam_vregion *vregion; enum mlxsw_reg_ptar_key_type key_type; u16 id; /* ACL ID and region ID - they are same */ char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN]; @@ -207,6 +213,7 @@ mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_atcam *atcam, struct mlxsw_sp_acl_atcam_region *aregion, struct mlxsw_sp_acl_tcam_region *region, + void *hints_priv, const struct mlxsw_sp_acl_ctcam_region_ops *ops); void mlxsw_sp_acl_atcam_region_fini(struct mlxsw_sp_acl_atcam_region *aregion); void mlxsw_sp_acl_atcam_chunk_init(struct mlxsw_sp_acl_atcam_region *aregion, @@ -230,6 +237,9 @@ int mlxsw_sp_acl_atcam_init(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_atcam *atcam); void mlxsw_sp_acl_atcam_fini(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_atcam *atcam); +void * +mlxsw_sp_acl_atcam_rehash_hints_get(struct mlxsw_sp_acl_atcam_region *aregion); +void mlxsw_sp_acl_atcam_rehash_hints_put(void *hints_priv); struct mlxsw_sp_acl_erp_delta; @@ -260,7 +270,11 @@ void mlxsw_sp_acl_erp_bf_remove(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_atcam_region *aregion, struct mlxsw_sp_acl_erp_mask *erp_mask, struct mlxsw_sp_acl_atcam_entry *aentry); -int mlxsw_sp_acl_erp_region_init(struct mlxsw_sp_acl_atcam_region *aregion); +void * +mlxsw_sp_acl_erp_rehash_hints_get(struct mlxsw_sp_acl_atcam_region *aregion); +void mlxsw_sp_acl_erp_rehash_hints_put(void 
*hints_priv); +int mlxsw_sp_acl_erp_region_init(struct mlxsw_sp_acl_atcam_region *aregion, + void *hints_priv); void mlxsw_sp_acl_erp_region_fini(struct mlxsw_sp_acl_atcam_region *aregion); int mlxsw_sp_acl_erps_init(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_atcam *atcam); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index 80066f437a65..9a79b5e11597 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -37,13 +37,19 @@ struct mlxsw_sp_sb_pm { struct mlxsw_cp_sb_occ occ; }; +struct mlxsw_sp_sb_mm { + u32 min_buff; + u32 max_buff; + u16 pool_index; +}; + struct mlxsw_sp_sb_pool_des { enum mlxsw_reg_sbxx_dir dir; u8 pool; }; /* Order ingress pools before egress pools. */ -static const struct mlxsw_sp_sb_pool_des mlxsw_sp_sb_pool_dess[] = { +static const struct mlxsw_sp_sb_pool_des mlxsw_sp1_sb_pool_dess[] = { {MLXSW_REG_SBXX_DIR_INGRESS, 0}, {MLXSW_REG_SBXX_DIR_INGRESS, 1}, {MLXSW_REG_SBXX_DIR_INGRESS, 2}, @@ -55,7 +61,16 @@ static const struct mlxsw_sp_sb_pool_des mlxsw_sp_sb_pool_dess[] = { {MLXSW_REG_SBXX_DIR_EGRESS, 15}, }; -#define MLXSW_SP_SB_POOL_DESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pool_dess) +static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = { + {MLXSW_REG_SBXX_DIR_INGRESS, 0}, + {MLXSW_REG_SBXX_DIR_INGRESS, 1}, + {MLXSW_REG_SBXX_DIR_INGRESS, 2}, + {MLXSW_REG_SBXX_DIR_INGRESS, 3}, + {MLXSW_REG_SBXX_DIR_EGRESS, 0}, + {MLXSW_REG_SBXX_DIR_EGRESS, 1}, + {MLXSW_REG_SBXX_DIR_EGRESS, 2}, + {MLXSW_REG_SBXX_DIR_EGRESS, 3}, +}; #define MLXSW_SP_SB_ING_TC_COUNT 8 #define MLXSW_SP_SB_EG_TC_COUNT 16 @@ -63,16 +78,32 @@ static const struct mlxsw_sp_sb_pool_des mlxsw_sp_sb_pool_dess[] = { struct mlxsw_sp_sb_port { struct mlxsw_sp_sb_cm ing_cms[MLXSW_SP_SB_ING_TC_COUNT]; struct mlxsw_sp_sb_cm eg_cms[MLXSW_SP_SB_EG_TC_COUNT]; - struct mlxsw_sp_sb_pm pms[MLXSW_SP_SB_POOL_DESS_LEN]; + struct mlxsw_sp_sb_pm *pms; }; struct mlxsw_sp_sb { - struct mlxsw_sp_sb_pr prs[MLXSW_SP_SB_POOL_DESS_LEN]; + struct mlxsw_sp_sb_pr *prs; struct mlxsw_sp_sb_port *ports; u32 cell_size; + u32 max_headroom_cells; u64 sb_size; }; +struct mlxsw_sp_sb_vals { + unsigned int pool_count; + const struct mlxsw_sp_sb_pool_des *pool_dess; + const struct mlxsw_sp_sb_pm *pms; + const struct mlxsw_sp_sb_pr *prs; + const struct mlxsw_sp_sb_mm *mms; + const struct mlxsw_sp_sb_cm *cms_ingress; + const struct mlxsw_sp_sb_cm *cms_egress; + const struct mlxsw_sp_sb_cm *cms_cpu; + unsigned int mms_count; + unsigned int cms_ingress_count; + unsigned int cms_egress_count; + unsigned int cms_cpu_count; +}; + u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells) { return mlxsw_sp->sb->cell_size * cells; @@ -83,6 +114,11 @@ u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes) return DIV_ROUND_UP(bytes, mlxsw_sp->sb->cell_size); } +u32 mlxsw_sp_sb_max_headroom_cells(const struct mlxsw_sp *mlxsw_sp) +{ + return mlxsw_sp->sb->max_headroom_cells; +} + static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp, u16 pool_index) { @@ -121,7 +157,7 @@ static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u16 pool_index, u32 size, bool infi_size) { const struct mlxsw_sp_sb_pool_des *des = - &mlxsw_sp_sb_pool_dess[pool_index]; + &mlxsw_sp->sb_vals->pool_dess[pool_index]; char sbpr_pl[MLXSW_REG_SBPR_LEN]; struct mlxsw_sp_sb_pr *pr; int err; @@ -145,7 +181,7 @@ static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port, 
bool infi_max, u16 pool_index) { const struct mlxsw_sp_sb_pool_des *des = - &mlxsw_sp_sb_pool_dess[pool_index]; + &mlxsw_sp->sb_vals->pool_dess[pool_index]; char sbcm_pl[MLXSW_REG_SBCM_LEN]; struct mlxsw_sp_sb_cm *cm; int err; @@ -174,7 +210,7 @@ static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port, u16 pool_index, u32 min_buff, u32 max_buff) { const struct mlxsw_sp_sb_pool_des *des = - &mlxsw_sp_sb_pool_dess[pool_index]; + &mlxsw_sp->sb_vals->pool_dess[pool_index]; char sbpm_pl[MLXSW_REG_SBPM_LEN]; struct mlxsw_sp_sb_pm *pm; int err; @@ -195,7 +231,7 @@ static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port, u16 pool_index, struct list_head *bulk_list) { const struct mlxsw_sp_sb_pool_des *des = - &mlxsw_sp_sb_pool_dess[pool_index]; + &mlxsw_sp->sb_vals->pool_dess[pool_index]; char sbpm_pl[MLXSW_REG_SBPM_LEN]; mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir, @@ -217,7 +253,7 @@ static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port, u16 pool_index, struct list_head *bulk_list) { const struct mlxsw_sp_sb_pool_des *des = - &mlxsw_sp_sb_pool_dess[pool_index]; + &mlxsw_sp->sb_vals->pool_dess[pool_index]; char sbpm_pl[MLXSW_REG_SBPM_LEN]; struct mlxsw_sp_sb_pm *pm; @@ -230,24 +266,24 @@ static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port, (unsigned long) pm); } -static const u16 mlxsw_sp_pbs[] = { - [0] = 2 * ETH_FRAME_LEN, - [9] = 2 * MLXSW_PORT_MAX_MTU, -}; - -#define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs) +/* 1/4 of a headroom necessary for 100Gbps port and 100m cable. */ +#define MLXSW_SP_PB_HEADROOM 25632 #define MLXSW_SP_PB_UNUSED 8 static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port) { + const u32 pbs[] = { + [0] = MLXSW_SP_PB_HEADROOM * mlxsw_sp_port->mapping.width, + [9] = 2 * MLXSW_PORT_MAX_MTU, + }; struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; char pbmc_pl[MLXSW_REG_PBMC_LEN]; int i; mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0xffff, 0xffff / 2); - for (i = 0; i < MLXSW_SP_PBS_LEN; i++) { - u16 size = mlxsw_sp_bytes_cells(mlxsw_sp, mlxsw_sp_pbs[i]); + for (i = 0; i < ARRAY_SIZE(pbs); i++) { + u16 size = mlxsw_sp_bytes_cells(mlxsw_sp, pbs[i]); if (i == MLXSW_SP_PB_UNUSED) continue; @@ -280,50 +316,119 @@ static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port) return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port); } +static int mlxsw_sp_sb_port_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_sb_port *sb_port) +{ + struct mlxsw_sp_sb_pm *pms; + + pms = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*pms), + GFP_KERNEL); + if (!pms) + return -ENOMEM; + sb_port->pms = pms; + return 0; +} + +static void mlxsw_sp_sb_port_fini(struct mlxsw_sp_sb_port *sb_port) +{ + kfree(sb_port->pms); +} + static int mlxsw_sp_sb_ports_init(struct mlxsw_sp *mlxsw_sp) { unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); + struct mlxsw_sp_sb_pr *prs; + int i; + int err; mlxsw_sp->sb->ports = kcalloc(max_ports, sizeof(struct mlxsw_sp_sb_port), GFP_KERNEL); if (!mlxsw_sp->sb->ports) return -ENOMEM; + + prs = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*prs), + GFP_KERNEL); + if (!prs) { + err = -ENOMEM; + goto err_alloc_prs; + } + mlxsw_sp->sb->prs = prs; + + for (i = 0; i < max_ports; i++) { + err = mlxsw_sp_sb_port_init(mlxsw_sp, &mlxsw_sp->sb->ports[i]); + if (err) + goto err_sb_port_init; + } + return 0; + +err_sb_port_init: + for (i--; i >= 0; i--) + mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]); + kfree(mlxsw_sp->sb->prs); 
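+ /* On both error paths that reach the label below, prs is already
+  * gone: either freed just above or never allocated. Only the outer
+  * ports array remains to be released.
+  */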
+err_alloc_prs: + kfree(mlxsw_sp->sb->ports); + return err; } static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp) { + int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); + int i; + + for (i = max_ports - 1; i >= 0; i--) + mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]); + kfree(mlxsw_sp->sb->prs); kfree(mlxsw_sp->sb->ports); } -#define MLXSW_SP_SB_PR_INGRESS_SIZE 12440000 -#define MLXSW_SP_SB_PR_INGRESS_MNG_SIZE (200 * 1000) -#define MLXSW_SP_SB_PR_EGRESS_SIZE 13232000 - #define MLXSW_SP_SB_PR(_mode, _size) \ { \ .mode = _mode, \ .size = _size, \ } -static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs[] = { +#define MLXSW_SP1_SB_PR_INGRESS_SIZE 12440000 +#define MLXSW_SP1_SB_PR_INGRESS_MNG_SIZE (200 * 1000) +#define MLXSW_SP1_SB_PR_EGRESS_SIZE 13232000 + +static const struct mlxsw_sp_sb_pr mlxsw_sp1_sb_prs[] = { /* Ingress pools. */ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, - MLXSW_SP_SB_PR_INGRESS_SIZE), + MLXSW_SP1_SB_PR_INGRESS_SIZE), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, - MLXSW_SP_SB_PR_INGRESS_MNG_SIZE), + MLXSW_SP1_SB_PR_INGRESS_MNG_SIZE), /* Egress pools. */ - MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_PR_EGRESS_SIZE), + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, + MLXSW_SP1_SB_PR_EGRESS_SIZE), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI), }; -#define MLXSW_SP_SB_PRS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs) +#define MLXSW_SP2_SB_PR_INGRESS_SIZE 40960000 +#define MLXSW_SP2_SB_PR_INGRESS_MNG_SIZE (200 * 1000) +#define MLXSW_SP2_SB_PR_EGRESS_SIZE 40960000 + +static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = { + /* Ingress pools. */ + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, + MLXSW_SP2_SB_PR_INGRESS_SIZE), + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0), + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0), + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, + MLXSW_SP2_SB_PR_INGRESS_MNG_SIZE), + /* Egress pools. 
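Only the first egress pool carries a real size on Spectrum-2; pools 1-3 are unused and are therefore left static with zero size.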
*/ + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, + MLXSW_SP2_SB_PR_EGRESS_SIZE), + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0), + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0), + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0), +}; static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_sb_pr *prs, @@ -357,7 +462,7 @@ static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp, .pool_index = _pool, \ } -static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = { +static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_ingress[] = { MLXSW_SP_SB_CM(10000, 8, 0), MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), @@ -370,9 +475,20 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = { MLXSW_SP_SB_CM(20000, 1, 3), }; -#define MLXSW_SP_SB_CMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_ingress) +static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_ingress[] = { + MLXSW_SP_SB_CM(0, 7, 0), + MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), + MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), + MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), + MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), + MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), + MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), + MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), + MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */ + MLXSW_SP_SB_CM(20000, 1, 3), +}; -static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = { +static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_egress[] = { MLXSW_SP_SB_CM(1500, 9, 4), MLXSW_SP_SB_CM(1500, 9, 4), MLXSW_SP_SB_CM(1500, 9, 4), @@ -392,7 +508,25 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = { MLXSW_SP_SB_CM(1, 0xff, 4), }; -#define MLXSW_SP_SB_CMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_egress) +static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = { + MLXSW_SP_SB_CM(0, 7, 4), + MLXSW_SP_SB_CM(0, 7, 4), + MLXSW_SP_SB_CM(0, 7, 4), + MLXSW_SP_SB_CM(0, 7, 4), + MLXSW_SP_SB_CM(0, 7, 4), + MLXSW_SP_SB_CM(0, 7, 4), + MLXSW_SP_SB_CM(0, 7, 4), + MLXSW_SP_SB_CM(0, 7, 4), + MLXSW_SP_SB_CM(0, 7, 4), + MLXSW_SP_SB_CM(0, 7, 4), + MLXSW_SP_SB_CM(0, 7, 4), + MLXSW_SP_SB_CM(0, 7, 4), + MLXSW_SP_SB_CM(0, 7, 4), + MLXSW_SP_SB_CM(0, 7, 4), + MLXSW_SP_SB_CM(0, 7, 4), + MLXSW_SP_SB_CM(0, 7, 4), + MLXSW_SP_SB_CM(1, 0xff, 4), +}; #define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, 4) @@ -431,9 +565,6 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = { MLXSW_SP_CPU_PORT_SB_CM, }; -#define MLXSW_SP_CPU_PORT_SB_MCS_LEN \ - ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms) - static bool mlxsw_sp_sb_pool_is_static(struct mlxsw_sp *mlxsw_sp, u16 pool_index) { @@ -447,6 +578,7 @@ static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port, const struct mlxsw_sp_sb_cm *cms, size_t cms_len) { + const struct mlxsw_sp_sb_vals *sb_vals = mlxsw_sp->sb_vals; int i; int err; @@ -458,7 +590,7 @@ static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port, if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS) continue; /* PG number 8 does not exist, skip it */ cm = &cms[i]; - if (WARN_ON(mlxsw_sp_sb_pool_dess[cm->pool_index].dir != dir)) + if (WARN_ON(sb_vals->pool_dess[cm->pool_index].dir != dir)) continue; min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, cm->min_buff); @@ -484,27 +616,28 @@ static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port, static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port) 
{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; int err; - err = __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp, + err = __mlxsw_sp_sb_cms_init(mlxsw_sp, mlxsw_sp_port->local_port, MLXSW_REG_SBXX_DIR_INGRESS, - mlxsw_sp_sb_cms_ingress, - MLXSW_SP_SB_CMS_INGRESS_LEN); + mlxsw_sp->sb_vals->cms_ingress, + mlxsw_sp->sb_vals->cms_ingress_count); if (err) return err; return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp, mlxsw_sp_port->local_port, MLXSW_REG_SBXX_DIR_EGRESS, - mlxsw_sp_sb_cms_egress, - MLXSW_SP_SB_CMS_EGRESS_LEN); + mlxsw_sp->sb_vals->cms_egress, + mlxsw_sp->sb_vals->cms_egress_count); } static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp) { return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS, - mlxsw_sp_cpu_port_sb_cms, - MLXSW_SP_CPU_PORT_SB_MCS_LEN); + mlxsw_sp->sb_vals->cms_cpu, + mlxsw_sp->sb_vals->cms_cpu_count); } #define MLXSW_SP_SB_PM(_min_buff, _max_buff) \ @@ -513,7 +646,7 @@ static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp) .max_buff = _max_buff, \ } -static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms[] = { +static const struct mlxsw_sp_sb_pm mlxsw_sp1_sb_pms[] = { /* Ingress pools. */ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX), MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), @@ -527,7 +660,18 @@ static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms[] = { MLXSW_SP_SB_PM(10000, 90000), }; -#define MLXSW_SP_SB_PMS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms) +static const struct mlxsw_sp_sb_pm mlxsw_sp2_sb_pms[] = { + /* Ingress pools. */ + MLXSW_SP_SB_PM(0, 7), + MLXSW_SP_SB_PM(0, 0), + MLXSW_SP_SB_PM(0, 0), + MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX), + /* Egress pools. */ + MLXSW_SP_SB_PM(0, 7), + MLXSW_SP_SB_PM(0, 0), + MLXSW_SP_SB_PM(0, 0), + MLXSW_SP_SB_PM(0, 0), +}; static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port) { @@ -535,8 +679,8 @@ static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port) int i; int err; - for (i = 0; i < MLXSW_SP_SB_PMS_LEN; i++) { - const struct mlxsw_sp_sb_pm *pm = &mlxsw_sp_sb_pms[i]; + for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) { + const struct mlxsw_sp_sb_pm *pm = &mlxsw_sp->sb_vals->pms[i]; u32 max_buff; u32 min_buff; @@ -552,12 +696,6 @@ static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port) return 0; } -struct mlxsw_sp_sb_mm { - u32 min_buff; - u32 max_buff; - u16 pool_index; -}; - #define MLXSW_SP_SB_MM(_min_buff, _max_buff, _pool) \ { \ .min_buff = _min_buff, \ @@ -583,21 +721,19 @@ static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = { MLXSW_SP_SB_MM(0, 6, 4), }; -#define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms) - static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp) { char sbmm_pl[MLXSW_REG_SBMM_LEN]; int i; int err; - for (i = 0; i < MLXSW_SP_SB_MMS_LEN; i++) { + for (i = 0; i < mlxsw_sp->sb_vals->mms_count; i++) { const struct mlxsw_sp_sb_pool_des *des; const struct mlxsw_sp_sb_mm *mc; u32 min_buff; - mc = &mlxsw_sp_sb_mms[i]; - des = &mlxsw_sp_sb_pool_dess[mc->pool_index]; + mc = &mlxsw_sp->sb_vals->mms[i]; + des = &mlxsw_sp->sb_vals->pool_dess[mc->pool_index]; /* All pools used by sb_mm's are initialized using dynamic * thresholds, therefore 'max_buff' isn't specified in cells. 
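min_buff, in contrast, is given in bytes and is converted to cells before being passed on to the device.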
*/ @@ -611,22 +747,55 @@ static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp) return 0; } -static void mlxsw_sp_pool_count(u16 *p_ingress_len, u16 *p_egress_len) +static void mlxsw_sp_pool_count(struct mlxsw_sp *mlxsw_sp, + u16 *p_ingress_len, u16 *p_egress_len) { int i; - for (i = 0; i < MLXSW_SP_SB_POOL_DESS_LEN; ++i) - if (mlxsw_sp_sb_pool_dess[i].dir == MLXSW_REG_SBXX_DIR_EGRESS) + for (i = 0; i < mlxsw_sp->sb_vals->pool_count; ++i) + if (mlxsw_sp->sb_vals->pool_dess[i].dir == + MLXSW_REG_SBXX_DIR_EGRESS) goto out; WARN(1, "No egress pools\n"); out: *p_ingress_len = i; - *p_egress_len = MLXSW_SP_SB_POOL_DESS_LEN - i; + *p_egress_len = mlxsw_sp->sb_vals->pool_count - i; } +const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals = { + .pool_count = ARRAY_SIZE(mlxsw_sp1_sb_pool_dess), + .pool_dess = mlxsw_sp1_sb_pool_dess, + .pms = mlxsw_sp1_sb_pms, + .prs = mlxsw_sp1_sb_prs, + .mms = mlxsw_sp_sb_mms, + .cms_ingress = mlxsw_sp1_sb_cms_ingress, + .cms_egress = mlxsw_sp1_sb_cms_egress, + .cms_cpu = mlxsw_sp_cpu_port_sb_cms, + .mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms), + .cms_ingress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_ingress), + .cms_egress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_egress), + .cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms), +}; + +const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals = { + .pool_count = ARRAY_SIZE(mlxsw_sp2_sb_pool_dess), + .pool_dess = mlxsw_sp2_sb_pool_dess, + .pms = mlxsw_sp2_sb_pms, + .prs = mlxsw_sp2_sb_prs, + .mms = mlxsw_sp_sb_mms, + .cms_ingress = mlxsw_sp2_sb_cms_ingress, + .cms_egress = mlxsw_sp2_sb_cms_egress, + .cms_cpu = mlxsw_sp_cpu_port_sb_cms, + .mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms), + .cms_ingress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_ingress), + .cms_egress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_egress), + .cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms), +}; + int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp) { + u32 max_headroom_size; u16 ing_pool_count; u16 eg_pool_count; int err; @@ -637,18 +806,26 @@ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp) if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_BUFFER_SIZE)) return -EIO; + if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_HEADROOM_SIZE)) + return -EIO; + mlxsw_sp->sb = kzalloc(sizeof(*mlxsw_sp->sb), GFP_KERNEL); if (!mlxsw_sp->sb) return -ENOMEM; mlxsw_sp->sb->cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE); mlxsw_sp->sb->sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE); + max_headroom_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, + MAX_HEADROOM_SIZE); + /* Round down, because this limit must not be overstepped. 
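The integer division below truncates, so the resulting cell count never translates back to more bytes than the device advertised.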
*/ + mlxsw_sp->sb->max_headroom_cells = max_headroom_size / + mlxsw_sp->sb->cell_size; err = mlxsw_sp_sb_ports_init(mlxsw_sp); if (err) goto err_sb_ports_init; - err = mlxsw_sp_sb_prs_init(mlxsw_sp, mlxsw_sp_sb_prs, - MLXSW_SP_SB_PRS_LEN); + err = mlxsw_sp_sb_prs_init(mlxsw_sp, mlxsw_sp->sb_vals->prs, + mlxsw_sp->sb_vals->pool_count); if (err) goto err_sb_prs_init; err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp); @@ -657,7 +834,7 @@ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp) err = mlxsw_sp_sb_mms_init(mlxsw_sp); if (err) goto err_sb_mms_init; - mlxsw_sp_pool_count(&ing_pool_count, &eg_pool_count); + mlxsw_sp_pool_count(mlxsw_sp, &ing_pool_count, &eg_pool_count); err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0, mlxsw_sp->sb->sb_size, ing_pool_count, @@ -705,10 +882,11 @@ int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core, unsigned int sb_index, u16 pool_index, struct devlink_sb_pool_info *pool_info) { - enum mlxsw_reg_sbxx_dir dir = mlxsw_sp_sb_pool_dess[pool_index].dir; struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + enum mlxsw_reg_sbxx_dir dir; struct mlxsw_sp_sb_pr *pr; + dir = mlxsw_sp->sb_vals->pool_dess[pool_index].dir; pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index); pool_info->pool_type = (enum devlink_sb_pool_type) dir; pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, pr->size); @@ -834,7 +1012,7 @@ int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port, u32 max_buff; int err; - if (dir != mlxsw_sp_sb_pool_dess[pool_index].dir) + if (dir != mlxsw_sp->sb_vals->pool_dess[pool_index].dir) return -EINVAL; err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index, @@ -932,7 +1110,7 @@ next_batch: continue; mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1); mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1); - for (i = 0; i < MLXSW_SP_SB_POOL_DESS_LEN; i++) { + for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) { err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i, &bulk_list); if (err) @@ -991,7 +1169,7 @@ next_batch: continue; mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1); mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1); - for (i = 0; i < MLXSW_SP_SB_POOL_DESS_LEN; i++) { + for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) { err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i, &bulk_list); if (err) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index ff072358d950..15f804453cd6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -17,13 +17,13 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_block *block, struct mlxsw_sp_acl_rule_info *rulei, - struct tcf_exts *exts, + struct flow_action *flow_action, struct netlink_ext_ack *extack) { - const struct tc_action *a; + const struct flow_action_entry *act; int err, i; - if (!tcf_exts_has_actions(exts)) + if (!flow_action_has_entries(flow_action)) return 0; /* Count action is inserted first */ @@ -31,27 +31,31 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, if (err) return err; - tcf_exts_for_each_action(i, a, exts) { - if (is_tcf_gact_ok(a)) { + flow_action_for_each(i, act, flow_action) { + switch (act->id) { + case FLOW_ACTION_ACCEPT: err = mlxsw_sp_acl_rulei_act_terminate(rulei); if (err) { NL_SET_ERR_MSG_MOD(extack, "Cannot append terminate action"); return err; } - } else if (is_tcf_gact_shot(a)) { + break; + case 
FLOW_ACTION_DROP: err = mlxsw_sp_acl_rulei_act_drop(rulei); if (err) { NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action"); return err; } - } else if (is_tcf_gact_trap(a)) { + break; + case FLOW_ACTION_TRAP: err = mlxsw_sp_acl_rulei_act_trap(rulei); if (err) { NL_SET_ERR_MSG_MOD(extack, "Cannot append trap action"); return err; } - } else if (is_tcf_gact_goto_chain(a)) { - u32 chain_index = tcf_gact_goto_chain_index(a); + break; + case FLOW_ACTION_GOTO: { + u32 chain_index = act->chain_index; struct mlxsw_sp_acl_ruleset *ruleset; u16 group_id; @@ -67,7 +71,9 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, NL_SET_ERR_MSG_MOD(extack, "Cannot append jump action"); return err; } - } else if (is_tcf_mirred_egress_redirect(a)) { + } + break; + case FLOW_ACTION_REDIRECT: { struct net_device *out_dev; struct mlxsw_sp_fid *fid; u16 fid_index; @@ -79,29 +85,33 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, if (err) return err; - out_dev = tcf_mirred_dev(a); + out_dev = act->dev; err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei, out_dev, extack); if (err) return err; - } else if (is_tcf_mirred_egress_mirror(a)) { - struct net_device *out_dev = tcf_mirred_dev(a); + } + break; + case FLOW_ACTION_MIRRED: { + struct net_device *out_dev = act->dev; err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei, block, out_dev, extack); if (err) return err; - } else if (is_tcf_vlan(a)) { - u16 proto = be16_to_cpu(tcf_vlan_push_proto(a)); - u32 action = tcf_vlan_action(a); - u8 prio = tcf_vlan_push_prio(a); - u16 vid = tcf_vlan_push_vid(a); + } + break; + case FLOW_ACTION_VLAN_MANGLE: { + u16 proto = be16_to_cpu(act->vlan.proto); + u8 prio = act->vlan.prio; + u16 vid = act->vlan.vid; return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei, - action, vid, + act->id, vid, proto, prio, extack); - } else { + } + default: NL_SET_ERR_MSG_MOD(extack, "Unsupported action"); dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n"); return -EOPNOTSUPP; @@ -113,59 +123,49 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei, struct tc_cls_flower_offload *f) { - struct flow_dissector_key_ipv4_addrs *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV4_ADDRS, - f->key); - struct flow_dissector_key_ipv4_addrs *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV4_ADDRS, - f->mask); + struct flow_match_ipv4_addrs match; + + flow_rule_match_ipv4_addrs(f->rule, &match); mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31, - (char *) &key->src, - (char *) &mask->src, 4); + (char *) &match.key->src, + (char *) &match.mask->src, 4); mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31, - (char *) &key->dst, - (char *) &mask->dst, 4); + (char *) &match.key->dst, + (char *) &match.mask->dst, 4); } static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei, struct tc_cls_flower_offload *f) { - struct flow_dissector_key_ipv6_addrs *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV6_ADDRS, - f->key); - struct flow_dissector_key_ipv6_addrs *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV6_ADDRS, - f->mask); + struct flow_match_ipv6_addrs match; + + flow_rule_match_ipv6_addrs(f->rule, &match); mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127, - &key->src.s6_addr[0x0], - &mask->src.s6_addr[0x0], 4); + &match.key->src.s6_addr[0x0], + 
&match.mask->src.s6_addr[0x0], 4); mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95, - &key->src.s6_addr[0x4], - &mask->src.s6_addr[0x4], 4); + &match.key->src.s6_addr[0x4], + &match.mask->src.s6_addr[0x4], 4); mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63, - &key->src.s6_addr[0x8], - &mask->src.s6_addr[0x8], 4); + &match.key->src.s6_addr[0x8], + &match.mask->src.s6_addr[0x8], 4); mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31, - &key->src.s6_addr[0xC], - &mask->src.s6_addr[0xC], 4); + &match.key->src.s6_addr[0xC], + &match.mask->src.s6_addr[0xC], 4); mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127, - &key->dst.s6_addr[0x0], - &mask->dst.s6_addr[0x0], 4); + &match.key->dst.s6_addr[0x0], + &match.mask->dst.s6_addr[0x0], 4); mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95, - &key->dst.s6_addr[0x4], - &mask->dst.s6_addr[0x4], 4); + &match.key->dst.s6_addr[0x4], + &match.mask->dst.s6_addr[0x4], 4); mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63, - &key->dst.s6_addr[0x8], - &mask->dst.s6_addr[0x8], 4); + &match.key->dst.s6_addr[0x8], + &match.mask->dst.s6_addr[0x8], 4); mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31, - &key->dst.s6_addr[0xC], - &mask->dst.s6_addr[0xC], 4); + &match.key->dst.s6_addr[0xC], + &match.mask->dst.s6_addr[0xC], 4); } static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp, @@ -173,9 +173,10 @@ static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp, struct tc_cls_flower_offload *f, u8 ip_proto) { - struct flow_dissector_key_ports *key, *mask; + const struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f); + struct flow_match_ports match; - if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) + if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) return 0; if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) { @@ -184,16 +185,13 @@ static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp, return -EINVAL; } - key = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_PORTS, - f->key); - mask = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_PORTS, - f->mask); + flow_rule_match_ports(rule, &match); mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT, - ntohs(key->dst), ntohs(mask->dst)); + ntohs(match.key->dst), + ntohs(match.mask->dst)); mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT, - ntohs(key->src), ntohs(mask->src)); + ntohs(match.key->src), + ntohs(match.mask->src)); return 0; } @@ -202,9 +200,10 @@ static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp, struct tc_cls_flower_offload *f, u8 ip_proto) { - struct flow_dissector_key_tcp *key, *mask; + const struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f); + struct flow_match_tcp match; - if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP)) + if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) return 0; if (ip_proto != IPPROTO_TCP) { @@ -213,14 +212,11 @@ static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp, return -EINVAL; } - key = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_TCP, - f->key); - mask = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_TCP, - f->mask); + flow_rule_match_tcp(rule, &match); + mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS, - ntohs(key->flags), ntohs(mask->flags)); + ntohs(match.key->flags), + ntohs(match.mask->flags)); return 0; } @@ -229,9 
+225,10 @@ static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp, struct tc_cls_flower_offload *f, u16 n_proto) { - struct flow_dissector_key_ip *key, *mask; + const struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f); + struct flow_match_ip match; - if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP)) + if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) return 0; if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) { @@ -240,20 +237,18 @@ static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp, return -EINVAL; } - key = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IP, - f->key); - mask = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IP, - f->mask); + flow_rule_match_ip(rule, &match); + mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_, - key->ttl, mask->ttl); + match.key->ttl, match.mask->ttl); mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN, - key->tos & 0x3, mask->tos & 0x3); + match.key->tos & 0x3, + match.mask->tos & 0x3); mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP, - key->tos >> 6, mask->tos >> 6); + match.key->tos >> 6, + match.mask->tos >> 6); return 0; } @@ -263,13 +258,15 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei, struct tc_cls_flower_offload *f) { + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f); + struct flow_dissector *dissector = rule->match.dissector; u16 n_proto_mask = 0; u16 n_proto_key = 0; u16 addr_type = 0; u8 ip_proto = 0; int err; - if (f->dissector->used_keys & + if (dissector->used_keys & ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | BIT(FLOW_DISSECTOR_KEY_BASIC) | BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | @@ -286,25 +283,19 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_acl_rulei_priority(rulei, f->common.prio); - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { - struct flow_dissector_key_control *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_CONTROL, - f->key); - addr_type = key->addr_type; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_match_control match; + + flow_rule_match_control(rule, &match); + addr_type = match.key->addr_type; } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { - struct flow_dissector_key_basic *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_BASIC, - f->key); - struct flow_dissector_key_basic *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_BASIC, - f->mask); - n_proto_key = ntohs(key->n_proto); - n_proto_mask = ntohs(mask->n_proto); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; + + flow_rule_match_basic(rule, &match); + n_proto_key = ntohs(match.key->n_proto); + n_proto_mask = ntohs(match.mask->n_proto); if (n_proto_key == ETH_P_ALL) { n_proto_key = 0; @@ -314,60 +305,53 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, MLXSW_AFK_ELEMENT_ETHERTYPE, n_proto_key, n_proto_mask); - ip_proto = key->ip_proto; + ip_proto = match.key->ip_proto; mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_PROTO, - key->ip_proto, mask->ip_proto); + match.key->ip_proto, + match.mask->ip_proto); } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { - struct flow_dissector_key_eth_addrs *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ETH_ADDRS, - f->key); - struct flow_dissector_key_eth_addrs *mask = - 
skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ETH_ADDRS, - f->mask); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; + flow_rule_match_eth_addrs(rule, &match); mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DMAC_32_47, - key->dst, mask->dst, 2); + match.key->dst, + match.mask->dst, 2); mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DMAC_0_31, - key->dst + 2, mask->dst + 2, 4); + match.key->dst + 2, + match.mask->dst + 2, 4); mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SMAC_32_47, - key->src, mask->src, 2); + match.key->src, + match.mask->src, 2); mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SMAC_0_31, - key->src + 2, mask->src + 2, 4); + match.key->src + 2, + match.mask->src + 2, 4); } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) { - struct flow_dissector_key_vlan *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_VLAN, - f->key); - struct flow_dissector_key_vlan *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_VLAN, - f->mask); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; + flow_rule_match_vlan(rule, &match); if (mlxsw_sp_acl_block_is_egress_bound(block)) { NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress"); return -EOPNOTSUPP; } - if (mask->vlan_id != 0) + if (match.mask->vlan_id != 0) mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_VID, - key->vlan_id, - mask->vlan_id); - if (mask->vlan_priority != 0) + match.key->vlan_id, + match.mask->vlan_id); + if (match.mask->vlan_priority != 0) mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_PCP, - key->vlan_priority, - mask->vlan_priority); + match.key->vlan_priority, + match.mask->vlan_priority); } if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) @@ -387,7 +371,8 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, if (err) return err; - return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei, f->exts, + return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei, + &f->rule->action, f->common.extack); } @@ -486,7 +471,7 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp, if (err) goto err_rule_get_stats; - tcf_exts_stats_update(f->exts, bytes, packets, lastuse); + flow_stats_update(&f->stats, bytes, packets, lastuse); mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset); return 0; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 230e1f6e192b..52fed8c7bf1e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -364,6 +364,7 @@ enum mlxsw_sp_fib_entry_type { MLXSW_SP_FIB_ENTRY_TYPE_REMOTE, MLXSW_SP_FIB_ENTRY_TYPE_LOCAL, MLXSW_SP_FIB_ENTRY_TYPE_TRAP, + MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE, /* This is a special case of local delivery, where a packet should be * decapsulated on reception. 
Note that there is no corresponding ENCAP, @@ -3813,13 +3814,11 @@ mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi) struct mlxsw_sp_nexthop_group *nh_grp; struct mlxsw_sp_nexthop *nh; struct fib_nh *fib_nh; - size_t alloc_size; int i; int err; - alloc_size = sizeof(*nh_grp) + - fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop); - nh_grp = kzalloc(alloc_size, GFP_KERNEL); + nh_grp = kzalloc(struct_size(nh_grp, nexthops, fi->fib_nhs), + GFP_KERNEL); if (!nh_grp) return ERR_PTR(-ENOMEM); nh_grp->priv = fi; @@ -3928,6 +3927,7 @@ mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry) return !!nh_group->adj_index_valid; case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL: return !!nh_group->nh_rif; + case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE: case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP: case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP: return true; @@ -3963,6 +3963,7 @@ mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry) int i; if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL || + fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE || fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP || fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP) { nh_grp->nexthops->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD; @@ -4004,7 +4005,8 @@ mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry) fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry, common); - if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) { + if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL || + fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE) { list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6, list)->rt->fib6_nh.nh_flags |= RTNH_F_OFFLOAD; return; @@ -4172,6 +4174,19 @@ static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp, return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); } +static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + enum mlxsw_reg_ralue_op op) +{ + enum mlxsw_reg_ralue_trap_action trap_action; + char ralue_pl[MLXSW_REG_RALUE_LEN]; + + trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR; + mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op); + mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, 0, 0); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); +} + static int mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry *fib_entry, @@ -4211,6 +4226,8 @@ static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp, return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op); case MLXSW_SP_FIB_ENTRY_TYPE_TRAP: return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op); + case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE: + return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, fib_entry, op); case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP: return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp, fib_entry, op); @@ -4279,8 +4296,10 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp, case RTN_BROADCAST: fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP; return 0; + case RTN_BLACKHOLE: + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE; + return 0; case RTN_UNREACHABLE: /* fall through */ - case RTN_BLACKHOLE: /* fall through */ case RTN_PROHIBIT: /* Packets hitting these routes need to be trapped, but * can do so with a lower priority than packets directed @@ -5045,13 +5064,11 @@ mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop_group *nh_grp; struct mlxsw_sp_rt6 *mlxsw_sp_rt6; struct 
mlxsw_sp_nexthop *nh; - size_t alloc_size; int i = 0; int err; - alloc_size = sizeof(*nh_grp) + - fib6_entry->nrt6 * sizeof(struct mlxsw_sp_nexthop); - nh_grp = kzalloc(alloc_size, GFP_KERNEL); + nh_grp = kzalloc(struct_size(nh_grp, nexthops, fib6_entry->nrt6), + GFP_KERNEL); if (!nh_grp) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&nh_grp->fib_list); @@ -5229,6 +5246,8 @@ static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp, */ if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP; + else if (rt->fib6_type == RTN_BLACKHOLE) + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE; else if (rt->fib6_flags & RTF_REJECT) fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL; else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt)) @@ -6123,7 +6142,7 @@ static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif) mlxsw_reg_ritr_rif_pack(ritr_pl, rif); err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); - if (WARN_ON_ONCE(err)) + if (err) return err; mlxsw_reg_ritr_enable_set(ritr_pl, false); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index a4a9fe992193..766f5b5f1cf5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -431,46 +431,6 @@ static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan) mlxsw_sp_bridge_vlan_destroy(bridge_vlan); } -static void mlxsw_sp_port_bridge_flags_get(struct mlxsw_sp_bridge *bridge, - struct net_device *dev, - unsigned long *brport_flags) -{ - struct mlxsw_sp_bridge_port *bridge_port; - - bridge_port = mlxsw_sp_bridge_port_find(bridge, dev); - if (WARN_ON(!bridge_port)) - return; - - memcpy(brport_flags, &bridge_port->flags, sizeof(*brport_flags)); -} - -static int mlxsw_sp_port_attr_get(struct net_device *dev, - struct switchdev_attr *attr) -{ - struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - - switch (attr->id) { - case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: - attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac); - memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac, - attr->u.ppid.id_len); - break; - case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: - mlxsw_sp_port_bridge_flags_get(mlxsw_sp->bridge, attr->orig_dev, - &attr->u.brport_flags); - break; - case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT: - attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD | - BR_MCAST_FLOOD; - break; - default: - return -EOPNOTSUPP; - } - - return 0; -} - static int mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_bridge_vlan *bridge_vlan, @@ -620,6 +580,17 @@ err_port_bridge_vlan_learning_set: return err; } +static int mlxsw_sp_port_attr_br_pre_flags_set(struct mlxsw_sp_port + *mlxsw_sp_port, + struct switchdev_trans *trans, + unsigned long brport_flags) +{ + if (brport_flags & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD)) + return -EINVAL; + + return 0; +} + static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port, struct switchdev_trans *trans, struct net_device *orig_dev, @@ -866,6 +837,11 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev, attr->orig_dev, attr->u.stp_state); break; + case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS: + err = mlxsw_sp_port_attr_br_pre_flags_set(mlxsw_sp_port, + trans, + attr->u.brport_flags); + break; case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, 
trans, attr->orig_dev, @@ -1963,7 +1939,6 @@ static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp, } static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = { - .switchdev_port_attr_get = mlxsw_sp_port_attr_get, .switchdev_port_attr_set = mlxsw_sp_port_attr_set, }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index 2d4f213e154d..533fe6235b7c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -11,7 +11,6 @@ #include <linux/device.h> #include <linux/skbuff.h> #include <linux/if_vlan.h> -#include <net/switchdev.h> #include "pci.h" #include "core.h" @@ -390,6 +389,18 @@ static int mlxsw_sx_port_get_phys_port_name(struct net_device *dev, char *name, name, len); } +static int mlxsw_sx_port_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) +{ + struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev); + struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx; + + ppid->id_len = sizeof(mlxsw_sx->hw_id); + memcpy(&ppid->id, &mlxsw_sx->hw_id, ppid->id_len); + + return 0; +} + static const struct net_device_ops mlxsw_sx_port_netdev_ops = { .ndo_open = mlxsw_sx_port_open, .ndo_stop = mlxsw_sx_port_stop, @@ -397,6 +408,7 @@ static const struct net_device_ops mlxsw_sx_port_netdev_ops = { .ndo_change_mtu = mlxsw_sx_port_change_mtu, .ndo_get_stats64 = mlxsw_sx_port_get_stats64, .ndo_get_phys_port_name = mlxsw_sx_port_get_phys_port_name, + .ndo_get_port_parent_id = mlxsw_sx_port_get_port_parent_id, }; static void mlxsw_sx_port_get_drvinfo(struct net_device *dev, @@ -901,28 +913,6 @@ static const struct ethtool_ops mlxsw_sx_port_ethtool_ops = { .set_link_ksettings = mlxsw_sx_port_set_link_ksettings, }; -static int mlxsw_sx_port_attr_get(struct net_device *dev, - struct switchdev_attr *attr) -{ - struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev); - struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx; - - switch (attr->id) { - case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: - attr->u.ppid.id_len = sizeof(mlxsw_sx->hw_id); - memcpy(&attr->u.ppid.id, &mlxsw_sx->hw_id, attr->u.ppid.id_len); - break; - default: - return -EOPNOTSUPP; - } - - return 0; -} - -static const struct switchdev_ops mlxsw_sx_port_switchdev_ops = { - .switchdev_port_attr_get = mlxsw_sx_port_attr_get, -}; - static int mlxsw_sx_hw_id_get(struct mlxsw_sx *mlxsw_sx) { char spad_pl[MLXSW_REG_SPAD_LEN] = {0}; @@ -1034,7 +1024,6 @@ static int __mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port, dev->netdev_ops = &mlxsw_sx_port_netdev_ops; dev->ethtool_ops = &mlxsw_sx_port_ethtool_ops; - dev->switchdev_ops = &mlxsw_sx_port_switchdev_ops; err = mlxsw_sx_port_dev_addr_get(mlxsw_sx_port); if (err) { diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c index b881f5d4a7f9..6006d47707cb 100644 --- a/drivers/net/ethernet/micrel/ks8695net.c +++ b/drivers/net/ethernet/micrel/ks8695net.c @@ -391,7 +391,7 @@ ks8695_tx_irq(int irq, void *dev_id) ksp->tx_buffers[buff_n].dma_ptr, ksp->tx_buffers[buff_n].length, DMA_TO_DEVICE); - dev_kfree_skb_irq(ksp->tx_buffers[buff_n].skb); + dev_consume_skb_irq(ksp->tx_buffers[buff_n].skb); ksp->tx_buffers[buff_n].skb = NULL; ksp->tx_ring_used--; } diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index b34055ac476f..e1651756bf9d 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -81,11 +81,13 
@@ static void moxart_mac_free_memory(struct net_device *ndev) priv->rx_buf_size, DMA_FROM_DEVICE); if (priv->tx_desc_base) - dma_free_coherent(NULL, TX_REG_DESC_SIZE * TX_DESC_NUM, + dma_free_coherent(&priv->pdev->dev, + TX_REG_DESC_SIZE * TX_DESC_NUM, priv->tx_desc_base, priv->tx_base); if (priv->rx_desc_base) - dma_free_coherent(NULL, RX_REG_DESC_SIZE * RX_DESC_NUM, + dma_free_coherent(&priv->pdev->dev, + RX_REG_DESC_SIZE * RX_DESC_NUM, priv->rx_desc_base, priv->rx_base); kfree(priv->tx_buf_base); @@ -298,7 +300,7 @@ static void moxart_tx_finished(struct net_device *ndev) ndev->stats.tx_packets++; ndev->stats.tx_bytes += priv->tx_skb[tx_tail]->len; - dev_kfree_skb_irq(priv->tx_skb[tx_tail]); + dev_consume_skb_irq(priv->tx_skb[tx_tail]); priv->tx_skb[tx_tail] = NULL; tx_tail = TX_NEXT(tx_tail); @@ -476,6 +478,7 @@ static int moxart_mac_probe(struct platform_device *pdev) priv = netdev_priv(ndev); priv->ndev = ndev; + priv->pdev = pdev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ndev->base_addr = res->start; @@ -491,7 +494,7 @@ static int moxart_mac_probe(struct platform_device *pdev) priv->tx_buf_size = TX_BUF_SIZE; priv->rx_buf_size = RX_BUF_SIZE; - priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE * + priv->tx_desc_base = dma_alloc_coherent(&pdev->dev, TX_REG_DESC_SIZE * TX_DESC_NUM, &priv->tx_base, GFP_DMA | GFP_KERNEL); if (!priv->tx_desc_base) { @@ -499,7 +502,7 @@ static int moxart_mac_probe(struct platform_device *pdev) goto init_fail; } - priv->rx_desc_base = dma_alloc_coherent(NULL, RX_REG_DESC_SIZE * + priv->rx_desc_base = dma_alloc_coherent(&pdev->dev, RX_REG_DESC_SIZE * RX_DESC_NUM, &priv->rx_base, GFP_DMA | GFP_KERNEL); if (!priv->rx_desc_base) { diff --git a/drivers/net/ethernet/moxa/moxart_ether.h b/drivers/net/ethernet/moxa/moxart_ether.h index bee608b547d1..bf4c3029cd0c 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.h +++ b/drivers/net/ethernet/moxa/moxart_ether.h @@ -292,6 +292,7 @@ #define LINK_STATUS 0x4 struct moxart_mac_priv_t { + struct platform_device *pdev; void __iomem *base; unsigned int reg_maccr; unsigned int reg_imr; diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index c6a575eb0ff5..195306d05bcd 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -916,6 +916,18 @@ static int ocelot_set_features(struct net_device *dev, return 0; } +static int ocelot_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) +{ + struct ocelot_port *ocelot_port = netdev_priv(dev); + struct ocelot *ocelot = ocelot_port->ocelot; + + ppid->id_len = sizeof(ocelot->base_mac); + memcpy(&ppid->id, &ocelot->base_mac, ppid->id_len); + + return 0; +} + static const struct net_device_ops ocelot_port_netdev_ops = { .ndo_open = ocelot_port_open, .ndo_stop = ocelot_port_stop, @@ -930,6 +942,7 @@ static const struct net_device_ops ocelot_port_netdev_ops = { .ndo_vlan_rx_add_vid = ocelot_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ocelot_vlan_rx_kill_vid, .ndo_set_features = ocelot_set_features, + .ndo_get_port_parent_id = ocelot_get_port_parent_id, }; static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data) @@ -1013,25 +1026,6 @@ static const struct ethtool_ops ocelot_ethtool_ops = { .set_link_ksettings = phy_ethtool_set_link_ksettings, }; -static int ocelot_port_attr_get(struct net_device *dev, - struct switchdev_attr *attr) -{ - struct ocelot_port *ocelot_port = netdev_priv(dev); - struct ocelot *ocelot = ocelot_port->ocelot; - - switch (attr->id) 
{ - case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: - attr->u.ppid.id_len = sizeof(ocelot->base_mac); - memcpy(&attr->u.ppid.id, &ocelot->base_mac, - attr->u.ppid.id_len); - break; - default: - return -EOPNOTSUPP; - } - - return 0; -} - static int ocelot_port_attr_stp_state_set(struct ocelot_port *ocelot_port, struct switchdev_trans *trans, u8 state) @@ -1331,7 +1325,6 @@ static int ocelot_port_obj_del(struct net_device *dev, } static const struct switchdev_ops ocelot_port_switchdev_ops = { - .switchdev_port_attr_get = ocelot_port_attr_get, .switchdev_port_attr_set = ocelot_port_attr_set, }; diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 19ce0e605096..e0340f778d8f 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -1408,7 +1408,7 @@ myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index) if (skb) { ss->stats.tx_bytes += skb->len; ss->stats.tx_packets++; - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); if (len) pci_unmap_single(pdev, dma_unmap_addr(&tx->info[idx], diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c index b9a1a9f999ea..1a2634cbbb69 100644 --- a/drivers/net/ethernet/natsemi/natsemi.c +++ b/drivers/net/ethernet/natsemi/natsemi.c @@ -2173,7 +2173,7 @@ static void netdev_tx_done(struct net_device *dev) np->tx_skbuff[entry]->len, PCI_DMA_TODEVICE); /* Free the original skb. */ - dev_kfree_skb_irq(np->tx_skbuff[entry]); + dev_consume_skb_irq(np->tx_skbuff[entry]); np->tx_skbuff[entry] = NULL; } if (netif_queue_stopped(dev) && diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c index 958fced4dacf..9098ee7fe0d1 100644 --- a/drivers/net/ethernet/natsemi/ns83820.c +++ b/drivers/net/ethernet/natsemi/ns83820.c @@ -1003,7 +1003,7 @@ static void do_tx_done(struct net_device *ndev) addr, len, PCI_DMA_TODEVICE); - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); atomic_dec(&dev->nr_tx_skbs); } else pci_unmap_page(dev->pci_dev, @@ -1869,56 +1869,28 @@ static unsigned ns83820_mii_write_reg(struct ns83820 *dev, unsigned phy, unsigne static void ns83820_probe_phy(struct net_device *ndev) { struct ns83820 *dev = PRIV(ndev); - static int first; - int i; -#define MII_PHYIDR1 0x02 -#define MII_PHYIDR2 0x03 - -#if 0 - if (!first) { - unsigned tmp; - ns83820_mii_read_reg(dev, 1, 0x09); - ns83820_mii_write_reg(dev, 1, 0x10, 0x0d3e); - - tmp = ns83820_mii_read_reg(dev, 1, 0x00); - ns83820_mii_write_reg(dev, 1, 0x00, tmp | 0x8000); - udelay(1300); - ns83820_mii_read_reg(dev, 1, 0x09); - } -#endif - first = 1; - - for (i=1; i<2; i++) { - int j; - unsigned a, b; - a = ns83820_mii_read_reg(dev, i, MII_PHYIDR1); - b = ns83820_mii_read_reg(dev, i, MII_PHYIDR2); - - //printk("%s: phy %d: 0x%04x 0x%04x\n", - // ndev->name, i, a, b); - - for (j=0; j<0x16; j+=4) { - dprintk("%s: [0x%02x] %04x %04x %04x %04x\n", - ndev->name, j, - ns83820_mii_read_reg(dev, i, 0 + j), - ns83820_mii_read_reg(dev, i, 1 + j), - ns83820_mii_read_reg(dev, i, 2 + j), - ns83820_mii_read_reg(dev, i, 3 + j) - ); - } - } - { - unsigned a, b; - /* read firmware version: memory addr is 0x8402 and 0x8403 */ - ns83820_mii_write_reg(dev, 1, 0x16, 0x000d); - ns83820_mii_write_reg(dev, 1, 0x1e, 0x810e); - a = ns83820_mii_read_reg(dev, 1, 0x1d); - - ns83820_mii_write_reg(dev, 1, 0x16, 0x000d); - ns83820_mii_write_reg(dev, 1, 0x1e, 0x810e); - b = ns83820_mii_read_reg(dev, 1, 0x1d); - dprintk("version: 0x%04x 0x%04x\n", a, 
b); + int j; + unsigned a, b; + + for (j = 0; j < 0x16; j += 4) { + dprintk("%s: [0x%02x] %04x %04x %04x %04x\n", + ndev->name, j, + ns83820_mii_read_reg(dev, 1, 0 + j), + ns83820_mii_read_reg(dev, 1, 1 + j), + ns83820_mii_read_reg(dev, 1, 2 + j), + ns83820_mii_read_reg(dev, 1, 3 + j) + ); } + + /* read firmware version: memory addr is 0x8402 and 0x8403 */ + ns83820_mii_write_reg(dev, 1, 0x16, 0x000d); + ns83820_mii_write_reg(dev, 1, 0x1e, 0x810e); + a = ns83820_mii_read_reg(dev, 1, 0x1d); + + ns83820_mii_write_reg(dev, 1, 0x16, 0x000d); + ns83820_mii_write_reg(dev, 1, 0x1e, 0x810e); + b = ns83820_mii_read_reg(dev, 1, 0x1d); + dprintk("version: 0x%04x 0x%04x\n", a, b); } #endif diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c index c805dcbebd02..aaec00912ea0 100644 --- a/drivers/net/ethernet/natsemi/sonic.c +++ b/drivers/net/ethernet/natsemi/sonic.c @@ -328,7 +328,7 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id) } /* We must free the original skb */ - dev_kfree_skb_irq(lp->tx_skb[entry]); + dev_consume_skb_irq(lp->tx_skb[entry]); lp->tx_skb[entry] = NULL; /* and unmap DMA buffer */ dma_unmap_single(lp->device, lp->tx_laddr[entry], lp->tx_len[entry], DMA_TO_DEVICE); diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index 82be90075695..feda9644289d 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -3055,7 +3055,7 @@ static void tx_intr_handler(struct fifo_info *fifo_data) /* Updating the statistics block */ swstats->mem_freed += skb->truesize; - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); get_info.offset++; if (get_info.offset == get_info.fifo_len + 1) diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index 0da7393b2ef3..b877acec5cde 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -114,7 +114,7 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo) /* free SKBs */ for (temp = completed; temp != skb_ptr; temp++) - dev_kfree_skb_irq(*temp); + dev_consume_skb_irq(*temp); } while (more); } diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index dccae0319204..275de9f4c61c 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -465,7 +465,7 @@ static int nfp_bpf_init(struct nfp_app *app) app->ctrl_mtu = nfp_bpf_ctrl_cmsg_mtu(bpf); } - bpf->bpf_dev = bpf_offload_dev_create(&nfp_bpf_dev_ops); + bpf->bpf_dev = bpf_offload_dev_create(&nfp_bpf_dev_ops, bpf); err = PTR_ERR_OR_ZERO(bpf->bpf_dev); if (err) goto err_free_neutral_maps; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index 55c7dbf8b421..15dce97650a5 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -185,8 +185,6 @@ static void nfp_prog_free(struct nfp_prog *nfp_prog) static int nfp_bpf_verifier_prep(struct bpf_prog *prog) { - struct nfp_net *nn = netdev_priv(prog->aux->offload->netdev); - struct nfp_app *app = nn->app; struct nfp_prog *nfp_prog; int ret; @@ -197,7 +195,7 @@ static int nfp_bpf_verifier_prep(struct bpf_prog *prog) INIT_LIST_HEAD(&nfp_prog->insns); nfp_prog->type = prog->type; - nfp_prog->bpf = app->priv; + nfp_prog->bpf = bpf_offload_dev_priv(prog->aux->offload->offdev); ret = 
nfp_prog_prepare(nfp_prog, prog->insnsi, prog->len); if (ret) diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index 8d54b36afee8..eeda4ed98333 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -3,7 +3,6 @@ #include <linux/bitfield.h> #include <net/pkt_cls.h> -#include <net/switchdev.h> #include <net/tc_act/tc_csum.h> #include <net/tc_act/tc_gact.h> #include <net/tc_act/tc_mirred.h> @@ -37,7 +36,7 @@ static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan) static void nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan, - const struct tc_action *action) + const struct flow_action_entry *act) { size_t act_size = sizeof(struct nfp_fl_push_vlan); u16 tmp_push_vlan_tci; @@ -45,17 +44,17 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan, push_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_VLAN; push_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ; push_vlan->reserved = 0; - push_vlan->vlan_tpid = tcf_vlan_push_proto(action); + push_vlan->vlan_tpid = act->vlan.proto; tmp_push_vlan_tci = - FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, tcf_vlan_push_prio(action)) | - FIELD_PREP(NFP_FL_PUSH_VLAN_VID, tcf_vlan_push_vid(action)) | + FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) | + FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid) | NFP_FL_PUSH_VLAN_CFI; push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci); } static int -nfp_fl_pre_lag(struct nfp_app *app, const struct tc_action *action, +nfp_fl_pre_lag(struct nfp_app *app, const struct flow_action_entry *act, struct nfp_fl_payload *nfp_flow, int act_len) { size_t act_size = sizeof(struct nfp_fl_pre_lag); @@ -63,7 +62,7 @@ nfp_fl_pre_lag(struct nfp_app *app, const struct tc_action *action, struct net_device *out_dev; int err; - out_dev = tcf_mirred_dev(action); + out_dev = act->dev; if (!out_dev || !netif_is_lag_master(out_dev)) return 0; @@ -92,7 +91,8 @@ nfp_fl_pre_lag(struct nfp_app *app, const struct tc_action *action, static int nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output, - const struct tc_action *action, struct nfp_fl_payload *nfp_flow, + const struct flow_action_entry *act, + struct nfp_fl_payload *nfp_flow, bool last, struct net_device *in_dev, enum nfp_flower_tun_type tun_type, int *tun_out_cnt) { @@ -104,7 +104,7 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output, output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT; output->head.len_lw = act_size >> NFP_FL_LW_SIZ; - out_dev = tcf_mirred_dev(action); + out_dev = act->dev; if (!out_dev) return -EOPNOTSUPP; @@ -137,7 +137,7 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output, if (nfp_netdev_is_nfp_repr(in_dev)) { /* Confirm ingress and egress are on same device. 
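netdev_port_same_parent_id() compares the switch IDs the two netdevs report as their port parent ID (ndo_get_port_parent_id).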
*/ - if (!switchdev_port_same_parent_id(in_dev, out_dev)) + if (!netdev_port_same_parent_id(in_dev, out_dev)) return -EOPNOTSUPP; } @@ -155,9 +155,9 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output, static enum nfp_flower_tun_type nfp_fl_get_tun_from_act_l4_port(struct nfp_app *app, - const struct tc_action *action) + const struct flow_action_entry *act) { - struct ip_tunnel_info *tun = tcf_tunnel_info(action); + const struct ip_tunnel_info *tun = act->tunnel; struct nfp_flower_priv *priv = app->priv; switch (tun->key.tp_dst) { @@ -195,9 +195,9 @@ static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len) static int nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len, - const struct tc_action *action) + const struct flow_action_entry *act) { - struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action); + struct ip_tunnel_info *ip_tun = (struct ip_tunnel_info *)act->tunnel; int opt_len, opt_cnt, act_start, tot_push_len; u8 *src = ip_tunnel_info_opts(ip_tun); @@ -259,13 +259,13 @@ nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len, static int nfp_fl_set_ipv4_udp_tun(struct nfp_app *app, struct nfp_fl_set_ipv4_udp_tun *set_tun, - const struct tc_action *action, + const struct flow_action_entry *act, struct nfp_fl_pre_tunnel *pre_tun, enum nfp_flower_tun_type tun_type, struct net_device *netdev) { size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun); - struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action); + const struct ip_tunnel_info *ip_tun = act->tunnel; struct nfp_flower_priv *priv = app->priv; u32 tmp_set_ip_tun_type_index = 0; /* Currently support one pre-tunnel so index is always 0. */ @@ -345,7 +345,7 @@ static void nfp_fl_set_helper32(u32 value, u32 mask, u8 *p_exact, u8 *p_mask) } static int -nfp_fl_set_eth(const struct tc_action *action, int idx, u32 off, +nfp_fl_set_eth(const struct flow_action_entry *act, u32 off, struct nfp_fl_set_eth *set_eth) { u32 exact, mask; @@ -353,8 +353,8 @@ nfp_fl_set_eth(const struct tc_action *action, int idx, u32 off, if (off + 4 > ETH_ALEN * 2) return -EOPNOTSUPP; - mask = ~tcf_pedit_mask(action, idx); - exact = tcf_pedit_val(action, idx); + mask = ~act->mangle.mask; + exact = act->mangle.val; if (exact & ~mask) return -EOPNOTSUPP; @@ -376,7 +376,7 @@ struct ipv4_ttl_word { }; static int -nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off, +nfp_fl_set_ip4(const struct flow_action_entry *act, u32 off, struct nfp_fl_set_ip4_addrs *set_ip_addr, struct nfp_fl_set_ip4_ttl_tos *set_ip_ttl_tos) { @@ -387,8 +387,8 @@ nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off, __be32 exact, mask; /* We are expecting tcf_pedit to return a big endian value */ - mask = (__force __be32)~tcf_pedit_mask(action, idx); - exact = (__force __be32)tcf_pedit_val(action, idx); + mask = (__force __be32)~act->mangle.mask; + exact = (__force __be32)act->mangle.val; if (exact & ~mask) return -EOPNOTSUPP; @@ -505,7 +505,7 @@ nfp_fl_set_ip6_hop_limit_flow_label(u32 off, __be32 exact, __be32 mask, } static int -nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off, +nfp_fl_set_ip6(const struct flow_action_entry *act, u32 off, struct nfp_fl_set_ipv6_addr *ip_dst, struct nfp_fl_set_ipv6_addr *ip_src, struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl) @@ -515,8 +515,8 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off, u8 word; /* We are expecting tcf_pedit to return a big endian value */ - mask = (__force __be32)~tcf_pedit_mask(action, idx); - exact = (__force 
__be32)tcf_pedit_val(action, idx); + mask = (__force __be32)~act->mangle.mask; + exact = (__force __be32)act->mangle.val; if (exact & ~mask) return -EOPNOTSUPP; @@ -541,7 +541,7 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off, } static int -nfp_fl_set_tport(const struct tc_action *action, int idx, u32 off, +nfp_fl_set_tport(const struct flow_action_entry *act, u32 off, struct nfp_fl_set_tport *set_tport, int opcode) { u32 exact, mask; @@ -549,8 +549,8 @@ nfp_fl_set_tport(const struct tc_action *action, int idx, u32 off, if (off) return -EOPNOTSUPP; - mask = ~tcf_pedit_mask(action, idx); - exact = tcf_pedit_val(action, idx); + mask = ~act->mangle.mask; + exact = act->mangle.val; if (exact & ~mask) return -EOPNOTSUPP; @@ -584,20 +584,22 @@ static u32 nfp_fl_csum_l4_to_flag(u8 ip_proto) } static int -nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow, +nfp_fl_pedit(const struct flow_action_entry *act, + struct tc_cls_flower_offload *flow, char *nfp_action, int *a_len, u32 *csum_updated) { + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow); struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src; struct nfp_fl_set_ipv6_tc_hl_fl set_ip6_tc_hl_fl; struct nfp_fl_set_ip4_ttl_tos set_ip_ttl_tos; struct nfp_fl_set_ip4_addrs set_ip_addr; + enum flow_action_mangle_base htype; struct nfp_fl_set_tport set_tport; struct nfp_fl_set_eth set_eth; - enum pedit_header_type htype; - int idx, nkeys, err; size_t act_size = 0; - u32 offset, cmd; u8 ip_proto = 0; + u32 offset; + int err; memset(&set_ip6_tc_hl_fl, 0, sizeof(set_ip6_tc_hl_fl)); memset(&set_ip_ttl_tos, 0, sizeof(set_ip_ttl_tos)); @@ -606,50 +608,41 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow, memset(&set_ip_addr, 0, sizeof(set_ip_addr)); memset(&set_tport, 0, sizeof(set_tport)); memset(&set_eth, 0, sizeof(set_eth)); - nkeys = tcf_pedit_nkeys(action); - - for (idx = 0; idx < nkeys; idx++) { - cmd = tcf_pedit_cmd(action, idx); - htype = tcf_pedit_htype(action, idx); - offset = tcf_pedit_offset(action, idx); - if (cmd != TCA_PEDIT_KEY_EX_CMD_SET) - return -EOPNOTSUPP; + htype = act->mangle.htype; + offset = act->mangle.offset; - switch (htype) { - case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH: - err = nfp_fl_set_eth(action, idx, offset, &set_eth); - break; - case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4: - err = nfp_fl_set_ip4(action, idx, offset, &set_ip_addr, - &set_ip_ttl_tos); - break; - case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6: - err = nfp_fl_set_ip6(action, idx, offset, &set_ip6_dst, - &set_ip6_src, &set_ip6_tc_hl_fl); - break; - case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP: - err = nfp_fl_set_tport(action, idx, offset, &set_tport, - NFP_FL_ACTION_OPCODE_SET_TCP); - break; - case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP: - err = nfp_fl_set_tport(action, idx, offset, &set_tport, - NFP_FL_ACTION_OPCODE_SET_UDP); - break; - default: - return -EOPNOTSUPP; - } - if (err) - return err; + switch (htype) { + case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH: + err = nfp_fl_set_eth(act, offset, &set_eth); + break; + case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4: + err = nfp_fl_set_ip4(act, offset, &set_ip_addr, + &set_ip_ttl_tos); + break; + case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6: + err = nfp_fl_set_ip6(act, offset, &set_ip6_dst, + &set_ip6_src, &set_ip6_tc_hl_fl); + break; + case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP: + err = nfp_fl_set_tport(act, offset, &set_tport, + NFP_FL_ACTION_OPCODE_SET_TCP); + break; + case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP: + err = nfp_fl_set_tport(act, offset, &set_tport, + NFP_FL_ACTION_OPCODE_SET_UDP); + break; + 
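The nfp_fl_pedit() rewrite above shows why the nkeys/idx loop disappears: with flow_offload, each former pedit key arrives as its own FLOW_ACTION_MANGLE entry, so the driver dispatches once per entry on act->mangle.htype. The diff keeps the TCA_PEDIT_KEY_EX_HDR_TYPE_* names, which line up with the enum flow_action_mangle_base values. A compressed sketch of the shape (handlers elided):

#include <net/flow_offload.h>
#include <linux/tc_act/tc_pedit.h>

/* Sketch: one FLOW_ACTION_MANGLE entry per former pedit key. */
static int example_mangle(const struct flow_action_entry *act)
{
        switch (act->mangle.htype) {
        case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
        case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
        case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
        case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
        case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
                return 0;       /* rewrite at act->mangle.offset */
        default:
                return -EOPNOTSUPP;
        }
}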
default: + return -EOPNOTSUPP; } + if (err) + return err; - if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) { - struct flow_dissector_key_basic *basic; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; - basic = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_BASIC, - flow->key); - ip_proto = basic->ip_proto; + flow_rule_match_basic(rule, &match); + ip_proto = match.key->ip_proto; } if (set_eth.head.len_lw) { @@ -733,7 +726,7 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow, } static int -nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a, +nfp_flower_output_action(struct nfp_app *app, const struct flow_action_entry *act, struct nfp_fl_payload *nfp_fl, int *a_len, struct net_device *netdev, bool last, enum nfp_flower_tun_type *tun_type, int *tun_out_cnt, @@ -753,7 +746,7 @@ nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a, return -EOPNOTSUPP; output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len]; - err = nfp_fl_output(app, output, a, nfp_fl, last, netdev, *tun_type, + err = nfp_fl_output(app, output, act, nfp_fl, last, netdev, *tun_type, tun_out_cnt); if (err) return err; @@ -764,7 +757,7 @@ nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a, /* nfp_fl_pre_lag returns -err or size of prelag action added. * This will be 0 if it is not egressing to a lag dev. */ - prelag_size = nfp_fl_pre_lag(app, a, nfp_fl, *a_len); + prelag_size = nfp_fl_pre_lag(app, act, nfp_fl, *a_len); if (prelag_size < 0) return prelag_size; else if (prelag_size > 0 && (!last || *out_cnt)) @@ -778,7 +771,7 @@ nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a, } static int -nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a, +nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act, struct tc_cls_flower_offload *flow, struct nfp_fl_payload *nfp_fl, int *a_len, struct net_device *netdev, @@ -791,23 +784,25 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a, struct nfp_fl_pop_vlan *pop_v; int err; - if (is_tcf_gact_shot(a)) { + switch (act->id) { + case FLOW_ACTION_DROP: nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP); - } else if (is_tcf_mirred_egress_redirect(a)) { - err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev, + break; + case FLOW_ACTION_REDIRECT: + err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev, true, tun_type, tun_out_cnt, out_cnt, csum_updated); if (err) return err; - - } else if (is_tcf_mirred_egress_mirror(a)) { - err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev, + break; + case FLOW_ACTION_MIRRED: + err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev, false, tun_type, tun_out_cnt, out_cnt, csum_updated); if (err) return err; - - } else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_POP) { + break; + case FLOW_ACTION_VLAN_POP: if (*a_len + sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ) return -EOPNOTSUPP; @@ -816,19 +811,21 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a, nfp_fl_pop_vlan(pop_v); *a_len += sizeof(struct nfp_fl_pop_vlan); - } else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) { + break; + case FLOW_ACTION_VLAN_PUSH: if (*a_len + sizeof(struct nfp_fl_push_vlan) > NFP_FL_MAX_A_SIZ) return -EOPNOTSUPP; psh_v = (struct nfp_fl_push_vlan *)&nfp_fl->action_data[*a_len]; nfp_fl->meta.shortcut = 
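The flow_rule_match_*() helpers replace the two-step skb_flow_dissector_target() lookup with a small { key, mask } pair, guarded by flow_rule_match_key(). A minimal sketch of the ip_proto fetch used above (example_ip_proto is illustrative):

#include <net/flow_offload.h>

/* Sketch: fetching ip_proto via the flow_rule match helpers. */
static u8 example_ip_proto(struct flow_rule *rule)
{
        struct flow_match_basic match;

        if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
                return 0;

        flow_rule_match_basic(rule, &match);
        return match.key->ip_proto;     /* match.mask->ip_proto also set */
}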
cpu_to_be32(NFP_FL_SC_ACT_NULL); - nfp_fl_push_vlan(psh_v, a); + nfp_fl_push_vlan(psh_v, act); *a_len += sizeof(struct nfp_fl_push_vlan); - } else if (is_tcf_tunnel_set(a)) { - struct ip_tunnel_info *ip_tun = tcf_tunnel_info(a); + break; + case FLOW_ACTION_TUNNEL_ENCAP: { + const struct ip_tunnel_info *ip_tun = act->tunnel; - *tun_type = nfp_fl_get_tun_from_act_l4_port(app, a); + *tun_type = nfp_fl_get_tun_from_act_l4_port(app, act); if (*tun_type == NFP_FL_TUNNEL_NONE) return -EOPNOTSUPP; @@ -847,32 +844,36 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a, nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL); *a_len += sizeof(struct nfp_fl_pre_tunnel); - err = nfp_fl_push_geneve_options(nfp_fl, a_len, a); + err = nfp_fl_push_geneve_options(nfp_fl, a_len, act); if (err) return err; set_tun = (void *)&nfp_fl->action_data[*a_len]; - err = nfp_fl_set_ipv4_udp_tun(app, set_tun, a, pre_tun, + err = nfp_fl_set_ipv4_udp_tun(app, set_tun, act, pre_tun, *tun_type, netdev); if (err) return err; *a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun); - } else if (is_tcf_tunnel_release(a)) { + } + break; + case FLOW_ACTION_TUNNEL_DECAP: /* Tunnel decap is handled by default so accept action. */ return 0; - } else if (is_tcf_pedit(a)) { - if (nfp_fl_pedit(a, flow, &nfp_fl->action_data[*a_len], + case FLOW_ACTION_MANGLE: + if (nfp_fl_pedit(act, flow, &nfp_fl->action_data[*a_len], a_len, csum_updated)) return -EOPNOTSUPP; - } else if (is_tcf_csum(a)) { + break; + case FLOW_ACTION_CSUM: /* csum action requests recalc of something we have not fixed */ - if (tcf_csum_update_flags(a) & ~*csum_updated) + if (act->csum_flags & ~*csum_updated) return -EOPNOTSUPP; /* If we will correctly fix the csum we can remove it from the * csum update list. Which will later be used to check support. */ - *csum_updated &= ~tcf_csum_update_flags(a); - } else { + *csum_updated &= ~act->csum_flags; + break; + default: /* Currently we do not handle any other actions. 
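With this change the is_tcf_*() predicate chain becomes a single switch on act->id, and the caller (shown in the next hunk) walks entries with flow_action_for_each(). A compressed sketch of that dispatch shape, with the per-case handling elided:

#include <net/flow_offload.h>

/* Sketch: iterating a flow_action and dispatching on the action id. */
static int example_compile(struct flow_rule *rule)
{
        struct flow_action_entry *act;
        int i;

        flow_action_for_each(i, act, &rule->action) {
                switch (act->id) {
                case FLOW_ACTION_DROP:
                case FLOW_ACTION_REDIRECT:      /* act->dev */
                case FLOW_ACTION_VLAN_PUSH:     /* act->vlan.* */
                case FLOW_ACTION_TUNNEL_ENCAP:  /* act->tunnel */
                case FLOW_ACTION_CSUM:          /* act->csum_flags */
                        break;                  /* handle each here */
                default:
                        return -EOPNOTSUPP;
                }
        }
        return 0;
}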
*/ return -EOPNOTSUPP; } @@ -887,7 +888,7 @@ int nfp_flower_compile_action(struct nfp_app *app, { int act_len, act_cnt, err, tun_out_cnt, out_cnt, i; enum nfp_flower_tun_type tun_type; - const struct tc_action *a; + struct flow_action_entry *act; u32 csum_updated = 0; memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ); @@ -898,8 +899,8 @@ int nfp_flower_compile_action(struct nfp_app *app, tun_out_cnt = 0; out_cnt = 0; - tcf_exts_for_each_action(i, a, flow->exts) { - err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len, + flow_action_for_each(i, act, &flow->rule->action) { + err = nfp_flower_loop_action(app, act, flow, nfp_flow, &act_len, netdev, &tun_type, &tun_out_cnt, &out_cnt, &csum_updated); if (err) diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c index 56b22ea32474..cf9e1118ee8f 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c @@ -45,11 +45,9 @@ nfp_flower_cmsg_mac_repr_start(struct nfp_app *app, unsigned int num_ports) { struct nfp_flower_cmsg_mac_repr *msg; struct sk_buff *skb; - unsigned int size; - size = sizeof(*msg) + num_ports * sizeof(msg->ports[0]); - skb = nfp_flower_cmsg_alloc(app, size, NFP_FLOWER_CMSG_TYPE_MAC_REPR, - GFP_KERNEL); + skb = nfp_flower_cmsg_alloc(app, struct_size(msg, ports, num_ports), + NFP_FLOWER_CMSG_TYPE_MAC_REPR, GFP_KERNEL); if (!skb) return NULL; diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index c04a0d6b0184..e03c8ef2c28c 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c @@ -8,31 +8,41 @@ #include "main.h" static void -nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *frame, - struct tc_cls_flower_offload *flow, u8 key_type, - bool mask_version) +nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext, + struct nfp_flower_meta_tci *msk, + struct tc_cls_flower_offload *flow, u8 key_type) { - struct fl_flow_key *target = mask_version ? flow->mask : flow->key; - struct flow_dissector_key_vlan *flow_vlan; + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow); u16 tmp_tci; - memset(frame, 0, sizeof(struct nfp_flower_meta_tci)); + memset(ext, 0, sizeof(struct nfp_flower_meta_tci)); + memset(msk, 0, sizeof(struct nfp_flower_meta_tci)); + /* Populate the metadata frame. */ - frame->nfp_flow_key_layer = key_type; - frame->mask_id = ~0; + ext->nfp_flow_key_layer = key_type; + ext->mask_id = ~0; + + msk->nfp_flow_key_layer = key_type; + msk->mask_id = ~0; - if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) { - flow_vlan = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_VLAN, - target); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; + + flow_rule_match_vlan(rule, &match); /* Populate the tci field. 
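The cmsg.c hunk above swaps an open-coded sizeof(*msg) + n * sizeof(elem) for struct_size(), which computes the same size with overflow checking. An illustrative sketch with a hypothetical message struct:

#include <linux/overflow.h>
#include <linux/slab.h>

struct example_msg {
        u8 hdr;
        u8 num_ports;
        struct { u8 id; u8 state; } ports[];    /* trailing flex array */
};

/* struct_size(msg, ports, n) == sizeof(*msg) + n * sizeof(msg->ports[0]),
 * saturating to SIZE_MAX on overflow so the allocation fails safely.
 */
static void *example_alloc(unsigned int n)
{
        struct example_msg *msg;

        return kzalloc(struct_size(msg, ports, n), GFP_KERNEL);
}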
*/ - if (flow_vlan->vlan_id || flow_vlan->vlan_priority) { + if (match.key->vlan_id || match.key->vlan_priority) { tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, - flow_vlan->vlan_priority) | + match.key->vlan_priority) | FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, - flow_vlan->vlan_id) | + match.key->vlan_id) | NFP_FLOWER_MASK_VLAN_CFI; - frame->tci = cpu_to_be16(tmp_tci); + ext->tci = cpu_to_be16(tmp_tci); + tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, + match.mask->vlan_priority) | + FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, + match.mask->vlan_id) | + NFP_FLOWER_MASK_VLAN_CFI; + msk->tci = cpu_to_be16(tmp_tci); } } } @@ -64,231 +74,249 @@ nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port, } static void -nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame, - struct tc_cls_flower_offload *flow, - bool mask_version) +nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext, + struct nfp_flower_mac_mpls *msk, + struct tc_cls_flower_offload *flow) { - struct fl_flow_key *target = mask_version ? flow->mask : flow->key; - struct flow_dissector_key_eth_addrs *addr; + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow); + + memset(ext, 0, sizeof(struct nfp_flower_mac_mpls)); + memset(msk, 0, sizeof(struct nfp_flower_mac_mpls)); - memset(frame, 0, sizeof(struct nfp_flower_mac_mpls)); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; - if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { - addr = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_ETH_ADDRS, - target); + flow_rule_match_eth_addrs(rule, &match); /* Populate mac frame. */ - ether_addr_copy(frame->mac_dst, &addr->dst[0]); - ether_addr_copy(frame->mac_src, &addr->src[0]); + ether_addr_copy(ext->mac_dst, &match.key->dst[0]); + ether_addr_copy(ext->mac_src, &match.key->src[0]); + ether_addr_copy(msk->mac_dst, &match.mask->dst[0]); + ether_addr_copy(msk->mac_src, &match.mask->src[0]); } - if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) { - struct flow_dissector_key_mpls *mpls; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) { + struct flow_match_mpls match; u32 t_mpls; - mpls = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_MPLS, - target); - - t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, mpls->mpls_label) | - FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, mpls->mpls_tc) | - FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, mpls->mpls_bos) | + flow_rule_match_mpls(rule, &match); + t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, match.key->mpls_label) | + FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, match.key->mpls_tc) | + FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, match.key->mpls_bos) | NFP_FLOWER_MASK_MPLS_Q; - - frame->mpls_lse = cpu_to_be32(t_mpls); - } else if (dissector_uses_key(flow->dissector, - FLOW_DISSECTOR_KEY_BASIC)) { + ext->mpls_lse = cpu_to_be32(t_mpls); + t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, match.mask->mpls_label) | + FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, match.mask->mpls_tc) | + FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, match.mask->mpls_bos) | + NFP_FLOWER_MASK_MPLS_Q; + msk->mpls_lse = cpu_to_be32(t_mpls); + } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { /* Check for mpls ether type and set NFP_FLOWER_MASK_MPLS_Q * bit, which indicates an mpls ether type but without any * mpls fields. 
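Because each flow_rule_match_*() call returns key and mask together, the match compilers above lose their bool mask_version parameter and fill the exact (ext) and mask (msk) halves in one pass instead of being called twice. A sketch of that pattern (example_ports is illustrative):

#include <net/flow_offload.h>

/* Sketch: populating exact and mask buffers from one match lookup. */
static void example_ports(struct flow_rule *rule,
                          __be16 *ext_src, __be16 *msk_src)
{
        struct flow_match_ports match;

        if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
                return;

        flow_rule_match_ports(rule, &match);
        *ext_src = match.key->src;      /* exact value */
        *msk_src = match.mask->src;     /* valid bits */
}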
*/ - struct flow_dissector_key_basic *key_basic; - - key_basic = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_BASIC, - flow->key); - if (key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_UC) || - key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_MC)) - frame->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q); + struct flow_match_basic match; + + flow_rule_match_basic(rule, &match); + if (match.key->n_proto == cpu_to_be16(ETH_P_MPLS_UC) || + match.key->n_proto == cpu_to_be16(ETH_P_MPLS_MC)) { + ext->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q); + msk->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q); + } } } static void -nfp_flower_compile_tport(struct nfp_flower_tp_ports *frame, - struct tc_cls_flower_offload *flow, - bool mask_version) +nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext, + struct nfp_flower_tp_ports *msk, + struct tc_cls_flower_offload *flow) { - struct fl_flow_key *target = mask_version ? flow->mask : flow->key; - struct flow_dissector_key_ports *tp; + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow); + + memset(ext, 0, sizeof(struct nfp_flower_tp_ports)); + memset(msk, 0, sizeof(struct nfp_flower_tp_ports)); - memset(frame, 0, sizeof(struct nfp_flower_tp_ports)); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports match; - if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_PORTS)) { - tp = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_PORTS, - target); - frame->port_src = tp->src; - frame->port_dst = tp->dst; + flow_rule_match_ports(rule, &match); + ext->port_src = match.key->src; + ext->port_dst = match.key->dst; + msk->port_src = match.mask->src; + msk->port_dst = match.mask->dst; } } static void -nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *frame, - struct tc_cls_flower_offload *flow, - bool mask_version) +nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext, + struct nfp_flower_ip_ext *msk, + struct tc_cls_flower_offload *flow) { - struct fl_flow_key *target = mask_version ? 
flow->mask : flow->key; + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow); - if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) { - struct flow_dissector_key_basic *basic; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; - basic = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_BASIC, - target); - frame->proto = basic->ip_proto; + flow_rule_match_basic(rule, &match); + ext->proto = match.key->ip_proto; + msk->proto = match.mask->ip_proto; } - if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP)) { - struct flow_dissector_key_ip *flow_ip; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { + struct flow_match_ip match; - flow_ip = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_IP, - target); - frame->tos = flow_ip->tos; - frame->ttl = flow_ip->ttl; + flow_rule_match_ip(rule, &match); + ext->tos = match.key->tos; + ext->ttl = match.key->ttl; + msk->tos = match.mask->tos; + msk->ttl = match.mask->ttl; } - if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_TCP)) { - struct flow_dissector_key_tcp *tcp; - u32 tcp_flags; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) { + u16 tcp_flags, tcp_flags_mask; + struct flow_match_tcp match; - tcp = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_TCP, target); - tcp_flags = be16_to_cpu(tcp->flags); + flow_rule_match_tcp(rule, &match); + tcp_flags = be16_to_cpu(match.key->flags); + tcp_flags_mask = be16_to_cpu(match.mask->flags); if (tcp_flags & TCPHDR_FIN) - frame->flags |= NFP_FL_TCP_FLAG_FIN; + ext->flags |= NFP_FL_TCP_FLAG_FIN; + if (tcp_flags_mask & TCPHDR_FIN) + msk->flags |= NFP_FL_TCP_FLAG_FIN; + if (tcp_flags & TCPHDR_SYN) - frame->flags |= NFP_FL_TCP_FLAG_SYN; + ext->flags |= NFP_FL_TCP_FLAG_SYN; + if (tcp_flags_mask & TCPHDR_SYN) + msk->flags |= NFP_FL_TCP_FLAG_SYN; + if (tcp_flags & TCPHDR_RST) - frame->flags |= NFP_FL_TCP_FLAG_RST; + ext->flags |= NFP_FL_TCP_FLAG_RST; + if (tcp_flags_mask & TCPHDR_RST) + msk->flags |= NFP_FL_TCP_FLAG_RST; + if (tcp_flags & TCPHDR_PSH) - frame->flags |= NFP_FL_TCP_FLAG_PSH; + ext->flags |= NFP_FL_TCP_FLAG_PSH; + if (tcp_flags_mask & TCPHDR_PSH) + msk->flags |= NFP_FL_TCP_FLAG_PSH; + if (tcp_flags & TCPHDR_URG) - frame->flags |= NFP_FL_TCP_FLAG_URG; + ext->flags |= NFP_FL_TCP_FLAG_URG; + if (tcp_flags_mask & TCPHDR_URG) + msk->flags |= NFP_FL_TCP_FLAG_URG; } - if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { - struct flow_dissector_key_control *key; - - key = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_CONTROL, - target); - if (key->flags & FLOW_DIS_IS_FRAGMENT) - frame->flags |= NFP_FL_IP_FRAGMENTED; - if (key->flags & FLOW_DIS_FIRST_FRAG) - frame->flags |= NFP_FL_IP_FRAG_FIRST; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_match_control match; + + flow_rule_match_control(rule, &match); + if (match.key->flags & FLOW_DIS_IS_FRAGMENT) + ext->flags |= NFP_FL_IP_FRAGMENTED; + if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) + msk->flags |= NFP_FL_IP_FRAGMENTED; + if (match.key->flags & FLOW_DIS_FIRST_FRAG) + ext->flags |= NFP_FL_IP_FRAG_FIRST; + if (match.mask->flags & FLOW_DIS_FIRST_FRAG) + msk->flags |= NFP_FL_IP_FRAG_FIRST; } } static void -nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame, - struct tc_cls_flower_offload *flow, - bool mask_version) +nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext, + struct nfp_flower_ipv4 *msk, + struct tc_cls_flower_offload *flow) { - struct 
fl_flow_key *target = mask_version ? flow->mask : flow->key; - struct flow_dissector_key_ipv4_addrs *addr; - - memset(frame, 0, sizeof(struct nfp_flower_ipv4)); - - if (dissector_uses_key(flow->dissector, - FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { - addr = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_IPV4_ADDRS, - target); - frame->ipv4_src = addr->src; - frame->ipv4_dst = addr->dst; + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow); + struct flow_match_ipv4_addrs match; + + memset(ext, 0, sizeof(struct nfp_flower_ipv4)); + memset(msk, 0, sizeof(struct nfp_flower_ipv4)); + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { + flow_rule_match_ipv4_addrs(rule, &match); + ext->ipv4_src = match.key->src; + ext->ipv4_dst = match.key->dst; + msk->ipv4_src = match.mask->src; + msk->ipv4_dst = match.mask->dst; } - nfp_flower_compile_ip_ext(&frame->ip_ext, flow, mask_version); + nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, flow); } static void -nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame, - struct tc_cls_flower_offload *flow, - bool mask_version) +nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext, + struct nfp_flower_ipv6 *msk, + struct tc_cls_flower_offload *flow) { - struct fl_flow_key *target = mask_version ? flow->mask : flow->key; - struct flow_dissector_key_ipv6_addrs *addr; - - memset(frame, 0, sizeof(struct nfp_flower_ipv6)); - - if (dissector_uses_key(flow->dissector, - FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { - addr = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_IPV6_ADDRS, - target); - frame->ipv6_src = addr->src; - frame->ipv6_dst = addr->dst; + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow); + + memset(ext, 0, sizeof(struct nfp_flower_ipv6)); + memset(msk, 0, sizeof(struct nfp_flower_ipv6)); + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { + struct flow_match_ipv6_addrs match; + + flow_rule_match_ipv6_addrs(rule, &match); + ext->ipv6_src = match.key->src; + ext->ipv6_dst = match.key->dst; + msk->ipv6_src = match.mask->src; + msk->ipv6_dst = match.mask->dst; } - nfp_flower_compile_ip_ext(&frame->ip_ext, flow, mask_version); + nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, flow); } static int -nfp_flower_compile_geneve_opt(void *key_buf, struct tc_cls_flower_offload *flow, - bool mask_version) +nfp_flower_compile_geneve_opt(void *ext, void *msk, + struct tc_cls_flower_offload *flow) { - struct fl_flow_key *target = mask_version ? flow->mask : flow->key; - struct flow_dissector_key_enc_opts *opts; + struct flow_match_enc_opts match; - opts = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_ENC_OPTS, - target); - memcpy(key_buf, opts->data, opts->len); + flow_rule_match_enc_opts(flow->rule, &match); + memcpy(ext, match.key->data, match.key->len); + memcpy(msk, match.mask->data, match.mask->len); return 0; } static void -nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame, - struct tc_cls_flower_offload *flow, - bool mask_version) +nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext, + struct nfp_flower_ipv4_udp_tun *msk, + struct tc_cls_flower_offload *flow) { - struct fl_flow_key *target = mask_version ? 
flow->mask : flow->key; - struct flow_dissector_key_ipv4_addrs *tun_ips; - struct flow_dissector_key_keyid *vni; - struct flow_dissector_key_ip *ip; + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow); - memset(frame, 0, sizeof(struct nfp_flower_ipv4_udp_tun)); + memset(ext, 0, sizeof(struct nfp_flower_ipv4_udp_tun)); + memset(msk, 0, sizeof(struct nfp_flower_ipv4_udp_tun)); - if (dissector_uses_key(flow->dissector, - FLOW_DISSECTOR_KEY_ENC_KEYID)) { + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { + struct flow_match_enc_keyid match; u32 temp_vni; - vni = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_ENC_KEYID, - target); - temp_vni = be32_to_cpu(vni->keyid) << NFP_FL_TUN_VNI_OFFSET; - frame->tun_id = cpu_to_be32(temp_vni); + flow_rule_match_enc_keyid(rule, &match); + temp_vni = be32_to_cpu(match.key->keyid) << NFP_FL_TUN_VNI_OFFSET; + ext->tun_id = cpu_to_be32(temp_vni); + temp_vni = be32_to_cpu(match.mask->keyid) << NFP_FL_TUN_VNI_OFFSET; + msk->tun_id = cpu_to_be32(temp_vni); } - if (dissector_uses_key(flow->dissector, - FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) { - tun_ips = - skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, - target); - frame->ip_src = tun_ips->src; - frame->ip_dst = tun_ips->dst; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) { + struct flow_match_ipv4_addrs match; + + flow_rule_match_enc_ipv4_addrs(rule, &match); + ext->ip_src = match.key->src; + ext->ip_dst = match.key->dst; + msk->ip_src = match.mask->src; + msk->ip_dst = match.mask->dst; } - if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ENC_IP)) { - ip = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_ENC_IP, - target); - frame->tos = ip->tos; - frame->ttl = ip->ttl; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) { + struct flow_match_ip match; + + flow_rule_match_enc_ip(rule, &match); + ext->tos = match.key->tos; + ext->ttl = match.key->ttl; + msk->tos = match.mask->tos; + msk->ttl = match.mask->ttl; } } @@ -313,12 +341,9 @@ int nfp_flower_compile_flow_match(struct nfp_app *app, ext = nfp_flow->unmasked_data; msk = nfp_flow->mask_data; - /* Populate Exact Metadata. */ nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext, - flow, key_ls->key_layer, false); - /* Populate Mask Metadata. */ - nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)msk, - flow, key_ls->key_layer, true); + (struct nfp_flower_meta_tci *)msk, + flow, key_ls->key_layer); ext += sizeof(struct nfp_flower_meta_tci); msk += sizeof(struct nfp_flower_meta_tci); @@ -348,45 +373,33 @@ int nfp_flower_compile_flow_match(struct nfp_app *app, msk += sizeof(struct nfp_flower_in_port); if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) { - /* Populate Exact MAC Data. */ nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext, - flow, false); - /* Populate Mask MAC Data. */ - nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)msk, - flow, true); + (struct nfp_flower_mac_mpls *)msk, + flow); ext += sizeof(struct nfp_flower_mac_mpls); msk += sizeof(struct nfp_flower_mac_mpls); } if (NFP_FLOWER_LAYER_TP & key_ls->key_layer) { - /* Populate Exact TP Data. */ nfp_flower_compile_tport((struct nfp_flower_tp_ports *)ext, - flow, false); - /* Populate Mask TP Data. 
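The tunnel-match hunks use the enc_* variants of the same helpers; note the VNI handling, where the 24-bit key id is shifted into the firmware's expected position for key and mask alike. A small worked sketch, assuming NFP_FL_TUN_VNI_OFFSET is 8 as in this driver:

/* Sketch: packing a VXLAN/Geneve VNI for hardware. With
 * keyid = 0x00001234 and an 8-bit offset, tun_id becomes 0x00123400.
 */
static __be32 example_pack_vni(__be32 keyid)
{
        u32 vni = be32_to_cpu(keyid) << 8;      /* NFP_FL_TUN_VNI_OFFSET */

        return cpu_to_be32(vni);
}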
*/ - nfp_flower_compile_tport((struct nfp_flower_tp_ports *)msk, - flow, true); + (struct nfp_flower_tp_ports *)msk, + flow); ext += sizeof(struct nfp_flower_tp_ports); msk += sizeof(struct nfp_flower_tp_ports); } if (NFP_FLOWER_LAYER_IPV4 & key_ls->key_layer) { - /* Populate Exact IPv4 Data. */ nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)ext, - flow, false); - /* Populate Mask IPv4 Data. */ - nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)msk, - flow, true); + (struct nfp_flower_ipv4 *)msk, + flow); ext += sizeof(struct nfp_flower_ipv4); msk += sizeof(struct nfp_flower_ipv4); } if (NFP_FLOWER_LAYER_IPV6 & key_ls->key_layer) { - /* Populate Exact IPv4 Data. */ nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)ext, - flow, false); - /* Populate Mask IPv4 Data. */ - nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)msk, - flow, true); + (struct nfp_flower_ipv6 *)msk, + flow); ext += sizeof(struct nfp_flower_ipv6); msk += sizeof(struct nfp_flower_ipv6); } @@ -395,10 +408,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app, key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) { __be32 tun_dst; - /* Populate Exact VXLAN Data. */ - nfp_flower_compile_ipv4_udp_tun((void *)ext, flow, false); - /* Populate Mask VXLAN Data. */ - nfp_flower_compile_ipv4_udp_tun((void *)msk, flow, true); + nfp_flower_compile_ipv4_udp_tun((void *)ext, (void *)msk, flow); tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ip_dst; ext += sizeof(struct nfp_flower_ipv4_udp_tun); msk += sizeof(struct nfp_flower_ipv4_udp_tun); @@ -410,11 +420,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app, nfp_tunnel_add_ipv4_off(app, tun_dst); if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) { - err = nfp_flower_compile_geneve_opt(ext, flow, false); - if (err) - return err; - - err = nfp_flower_compile_geneve_opt(msk, flow, true); + err = nfp_flower_compile_geneve_opt(ext, msk, flow); if (err) return err; } diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 2cdbf29ecbe7..450d7296fd57 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -102,23 +102,22 @@ nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow, static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f) { - return dissector_uses_key(f->dissector, - FLOW_DISSECTOR_KEY_IPV4_ADDRS) || - dissector_uses_key(f->dissector, - FLOW_DISSECTOR_KEY_IPV6_ADDRS) || - dissector_uses_key(f->dissector, - FLOW_DISSECTOR_KEY_PORTS) || - dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ICMP); + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f); + + return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) || + flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) || + flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) || + flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP); } static int -nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts, +nfp_flower_calc_opt_layer(struct flow_match_enc_opts *enc_opts, u32 *key_layer_two, int *key_size) { - if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY) + if (enc_opts->key->len > NFP_FL_MAX_GENEVE_OPT_KEY) return -EOPNOTSUPP; - if (enc_opts->len > 0) { + if (enc_opts->key->len > 0) { *key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP; *key_size += sizeof(struct nfp_flower_geneve_options); } @@ -133,20 +132,21 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, struct tc_cls_flower_offload *flow, enum 
nfp_flower_tun_type *tun_type) { - struct flow_dissector_key_basic *mask_basic = NULL; - struct flow_dissector_key_basic *key_basic = NULL; + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow); + struct flow_dissector *dissector = rule->match.dissector; + struct flow_match_basic basic = { NULL, NULL}; struct nfp_flower_priv *priv = app->priv; u32 key_layer_two; u8 key_layer; int key_size; int err; - if (flow->dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR) + if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR) return -EOPNOTSUPP; /* If any tun dissector is used then the required set must be used. */ - if (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR && - (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) + if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR && + (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) return -EOPNOTSUPP; @@ -155,76 +155,52 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, key_size = sizeof(struct nfp_flower_meta_tci) + sizeof(struct nfp_flower_in_port); - if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS) || - dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) { + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS) || + flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) { key_layer |= NFP_FLOWER_LAYER_MAC; key_size += sizeof(struct nfp_flower_mac_mpls); } - if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) { - struct flow_dissector_key_vlan *flow_vlan; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan vlan; - flow_vlan = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_VLAN, - flow->mask); + flow_rule_match_vlan(rule, &vlan); if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) && - flow_vlan->vlan_priority) + vlan.key->vlan_priority) return -EOPNOTSUPP; } - if (dissector_uses_key(flow->dissector, - FLOW_DISSECTOR_KEY_ENC_CONTROL)) { - struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL; - struct flow_dissector_key_ports *mask_enc_ports = NULL; - struct flow_dissector_key_enc_opts *enc_op = NULL; - struct flow_dissector_key_ports *enc_ports = NULL; - struct flow_dissector_key_control *mask_enc_ctl = - skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_ENC_CONTROL, - flow->mask); - struct flow_dissector_key_control *enc_ctl = - skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_ENC_CONTROL, - flow->key); - - if (mask_enc_ctl->addr_type != 0xffff || - enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) { + struct flow_match_enc_opts enc_op = { NULL, NULL }; + struct flow_match_ipv4_addrs ipv4_addrs; + struct flow_match_control enc_ctl; + struct flow_match_ports enc_ports; + + flow_rule_match_enc_control(rule, &enc_ctl); + + if (enc_ctl.mask->addr_type != 0xffff || + enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) return -EOPNOTSUPP; /* These fields are already verified as used. 
*/ - mask_ipv4 = - skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, - flow->mask); - if (mask_ipv4->dst != cpu_to_be32(~0)) + flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs); + if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) return -EOPNOTSUPP; - mask_enc_ports = - skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_ENC_PORTS, - flow->mask); - enc_ports = - skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_ENC_PORTS, - flow->key); - - if (mask_enc_ports->dst != cpu_to_be16(~0)) + flow_rule_match_enc_ports(rule, &enc_ports); + if (enc_ports.mask->dst != cpu_to_be16(~0)) return -EOPNOTSUPP; - if (dissector_uses_key(flow->dissector, - FLOW_DISSECTOR_KEY_ENC_OPTS)) { - enc_op = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_ENC_OPTS, - flow->key); - } + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) + flow_rule_match_enc_opts(rule, &enc_op); - switch (enc_ports->dst) { + switch (enc_ports.key->dst) { case htons(NFP_FL_VXLAN_PORT): *tun_type = NFP_FL_TUNNEL_VXLAN; key_layer |= NFP_FLOWER_LAYER_VXLAN; key_size += sizeof(struct nfp_flower_ipv4_udp_tun); - if (enc_op) + if (enc_op.key) return -EOPNOTSUPP; break; case htons(NFP_FL_GENEVE_PORT): @@ -236,11 +212,11 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, key_layer_two |= NFP_FLOWER_LAYER2_GENEVE; key_size += sizeof(struct nfp_flower_ipv4_udp_tun); - if (!enc_op) + if (!enc_op.key) break; if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)) return -EOPNOTSUPP; - err = nfp_flower_calc_opt_layer(enc_op, &key_layer_two, + err = nfp_flower_calc_opt_layer(&enc_op, &key_layer_two, &key_size); if (err) return err; @@ -254,19 +230,12 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, return -EOPNOTSUPP; } - if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) { - mask_basic = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_BASIC, - flow->mask); - - key_basic = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_BASIC, - flow->key); - } + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) + flow_rule_match_basic(rule, &basic); - if (mask_basic && mask_basic->n_proto) { + if (basic.mask && basic.mask->n_proto) { /* Ethernet type is present in the key. */ - switch (key_basic->n_proto) { + switch (basic.key->n_proto) { case cpu_to_be16(ETH_P_IP): key_layer |= NFP_FLOWER_LAYER_IPV4; key_size += sizeof(struct nfp_flower_ipv4); @@ -305,9 +274,9 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, } } - if (mask_basic && mask_basic->ip_proto) { + if (basic.mask && basic.mask->ip_proto) { /* Ethernet type is present in the key. */ - switch (key_basic->ip_proto) { + switch (basic.key->ip_proto) { case IPPROTO_TCP: case IPPROTO_UDP: case IPPROTO_SCTP: @@ -324,14 +293,12 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, } } - if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_TCP)) { - struct flow_dissector_key_tcp *tcp; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) { + struct flow_match_tcp tcp; u32 tcp_flags; - tcp = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_TCP, - flow->key); - tcp_flags = be16_to_cpu(tcp->flags); + flow_rule_match_tcp(rule, &tcp); + tcp_flags = be16_to_cpu(tcp.key->flags); if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS) return -EOPNOTSUPP; @@ -347,12 +314,12 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, * space, thus we need to ensure we include a IPv4/IPv6 key * layer if we have not done so already. 
*/ - if (!key_basic) + if (!basic.key) return -EOPNOTSUPP; if (!(key_layer & NFP_FLOWER_LAYER_IPV4) && !(key_layer & NFP_FLOWER_LAYER_IPV6)) { - switch (key_basic->n_proto) { + switch (basic.key->n_proto) { case cpu_to_be16(ETH_P_IP): key_layer |= NFP_FLOWER_LAYER_IPV4; key_size += sizeof(struct nfp_flower_ipv4); @@ -369,14 +336,11 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, } } - if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { - struct flow_dissector_key_control *key_ctl; - - key_ctl = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_CONTROL, - flow->key); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_match_control ctl; - if (key_ctl->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS) + flow_rule_match_control(rule, &ctl); + if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS) return -EOPNOTSUPP; } @@ -589,9 +553,8 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev, ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id); spin_lock_bh(&priv->stats_lock); - tcf_exts_stats_update(flow->exts, priv->stats[ctx_id].bytes, - priv->stats[ctx_id].pkts, - priv->stats[ctx_id].used); + flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes, + priv->stats[ctx_id].pkts, priv->stats[ctx_id].used); priv->stats[ctx_id].pkts = 0; priv->stats[ctx_id].bytes = 0; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c index dddbb0575be9..db2da99f6aa7 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c @@ -178,7 +178,7 @@ static const struct nfp_devlink_versions_simple { } nfp_devlink_versions_hwinfo[] = { { DEVLINK_INFO_VERSION_GENERIC_BOARD_ID, "assembly.partno", }, { DEVLINK_INFO_VERSION_GENERIC_BOARD_REV, "assembly.revision", }, - { "board.vendor", /* fab */ "assembly.vendor", }, + { DEVLINK_INFO_VERSION_GENERIC_BOARD_MANUFACTURE, "assembly.vendor", }, { "board.model", /* code name */ "assembly.model", }, }; @@ -258,18 +258,33 @@ nfp_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req, struct netlink_ext_ack *extack) { struct nfp_pf *pf = devlink_priv(devlink); + const char *sn, *vendor, *part; struct nfp_nsp *nsp; char *buf = NULL; - const char *sn; int err; err = devlink_info_driver_name_put(req, "nfp"); if (err) return err; + vendor = nfp_hwinfo_lookup(pf->hwinfo, "assembly.vendor"); + part = nfp_hwinfo_lookup(pf->hwinfo, "assembly.partno"); sn = nfp_hwinfo_lookup(pf->hwinfo, "assembly.serial"); - if (sn) { - err = devlink_info_serial_number_put(req, sn); + if (vendor && part && sn) { + char *buf; + + buf = kmalloc(strlen(vendor) + strlen(part) + strlen(sn) + 1, + GFP_KERNEL); + if (!buf) + return -ENOMEM; + + buf[0] = '\0'; + strcat(buf, vendor); + strcat(buf, part); + strcat(buf, sn); + + err = devlink_info_serial_number_put(req, buf); + kfree(buf); if (err) return err; } @@ -315,6 +330,15 @@ err_close_nsp: return err; } +static int +nfp_devlink_flash_update(struct devlink *devlink, const char *path, + const char *component, struct netlink_ext_ack *extack) +{ + if (component) + return -EOPNOTSUPP; + return nfp_flash_update_common(devlink_priv(devlink), path, extack); +} + const struct devlink_ops nfp_devlink_ops = { .port_split = nfp_devlink_port_split, .port_unsplit = nfp_devlink_port_unsplit, @@ -323,6 +347,7 @@ const struct devlink_ops nfp_devlink_ops = { .eswitch_mode_get = nfp_devlink_eswitch_mode_get, .eswitch_mode_set = nfp_devlink_eswitch_mode_set, .info_get = 
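On the stats path, tcf_exts_stats_update() gives way to flow_stats_update(), which accumulates into the flow_stats embedded in the offload request instead of poking TC actions directly. A minimal sketch of a stats callback body (hypothetical counters):

#include <net/flow_offload.h>

/* Sketch: reporting hardware counters back through the offload struct.
 * Counters are deltas since the last read, which is why the nfp code
 * zeroes its priv->stats entry right after this call.
 */
static void example_stats(struct tc_cls_flower_offload *flow,
                          u64 bytes, u64 pkts, u64 lastused)
{
        flow_stats_update(&flow->stats, bytes, pkts, lastused);
}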
nfp_devlink_info_get, + .flash_update = nfp_devlink_flash_update, }; int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 6c10e8d119e4..f4c8776e42b6 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -300,6 +300,47 @@ static int nfp_pcie_sriov_configure(struct pci_dev *pdev, int num_vfs) return nfp_pcie_sriov_enable(pdev, num_vfs); } +int nfp_flash_update_common(struct nfp_pf *pf, const char *path, + struct netlink_ext_ack *extack) +{ + struct device *dev = &pf->pdev->dev; + const struct firmware *fw; + struct nfp_nsp *nsp; + int err; + + nsp = nfp_nsp_open(pf->cpp); + if (IS_ERR(nsp)) { + err = PTR_ERR(nsp); + if (extack) + NL_SET_ERR_MSG_MOD(extack, "can't access NSP"); + else + dev_err(dev, "Failed to access the NSP: %d\n", err); + return err; + } + + err = request_firmware_direct(&fw, path, dev); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "unable to read flash file from disk"); + goto exit_close_nsp; + } + + dev_info(dev, "Please be patient while writing flash image: %s\n", + path); + + err = nfp_nsp_write_flash(nsp, fw); + if (err < 0) + goto exit_release_fw; + dev_info(dev, "Finished writing flash image\n"); + err = 0; + +exit_release_fw: + release_firmware(fw); +exit_close_nsp: + nfp_nsp_close(nsp); + return err; +} + static const struct firmware * nfp_net_fw_request(struct pci_dev *pdev, struct nfp_pf *pf, const char *name) { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h index a3613a2e0aa5..b7211f200d22 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h @@ -164,6 +164,8 @@ nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt, unsigned int min_size, struct nfp_cpp_area **area); int nfp_mbox_cmd(struct nfp_pf *pf, u32 cmd, void *in_data, u64 in_length, void *out_data, u64 out_length); +int nfp_flash_update_common(struct nfp_pf *pf, const char *path, + struct netlink_ext_ack *extack); enum nfp_dump_diag { NFP_DUMP_NSP_DIAG = 0, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 7d2d4241498f..776f6c07701b 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -36,7 +36,6 @@ #include <linux/vmalloc.h> #include <linux/ktime.h> -#include <net/switchdev.h> #include <net/vxlan.h> #include "nfpcore/nfp_nsp.h" @@ -3531,6 +3530,7 @@ const struct net_device_ops nfp_net_netdev_ops = { .ndo_udp_tunnel_add = nfp_net_add_vxlan_port, .ndo_udp_tunnel_del = nfp_net_del_vxlan_port, .ndo_bpf = nfp_net_xdp, + .ndo_get_port_parent_id = nfp_port_get_port_parent_id, }; /** @@ -3815,8 +3815,6 @@ static void nfp_net_netdev_init(struct nfp_net *nn) netdev->netdev_ops = &nfp_net_netdev_ops; netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000); - SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops); - /* MTU range: 68 - hw-specific max */ netdev->min_mtu = ETH_MIN_MTU; netdev->max_mtu = nn->max_mtu; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index cb9c512abc76..8f189149efc5 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -1237,11 +1237,8 @@ static int nfp_net_set_channels(struct 
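nfp_flash_update_common() serves both the new devlink .flash_update op and the older ethtool path, so its error reporting branches on whether an extack is available. A sketch of just that fallback pattern (example_report is illustrative):

#include <linux/netlink.h>
#include <linux/device.h>

/* Sketch: report via extack when called from netlink (devlink),
 * fall back to dev_err() when called from ethtool with extack == NULL.
 */
static void example_report(struct device *dev,
                           struct netlink_ext_ack *extack, int err)
{
        if (extack)
                NL_SET_ERR_MSG_MOD(extack, "can't access NSP");
        else
                dev_err(dev, "Failed to access the NSP: %d\n", err);
}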
net_device *netdev, static int nfp_net_flash_device(struct net_device *netdev, struct ethtool_flash *flash) { - const struct firmware *fw; struct nfp_app *app; - struct nfp_nsp *nsp; - struct device *dev; - int err; + int ret; if (flash->region != ETHTOOL_FLASH_ALL_REGIONS) return -EOPNOTSUPP; @@ -1250,39 +1247,13 @@ nfp_net_flash_device(struct net_device *netdev, struct ethtool_flash *flash) if (!app) return -EOPNOTSUPP; - dev = &app->pdev->dev; - - nsp = nfp_nsp_open(app->cpp); - if (IS_ERR(nsp)) { - err = PTR_ERR(nsp); - dev_err(dev, "Failed to access the NSP: %d\n", err); - return err; - } - - err = request_firmware_direct(&fw, flash->data, dev); - if (err) - goto exit_close_nsp; - - dev_info(dev, "Please be patient while writing flash image: %s\n", - flash->data); dev_hold(netdev); rtnl_unlock(); - - err = nfp_nsp_write_flash(nsp, fw); - if (err < 0) { - dev_err(dev, "Flash write failed: %d\n", err); - goto exit_rtnl_lock; - } - dev_info(dev, "Finished writing flash image\n"); - -exit_rtnl_lock: + ret = nfp_flash_update_common(app->pf, flash->data, NULL); rtnl_lock(); dev_put(netdev); - release_firmware(fw); -exit_close_nsp: - nfp_nsp_close(nsp); - return err; + return ret; } static const struct ethtool_ops nfp_net_ethtool_ops = { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index 69d7aebda09b..62839807e21e 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -5,7 +5,6 @@ #include <linux/io-64-nonatomic-hi-lo.h> #include <linux/lockdep.h> #include <net/dst_metadata.h> -#include <net/switchdev.h> #include "nfpcore/nfp_cpp.h" #include "nfpcore/nfp_nsp.h" @@ -273,6 +272,7 @@ const struct net_device_ops nfp_repr_netdev_ops = { .ndo_fix_features = nfp_repr_fix_features, .ndo_set_features = nfp_port_set_features, .ndo_set_mac_address = eth_mac_addr, + .ndo_get_port_parent_id = nfp_port_get_port_parent_id, }; void @@ -336,8 +336,6 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev, netdev->max_mtu = pf_netdev->max_mtu; - SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops); - /* Set features the lower device can support with representors */ if (repr_cap & NFP_NET_CFG_CTRL_LIVE_ADDR) netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.c b/drivers/net/ethernet/netronome/nfp/nfp_port.c index 86bc149ca231..7e90880fa46b 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.c @@ -31,34 +31,22 @@ struct nfp_port *nfp_port_from_netdev(struct net_device *netdev) return NULL; } -static int -nfp_port_attr_get(struct net_device *netdev, struct switchdev_attr *attr) +int nfp_port_get_port_parent_id(struct net_device *netdev, + struct netdev_phys_item_id *ppid) { struct nfp_port *port; + const u8 *serial; port = nfp_port_from_netdev(netdev); if (!port) return -EOPNOTSUPP; - switch (attr->id) { - case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: { - const u8 *serial; - /* N.B: attr->u.ppid.id is binary data */ - attr->u.ppid.id_len = nfp_cpp_serial(port->app->cpp, &serial); - memcpy(&attr->u.ppid.id, serial, attr->u.ppid.id_len); - break; - } - default: - return -EOPNOTSUPP; - } + ppid->id_len = nfp_cpp_serial(port->app->cpp, &serial); + memcpy(&ppid->id, serial, ppid->id_len); return 0; } -const struct switchdev_ops nfp_port_switchdev_ops = { - .switchdev_port_attr_get = nfp_port_attr_get, -}; - int nfp_port_setup_tc(struct net_device *netdev, enum tc_setup_type 
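The switchdev_ops removal repeats for both nfp netdev types: the SWITCHDEV_ATTR_ID_PORT_PARENT_ID case moves into a dedicated ndo_get_port_parent_id callback and the attr switch disappears. A minimal sketch of the callback shape (example_serial stands in for the real device serial):

#include <linux/netdevice.h>
#include <linux/string.h>

/* Sketch: exposing the switch id via the dedicated ndo instead of
 * switchdev attr_get.
 */
static int example_get_port_parent_id(struct net_device *netdev,
                                      struct netdev_phys_item_id *ppid)
{
        static const u8 example_serial[] = { 0xde, 0xad, 0xbe, 0xef };

        ppid->id_len = sizeof(example_serial);
        memcpy(ppid->id, example_serial, ppid->id_len);
        return 0;
}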
type, void *type_data) { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h index b2479a2a49e5..90ae053f5c07 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h @@ -7,6 +7,7 @@ #include <net/devlink.h> struct net_device; +struct netdev_phys_item_id; struct nfp_app; struct nfp_pf; struct nfp_port; @@ -90,7 +91,6 @@ struct nfp_port { }; extern const struct ethtool_ops nfp_port_ethtool_ops; -extern const struct switchdev_ops nfp_port_switchdev_ops; __printf(2, 3) u8 *nfp_pr_et(u8 *data, const char *fmt, ...); @@ -106,6 +106,8 @@ int nfp_port_set_features(struct net_device *netdev, netdev_features_t features); struct nfp_port *nfp_port_from_netdev(struct net_device *netdev); +int nfp_port_get_port_parent_id(struct net_device *netdev, + struct netdev_phys_item_id *ppid); struct nfp_port * nfp_port_from_id(struct nfp_pf *pf, enum nfp_port_type type, unsigned int id); struct nfp_eth_table_port *__nfp_port_get_eth_port(struct nfp_port *port); diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c index c662c6f5bee3..67bf02b0763a 100644 --- a/drivers/net/ethernet/nuvoton/w90p910_ether.c +++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c @@ -630,7 +630,7 @@ static int w90p910_ether_start_xmit(struct sk_buff *skb, struct net_device *dev) if (!(w90p910_send_frame(dev, skb->data, skb->len))) { ether->skb = skb; - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); return 0; } return -EAGAIN; diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c index c9529c29a0a7..eee883a2aa8d 100644 --- a/drivers/net/ethernet/packetengines/hamachi.c +++ b/drivers/net/ethernet/packetengines/hamachi.c @@ -1337,7 +1337,7 @@ static irqreturn_t hamachi_interrupt(int irq, void *dev_instance) leXX_to_cpu(hmp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE); - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); hmp->tx_skbuff[entry] = NULL; } hmp->tx_ring[entry].status_n_length = 0; diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c index 54224d1822e3..6f8d6584f809 100644 --- a/drivers/net/ethernet/packetengines/yellowfin.c +++ b/drivers/net/ethernet/packetengines/yellowfin.c @@ -925,7 +925,7 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance) /* Free the original skb. */ pci_unmap_single(yp->pci_dev, le32_to_cpu(yp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE); - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); yp->tx_skbuff[entry] = NULL; } if (yp->tx_full && @@ -983,7 +983,7 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance) pci_unmap_single(yp->pci_dev, yp->tx_ring[entry<<1].addr, skb->len, PCI_DMA_TODEVICE); - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); yp->tx_skbuff[entry] = 0; /* Mark status as empty. 
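The dev_kfree_skb_irq() to dev_consume_skb_irq() substitutions in the TX-completion handlers here and below are semantic rather than functional: both free the skb from hard-irq context, but consume marks it as successfully transmitted, so it fires the consume_skb tracepoint instead of kfree_skb and no longer shows up as a drop in dropwatch/perf. A sketch of the distinction:

#include <linux/netdevice.h>

/* Sketch: picking the right free in a TX-completion handler. */
static void example_tx_complete(struct sk_buff *skb, bool tx_ok)
{
        if (tx_ok)
                dev_consume_skb_irq(skb);       /* sent fine: not a drop */
        else
                dev_kfree_skb_irq(skb);         /* real drop: trace it */
}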
*/ yp->tx_status[entry].tx_errs = 0; diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 3b0955d34716..43a57ec296fd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -53,7 +53,7 @@ extern const struct qed_common_ops qed_common_ops_pass; #define QED_MAJOR_VERSION 8 -#define QED_MINOR_VERSION 33 +#define QED_MINOR_VERSION 37 #define QED_REVISION_VERSION 0 #define QED_ENGINEERING_VERSION 20 @@ -753,6 +753,7 @@ struct qed_dev { #define CHIP_BOND_ID_SHIFT 0 u8 num_engines; + u8 num_ports; u8 num_ports_in_engine; u8 num_funcs_in_port; @@ -892,7 +893,6 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); int qed_device_num_engines(struct qed_dev *cdev); -int qed_device_get_port_id(struct qed_dev *cdev); void qed_set_fw_mac_addr(__le16 *fw_msb, __le16 *fw_mid, __le16 *fw_lsb, u8 *mac); @@ -939,6 +939,10 @@ bool qed_edpm_enabled(struct qed_hwfn *p_hwfn); writel((u32)val, (void __iomem *)((u8 __iomem *)\ (cdev->doorbells) + (db_addr))) +#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \ + qed_device_num_ports((_p_hwfn)->cdev)) +int qed_device_num_ports(struct qed_dev *cdev); + /* Prototypes */ int qed_fill_dev_info(struct qed_dev *cdev, struct qed_dev_info *dev_info); diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 35c9f484eb9f..e61d1d905415 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -2135,12 +2135,12 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks) struct qed_eth_pf_params *p_params = &p_hwfn->pf_params.eth_pf_params; - if (!p_params->num_vf_cons) - p_params->num_vf_cons = - ETH_PF_PARAMS_VF_CONS_DEFAULT; - qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH, - p_params->num_cons, - p_params->num_vf_cons); + if (!p_params->num_vf_cons) + p_params->num_vf_cons = + ETH_PF_PARAMS_VF_CONS_DEFAULT; + qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH, + p_params->num_cons, + p_params->num_vf_cons); p_hwfn->p_cxt_mngr->arfs_count = p_params->num_arfs_filters; break; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index e2cbd77646a2..9df8c4b3b54e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -3269,55 +3269,43 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine); } -static void qed_hw_info_port_num_bb(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) -{ - u32 port_mode; - - port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB); - - if (port_mode < 3) { - p_hwfn->cdev->num_ports_in_engine = 1; - } else if (port_mode <= 5) { - p_hwfn->cdev->num_ports_in_engine = 2; - } else { - DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n", - p_hwfn->cdev->num_ports_in_engine); - - /* Default num_ports_in_engine to something */ - p_hwfn->cdev->num_ports_in_engine = 1; - } -} - -static void qed_hw_info_port_num_ah(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +static void qed_hw_info_port_num(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { - u32 port; - int i; - - p_hwfn->cdev->num_ports_in_engine = 0; + u32 addr, global_offsize, global_addr, port_mode; + struct qed_dev *cdev = p_hwfn->cdev; - for (i = 0; i < MAX_NUM_PORTS_K2; i++) { - port = qed_rd(p_hwfn, p_ptt, - CNIG_REG_NIG_PORT0_CONF_K2 + (i * 4)); - 
if (port & 1) - p_hwfn->cdev->num_ports_in_engine++; + /* In CMT there is always only one port */ + if (cdev->num_hwfns > 1) { + cdev->num_ports_in_engine = 1; + cdev->num_ports = 1; + return; } - if (!p_hwfn->cdev->num_ports_in_engine) { - DP_NOTICE(p_hwfn, "All NIG ports are inactive\n"); - - /* Default num_ports_in_engine to something */ - p_hwfn->cdev->num_ports_in_engine = 1; + /* Determine the number of ports per engine */ + port_mode = qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE); + switch (port_mode) { + case 0x0: + cdev->num_ports_in_engine = 1; + break; + case 0x1: + cdev->num_ports_in_engine = 2; + break; + case 0x2: + cdev->num_ports_in_engine = 4; + break; + default: + DP_NOTICE(p_hwfn, "Unknown port mode 0x%08x\n", port_mode); + cdev->num_ports_in_engine = 1; /* Default to something */ + break; } -} -static void qed_hw_info_port_num(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) -{ - if (QED_IS_BB(p_hwfn->cdev)) - qed_hw_info_port_num_bb(p_hwfn, p_ptt); - else - qed_hw_info_port_num_ah(p_hwfn, p_ptt); + /* Get the total number of ports of the device */ + addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_GLOBAL); + global_offsize = qed_rd(p_hwfn, p_ptt, addr); + global_addr = SECTION_ADDR(global_offsize, 0); + addr = global_addr + offsetof(struct public_global, max_ports); + cdev->num_ports = (u8)qed_rd(p_hwfn, p_ptt, addr); } static void qed_get_eee_caps(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) @@ -3355,7 +3343,8 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn, return rc; } - qed_hw_info_port_num(p_hwfn, p_ptt); + if (IS_LEAD_HWFN(p_hwfn)) + qed_hw_info_port_num(p_hwfn, p_ptt); qed_mcp_get_capabilities(p_hwfn, p_ptt); @@ -4760,23 +4749,9 @@ void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports); } -int qed_device_num_engines(struct qed_dev *cdev) -{ - return QED_IS_BB(cdev) ? 2 : 1; -} - -static int qed_device_num_ports(struct qed_dev *cdev) -{ - /* in CMT always only one port */ - if (cdev->num_hwfns > 1) - return 1; - - return cdev->num_ports_in_engine * qed_device_num_engines(cdev); -} - -int qed_device_get_port_id(struct qed_dev *cdev) +int qed_device_num_ports(struct qed_dev *cdev) { - return (QED_LEADING_HWFN(cdev)->abs_pf_id) % qed_device_num_ports(cdev); + return cdev->num_ports; } void qed_set_fw_mac_addr(__le16 *fw_msb, diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 417121e74ee9..37edaa847512 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -12796,6 +12796,7 @@ struct public_drv_mb { #define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3 /* get MFW feature support response */ +#define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ 0x00000001 #define FW_MB_PARAM_FEATURE_SUPPORT_EEE 0x00000002 #define FW_MB_PARAM_FEATURE_SUPPORT_VLINK 0x00010000 diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index beb8e5d6401a..ded556b7bab5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -1688,6 +1688,15 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn, eth_hlen = ETH_HLEN + (vlan_valid ? 
sizeof(u32) : 0); + if (!ether_addr_equal(ethh->h_dest, + p_hwfn->p_rdma_info->iwarp.mac_addr)) { + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "Got unexpected mac %pM instead of %pM\n", + ethh->h_dest, p_hwfn->p_rdma_info->iwarp.mac_addr); + return -EINVAL; + } + ether_addr_copy(remote_mac_addr, ethh->h_source); ether_addr_copy(local_mac_addr, ethh->h_dest); @@ -2605,7 +2614,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, struct qed_iwarp_info *iwarp_info; struct qed_ll2_acquire_data data; struct qed_ll2_cbs cbs; - u32 mpa_buff_size; + u32 buff_size; u16 n_ooo_bufs; int rc = 0; int i; @@ -2632,7 +2641,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, memset(&data, 0, sizeof(data)); data.input.conn_type = QED_LL2_TYPE_IWARP; - data.input.mtu = QED_IWARP_MAX_SYN_PKT_SIZE; + data.input.mtu = params->max_mtu; data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE; data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE; data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */ @@ -2654,9 +2663,10 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, goto err; } + buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu); rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, QED_IWARP_LL2_SYN_RX_SIZE, - QED_IWARP_MAX_SYN_PKT_SIZE, + buff_size, iwarp_info->ll2_syn_handle); if (rc) goto err; @@ -2710,10 +2720,9 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, if (rc) goto err; - mpa_buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu); rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, data.input.rx_num_desc, - mpa_buff_size, + buff_size, iwarp_info->ll2_mpa_handle); if (rc) goto err; @@ -2726,7 +2735,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps; - iwarp_info->mpa_intermediate_buf = kzalloc(mpa_buff_size, GFP_KERNEL); + iwarp_info->mpa_intermediate_buf = kzalloc(buff_size, GFP_KERNEL); if (!iwarp_info->mpa_intermediate_buf) goto err; diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h index b8f612d00241..7ac959038324 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h @@ -46,7 +46,6 @@ enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state); #define QED_IWARP_LL2_SYN_TX_SIZE (128) #define QED_IWARP_LL2_SYN_RX_SIZE (256) -#define QED_IWARP_MAX_SYN_PKT_SIZE (128) #define QED_IWARP_LL2_OOO_DEF_TX_SIZE (256) #define QED_IWARP_MAX_OOO (16) diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index e68ca83ae915..57641728df69 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -1898,6 +1898,7 @@ static void _qed_get_vport_stats(struct qed_dev *cdev, struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn) : NULL; + bool b_get_port_stats; if (IS_PF(cdev)) { /* The main vport index is relative first */ @@ -1912,8 +1913,9 @@ static void _qed_get_vport_stats(struct qed_dev *cdev, continue; } + b_get_port_stats = IS_PF(cdev) && IS_LEAD_HWFN(p_hwfn); __qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport, - IS_PF(cdev) ? true : false); + b_get_port_stats); out: if (IS_PF(cdev) && p_ptt) @@ -2216,7 +2218,7 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, u16 num_queues = 0; /* Since the feature controls only queue-zones, - * make sure we have the contexts [rx, tx, xdp] to + * make sure we have the contexts [rx, xdp, tcs] to * match. 
*/ for_each_hwfn(cdev, i) { @@ -2226,7 +2228,8 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, u16 cids; cids = hwfn->pf_params.eth_pf_params.num_cons; - num_queues += min_t(u16, l2_queues, cids / 3); + cids /= (2 + info->num_tc); + num_queues += min_t(u16, l2_queues, cids); } /* queues might theoretically be >256, but interrupts' @@ -2870,7 +2873,8 @@ static int qed_get_coalesce(struct qed_dev *cdev, u16 *coal, void *handle) p_hwfn = p_cid->p_owner; rc = qed_get_queue_coalesce(p_hwfn, coal, handle); if (rc) - DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n"); + DP_VERBOSE(cdev, QED_MSG_DEBUG, + "Unable to read queue coalescing\n"); return rc; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index b47352643fb5..f164d4acebcb 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -281,6 +281,8 @@ int qed_fill_dev_info(struct qed_dev *cdev, if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME) dev_info->wol_support = true; + dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn); + dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id; } else { qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index bb8541847aa5..cc27fd60d689 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -3654,6 +3654,12 @@ void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock, } } +bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn) +{ + return !!(p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ); +} + int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 mcp_resp; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 6e1d72a669ae..261c1a392e2c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -691,10 +691,6 @@ int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); rel_pfid) #define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id) -#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \ - ((_p_hwfn)->cdev->num_ports_in_engine * \ - qed_device_num_engines((_p_hwfn)->cdev))) - struct qed_mcp_info { /* List for mailbox commands which were sent and wait for a response */ struct list_head cmd_list; @@ -1148,6 +1144,16 @@ void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock, struct qed_resc_unlock_params *p_unlock, enum qed_resc_lock resource, bool b_is_permanent); + +/** + * @brief - Return whether management firmware support smart AN + * + * @param p_hwfn + * + * @return bool - true if feature is supported. 
+ */ +bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn); + /** * @brief Learn of supported MFW features; To be done during early init * diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c index 5a90d69dc2f8..1302b308bd87 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ptp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.c @@ -47,7 +47,7 @@ static enum qed_resc_lock qed_ptcdev_to_resc(struct qed_hwfn *p_hwfn) { - switch (qed_device_get_port_id(p_hwfn->cdev)) { + switch (MFW_PORT(p_hwfn)) { case 0: return QED_RESC_LOCK_PTP_PORT0; case 1: diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 4179c9013fc6..96ab77ae6af5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -382,6 +382,7 @@ void qed_consq_setup(struct qed_hwfn *p_hwfn); * @param p_hwfn */ void qed_consq_free(struct qed_hwfn *p_hwfn); +int qed_spq_pend_post(struct qed_hwfn *p_hwfn); /** * @file diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 888274fa208b..5a495fda9e9d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -604,6 +604,9 @@ int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn) p_ent->ramrod.pf_update.update_mf_vlan_flag = true; p_ent->ramrod.pf_update.mf_vlan = cpu_to_le16(p_hwfn->hw_info.ovlan); + if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) + p_ent->ramrod.pf_update.mf_vlan |= + cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13)); return qed_spq_post(p_hwfn, p_ent, NULL); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index 3e0f7c46bb1b..79b311b86f66 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -397,6 +397,11 @@ int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie) qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain)); + /* Attempt to post pending requests */ + spin_lock_bh(&p_hwfn->p_spq->lock); + rc = qed_spq_pend_post(p_hwfn); + spin_unlock_bh(&p_hwfn->p_spq->lock); + return rc; } @@ -767,7 +772,7 @@ static int qed_spq_post_list(struct qed_hwfn *p_hwfn, return 0; } -static int qed_spq_pend_post(struct qed_hwfn *p_hwfn) +int qed_spq_pend_post(struct qed_hwfn *p_hwfn) { struct qed_spq *p_spq = p_hwfn->p_spq; struct qed_spq_entry *p_ent = NULL; @@ -927,7 +932,6 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent = NULL; struct qed_spq_entry *tmp; struct qed_spq_entry *found = NULL; - int rc; if (!p_hwfn) return -EINVAL; @@ -985,12 +989,7 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, */ qed_spq_return_entry(p_hwfn, found); - /* Attempt to post pending requests */ - spin_lock_bh(&p_spq->lock); - rc = qed_spq_pend_post(p_hwfn); - spin_unlock_bh(&p_spq->lock); - - return rc; + return 0; } int qed_consq_alloc(struct qed_hwfn *p_hwfn) diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 843416404aeb..63a78162cfaf 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -56,7 +56,7 @@ #include <net/tc_act/tc_gact.h> #define QEDE_MAJOR_VERSION 8 -#define QEDE_MINOR_VERSION 33 +#define QEDE_MINOR_VERSION 37 #define QEDE_REVISION_VERSION 0 #define QEDE_ENGINEERING_VERSION 20 #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." 
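/* Illustrative sketch, not part of the patch: in the qed_sp_commands.c hunk
 * above, QED_MF_UFP_SPECIFIC packs the fixed traffic class into bits 15:13
 * of mf_vlan, which is where the 802.1Q TCI carries the PCP field.
 * Hypothetical standalone model of that packing:
 */
static unsigned short demo_tci(unsigned short vlan_id, unsigned char tc)
{
	/* low 12 bits: VLAN id (DEI left clear); top 3 bits: priority/TC */
	return (unsigned short)((vlan_id & 0x0fff) | ((tc & 0x7) << 13));
}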
\ @@ -497,6 +497,9 @@ struct qede_reload_args { /* Datapath functions definition */ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev); +u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev, + select_queue_fallback_t fallback); netdev_features_t qede_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features); diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 16331c6c6fa7..c6238083e898 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -186,11 +186,13 @@ static const struct { enum { QEDE_PRI_FLAG_CMT, + QEDE_PRI_FLAG_SMART_AN_SUPPORT, /* MFW supports SmartAN */ QEDE_PRI_FLAG_LEN, }; static const char qede_private_arr[QEDE_PRI_FLAG_LEN][ETH_GSTRING_LEN] = { "Coupled-Function", + "SmartAN capable", }; enum qede_ethtool_tests { @@ -404,8 +406,15 @@ static int qede_get_sset_count(struct net_device *dev, int stringset) static u32 qede_get_priv_flags(struct net_device *dev) { struct qede_dev *edev = netdev_priv(dev); + u32 flags = 0; - return (!!(edev->dev_info.common.num_hwfns > 1)) << QEDE_PRI_FLAG_CMT; + if (edev->dev_info.common.num_hwfns > 1) + flags |= BIT(QEDE_PRI_FLAG_CMT); + + if (edev->dev_info.common.smart_an) + flags |= BIT(QEDE_PRI_FLAG_SMART_AN_SUPPORT); + + return flags; } struct qede_link_mode_mapping { diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index b16ce7d93caf..add922b93d2c 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -1665,198 +1665,6 @@ static int qede_set_v6_tuple_to_profile(struct qede_dev *edev, return 0; } -static int qede_flow_spec_to_tuple_ipv4_common(struct qede_dev *edev, - struct qede_arfs_tuple *t, - struct ethtool_rx_flow_spec *fs) -{ - if ((fs->h_u.tcp_ip4_spec.ip4src & - fs->m_u.tcp_ip4_spec.ip4src) != fs->h_u.tcp_ip4_spec.ip4src) { - DP_INFO(edev, "Don't support IP-masks\n"); - return -EOPNOTSUPP; - } - - if ((fs->h_u.tcp_ip4_spec.ip4dst & - fs->m_u.tcp_ip4_spec.ip4dst) != fs->h_u.tcp_ip4_spec.ip4dst) { - DP_INFO(edev, "Don't support IP-masks\n"); - return -EOPNOTSUPP; - } - - if ((fs->h_u.tcp_ip4_spec.psrc & - fs->m_u.tcp_ip4_spec.psrc) != fs->h_u.tcp_ip4_spec.psrc) { - DP_INFO(edev, "Don't support port-masks\n"); - return -EOPNOTSUPP; - } - - if ((fs->h_u.tcp_ip4_spec.pdst & - fs->m_u.tcp_ip4_spec.pdst) != fs->h_u.tcp_ip4_spec.pdst) { - DP_INFO(edev, "Don't support port-masks\n"); - return -EOPNOTSUPP; - } - - if (fs->h_u.tcp_ip4_spec.tos) { - DP_INFO(edev, "Don't support tos\n"); - return -EOPNOTSUPP; - } - - t->eth_proto = htons(ETH_P_IP); - t->src_ipv4 = fs->h_u.tcp_ip4_spec.ip4src; - t->dst_ipv4 = fs->h_u.tcp_ip4_spec.ip4dst; - t->src_port = fs->h_u.tcp_ip4_spec.psrc; - t->dst_port = fs->h_u.tcp_ip4_spec.pdst; - - return qede_set_v4_tuple_to_profile(edev, t); -} - -static int qede_flow_spec_to_tuple_tcpv4(struct qede_dev *edev, - struct qede_arfs_tuple *t, - struct ethtool_rx_flow_spec *fs) -{ - t->ip_proto = IPPROTO_TCP; - - if (qede_flow_spec_to_tuple_ipv4_common(edev, t, fs)) - return -EINVAL; - - return 0; -} - -static int qede_flow_spec_to_tuple_udpv4(struct qede_dev *edev, - struct qede_arfs_tuple *t, - struct ethtool_rx_flow_spec *fs) -{ - t->ip_proto = IPPROTO_UDP; - - if (qede_flow_spec_to_tuple_ipv4_common(edev, t, fs)) - return -EINVAL; - - return 0; -} - -static int 
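/* Illustrative sketch, not part of the patch: qede_get_priv_flags() above
 * becomes an OR of one bit per capability instead of a shifted boolean,
 * which is what lets the SmartAN bit slot in. Standalone model of the flag
 * word, demo_* names hypothetical:
 */
enum { DEMO_FLAG_CMT, DEMO_FLAG_SMART_AN };

static unsigned int demo_priv_flags(int num_hwfns, int smart_an)
{
	unsigned int flags = 0;

	if (num_hwfns > 1)
		flags |= 1U << DEMO_FLAG_CMT;		/* "Coupled-Function" */
	if (smart_an)
		flags |= 1U << DEMO_FLAG_SMART_AN;	/* "SmartAN capable" */
	return flags;
}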
qede_flow_spec_to_tuple_ipv6_common(struct qede_dev *edev, - struct qede_arfs_tuple *t, - struct ethtool_rx_flow_spec *fs) -{ - struct in6_addr zero_addr; - - memset(&zero_addr, 0, sizeof(zero_addr)); - - if ((fs->h_u.tcp_ip6_spec.psrc & - fs->m_u.tcp_ip6_spec.psrc) != fs->h_u.tcp_ip6_spec.psrc) { - DP_INFO(edev, "Don't support port-masks\n"); - return -EOPNOTSUPP; - } - - if ((fs->h_u.tcp_ip6_spec.pdst & - fs->m_u.tcp_ip6_spec.pdst) != fs->h_u.tcp_ip6_spec.pdst) { - DP_INFO(edev, "Don't support port-masks\n"); - return -EOPNOTSUPP; - } - - if (fs->h_u.tcp_ip6_spec.tclass) { - DP_INFO(edev, "Don't support tclass\n"); - return -EOPNOTSUPP; - } - - t->eth_proto = htons(ETH_P_IPV6); - memcpy(&t->src_ipv6, &fs->h_u.tcp_ip6_spec.ip6src, - sizeof(struct in6_addr)); - memcpy(&t->dst_ipv6, &fs->h_u.tcp_ip6_spec.ip6dst, - sizeof(struct in6_addr)); - t->src_port = fs->h_u.tcp_ip6_spec.psrc; - t->dst_port = fs->h_u.tcp_ip6_spec.pdst; - - return qede_set_v6_tuple_to_profile(edev, t, &zero_addr); -} - -static int qede_flow_spec_to_tuple_tcpv6(struct qede_dev *edev, - struct qede_arfs_tuple *t, - struct ethtool_rx_flow_spec *fs) -{ - t->ip_proto = IPPROTO_TCP; - - if (qede_flow_spec_to_tuple_ipv6_common(edev, t, fs)) - return -EINVAL; - - return 0; -} - -static int qede_flow_spec_to_tuple_udpv6(struct qede_dev *edev, - struct qede_arfs_tuple *t, - struct ethtool_rx_flow_spec *fs) -{ - t->ip_proto = IPPROTO_UDP; - - if (qede_flow_spec_to_tuple_ipv6_common(edev, t, fs)) - return -EINVAL; - - return 0; -} - -static int qede_flow_spec_to_tuple(struct qede_dev *edev, - struct qede_arfs_tuple *t, - struct ethtool_rx_flow_spec *fs) -{ - memset(t, 0, sizeof(*t)); - - if (qede_flow_spec_validate_unused(edev, fs)) - return -EOPNOTSUPP; - - switch ((fs->flow_type & ~FLOW_EXT)) { - case TCP_V4_FLOW: - return qede_flow_spec_to_tuple_tcpv4(edev, t, fs); - case UDP_V4_FLOW: - return qede_flow_spec_to_tuple_udpv4(edev, t, fs); - case TCP_V6_FLOW: - return qede_flow_spec_to_tuple_tcpv6(edev, t, fs); - case UDP_V6_FLOW: - return qede_flow_spec_to_tuple_udpv6(edev, t, fs); - default: - DP_VERBOSE(edev, NETIF_MSG_IFUP, - "Can't support flow of type %08x\n", fs->flow_type); - return -EOPNOTSUPP; - } - - return 0; -} - -static int qede_flow_spec_validate(struct qede_dev *edev, - struct ethtool_rx_flow_spec *fs, - struct qede_arfs_tuple *t) -{ - if (fs->location >= QEDE_RFS_MAX_FLTR) { - DP_INFO(edev, "Location out-of-bounds\n"); - return -EINVAL; - } - - /* Check location isn't already in use */ - if (test_bit(fs->location, edev->arfs->arfs_fltr_bmap)) { - DP_INFO(edev, "Location already in use\n"); - return -EINVAL; - } - - /* Check if the filtering-mode could support the filter */ - if (edev->arfs->filter_count && - edev->arfs->mode != t->mode) { - DP_INFO(edev, - "flow_spec would require filtering mode %08x, but %08x is configured\n", - t->mode, edev->arfs->filter_count); - return -EINVAL; - } - - /* If drop requested then no need to validate other data */ - if (fs->ring_cookie == RX_CLS_FLOW_DISC) - return 0; - - if (ethtool_get_flow_spec_ring_vf(fs->ring_cookie)) - return 0; - - if (fs->ring_cookie >= QEDE_RSS_COUNT(edev)) { - DP_INFO(edev, "Queue out-of-bounds\n"); - return -EINVAL; - } - - return 0; -} - /* Must be called while qede lock is held */ static struct qede_arfs_fltr_node * qede_flow_find_fltr(struct qede_dev *edev, struct qede_arfs_tuple *t) @@ -1896,72 +1704,6 @@ static void qede_flow_set_destination(struct qede_dev *edev, "Configuring N-tuple for VF 0x%02x\n", n->vfid - 1); } -int qede_add_cls_rule(struct 
qede_dev *edev, struct ethtool_rxnfc *info) -{ - struct ethtool_rx_flow_spec *fsp = &info->fs; - struct qede_arfs_fltr_node *n; - struct qede_arfs_tuple t; - int min_hlen, rc; - - __qede_lock(edev); - - if (!edev->arfs) { - rc = -EPERM; - goto unlock; - } - - /* Translate the flow specification into something fittign our DB */ - rc = qede_flow_spec_to_tuple(edev, &t, fsp); - if (rc) - goto unlock; - - /* Make sure location is valid and filter isn't already set */ - rc = qede_flow_spec_validate(edev, fsp, &t); - if (rc) - goto unlock; - - if (qede_flow_find_fltr(edev, &t)) { - rc = -EINVAL; - goto unlock; - } - - n = kzalloc(sizeof(*n), GFP_KERNEL); - if (!n) { - rc = -ENOMEM; - goto unlock; - } - - min_hlen = qede_flow_get_min_header_size(&t); - n->data = kzalloc(min_hlen, GFP_KERNEL); - if (!n->data) { - kfree(n); - rc = -ENOMEM; - goto unlock; - } - - n->sw_id = fsp->location; - set_bit(n->sw_id, edev->arfs->arfs_fltr_bmap); - n->buf_len = min_hlen; - - memcpy(&n->tuple, &t, sizeof(n->tuple)); - - qede_flow_set_destination(edev, n, fsp); - - /* Build a minimal header according to the flow */ - n->tuple.build_hdr(&n->tuple, n->data); - - rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0); - if (rc) - goto unlock; - - qede_configure_arfs_fltr(edev, n, n->rxq_id, true); - rc = qede_poll_arfs_filter_config(edev, n); -unlock: - __qede_unlock(edev); - - return rc; -} - int qede_delete_flow_filter(struct qede_dev *edev, u64 cookie) { struct qede_arfs_fltr_node *fltr = NULL; @@ -2004,190 +1746,172 @@ unlock: } static int qede_parse_actions(struct qede_dev *edev, - struct tcf_exts *exts) + struct flow_action *flow_action) { - int rc = -EINVAL, num_act = 0, i; - const struct tc_action *a; - bool is_drop = false; + const struct flow_action_entry *act; + int i; - if (!tcf_exts_has_actions(exts)) { - DP_NOTICE(edev, "No tc actions received\n"); - return rc; + if (!flow_action_has_entries(flow_action)) { + DP_NOTICE(edev, "No actions received\n"); + return -EINVAL; } - tcf_exts_for_each_action(i, a, exts) { - num_act++; + flow_action_for_each(i, act, flow_action) { + switch (act->id) { + case FLOW_ACTION_DROP: + break; + case FLOW_ACTION_QUEUE: + if (act->queue.vf) + break; - if (is_tcf_gact_shot(a)) - is_drop = true; + if (act->queue.index >= QEDE_RSS_COUNT(edev)) { + DP_INFO(edev, "Queue out-of-bounds\n"); + return -EINVAL; + } + break; + default: + return -EINVAL; + } } - if (num_act == 1 && is_drop) - return 0; - - return rc; + return 0; } static int -qede_tc_parse_ports(struct qede_dev *edev, - struct tc_cls_flower_offload *f, - struct qede_arfs_tuple *t) -{ - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) { - struct flow_dissector_key_ports *key, *mask; - - key = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_PORTS, - f->key); - mask = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_PORTS, - f->mask); - - if ((key->src && mask->src != U16_MAX) || - (key->dst && mask->dst != U16_MAX)) { +qede_flow_parse_ports(struct qede_dev *edev, struct flow_rule *rule, + struct qede_arfs_tuple *t) +{ + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports match; + + flow_rule_match_ports(rule, &match); + if ((match.key->src && match.mask->src != U16_MAX) || + (match.key->dst && match.mask->dst != U16_MAX)) { DP_NOTICE(edev, "Do not support ports masks\n"); return -EINVAL; } - t->src_port = key->src; - t->dst_port = key->dst; + t->src_port = match.key->src; + t->dst_port = match.key->dst; } return 0; } static int 
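/* Illustrative sketch, not part of the patch: the rewritten
 * qede_parse_actions() above walks the generic flow_action table and
 * accepts only DROP, or QUEUE with an in-range index (VF-directed queues
 * are left for later validation). Hypothetical standalone model:
 */
enum demo_action_id { DEMO_ACT_DROP, DEMO_ACT_QUEUE, DEMO_ACT_OTHER };

struct demo_action {
	enum demo_action_id id;
	unsigned int queue_index;	/* used when id == DEMO_ACT_QUEUE */
	unsigned int queue_vf;		/* non-zero: queue lives on a VF */
};

static int demo_parse_actions(const struct demo_action *acts, int n,
			      unsigned int rss_count)
{
	if (!n)
		return -1;			/* no actions received */

	for (int i = 0; i < n; i++) {
		switch (acts[i].id) {
		case DEMO_ACT_DROP:
			break;
		case DEMO_ACT_QUEUE:
			if (acts[i].queue_vf)
				break;		/* VF queue: checked elsewhere */
			if (acts[i].queue_index >= rss_count)
				return -1;	/* queue out-of-bounds */
			break;
		default:
			return -1;		/* unsupported action */
		}
	}
	return 0;
}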
-qede_tc_parse_v6_common(struct qede_dev *edev, - struct tc_cls_flower_offload *f, - struct qede_arfs_tuple *t) +qede_flow_parse_v6_common(struct qede_dev *edev, struct flow_rule *rule, + struct qede_arfs_tuple *t) { struct in6_addr zero_addr, addr; memset(&zero_addr, 0, sizeof(addr)); memset(&addr, 0xff, sizeof(addr)); - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { - struct flow_dissector_key_ipv6_addrs *key, *mask; - - key = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV6_ADDRS, - f->key); - mask = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV6_ADDRS, - f->mask); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { + struct flow_match_ipv6_addrs match; - if ((memcmp(&key->src, &zero_addr, sizeof(addr)) && - memcmp(&mask->src, &addr, sizeof(addr))) || - (memcmp(&key->dst, &zero_addr, sizeof(addr)) && - memcmp(&mask->dst, &addr, sizeof(addr)))) { + flow_rule_match_ipv6_addrs(rule, &match); + if ((memcmp(&match.key->src, &zero_addr, sizeof(addr)) && + memcmp(&match.mask->src, &addr, sizeof(addr))) || + (memcmp(&match.key->dst, &zero_addr, sizeof(addr)) && + memcmp(&match.mask->dst, &addr, sizeof(addr)))) { DP_NOTICE(edev, "Do not support IPv6 address prefix/mask\n"); return -EINVAL; } - memcpy(&t->src_ipv6, &key->src, sizeof(addr)); - memcpy(&t->dst_ipv6, &key->dst, sizeof(addr)); + memcpy(&t->src_ipv6, &match.key->src, sizeof(addr)); + memcpy(&t->dst_ipv6, &match.key->dst, sizeof(addr)); } - if (qede_tc_parse_ports(edev, f, t)) + if (qede_flow_parse_ports(edev, rule, t)) return -EINVAL; return qede_set_v6_tuple_to_profile(edev, t, &zero_addr); } static int -qede_tc_parse_v4_common(struct qede_dev *edev, - struct tc_cls_flower_offload *f, +qede_flow_parse_v4_common(struct qede_dev *edev, struct flow_rule *rule, struct qede_arfs_tuple *t) { - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { - struct flow_dissector_key_ipv4_addrs *key, *mask; - - key = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV4_ADDRS, - f->key); - mask = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV4_ADDRS, - f->mask); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { + struct flow_match_ipv4_addrs match; - if ((key->src && mask->src != U32_MAX) || - (key->dst && mask->dst != U32_MAX)) { + flow_rule_match_ipv4_addrs(rule, &match); + if ((match.key->src && match.mask->src != U32_MAX) || + (match.key->dst && match.mask->dst != U32_MAX)) { DP_NOTICE(edev, "Do not support ipv4 prefix/masks\n"); return -EINVAL; } - t->src_ipv4 = key->src; - t->dst_ipv4 = key->dst; + t->src_ipv4 = match.key->src; + t->dst_ipv4 = match.key->dst; } - if (qede_tc_parse_ports(edev, f, t)) + if (qede_flow_parse_ports(edev, rule, t)) return -EINVAL; return qede_set_v4_tuple_to_profile(edev, t); } static int -qede_tc_parse_tcp_v6(struct qede_dev *edev, - struct tc_cls_flower_offload *f, +qede_flow_parse_tcp_v6(struct qede_dev *edev, struct flow_rule *rule, struct qede_arfs_tuple *tuple) { tuple->ip_proto = IPPROTO_TCP; tuple->eth_proto = htons(ETH_P_IPV6); - return qede_tc_parse_v6_common(edev, f, tuple); + return qede_flow_parse_v6_common(edev, rule, tuple); } static int -qede_tc_parse_tcp_v4(struct qede_dev *edev, - struct tc_cls_flower_offload *f, +qede_flow_parse_tcp_v4(struct qede_dev *edev, struct flow_rule *rule, struct qede_arfs_tuple *tuple) { tuple->ip_proto = IPPROTO_TCP; tuple->eth_proto = htons(ETH_P_IP); - return qede_tc_parse_v4_common(edev, f, tuple); + return 
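/* Illustrative sketch, not part of the patch: the flow_rule match API hands
 * back key/mask pairs, and the parser keeps rejecting partial masks; a set
 * key must be matched exactly. Standalone IPv4 form of the check above:
 */
#include <stdint.h>

static int demo_ipv4_match_ok(uint32_t key_src, uint32_t mask_src,
			      uint32_t key_dst, uint32_t mask_dst)
{
	if ((key_src && mask_src != UINT32_MAX) ||
	    (key_dst && mask_dst != UINT32_MAX))
		return 0;	/* prefixes/masks are not supported */
	return 1;
}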
qede_flow_parse_v4_common(edev, rule, tuple); } static int -qede_tc_parse_udp_v6(struct qede_dev *edev, - struct tc_cls_flower_offload *f, +qede_flow_parse_udp_v6(struct qede_dev *edev, struct flow_rule *rule, struct qede_arfs_tuple *tuple) { tuple->ip_proto = IPPROTO_UDP; tuple->eth_proto = htons(ETH_P_IPV6); - return qede_tc_parse_v6_common(edev, f, tuple); + return qede_flow_parse_v6_common(edev, rule, tuple); } static int -qede_tc_parse_udp_v4(struct qede_dev *edev, - struct tc_cls_flower_offload *f, +qede_flow_parse_udp_v4(struct qede_dev *edev, struct flow_rule *rule, struct qede_arfs_tuple *tuple) { tuple->ip_proto = IPPROTO_UDP; tuple->eth_proto = htons(ETH_P_IP); - return qede_tc_parse_v4_common(edev, f, tuple); + return qede_flow_parse_v4_common(edev, rule, tuple); } static int -qede_parse_flower_attr(struct qede_dev *edev, __be16 proto, - struct tc_cls_flower_offload *f, - struct qede_arfs_tuple *tuple) +qede_parse_flow_attr(struct qede_dev *edev, __be16 proto, + struct flow_rule *rule, struct qede_arfs_tuple *tuple) { + struct flow_dissector *dissector = rule->match.dissector; int rc = -EINVAL; u8 ip_proto = 0; memset(tuple, 0, sizeof(*tuple)); - if (f->dissector->used_keys & + if (dissector->used_keys & ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | BIT(FLOW_DISSECTOR_KEY_BASIC) | BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | BIT(FLOW_DISSECTOR_KEY_PORTS))) { DP_NOTICE(edev, "Unsupported key set:0x%x\n", - f->dissector->used_keys); + dissector->used_keys); return -EOPNOTSUPP; } @@ -2197,25 +1921,23 @@ qede_parse_flower_attr(struct qede_dev *edev, __be16 proto, return -EPROTONOSUPPORT; } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { - struct flow_dissector_key_basic *key; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; - key = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_BASIC, - f->key); - ip_proto = key->ip_proto; + flow_rule_match_basic(rule, &match); + ip_proto = match.key->ip_proto; } if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IP)) - rc = qede_tc_parse_tcp_v4(edev, f, tuple); + rc = qede_flow_parse_tcp_v4(edev, rule, tuple); else if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IPV6)) - rc = qede_tc_parse_tcp_v6(edev, f, tuple); + rc = qede_flow_parse_tcp_v6(edev, rule, tuple); else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IP)) - rc = qede_tc_parse_udp_v4(edev, f, tuple); + rc = qede_flow_parse_udp_v4(edev, rule, tuple); else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IPV6)) - rc = qede_tc_parse_udp_v6(edev, f, tuple); + rc = qede_flow_parse_udp_v6(edev, rule, tuple); else - DP_NOTICE(edev, "Invalid tc protocol request\n"); + DP_NOTICE(edev, "Invalid protocol request\n"); return rc; } @@ -2235,7 +1957,7 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto, } /* parse flower attribute and prepare filter */ - if (qede_parse_flower_attr(edev, proto, f, &t)) + if (qede_parse_flow_attr(edev, proto, f->rule, &t)) goto unlock; /* Validate profile mode and number of filters */ @@ -2248,7 +1970,7 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto, } /* parse tc actions and get the vf_id */ - if (qede_parse_actions(edev, f->exts)) + if (qede_parse_actions(edev, &f->rule->action)) goto unlock; if (qede_flow_find_fltr(edev, &t)) { @@ -2290,3 +2012,141 @@ unlock: __qede_unlock(edev); return rc; } + +static int qede_flow_spec_validate(struct qede_dev *edev, + struct flow_action *flow_action, + struct qede_arfs_tuple *t, + 
__u32 location) +{ + if (location >= QEDE_RFS_MAX_FLTR) { + DP_INFO(edev, "Location out-of-bounds\n"); + return -EINVAL; + } + + /* Check location isn't already in use */ + if (test_bit(location, edev->arfs->arfs_fltr_bmap)) { + DP_INFO(edev, "Location already in use\n"); + return -EINVAL; + } + + /* Check if the filtering-mode could support the filter */ + if (edev->arfs->filter_count && + edev->arfs->mode != t->mode) { + DP_INFO(edev, + "flow_spec would require filtering mode %08x, but %08x is configured\n", + t->mode, edev->arfs->filter_count); + return -EINVAL; + } + + if (qede_parse_actions(edev, flow_action)) + return -EINVAL; + + return 0; +} + +static int qede_flow_spec_to_rule(struct qede_dev *edev, + struct qede_arfs_tuple *t, + struct ethtool_rx_flow_spec *fs) +{ + struct ethtool_rx_flow_spec_input input = {}; + struct ethtool_rx_flow_rule *flow; + __be16 proto; + int err = 0; + + if (qede_flow_spec_validate_unused(edev, fs)) + return -EOPNOTSUPP; + + switch ((fs->flow_type & ~FLOW_EXT)) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + proto = htons(ETH_P_IP); + break; + case TCP_V6_FLOW: + case UDP_V6_FLOW: + proto = htons(ETH_P_IPV6); + break; + default: + DP_VERBOSE(edev, NETIF_MSG_IFUP, + "Can't support flow of type %08x\n", fs->flow_type); + return -EOPNOTSUPP; + } + + input.fs = fs; + flow = ethtool_rx_flow_rule_create(&input); + if (IS_ERR(flow)) + return PTR_ERR(flow); + + if (qede_parse_flow_attr(edev, proto, flow->rule, t)) { + err = -EINVAL; + goto err_out; + } + + /* Make sure location is valid and filter isn't already set */ + err = qede_flow_spec_validate(edev, &flow->rule->action, t, + fs->location); +err_out: + ethtool_rx_flow_rule_destroy(flow); + return err; + +} + +int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info) +{ + struct ethtool_rx_flow_spec *fsp = &info->fs; + struct qede_arfs_fltr_node *n; + struct qede_arfs_tuple t; + int min_hlen, rc; + + __qede_lock(edev); + + if (!edev->arfs) { + rc = -EPERM; + goto unlock; + } + + /* Translate the flow specification into something fittign our DB */ + rc = qede_flow_spec_to_rule(edev, &t, fsp); + if (rc) + goto unlock; + + if (qede_flow_find_fltr(edev, &t)) { + rc = -EINVAL; + goto unlock; + } + + n = kzalloc(sizeof(*n), GFP_KERNEL); + if (!n) { + rc = -ENOMEM; + goto unlock; + } + + min_hlen = qede_flow_get_min_header_size(&t); + n->data = kzalloc(min_hlen, GFP_KERNEL); + if (!n->data) { + kfree(n); + rc = -ENOMEM; + goto unlock; + } + + n->sw_id = fsp->location; + set_bit(n->sw_id, edev->arfs->arfs_fltr_bmap); + n->buf_len = min_hlen; + + memcpy(&n->tuple, &t, sizeof(n->tuple)); + + qede_flow_set_destination(edev, n, fsp); + + /* Build a minimal header according to the flow */ + n->tuple.build_hdr(&n->tuple, n->data); + + rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0); + if (rc) + goto unlock; + + qede_configure_arfs_fltr(edev, n, n->rxq_id, true); + rc = qede_poll_arfs_filter_config(edev, n); +unlock: + __qede_unlock(edev); + + return rc; +} diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index bdf816fe5a16..31b046e24565 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -1695,6 +1695,19 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev) return NETDEV_TX_OK; } +u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev, + select_queue_fallback_t fallback) +{ + struct qede_dev *edev = netdev_priv(dev); + int total_txq; 
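/* Illustrative sketch, not part of the patch: the relocated
 * qede_flow_spec_validate() below still checks the ethtool 'location' for
 * bounds and for collision against the in-use bitmap before anything else.
 * Hypothetical standalone model with an open-coded test_bit():
 */
#include <limits.h>

#define DEMO_MAX_FLTR		256
#define DEMO_BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)

static int demo_location_ok(unsigned int loc, const unsigned long *bmap)
{
	if (loc >= DEMO_MAX_FLTR)
		return 0;	/* location out-of-bounds */
	return !(bmap[loc / DEMO_BITS_PER_LONG] &
		 (1UL << (loc % DEMO_BITS_PER_LONG)));	/* 0 if in use */
}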
+ + total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc; + + return QEDE_TSS_COUNT(edev) ? + fallback(dev, skb, NULL) % total_txq : 0; +} + /* 8B udp header + 8B base tunnel header + 32B option length */ #define QEDE_MAX_TUN_HDR_LEN 48 diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 6b4d96635238..02a97c659e29 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -621,6 +621,7 @@ static const struct net_device_ops qede_netdev_ops = { .ndo_open = qede_open, .ndo_stop = qede_close, .ndo_start_xmit = qede_start_xmit, + .ndo_select_queue = qede_select_queue, .ndo_set_rx_mode = qede_set_rx_mode, .ndo_set_mac_address = qede_set_mac_addr, .ndo_validate_addr = eth_validate_addr, @@ -656,6 +657,7 @@ static const struct net_device_ops qede_netdev_vf_ops = { .ndo_open = qede_open, .ndo_stop = qede_close, .ndo_start_xmit = qede_start_xmit, + .ndo_select_queue = qede_select_queue, .ndo_set_rx_mode = qede_set_rx_mode, .ndo_set_mac_address = qede_set_mac_addr, .ndo_validate_addr = eth_validate_addr, @@ -674,6 +676,7 @@ static const struct net_device_ops qede_netdev_vf_xdp_ops = { .ndo_open = qede_open, .ndo_stop = qede_close, .ndo_start_xmit = qede_start_xmit, + .ndo_select_queue = qede_select_queue, .ndo_set_rx_mode = qede_set_rx_mode, .ndo_set_mac_address = qede_set_mac_addr, .ndo_validate_addr = eth_validate_addr, diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c index 5edbd532127d..a6886cc5654c 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c @@ -249,8 +249,8 @@ static void ql_update_stats(struct ql_adapter *qdev) spin_lock(&qdev->stats_lock); if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) { - netif_err(qdev, drv, qdev->ndev, - "Couldn't get xgmac sem.\n"); + netif_err(qdev, drv, qdev->ndev, + "Couldn't get xgmac sem.\n"); goto quit; } /* diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 059ba9429e51..096515c27263 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -1159,10 +1159,10 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) map = lbq_desc->p.pg_chunk.map + lbq_desc->p.pg_chunk.offset; - dma_unmap_addr_set(lbq_desc, mapaddr, map); + dma_unmap_addr_set(lbq_desc, mapaddr, map); dma_unmap_len_set(lbq_desc, maplen, rx_ring->lbq_buf_size); - *lbq_desc->addr = cpu_to_le64(map); + *lbq_desc->addr = cpu_to_le64(map); pci_dma_sync_single_for_device(qdev->pdev, map, rx_ring->lbq_buf_size, diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index 8d790313ee3d..20d2400ad300 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c @@ -1204,7 +1204,7 @@ void emac_mac_tx_process(struct emac_adapter *adpt, struct emac_tx_queue *tx_q) if (tpbuf->skb) { pkts_compl++; bytes_compl += tpbuf->skb->len; - dev_kfree_skb_irq(tpbuf->skb); + dev_consume_skb_irq(tpbuf->skb); tpbuf->skb = NULL; } diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index e8a112149a62..c29dde064078 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -6192,7 +6192,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, struct device *d = tp_to_dev(tp); 
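/* Illustrative sketch, not part of the patch: with num_tc TX queues behind
 * every RSS queue, qede_select_queue() above folds the core's fallback pick
 * back into range with a modulo over the enlarged TX queue count.
 * Standalone model of the arithmetic:
 */
static unsigned short demo_select_queue(unsigned short fallback_pick,
					unsigned short tss_count,
					unsigned char num_tc)
{
	unsigned short total_txq = tss_count * num_tc;

	return tss_count ? fallback_pick % total_txq : 0;
}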
dma_addr_t mapping; u32 opts[2], len; - bool stop_queue; int frags; if (unlikely(!rtl_tx_slots_avail(tp, skb_shinfo(skb)->nr_frags))) { @@ -6234,6 +6233,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, txd->opts2 = cpu_to_le32(opts[1]); + netdev_sent_queue(dev, skb->len); + skb_tx_timestamp(skb); /* Force memory writes to complete before releasing descriptor */ @@ -6246,14 +6247,14 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, tp->cur_tx += frags + 1; - stop_queue = !rtl_tx_slots_avail(tp, MAX_SKB_FRAGS); - if (unlikely(stop_queue)) - netif_stop_queue(dev); - - if (__netdev_sent_queue(dev, skb->len, skb->xmit_more)) - RTL_W8(tp, TxPoll, NPQ); + RTL_W8(tp, TxPoll, NPQ); - if (unlikely(stop_queue)) { + if (!rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) { + /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must + * not miss a ring update when it notices a stopped queue. + */ + smp_wmb(); + netif_stop_queue(dev); /* Sync with rtl_tx: * - publish queue status and cur_tx ring index (write barrier) * - refresh dirty_tx ring index (read barrier). @@ -7110,6 +7111,30 @@ static int rtl_alloc_irq(struct rtl8169_private *tp) return pci_alloc_irq_vectors(tp->pci_dev, 1, 1, flags); } +static void rtl_read_mac_address(struct rtl8169_private *tp, + u8 mac_addr[ETH_ALEN]) +{ + u32 value; + + /* Get MAC address */ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_35 ... RTL_GIGA_MAC_VER_38: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: + value = rtl_eri_read(tp, 0xe0, ERIAR_EXGMAC); + mac_addr[0] = (value >> 0) & 0xff; + mac_addr[1] = (value >> 8) & 0xff; + mac_addr[2] = (value >> 16) & 0xff; + mac_addr[3] = (value >> 24) & 0xff; + + value = rtl_eri_read(tp, 0xe4, ERIAR_EXGMAC); + mac_addr[4] = (value >> 0) & 0xff; + mac_addr[5] = (value >> 8) & 0xff; + break; + default: + break; + } +} + DECLARE_RTL_COND(rtl_link_list_ready_cond) { return RTL_R8(tp, MCU) & LINK_LIST_RDY; @@ -7301,6 +7326,8 @@ static int rtl_get_ether_clk(struct rtl8169_private *tp) static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; + /* align to u16 for is_valid_ether_addr() */ + u8 mac_addr[ETH_ALEN] __aligned(2) = {}; struct rtl8169_private *tp; struct net_device *dev; int chipset, region, i; @@ -7403,20 +7430,14 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) u64_stats_init(&tp->rx_stats.syncp); u64_stats_init(&tp->tx_stats.syncp); - /* Get MAC address */ - switch (tp->mac_version) { - u8 mac_addr[ETH_ALEN] __aligned(4); - case RTL_GIGA_MAC_VER_35 ... RTL_GIGA_MAC_VER_38: - case RTL_GIGA_MAC_VER_40 ... 
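/* Illustrative sketch, not part of the patch: the new rtl_read_mac_address()
 * below splits two little-endian ERI words (registers 0xe0/0xe4) into six
 * MAC bytes, replacing the old unaligned u32/u16 stores. Standalone
 * extraction of the same layout:
 */
#include <stdint.h>

static void demo_mac_from_words(uint32_t lo, uint32_t hi, uint8_t mac[6])
{
	for (int i = 0; i < 4; i++)
		mac[i] = (uint8_t)(lo >> (8 * i));	/* bytes 0..3 */
	for (int i = 0; i < 2; i++)
		mac[4 + i] = (uint8_t)(hi >> (8 * i));	/* bytes 4..5 */
}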
RTL_GIGA_MAC_VER_51: - *(u32 *)&mac_addr[0] = rtl_eri_read(tp, 0xe0, ERIAR_EXGMAC); - *(u16 *)&mac_addr[4] = rtl_eri_read(tp, 0xe4, ERIAR_EXGMAC); + /* get MAC address */ + rc = eth_platform_get_mac_address(&pdev->dev, mac_addr); + if (rc) + rtl_read_mac_address(tp, mac_addr); + + if (is_valid_ether_addr(mac_addr)) + rtl_rar_set(tp, mac_addr); - if (is_valid_ether_addr(mac_addr)) - rtl_rar_set(tp, mac_addr); - break; - default: - break; - } for (i = 0; i < ETH_ALEN; i++) dev->dev_addr[i] = RTL_R8(tp, MAC0 + i); diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h index 748fb12260a6..2b2e1c4f0dc3 100644 --- a/drivers/net/ethernet/rocker/rocker.h +++ b/drivers/net/ethernet/rocker/rocker.h @@ -109,8 +109,6 @@ struct rocker_world_ops { int (*port_attr_bridge_flags_set)(struct rocker_port *rocker_port, unsigned long brport_flags, struct switchdev_trans *trans); - int (*port_attr_bridge_flags_get)(const struct rocker_port *rocker_port, - unsigned long *p_brport_flags); int (*port_attr_bridge_flags_support_get)(const struct rocker_port * rocker_port, unsigned long * diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 62a205eba9f7..309a6bf9130c 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -1566,45 +1566,57 @@ static int rocker_world_port_attr_stp_state_set(struct rocker_port *rocker_port, } static int -rocker_world_port_attr_bridge_flags_set(struct rocker_port *rocker_port, - unsigned long brport_flags, - struct switchdev_trans *trans) +rocker_world_port_attr_bridge_flags_support_get(const struct rocker_port * + rocker_port, + unsigned long * + p_brport_flags_support) { struct rocker_world_ops *wops = rocker_port->rocker->wops; - if (!wops->port_attr_bridge_flags_set) + if (!wops->port_attr_bridge_flags_support_get) return -EOPNOTSUPP; - - if (switchdev_trans_ph_prepare(trans)) - return 0; - - return wops->port_attr_bridge_flags_set(rocker_port, brport_flags, - trans); + return wops->port_attr_bridge_flags_support_get(rocker_port, + p_brport_flags_support); } static int -rocker_world_port_attr_bridge_flags_get(const struct rocker_port *rocker_port, - unsigned long *p_brport_flags) +rocker_world_port_attr_pre_bridge_flags_set(struct rocker_port *rocker_port, + unsigned long brport_flags, + struct switchdev_trans *trans) { struct rocker_world_ops *wops = rocker_port->rocker->wops; + unsigned long brport_flags_s; + int err; - if (!wops->port_attr_bridge_flags_get) + if (!wops->port_attr_bridge_flags_set) return -EOPNOTSUPP; - return wops->port_attr_bridge_flags_get(rocker_port, p_brport_flags); + + err = rocker_world_port_attr_bridge_flags_support_get(rocker_port, + &brport_flags_s); + if (err) + return err; + + if (brport_flags & ~brport_flags_s) + return -EINVAL; + + return 0; } static int -rocker_world_port_attr_bridge_flags_support_get(const struct rocker_port * - rocker_port, - unsigned long * - p_brport_flags_support) +rocker_world_port_attr_bridge_flags_set(struct rocker_port *rocker_port, + unsigned long brport_flags, + struct switchdev_trans *trans) { struct rocker_world_ops *wops = rocker_port->rocker->wops; - if (!wops->port_attr_bridge_flags_support_get) + if (!wops->port_attr_bridge_flags_set) return -EOPNOTSUPP; - return wops->port_attr_bridge_flags_support_get(rocker_port, - p_brport_flags_support); + + if (switchdev_trans_ph_prepare(trans)) + return 0; + + return wops->port_attr_bridge_flags_set(rocker_port, brport_flags, + trans); } 
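/* Illustrative sketch, not part of the patch: rocker's new
 * PRE_BRIDGE_FLAGS prepare step above refuses any requested bridge-port
 * flag outside the world's supported mask, so the later commit phase can no
 * longer fail on an unsupported flag. The core test reduces to:
 */
static int demo_flags_supported(unsigned long requested,
				unsigned long supported)
{
	return (requested & ~supported) == 0;	/* 0 means -EINVAL */
}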
static int @@ -2026,6 +2038,18 @@ static void rocker_port_neigh_destroy(struct net_device *dev, err); } +static int rocker_port_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) +{ + const struct rocker_port *rocker_port = netdev_priv(dev); + const struct rocker *rocker = rocker_port->rocker; + + ppid->id_len = sizeof(rocker->hw.id); + memcpy(&ppid->id, &rocker->hw.id, ppid->id_len); + + return 0; +} + static const struct net_device_ops rocker_port_netdev_ops = { .ndo_open = rocker_port_open, .ndo_stop = rocker_port_stop, @@ -2035,39 +2059,13 @@ static const struct net_device_ops rocker_port_netdev_ops = { .ndo_get_phys_port_name = rocker_port_get_phys_port_name, .ndo_change_proto_down = rocker_port_change_proto_down, .ndo_neigh_destroy = rocker_port_neigh_destroy, + .ndo_get_port_parent_id = rocker_port_get_port_parent_id, }; /******************** * swdev interface ********************/ -static int rocker_port_attr_get(struct net_device *dev, - struct switchdev_attr *attr) -{ - const struct rocker_port *rocker_port = netdev_priv(dev); - const struct rocker *rocker = rocker_port->rocker; - int err = 0; - - switch (attr->id) { - case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: - attr->u.ppid.id_len = sizeof(rocker->hw.id); - memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len); - break; - case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: - err = rocker_world_port_attr_bridge_flags_get(rocker_port, - &attr->u.brport_flags); - break; - case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT: - err = rocker_world_port_attr_bridge_flags_support_get(rocker_port, - &attr->u.brport_flags_support); - break; - default: - return -EOPNOTSUPP; - } - - return err; -} - static int rocker_port_attr_set(struct net_device *dev, const struct switchdev_attr *attr, struct switchdev_trans *trans) @@ -2081,6 +2079,11 @@ static int rocker_port_attr_set(struct net_device *dev, attr->u.stp_state, trans); break; + case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS: + err = rocker_world_port_attr_pre_bridge_flags_set(rocker_port, + attr->u.brport_flags, + trans); + break; case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: err = rocker_world_port_attr_bridge_flags_set(rocker_port, attr->u.brport_flags, @@ -2140,7 +2143,6 @@ static int rocker_port_obj_del(struct net_device *dev, } static const struct switchdev_ops rocker_port_switchdev_ops = { - .switchdev_port_attr_get = rocker_port_attr_get, .switchdev_port_attr_set = rocker_port_attr_set, }; diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c index bea7895930f6..fa296a7c255d 100644 --- a/drivers/net/ethernet/rocker/rocker_ofdpa.c +++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c @@ -2512,16 +2512,6 @@ static int ofdpa_port_attr_bridge_flags_set(struct rocker_port *rocker_port, } static int -ofdpa_port_attr_bridge_flags_get(const struct rocker_port *rocker_port, - unsigned long *p_brport_flags) -{ - const struct ofdpa_port *ofdpa_port = rocker_port->wpriv; - - *p_brport_flags = ofdpa_port->brport_flags; - return 0; -} - -static int ofdpa_port_attr_bridge_flags_support_get(const struct rocker_port * rocker_port, unsigned long * @@ -2823,7 +2813,6 @@ struct rocker_world_ops rocker_ofdpa_ops = { .port_stop = ofdpa_port_stop, .port_attr_stp_state_set = ofdpa_port_attr_stp_state_set, .port_attr_bridge_flags_set = ofdpa_port_attr_bridge_flags_set, - .port_attr_bridge_flags_get = ofdpa_port_attr_bridge_flags_get, .port_attr_bridge_flags_support_get = ofdpa_port_attr_bridge_flags_support_get, .port_attr_bridge_ageing_time_set = 
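/* Illustrative sketch, not part of the patch: with switchdev_port_attr_get
 * gone, the parent-id query above becomes a plain ndo that copies the
 * switch id into the caller's buffer. Hypothetical standalone model of
 * that copy:
 */
#include <string.h>

#define DEMO_ID_MAX 32

struct demo_ppid {
	unsigned char id[DEMO_ID_MAX];
	size_t id_len;
};

static void demo_fill_parent_id(struct demo_ppid *ppid,
				const unsigned char *hw_id, size_t len)
{
	ppid->id_len = len;
	memcpy(ppid->id, hw_id, len);	/* models the rocker->hw.id copy */
}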
ofdpa_port_attr_bridge_ageing_time_set, .port_obj_vlan_add = ofdpa_port_obj_vlan_add, diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 6062e99fa69b..e888b479c596 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -6046,6 +6046,8 @@ static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = { { NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS, 0, 0, "sfc_dynamic_cfg_dflt" }, { NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS, 0, 0, "sfc_exp_rom_cfg_dflt" }, { NVRAM_PARTITION_TYPE_STATUS, 0, 0, "sfc_status" }, + { NVRAM_PARTITION_TYPE_BUNDLE, 0, 0, "sfc_bundle" }, + { NVRAM_PARTITION_TYPE_BUNDLE_METADATA, 0, 0, "sfc_bundle_metadata" }, }; #define EF10_NVRAM_PARTITION_COUNT ARRAY_SIZE(efx_ef10_nvram_types) @@ -6123,7 +6125,7 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, static int efx_ef10_mtd_probe(struct efx_nic *efx) { MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX); - DECLARE_BITMAP(found, EF10_NVRAM_PARTITION_COUNT); + DECLARE_BITMAP(found, EF10_NVRAM_PARTITION_COUNT) = { 0 }; struct efx_mcdi_mtd_partition *parts; size_t outlen, n_parts_total, i, n_parts; unsigned int type; diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 3643015a55cf..bc655ffc9e02 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -915,7 +915,7 @@ rollback: void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue) { - mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100)); + mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10)); } static bool efx_default_channel_want_txqs(struct efx_channel *channel) diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h index 3839eec783ea..20a5523bf9f3 100644 --- a/drivers/net/ethernet/sfc/mcdi_pcol.h +++ b/drivers/net/ethernet/sfc/mcdi_pcol.h @@ -6784,6 +6784,14 @@ * subset of the information stored in this partition. 
*/ #define NVRAM_PARTITION_TYPE_FRU_INFORMATION 0x1d00 +/* enum: Bundle image partition */ +#define NVRAM_PARTITION_TYPE_BUNDLE 0x1e00 +/* enum: Bundle metadata partition that holds additional information related to + * a bundle update in TLV format + */ +#define NVRAM_PARTITION_TYPE_BUNDLE_METADATA 0x1e01 +/* enum: Bundle update non-volatile log output partition */ +#define NVRAM_PARTITION_TYPE_BUNDLE_LOG 0x1e02 /* enum: Start of reserved value range (firmware may use for any purpose) */ #define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00 /* enum: End of reserved value range (firmware may use for any purpose) */ diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 396ff01298cd..8702ab44d80b 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -360,8 +360,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic) rc = efx_init_rx_buffers(rx_queue, atomic); if (unlikely(rc)) { /* Ensure that we don't leave the rx queue empty */ - if (rx_queue->added_count == rx_queue->removed_count) - efx_schedule_slow_fill(rx_queue); + efx_schedule_slow_fill(rx_queue); goto out; } } while ((space -= batch_size) >= batch_size); diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 22eb059086f7..06c8f282263f 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -471,7 +471,7 @@ static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, if (IS_ERR(segments)) return PTR_ERR(segments); - dev_kfree_skb_any(skb); + dev_consume_skb_any(skb); skb = segments; while (skb) { diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c index 3140999642ba..358e66b81926 100644 --- a/drivers/net/ethernet/sgi/ioc3-eth.c +++ b/drivers/net/ethernet/sgi/ioc3-eth.c @@ -666,7 +666,7 @@ static inline void ioc3_tx(struct net_device *dev) packets++; skb = ip->tx_skbs[o_entry]; bytes += skb->len; - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); ip->tx_skbs[o_entry] = NULL; o_entry = (o_entry + 1) & 127; /* Next */ diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c index 0e1b7e960b98..f1271402ca21 100644 --- a/drivers/net/ethernet/sgi/meth.c +++ b/drivers/net/ethernet/sgi/meth.c @@ -68,6 +68,8 @@ module_param(timeout, int, 0); * packets in and out, so there is place for a packet */ struct meth_private { + struct platform_device *pdev; + /* in-memory copy of MAC Control register */ u64 mac_ctrl; @@ -211,8 +213,8 @@ static void meth_check_link(struct net_device *dev) static int meth_init_tx_ring(struct meth_private *priv) { /* Init TX ring */ - priv->tx_ring = dma_alloc_coherent(NULL, TX_RING_BUFFER_SIZE, - &priv->tx_ring_dma, GFP_ATOMIC); + priv->tx_ring = dma_alloc_coherent(&priv->pdev->dev, + TX_RING_BUFFER_SIZE, &priv->tx_ring_dma, GFP_ATOMIC); if (!priv->tx_ring) return -ENOMEM; @@ -236,7 +238,7 @@ static int meth_init_rx_ring(struct meth_private *priv) priv->rx_ring[i]=(rx_packet*)(priv->rx_skbs[i]->head); /* I'll need to re-sync it after each RX */ priv->rx_ring_dmas[i] = - dma_map_single(NULL, priv->rx_ring[i], + dma_map_single(&priv->pdev->dev, priv->rx_ring[i], METH_RX_BUFF_SIZE, DMA_FROM_DEVICE); mace->eth.rx_fifo = priv->rx_ring_dmas[i]; } @@ -253,7 +255,7 @@ static void meth_free_tx_ring(struct meth_private *priv) dev_kfree_skb(priv->tx_skbs[i]); priv->tx_skbs[i] = NULL; } - dma_free_coherent(NULL, TX_RING_BUFFER_SIZE, priv->tx_ring, + dma_free_coherent(&priv->pdev->dev, TX_RING_BUFFER_SIZE, priv->tx_ring, 
priv->tx_ring_dma); } @@ -263,7 +265,7 @@ static void meth_free_rx_ring(struct meth_private *priv) int i; for (i = 0; i < RX_RING_ENTRIES; i++) { - dma_unmap_single(NULL, priv->rx_ring_dmas[i], + dma_unmap_single(&priv->pdev->dev, priv->rx_ring_dmas[i], METH_RX_BUFF_SIZE, DMA_FROM_DEVICE); priv->rx_ring[i] = 0; priv->rx_ring_dmas[i] = 0; @@ -393,7 +395,8 @@ static void meth_rx(struct net_device* dev, unsigned long int_status) fifo_rptr = (fifo_rptr - 1) & 0x0f; } while (priv->rx_write != fifo_rptr) { - dma_unmap_single(NULL, priv->rx_ring_dmas[priv->rx_write], + dma_unmap_single(&priv->pdev->dev, + priv->rx_ring_dmas[priv->rx_write], METH_RX_BUFF_SIZE, DMA_FROM_DEVICE); status = priv->rx_ring[priv->rx_write]->status.raw; #if MFE_DEBUG @@ -454,7 +457,8 @@ static void meth_rx(struct net_device* dev, unsigned long int_status) priv->rx_ring[priv->rx_write] = (rx_packet*)skb->head; priv->rx_ring[priv->rx_write]->status.raw = 0; priv->rx_ring_dmas[priv->rx_write] = - dma_map_single(NULL, priv->rx_ring[priv->rx_write], + dma_map_single(&priv->pdev->dev, + priv->rx_ring[priv->rx_write], METH_RX_BUFF_SIZE, DMA_FROM_DEVICE); mace->eth.rx_fifo = priv->rx_ring_dmas[priv->rx_write]; ADVANCE_RX_PTR(priv->rx_write); @@ -521,7 +525,7 @@ static void meth_tx_cleanup(struct net_device* dev, unsigned long int_status) DPRINTK("RPTR points us here, but packet not done?\n"); break; } - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); priv->tx_skbs[priv->tx_read] = NULL; priv->tx_ring[priv->tx_read].header.raw = 0; priv->tx_read = (priv->tx_read+1)&(TX_RING_ENTRIES-1); @@ -637,7 +641,7 @@ static void meth_tx_1page_prepare(struct meth_private *priv, } /* first page */ - catbuf = dma_map_single(NULL, buffer_data, buffer_len, + catbuf = dma_map_single(&priv->pdev->dev, buffer_data, buffer_len, DMA_TO_DEVICE); desc->data.cat_buf[0].form.start_addr = catbuf >> 3; desc->data.cat_buf[0].form.len = buffer_len - 1; @@ -663,12 +667,12 @@ static void meth_tx_2page_prepare(struct meth_private *priv, } /* first page */ - catbuf1 = dma_map_single(NULL, buffer1_data, buffer1_len, + catbuf1 = dma_map_single(&priv->pdev->dev, buffer1_data, buffer1_len, DMA_TO_DEVICE); desc->data.cat_buf[0].form.start_addr = catbuf1 >> 3; desc->data.cat_buf[0].form.len = buffer1_len - 1; /* second page */ - catbuf2 = dma_map_single(NULL, buffer2_data, buffer2_len, + catbuf2 = dma_map_single(&priv->pdev->dev, buffer2_data, buffer2_len, DMA_TO_DEVICE); desc->data.cat_buf[1].form.start_addr = catbuf2 >> 3; desc->data.cat_buf[1].form.len = buffer2_len - 1; @@ -840,6 +844,7 @@ static int meth_probe(struct platform_device *pdev) memcpy(dev->dev_addr, o2meth_eaddr, ETH_ALEN); priv = netdev_priv(dev); + priv->pdev = pdev; spin_lock_init(&priv->meth_lock); SET_NETDEV_DEV(dev, &pdev->dev); diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c index 808cf9816673..5b351beb78cb 100644 --- a/drivers/net/ethernet/sis/sis190.c +++ b/drivers/net/ethernet/sis/sis190.c @@ -714,7 +714,7 @@ static void sis190_tx_interrupt(struct net_device *dev, sis190_unmap_tx_skb(tp->pci_dev, skb, txd); tp->Tx_skbuff[entry] = NULL; - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); } if (tp->dirty_tx != dirty_tx) { diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index 4bb89f74742c..6073387511f8 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -1927,7 +1927,7 @@ static void sis900_finish_xmit (struct net_device *net_dev) pci_unmap_single(sis_priv->pci_dev, 
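/* Note on the dev_kfree_skb_irq() -> dev_consume_skb_irq() conversions in
 * the TX-completion paths of this merge (emac, ioc3, meth, sis190, sis900,
 * epic100): both variants free the skb from hard-IRQ context, but "consume"
 * marks the packet as successfully transmitted, so drop-monitoring tools
 * such as dropwatch or perf tracing on kfree_skb stop reporting healthy TX
 * completions as packet drops.
 */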
sis_priv->tx_ring[entry].bufptr, skb->len, PCI_DMA_TODEVICE); - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); sis_priv->tx_skbuff[entry] = NULL; sis_priv->tx_ring[entry].bufptr = 0; sis_priv->tx_ring[entry].cmdsts = 0; diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c index 15c62c160953..be47d864f8b9 100644 --- a/drivers/net/ethernet/smsc/epic100.c +++ b/drivers/net/ethernet/smsc/epic100.c @@ -1037,7 +1037,7 @@ static void epic_tx(struct net_device *dev, struct epic_private *ep) skb = ep->tx_skbuff[entry]; pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr, skb->len, PCI_DMA_TODEVICE); - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); ep->tx_skbuff[entry] = NULL; } diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c index 8355dfbb8ec3..b550e624500d 100644 --- a/drivers/net/ethernet/smsc/smc911x.c +++ b/drivers/net/ethernet/smsc/smc911x.c @@ -1188,7 +1188,7 @@ smc911x_tx_dma_irq(void *data) DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "TX DMA irq handler\n"); BUG_ON(skb == NULL); - dma_unmap_single(NULL, tx_dmabuf, tx_dmalen, DMA_TO_DEVICE); + dma_unmap_single(lp->dev, tx_dmabuf, tx_dmalen, DMA_TO_DEVICE); netif_trans_update(dev); dev_kfree_skb_irq(skb); lp->current_tx_skb = NULL; @@ -1219,7 +1219,7 @@ smc911x_rx_dma_irq(void *data) DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev, "RX DMA irq handler\n"); - dma_unmap_single(NULL, rx_dmabuf, rx_dmalen, DMA_FROM_DEVICE); + dma_unmap_single(lp->dev, rx_dmabuf, rx_dmalen, DMA_FROM_DEVICE); BUG_ON(skb == NULL); lp->current_rx_skb = NULL; PRINT_PKT(skb->data, skb->len); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index 20299f6f65fc..7fbb6a4dbf51 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -241,15 +241,18 @@ static inline void dwmac4_get_timestamp(void *desc, u32 ats, u64 *ts) static int dwmac4_rx_check_timestamp(void *desc) { struct dma_desc *p = (struct dma_desc *)desc; + unsigned int rdes0 = le32_to_cpu(p->des0); + unsigned int rdes1 = le32_to_cpu(p->des1); + unsigned int rdes3 = le32_to_cpu(p->des3); u32 own, ctxt; int ret = 1; - own = p->des3 & RDES3_OWN; - ctxt = ((p->des3 & RDES3_CONTEXT_DESCRIPTOR) + own = rdes3 & RDES3_OWN; + ctxt = ((rdes3 & RDES3_CONTEXT_DESCRIPTOR) >> RDES3_CONTEXT_DESCRIPTOR_SHIFT); if (likely(!own && ctxt)) { - if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff)) + if ((rdes0 == 0xffffffff) && (rdes1 == 0xffffffff)) /* Corrupted value */ ret = -EINVAL; else @@ -268,7 +271,7 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, void *next_desc, int ret = -EINVAL; /* Get the status from normal w/b descriptor */ - if (likely(p->des3 & TDES3_RS1V)) { + if (likely(le32_to_cpu(p->des3) & RDES3_RDES1_VALID)) { if (likely(le32_to_cpu(p->des1) & RDES1_TIMESTAMP_AVAILABLE)) { int i = 0; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c index 49f5687879df..545cb9c47433 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c @@ -124,9 +124,9 @@ void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan) int dwmac4_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x, u32 chan) { - int ret = 0; - u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan)); + u32 intr_en = readl(ioaddr + DMA_CHAN_INTR_ENA(chan)); + int 
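/* Illustrative sketch, not part of the patch: dwmac4 DMA descriptors are
 * little-endian in memory, so the timestamp check above now converts each
 * word with le32_to_cpu() before testing bits; a no-op on LE hosts, a byte
 * swap on BE hosts. Standalone model, assuming GCC/Clang builtins and the
 * usual OWN semantics (bit 31 of RDES3):
 */
#include <stdint.h>

static uint32_t demo_le32_to_cpu(uint32_t v)
{
#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
	return __builtin_bswap32(v);	/* big-endian host: swap */
#else
	return v;			/* little-endian host: as-is */
#endif
}

static int demo_rx_desc_is_hw_owned(uint32_t rdes3_le)
{
	return (demo_le32_to_cpu(rdes3_le) & (1U << 31)) != 0;
}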
ret = 0; /* ABNORMAL interrupts */ if (unlikely(intr_status & DMA_CHAN_STATUS_AIS)) { @@ -151,16 +151,11 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr, if (likely(intr_status & DMA_CHAN_STATUS_NIS)) { x->normal_irq_n++; if (likely(intr_status & DMA_CHAN_STATUS_RI)) { - u32 value; - - value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan)); - /* to schedule NAPI on real RIE event. */ - if (likely(value & DMA_CHAN_INTR_ENA_RIE)) { - x->rx_normal_irq_n++; - ret |= handle_rx; - } + x->rx_normal_irq_n++; + ret |= handle_rx; } - if (likely(intr_status & DMA_CHAN_STATUS_TI)) { + if (likely(intr_status & (DMA_CHAN_STATUS_TI | + DMA_CHAN_STATUS_TBU))) { x->tx_normal_irq_n++; ret |= handle_tx; } @@ -168,12 +163,7 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr, x->rx_early_irq++; } - /* Clear the interrupt by writing a logic 1 to the chanX interrupt - * status [21-0] expect reserved bits [5-3] - */ - writel((intr_status & 0x3fffc7), - ioaddr + DMA_CHAN_STATUS(chan)); - + writel(intr_status & intr_en, ioaddr + DMA_CHAN_STATUS(chan)); return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h index d6bb953685fa..37d5e6fe7473 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h @@ -193,9 +193,10 @@ #define XGMAC_AIE BIT(14) #define XGMAC_RBUE BIT(7) #define XGMAC_RIE BIT(6) +#define XGMAC_TBUE BIT(2) #define XGMAC_TIE BIT(0) #define XGMAC_DMA_INT_DEFAULT_EN (XGMAC_NIE | XGMAC_AIE | XGMAC_RBUE | \ - XGMAC_RIE | XGMAC_TIE) + XGMAC_RIE | XGMAC_TBUE | XGMAC_TIE) #define XGMAC_DMA_CH_Rx_WATCHDOG(x) (0x0000313c + (0x80 * (x))) #define XGMAC_RWT GENMASK(7, 0) #define XGMAC_DMA_CH_STATUS(x) (0x00003160 + (0x80 * (x))) @@ -204,6 +205,7 @@ #define XGMAC_FBE BIT(12) #define XGMAC_RBU BIT(7) #define XGMAC_RI BIT(6) +#define XGMAC_TBU BIT(2) #define XGMAC_TPS BIT(1) #define XGMAC_TI BIT(0) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c index c5e25580a43f..2ba712b48a89 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c @@ -283,12 +283,10 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr, x->normal_irq_n++; if (likely(intr_status & XGMAC_RI)) { - if (likely(intr_en & XGMAC_RIE)) { - x->rx_normal_irq_n++; - ret |= handle_rx; - } + x->rx_normal_irq_n++; + ret |= handle_rx; } - if (likely(intr_status & XGMAC_TI)) { + if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) { x->tx_normal_irq_n++; ret |= handle_tx; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index f9b42b0c20f4..dd95d959c1ce 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -79,11 +79,10 @@ struct stmmac_rx_queue { }; struct stmmac_channel { - struct napi_struct napi ____cacheline_aligned_in_smp; + struct napi_struct rx_napi ____cacheline_aligned_in_smp; + struct napi_struct tx_napi ____cacheline_aligned_in_smp; struct stmmac_priv *priv_data; u32 index; - int has_rx; - int has_tx; }; struct stmmac_tc_entry { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index d1f61c25d82b..3c749c327cbd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -696,33 +696,38 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev, struct 
ethtool_eee *edata) { struct stmmac_priv *priv = netdev_priv(dev); + int ret; - priv->eee_enabled = edata->eee_enabled; - - if (!priv->eee_enabled) + if (!edata->eee_enabled) { stmmac_disable_eee_mode(priv); - else { + } else { /* We are asking for enabling the EEE but it is safe * to verify all by invoking the eee_init function. * In case of failure it will return an error. */ - priv->eee_enabled = stmmac_eee_init(priv); - if (!priv->eee_enabled) + edata->eee_enabled = stmmac_eee_init(priv); + if (!edata->eee_enabled) return -EOPNOTSUPP; - - /* Do not change tx_lpi_timer in case of failure */ - priv->tx_lpi_timer = edata->tx_lpi_timer; } - return phy_ethtool_set_eee(dev->phydev, edata); + ret = phy_ethtool_set_eee(dev->phydev, edata); + if (ret) + return ret; + + priv->eee_enabled = edata->eee_enabled; + priv->tx_lpi_timer = edata->tx_lpi_timer; + return 0; } static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv) { unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); - if (!clk) - return 0; + if (!clk) { + clk = priv->plat->clk_ref_rate; + if (!clk) + return 0; + } return (usec * (clk / 1000000)) / 256; } @@ -731,8 +736,11 @@ static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv) { unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); - if (!clk) - return 0; + if (!clk) { + clk = priv->plat->clk_ref_rate; + if (!clk) + return 0; + } return (riwt * 256) / (clk / 1000000); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 517555bcfa44..e2a13ec2e30b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -155,7 +155,10 @@ static void stmmac_disable_all_queues(struct stmmac_priv *priv) for (queue = 0; queue < maxq; queue++) { struct stmmac_channel *ch = &priv->channel[queue]; - napi_disable(&ch->napi); + if (queue < rx_queues_cnt) + napi_disable(&ch->rx_napi); + if (queue < tx_queues_cnt) + napi_disable(&ch->tx_napi); } } @@ -173,7 +176,10 @@ static void stmmac_enable_all_queues(struct stmmac_priv *priv) for (queue = 0; queue < maxq; queue++) { struct stmmac_channel *ch = &priv->channel[queue]; - napi_enable(&ch->napi); + if (queue < rx_queues_cnt) + napi_enable(&ch->rx_napi); + if (queue < tx_queues_cnt) + napi_enable(&ch->tx_napi); } } @@ -597,12 +603,13 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: /* PTP v1, UDP, any kind of event packet */ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; - /* take time stamp for all event messages */ - if (xmac) - snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1; - else - snap_type_sel = PTP_TCR_SNAPTYPSEL_1; - + /* 'xmac' hardware can support Sync, Pdelay_Req and + * Pdelay_resp by setting bit14 and bits17/16 to 01 + * This leaves Delay_Req timestamps out. 
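The stmmac_usec2riwt()/stmmac_riwt2usec() hunks a little above add a fallback to plat->clk_ref_rate when clk_get_rate() on the CSR clock returns 0, so RX-watchdog coalescing keeps working on platforms that only describe a fixed reference rate. The conversion itself is unchanged: one RIWT unit is 256 CSR clock cycles. A standalone sketch of the arithmetic (the 250 MHz figure in the comment is only an example, not a driver value):

    /* Sketch of the usec <-> RIWT conversion used above. One RIWT unit
     * is 256 clock cycles; at 250 MHz that is ~1.024 us per unit, so
     * 100 us maps to (100 * 250) / 256 = ~97 units.
     */
    static unsigned int usec_to_riwt(unsigned int usec, unsigned long clk_hz)
    {
            return (usec * (clk_hz / 1000000)) / 256;
    }

    static unsigned int riwt_to_usec(unsigned int riwt, unsigned long clk_hz)
    {
            return (riwt * 256) / (clk_hz / 1000000);
    }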
+ * Enable all events *and* general purpose message + * timestamping + */ + snap_type_sel = PTP_TCR_SNAPTYPSEL_1; ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; break; @@ -633,10 +640,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; ptp_v2 = PTP_TCR_TSVER2ENA; /* take time stamp for all event messages */ - if (xmac) - snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1; - else - snap_type_sel = PTP_TCR_SNAPTYPSEL_1; + snap_type_sel = PTP_TCR_SNAPTYPSEL_1; ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; @@ -669,12 +673,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) /* PTP v2/802.AS1 any layer, any kind of event packet */ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; ptp_v2 = PTP_TCR_TSVER2ENA; - /* take time stamp for all event messages */ - if (xmac) - snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1; - else - snap_type_sel = PTP_TCR_SNAPTYPSEL_1; - + snap_type_sel = PTP_TCR_SNAPTYPSEL_1; ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; ptp_over_ethernet = PTP_TCR_TSIPENA; @@ -1962,6 +1961,10 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer)); } + /* We still have pending packets, let's call for a new scheduling */ + if (tx_q->dirty_tx != tx_q->cur_tx) + mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10)); + __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue)); return count; @@ -2052,23 +2055,15 @@ static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan) int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, &priv->xstats, chan); struct stmmac_channel *ch = &priv->channel[chan]; - bool needs_work = false; - - if ((status & handle_rx) && ch->has_rx) { - needs_work = true; - } else { - status &= ~handle_rx; - } - if ((status & handle_tx) && ch->has_tx) { - needs_work = true; - } else { - status &= ~handle_tx; + if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) { + stmmac_disable_dma_irq(priv, priv->ioaddr, chan); + napi_schedule_irqoff(&ch->rx_napi); } - if (needs_work && napi_schedule_prep(&ch->napi)) { + if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) { stmmac_disable_dma_irq(priv, priv->ioaddr, chan); - __napi_schedule(&ch->napi); + napi_schedule_irqoff(&ch->tx_napi); } return status; @@ -2264,8 +2259,14 @@ static void stmmac_tx_timer(struct timer_list *t) ch = &priv->channel[tx_q->queue_index]; - if (likely(napi_schedule_prep(&ch->napi))) - __napi_schedule(&ch->napi); + /* + * If NAPI is already running we can miss some events. Let's rearm + * the timer and try again. + */ + if (likely(napi_schedule_prep(&ch->tx_napi))) + __napi_schedule(&ch->tx_napi); + else + mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10)); } /** @@ -3046,10 +3047,22 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) tx_q = &priv->tx_queue[queue]; + if (priv->tx_path_in_lpi_mode) + stmmac_disable_eee_mode(priv); + /* Manage oversized TCP frames for GMAC4 device */ if (skb_is_gso(skb) && priv->tso) { - if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) + if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { + /* + * There is no way to determine the number of TSO + * capable Queues. Let's use always the Queue 0 + * because if TSO is supported then at least this + * one will be capable. 
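Several hunks above rework the stmmac TX path: stmmac_tx_clean() re-arms the coalescing timer when descriptors are still pending, and stmmac_tx_timer() handles the case where the TX NAPI instance is already scheduled, so napi_schedule_prep() fails and, without a retry, the event that fired the timer could be missed. A generic sketch of that rearm-on-contention pattern, with hypothetical names (struct my_txq, coal_timer_fn):

    /* Timer callback sketch: schedule NAPI, or retry shortly when NAPI
     * is already running and might complete before seeing our event.
     */
    static void coal_timer_fn(struct timer_list *t)
    {
            struct my_txq *txq = from_timer(txq, t, timer); /* hypothetical */

            if (napi_schedule_prep(&txq->napi))
                    __napi_schedule(&txq->napi);
            else
                    mod_timer(&txq->timer, jiffies + usecs_to_jiffies(10));
    }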
+ */ + skb_set_queue_mapping(skb, 0); + return stmmac_tso_xmit(skb, dev); + } } if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { @@ -3064,9 +3077,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_BUSY; } - if (priv->tx_path_in_lpi_mode) - stmmac_disable_eee_mode(priv); - entry = tx_q->cur_tx; first_entry = entry; WARN_ON(tx_q->tx_skbuff[first_entry]); @@ -3512,7 +3522,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) else skb->ip_summed = CHECKSUM_UNNECESSARY; - napi_gro_receive(&ch->napi, skb); + napi_gro_receive(&ch->rx_napi, skb); priv->dev->stats.rx_packets++; priv->dev->stats.rx_bytes += frame_len; @@ -3527,40 +3537,45 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) return count; } -/** - * stmmac_poll - stmmac poll method (NAPI) - * @napi : pointer to the napi structure. - * @budget : maximum number of packets that the current CPU can receive from - * all interfaces. - * Description : - * To look at the incoming frames and clear the tx resources. - */ -static int stmmac_napi_poll(struct napi_struct *napi, int budget) +static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget) { struct stmmac_channel *ch = - container_of(napi, struct stmmac_channel, napi); + container_of(napi, struct stmmac_channel, rx_napi); struct stmmac_priv *priv = ch->priv_data; - int work_done, rx_done = 0, tx_done = 0; u32 chan = ch->index; + int work_done; priv->xstats.napi_poll++; - if (ch->has_tx) - tx_done = stmmac_tx_clean(priv, budget, chan); - if (ch->has_rx) - rx_done = stmmac_rx(priv, budget, chan); + work_done = stmmac_rx(priv, budget, chan); + if (work_done < budget && napi_complete_done(napi, work_done)) + stmmac_enable_dma_irq(priv, priv->ioaddr, chan); + return work_done; +} - work_done = max(rx_done, tx_done); - work_done = min(work_done, budget); +static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget) +{ + struct stmmac_channel *ch = + container_of(napi, struct stmmac_channel, tx_napi); + struct stmmac_priv *priv = ch->priv_data; + struct stmmac_tx_queue *tx_q; + u32 chan = ch->index; + int work_done; + + priv->xstats.napi_poll++; - if (work_done < budget && napi_complete_done(napi, work_done)) { - int stat; + work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan); + work_done = min(work_done, budget); + if (work_done < budget && napi_complete_done(napi, work_done)) stmmac_enable_dma_irq(priv, priv->ioaddr, chan); - stat = stmmac_dma_interrupt_status(priv, priv->ioaddr, - &priv->xstats, chan); - if (stat && napi_reschedule(napi)) - stmmac_disable_dma_irq(priv, priv->ioaddr, chan); + + /* Force transmission restart */ + tx_q = &priv->tx_queue[chan]; + if (tx_q->cur_tx != tx_q->dirty_tx) { + stmmac_enable_dma_transmission(priv, priv->ioaddr); + stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, + chan); } return work_done; @@ -4340,13 +4355,14 @@ int stmmac_dvr_probe(struct device *device, ch->priv_data = priv; ch->index = queue; - if (queue < priv->plat->rx_queues_to_use) - ch->has_rx = true; - if (queue < priv->plat->tx_queues_to_use) - ch->has_tx = true; - - netif_napi_add(ndev, &ch->napi, stmmac_napi_poll, - NAPI_POLL_WEIGHT); + if (queue < priv->plat->rx_queues_to_use) { + netif_napi_add(ndev, &ch->rx_napi, stmmac_napi_poll_rx, + NAPI_POLL_WEIGHT); + } + if (queue < priv->plat->tx_queues_to_use) { + netif_napi_add(ndev, &ch->tx_napi, stmmac_napi_poll_tx, + NAPI_POLL_WEIGHT); + } } mutex_init(&priv->lock); @@ -4402,7 +4418,10 @@ error_mdio_register: for 
(queue = 0; queue < maxq; queue++) { struct stmmac_channel *ch = &priv->channel[queue]; - netif_napi_del(&ch->napi); + if (queue < priv->plat->rx_queues_to_use) + netif_napi_del(&ch->rx_napi); + if (queue < priv->plat->tx_queues_to_use) + netif_napi_del(&ch->tx_napi); } error_hw_init: destroy_workqueue(priv->wq); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h index ecccf895fd7e..e852821289cf 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h @@ -59,9 +59,14 @@ #define PTP_TCR_TSEVNTENA BIT(14) /* Enable Snapshot for Messages Relevant to Master */ #define PTP_TCR_TSMSTRENA BIT(15) -/* Select PTP packets for Taking Snapshots */ +/* Select PTP packets for Taking Snapshots + * On gmac4 specifically: + * Enable SYNC, Pdelay_Req, Pdelay_Resp when TSEVNTENA is enabled. + * or + * Enable SYNC, Follow_Up, Delay_Req, Delay_Resp, Pdelay_Req, Pdelay_Resp, + * Pdelay_Resp_Follow_Up if TSEVNTENA is disabled + */ #define PTP_TCR_SNAPTYPSEL_1 BIT(16) -#define PTP_GMAC4_TCR_SNAPTYPSEL_1 GENMASK(17, 16) /* Enable MAC address for PTP Frame Filtering */ #define PTP_TCR_TSENMACADDR BIT(18) diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 7ec4eb74fe21..6fc05c106afc 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -1898,7 +1898,7 @@ static inline void cas_tx_ringN(struct cas *cp, int ring, int limit) cp->net_stats[ring].tx_packets++; cp->net_stats[ring].tx_bytes += skb->len; spin_unlock(&cp->stat_lock[ring]); - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); } cp->tx_old[ring] = entry; diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c index 720b7ac77f3b..e9b757b03b56 100644 --- a/drivers/net/ethernet/sun/sunbmac.c +++ b/drivers/net/ethernet/sun/sunbmac.c @@ -781,7 +781,7 @@ static void bigmac_tx(struct bigmac *bp) DTX(("skb(%p) ", skb)); bp->tx_skbs[elem] = NULL; - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); elem = NEXT_TX(elem); } diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index ff641cf30a4e..d007dfeba5c3 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -1962,7 +1962,7 @@ static void happy_meal_tx(struct happy_meal *hp) this = &txbase[elem]; } - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); dev->stats.tx_packets++; } hp->tx_old = elem; diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index dc966ddb6d81..b24c11187017 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -1739,7 +1739,7 @@ static void bdx_tx_cleanup(struct bdx_priv *priv) tx_level -= db->rptr->len; /* '-' koz len is negative */ /* now should come skb pointer - free it */ - dev_kfree_skb_irq(db->rptr->addr.skb); + dev_consume_skb_irq(db->rptr->addr.skb); bdx_tx_db_inc_rptr(db); } diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 1f612268c998..d847f672a705 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -259,7 +259,7 @@ static int netcp_module_probe(struct netcp_device *netcp_device, const char *name; char node_name[32]; - if (of_property_read_string(node, "label", &name) < 0) { + if (of_property_read_string(child, "label", &name) < 0) { snprintf(node_name, sizeof(node_name), "%pOFn", child); name = node_name; } diff --git 
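Most of the one-line substitutions in this stretch (sis190, sis900, epic100, cassini, sunbmac, sunhme, tehuti, and more below) swap dev_kfree_skb_irq() for dev_consume_skb_irq() on TX-completion paths. Both free an skb from hard-IRQ context; the difference is accounting: the consume variant is traced as a normal free, while the kfree variant is traced as a drop, so tools like dropwatch stop reporting every completed transmit as a packet drop. The intended split, sketched:

    /* TX completion sketch: delivered skbs are consumed; real failures
     * keep the kfree variant so genuine drops stay visible.
     */
    static void tx_done(struct sk_buff *skb, bool sent_ok)
    {
            if (sent_ok)
                    dev_consume_skb_irq(skb);       /* not a drop */
            else
                    dev_kfree_skb_irq(skb);         /* counted as a drop */
    }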
a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index 82412691ee66..27f6cf140845 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -1740,7 +1740,7 @@ static void velocity_free_tx_buf(struct velocity_info *vptr, dma_unmap_single(vptr->dev, tdinfo->skb_dma[i], le16_to_cpu(pktlen), DMA_TO_DEVICE); } - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); tdinfo->skb = NULL; } diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 15bb058db392..44efffbe7970 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -630,7 +630,7 @@ static void temac_start_xmit_done(struct net_device *ndev) dma_unmap_single(ndev->dev.parent, cur_p->phys, cur_p->len, DMA_TO_DEVICE); if (cur_p->app4) - dev_kfree_skb_irq((struct sk_buff *)cur_p->app4); + dev_consume_skb_irq((struct sk_buff *)cur_p->app4); cur_p->app0 = 0; cur_p->app1 = 0; cur_p->app2 = 0; diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 0789d8af7d72..ec7e7ec24ff9 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -595,7 +595,7 @@ static void axienet_start_xmit_done(struct net_device *ndev) (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK), DMA_TO_DEVICE); if (cur_p->app4) - dev_kfree_skb_irq((struct sk_buff *)cur_p->app4); + dev_consume_skb_irq((struct sk_buff *)cur_p->app4); /*cur_p->phys = 0;*/ cur_p->app0 = 0; cur_p->app1 = 0; diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 639e3e99af46..b03a417d0073 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -581,7 +581,7 @@ static void xemaclite_tx_handler(struct net_device *dev) return; dev->stats.tx_bytes += lp->deferred_skb->len; - dev_kfree_skb_irq(lp->deferred_skb); + dev_consume_skb_irq(lp->deferred_skb); lp->deferred_skb = NULL; netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index aee55c03def0..ed6623a9801e 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -139,7 +139,7 @@ #ifdef __ARMEB__ typedef struct sk_buff buffer_t; #define free_buffer dev_kfree_skb -#define free_buffer_irq dev_kfree_skb_irq +#define free_buffer_irq dev_consume_skb_irq #else typedef void buffer_t; #define free_buffer kfree diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c index 38ac8ef41f5f..56b7791911bf 100644 --- a/drivers/net/fddi/defxx.c +++ b/drivers/net/fddi/defxx.c @@ -3512,7 +3512,7 @@ static int dfx_xmt_done(DFX_board_t *bp) bp->descr_block_virt->xmt_data[comp].long_1, p_xmt_drv_descr->p_skb->len, DMA_TO_DEVICE); - dev_kfree_skb_irq(p_xmt_drv_descr->p_skb); + dev_consume_skb_irq(p_xmt_drv_descr->p_skb); /* * Move to start of next packet by updating completion index diff --git a/drivers/net/fddi/skfp/pcmplc.c b/drivers/net/fddi/skfp/pcmplc.c index 6ef44c480bd5..f29f5a6a45ab 100644 --- a/drivers/net/fddi/skfp/pcmplc.c +++ b/drivers/net/fddi/skfp/pcmplc.c @@ -851,6 +851,7 @@ static void pcm_fsm(struct s_smc *smc, struct s_phy *phy, int cmd) case ACTIONS(PC5_SIGNAL) : ACTIONS_DONE() ; + /* fall through */ case PC5_SIGNAL : if ((cmd != PC_SIGNAL) && (cmd != PC_TIMEOUT_LCT)) break ; 
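The pcmplc.c hunk just above only adds a comment, but it is load-bearing: with -Wimplicit-fallthrough enabled, GCC warns on any case that runs into the next one unless a recognized marker says the fall-through is intentional. (Later kernels use the fallthrough pseudo-keyword; at this point the comment form is the convention.) A minimal illustration with hypothetical helpers:

    enum step { STEP_SETUP, STEP_RUN };

    static void advance(enum step s)
    {
            switch (s) {
            case STEP_SETUP:
                    do_setup();             /* hypothetical helper */
                    /* fall through */      /* SETUP continues into RUN */
            case STEP_RUN:
                    do_run();               /* hypothetical helper */
                    break;
            }
    }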
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 58bbba8582b0..3377ac66a347 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -1512,9 +1512,13 @@ static void geneve_link_config(struct net_device *dev, } #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: { - struct rt6_info *rt = rt6_lookup(geneve->net, - &info->key.u.ipv6.dst, NULL, 0, - NULL, 0); + struct rt6_info *rt; + + if (!__in6_dev_get(dev)) + break; + + rt = rt6_lookup(geneve->net, &info->key.u.ipv6.dst, NULL, 0, + NULL, 0); if (rt && rt->dst.dev) ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV6_HLEN; diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c index 190f66c88479..ed0841630990 100644 --- a/drivers/net/hamradio/baycom_ser_fdx.c +++ b/drivers/net/hamradio/baycom_ser_fdx.c @@ -203,32 +203,6 @@ static inline void ser12_set_divisor(struct net_device *dev, */ } -/* --------------------------------------------------------------------- */ - -#if 0 -static inline unsigned int hweight16(unsigned int w) - __attribute__ ((unused)); -static inline unsigned int hweight8(unsigned int w) - __attribute__ ((unused)); - -static inline unsigned int hweight16(unsigned int w) -{ - unsigned short res = (w & 0x5555) + ((w >> 1) & 0x5555); - res = (res & 0x3333) + ((res >> 2) & 0x3333); - res = (res & 0x0F0F) + ((res >> 4) & 0x0F0F); - return (res & 0x00FF) + ((res >> 8) & 0x00FF); -} - -static inline unsigned int hweight8(unsigned int w) -{ - unsigned short res = (w & 0x55) + ((w >> 1) & 0x55); - res = (res & 0x33) + ((res >> 2) & 0x33); - return (res & 0x0F) + ((res >> 4) & 0x0F); -} -#endif - -/* --------------------------------------------------------------------- */ - static __inline__ void ser12_rx(struct net_device *dev, struct baycom_state *bc, struct timespec64 *ts, unsigned char curs) { int timediff; diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c index 44de81e5f140..c589f5ae75bb 100644 --- a/drivers/net/ieee802154/mcr20a.c +++ b/drivers/net/ieee802154/mcr20a.c @@ -905,9 +905,9 @@ mcr20a_irq_clean_complete(void *context) } break; case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_SEQIRQ): - /* rx is starting */ - dev_dbg(printdev(lp), "RX is starting\n"); - mcr20a_handle_rx(lp); + /* rx is starting */ + dev_dbg(printdev(lp), "RX is starting\n"); + mcr20a_handle_rx(lp); break; case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ): if (lp->is_tx) { diff --git a/drivers/net/ipvlan/Makefile b/drivers/net/ipvlan/Makefile index 8a2c64dc9641..3ee95367a994 100644 --- a/drivers/net/ipvlan/Makefile +++ b/drivers/net/ipvlan/Makefile @@ -5,4 +5,5 @@ obj-$(CONFIG_IPVLAN) += ipvlan.o obj-$(CONFIG_IPVTAP) += ipvtap.o -ipvlan-objs := ipvlan_core.o ipvlan_main.o +ipvlan-objs-$(CONFIG_IPVLAN_L3S) += ipvlan_l3s.o +ipvlan-objs := ipvlan_core.o ipvlan_main.o $(ipvlan-objs-y) diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h index adb826f55e60..b906d2f6bd04 100644 --- a/drivers/net/ipvlan/ipvlan.h +++ b/drivers/net/ipvlan/ipvlan.h @@ -165,10 +165,9 @@ struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan, const void *iaddr, bool is_v6); bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6); void ipvlan_ht_addr_del(struct ipvl_addr *addr); -struct sk_buff *ipvlan_l3_rcv(struct net_device *dev, struct sk_buff *skb, - u16 proto); -unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb, - const struct nf_hook_state *state); +struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h, + int addr_type, bool use_dest); 
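A few hunks back, geneve_link_config() gained a guard: rt6_lookup() is only called once __in6_dev_get(dev) confirms the device has IPv6 state at all; with IPv6 disabled on the device there is no inet6_dev and the FIB lookup is pointless. A condensed sketch of the guarded lookup, where daddr6 stands in for the tunnel destination and the route release is an assumption about the surrounding code:

    /* Sketch: skip the IPv6 route lookup when the device carries no
     * IPv6 state; otherwise derive the usable MTU from the route.
     */
    if (__in6_dev_get(dev)) {
            struct rt6_info *rt = rt6_lookup(net, &daddr6, NULL, 0,
                                             NULL, 0);

            if (rt && rt->dst.dev)
                    ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV6_HLEN;
            if (rt)
                    ip6_rt_put(rt);         /* assumed release */
    }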
+void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type); void ipvlan_count_rx(const struct ipvl_dev *ipvlan, unsigned int len, bool success, bool mcast); int ipvlan_link_new(struct net *src_net, struct net_device *dev, @@ -177,6 +176,36 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev, void ipvlan_link_delete(struct net_device *dev, struct list_head *head); void ipvlan_link_setup(struct net_device *dev); int ipvlan_link_register(struct rtnl_link_ops *ops); +#ifdef CONFIG_IPVLAN_L3S +int ipvlan_l3s_register(struct ipvl_port *port); +void ipvlan_l3s_unregister(struct ipvl_port *port); +void ipvlan_migrate_l3s_hook(struct net *oldnet, struct net *newnet); +int ipvlan_l3s_init(void); +void ipvlan_l3s_cleanup(void); +#else +static inline int ipvlan_l3s_register(struct ipvl_port *port) +{ + return -ENOTSUPP; +} + +static inline void ipvlan_l3s_unregister(struct ipvl_port *port) +{ +} + +static inline void ipvlan_migrate_l3s_hook(struct net *oldnet, + struct net *newnet) +{ +} + +static inline int ipvlan_l3s_init(void) +{ + return 0; +} + +static inline void ipvlan_l3s_cleanup(void) +{ +} +#endif /* CONFIG_IPVLAN_L3S */ static inline bool netif_is_ipvlan_port(const struct net_device *dev) { diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index 1a8132eb2a3e..e0f5bc82b10c 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -138,7 +138,7 @@ bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6) return ret; } -static void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type) +void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type) { void *lyr3h = NULL; @@ -355,9 +355,8 @@ out: return ret; } -static struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, - void *lyr3h, int addr_type, - bool use_dest) +struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h, + int addr_type, bool use_dest) { struct ipvl_addr *addr = NULL; @@ -647,7 +646,9 @@ int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) case IPVLAN_MODE_L2: return ipvlan_xmit_mode_l2(skb, dev); case IPVLAN_MODE_L3: +#ifdef CONFIG_IPVLAN_L3S case IPVLAN_MODE_L3S: +#endif return ipvlan_xmit_mode_l3(skb, dev); } @@ -743,8 +744,10 @@ rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb) return ipvlan_handle_mode_l2(pskb, port); case IPVLAN_MODE_L3: return ipvlan_handle_mode_l3(pskb, port); +#ifdef CONFIG_IPVLAN_L3S case IPVLAN_MODE_L3S: return RX_HANDLER_PASS; +#endif } /* Should not reach here */ @@ -753,97 +756,3 @@ rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb) kfree_skb(skb); return RX_HANDLER_CONSUMED; } - -static struct ipvl_addr *ipvlan_skb_to_addr(struct sk_buff *skb, - struct net_device *dev) -{ - struct ipvl_addr *addr = NULL; - struct ipvl_port *port; - void *lyr3h; - int addr_type; - - if (!dev || !netif_is_ipvlan_port(dev)) - goto out; - - port = ipvlan_port_get_rcu(dev); - if (!port || port->mode != IPVLAN_MODE_L3S) - goto out; - - lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type); - if (!lyr3h) - goto out; - - addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true); -out: - return addr; -} - -struct sk_buff *ipvlan_l3_rcv(struct net_device *dev, struct sk_buff *skb, - u16 proto) -{ - struct ipvl_addr *addr; - struct net_device *sdev; - - addr = ipvlan_skb_to_addr(skb, dev); - if (!addr) - goto out; - - sdev = addr->master->dev; - switch (proto) { - case AF_INET: - { - int err; - struct iphdr *ip4h = 
ip_hdr(skb); - - err = ip_route_input_noref(skb, ip4h->daddr, ip4h->saddr, - ip4h->tos, sdev); - if (unlikely(err)) - goto out; - break; - } -#if IS_ENABLED(CONFIG_IPV6) - case AF_INET6: - { - struct dst_entry *dst; - struct ipv6hdr *ip6h = ipv6_hdr(skb); - int flags = RT6_LOOKUP_F_HAS_SADDR; - struct flowi6 fl6 = { - .flowi6_iif = sdev->ifindex, - .daddr = ip6h->daddr, - .saddr = ip6h->saddr, - .flowlabel = ip6_flowinfo(ip6h), - .flowi6_mark = skb->mark, - .flowi6_proto = ip6h->nexthdr, - }; - - skb_dst_drop(skb); - dst = ip6_route_input_lookup(dev_net(sdev), sdev, &fl6, - skb, flags); - skb_dst_set(skb, dst); - break; - } -#endif - default: - break; - } - -out: - return skb; -} - -unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb, - const struct nf_hook_state *state) -{ - struct ipvl_addr *addr; - unsigned int len; - - addr = ipvlan_skb_to_addr(skb, skb->dev); - if (!addr) - goto out; - - skb->dev = addr->master->dev; - len = skb->len + ETH_HLEN; - ipvlan_count_rx(addr->master, len, true, false); -out: - return NF_ACCEPT; -} diff --git a/drivers/net/ipvlan/ipvlan_l3s.c b/drivers/net/ipvlan/ipvlan_l3s.c new file mode 100644 index 000000000000..d17480a911a3 --- /dev/null +++ b/drivers/net/ipvlan/ipvlan_l3s.c @@ -0,0 +1,227 @@ +/* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + */ + +#include "ipvlan.h" + +static unsigned int ipvlan_netid __read_mostly; + +struct ipvlan_netns { + unsigned int ipvl_nf_hook_refcnt; +}; + +static struct ipvl_addr *ipvlan_skb_to_addr(struct sk_buff *skb, + struct net_device *dev) +{ + struct ipvl_addr *addr = NULL; + struct ipvl_port *port; + int addr_type; + void *lyr3h; + + if (!dev || !netif_is_ipvlan_port(dev)) + goto out; + + port = ipvlan_port_get_rcu(dev); + if (!port || port->mode != IPVLAN_MODE_L3S) + goto out; + + lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type); + if (!lyr3h) + goto out; + + addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true); +out: + return addr; +} + +static struct sk_buff *ipvlan_l3_rcv(struct net_device *dev, + struct sk_buff *skb, u16 proto) +{ + struct ipvl_addr *addr; + struct net_device *sdev; + + addr = ipvlan_skb_to_addr(skb, dev); + if (!addr) + goto out; + + sdev = addr->master->dev; + switch (proto) { + case AF_INET: + { + struct iphdr *ip4h = ip_hdr(skb); + int err; + + err = ip_route_input_noref(skb, ip4h->daddr, ip4h->saddr, + ip4h->tos, sdev); + if (unlikely(err)) + goto out; + break; + } +#if IS_ENABLED(CONFIG_IPV6) + case AF_INET6: + { + struct dst_entry *dst; + struct ipv6hdr *ip6h = ipv6_hdr(skb); + int flags = RT6_LOOKUP_F_HAS_SADDR; + struct flowi6 fl6 = { + .flowi6_iif = sdev->ifindex, + .daddr = ip6h->daddr, + .saddr = ip6h->saddr, + .flowlabel = ip6_flowinfo(ip6h), + .flowi6_mark = skb->mark, + .flowi6_proto = ip6h->nexthdr, + }; + + skb_dst_drop(skb); + dst = ip6_route_input_lookup(dev_net(sdev), sdev, &fl6, + skb, flags); + skb_dst_set(skb, dst); + break; + } +#endif + default: + break; + } +out: + return skb; +} + +static const struct l3mdev_ops ipvl_l3mdev_ops = { + .l3mdev_l3_rcv = ipvlan_l3_rcv, +}; + +static unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb, + const struct nf_hook_state *state) +{ + struct ipvl_addr *addr; + unsigned int len; + + addr = ipvlan_skb_to_addr(skb, skb->dev); + if (!addr) + goto 
out; + + skb->dev = addr->master->dev; + len = skb->len + ETH_HLEN; + ipvlan_count_rx(addr->master, len, true, false); +out: + return NF_ACCEPT; +} + +static const struct nf_hook_ops ipvl_nfops[] = { + { + .hook = ipvlan_nf_input, + .pf = NFPROTO_IPV4, + .hooknum = NF_INET_LOCAL_IN, + .priority = INT_MAX, + }, +#if IS_ENABLED(CONFIG_IPV6) + { + .hook = ipvlan_nf_input, + .pf = NFPROTO_IPV6, + .hooknum = NF_INET_LOCAL_IN, + .priority = INT_MAX, + }, +#endif +}; + +static int ipvlan_register_nf_hook(struct net *net) +{ + struct ipvlan_netns *vnet = net_generic(net, ipvlan_netid); + int err = 0; + + if (!vnet->ipvl_nf_hook_refcnt) { + err = nf_register_net_hooks(net, ipvl_nfops, + ARRAY_SIZE(ipvl_nfops)); + if (!err) + vnet->ipvl_nf_hook_refcnt = 1; + } else { + vnet->ipvl_nf_hook_refcnt++; + } + + return err; +} + +static void ipvlan_unregister_nf_hook(struct net *net) +{ + struct ipvlan_netns *vnet = net_generic(net, ipvlan_netid); + + if (WARN_ON(!vnet->ipvl_nf_hook_refcnt)) + return; + + vnet->ipvl_nf_hook_refcnt--; + if (!vnet->ipvl_nf_hook_refcnt) + nf_unregister_net_hooks(net, ipvl_nfops, + ARRAY_SIZE(ipvl_nfops)); +} + +void ipvlan_migrate_l3s_hook(struct net *oldnet, struct net *newnet) +{ + struct ipvlan_netns *old_vnet; + + ASSERT_RTNL(); + + old_vnet = net_generic(oldnet, ipvlan_netid); + if (!old_vnet->ipvl_nf_hook_refcnt) + return; + + ipvlan_register_nf_hook(newnet); + ipvlan_unregister_nf_hook(oldnet); +} + +static void ipvlan_ns_exit(struct net *net) +{ + struct ipvlan_netns *vnet = net_generic(net, ipvlan_netid); + + if (WARN_ON_ONCE(vnet->ipvl_nf_hook_refcnt)) { + vnet->ipvl_nf_hook_refcnt = 0; + nf_unregister_net_hooks(net, ipvl_nfops, + ARRAY_SIZE(ipvl_nfops)); + } +} + +static struct pernet_operations ipvlan_net_ops = { + .id = &ipvlan_netid, + .size = sizeof(struct ipvlan_netns), + .exit = ipvlan_ns_exit, +}; + +int ipvlan_l3s_init(void) +{ + return register_pernet_subsys(&ipvlan_net_ops); +} + +void ipvlan_l3s_cleanup(void) +{ + unregister_pernet_subsys(&ipvlan_net_ops); +} + +int ipvlan_l3s_register(struct ipvl_port *port) +{ + struct net_device *dev = port->dev; + int ret; + + ASSERT_RTNL(); + + ret = ipvlan_register_nf_hook(read_pnet(&port->pnet)); + if (!ret) { + dev->l3mdev_ops = &ipvl_l3mdev_ops; + dev->priv_flags |= IFF_L3MDEV_RX_HANDLER; + } + + return ret; +} + +void ipvlan_l3s_unregister(struct ipvl_port *port) +{ + struct net_device *dev = port->dev; + + ASSERT_RTNL(); + + dev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER; + ipvlan_unregister_nf_hook(read_pnet(&port->pnet)); + dev->l3mdev_ops = NULL; +} diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 19bdde60680c..8ec73d973079 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -9,73 +9,10 @@ #include "ipvlan.h" -static unsigned int ipvlan_netid __read_mostly; - -struct ipvlan_netns { - unsigned int ipvl_nf_hook_refcnt; -}; - -static const struct nf_hook_ops ipvl_nfops[] = { - { - .hook = ipvlan_nf_input, - .pf = NFPROTO_IPV4, - .hooknum = NF_INET_LOCAL_IN, - .priority = INT_MAX, - }, -#if IS_ENABLED(CONFIG_IPV6) - { - .hook = ipvlan_nf_input, - .pf = NFPROTO_IPV6, - .hooknum = NF_INET_LOCAL_IN, - .priority = INT_MAX, - }, -#endif -}; - -static const struct l3mdev_ops ipvl_l3mdev_ops = { - .l3mdev_l3_rcv = ipvlan_l3_rcv, -}; - -static void ipvlan_adjust_mtu(struct ipvl_dev *ipvlan, struct net_device *dev) -{ - ipvlan->dev->mtu = dev->mtu; -} - -static int ipvlan_register_nf_hook(struct net *net) -{ - struct ipvlan_netns *vnet = 
net_generic(net, ipvlan_netid); - int err = 0; - - if (!vnet->ipvl_nf_hook_refcnt) { - err = nf_register_net_hooks(net, ipvl_nfops, - ARRAY_SIZE(ipvl_nfops)); - if (!err) - vnet->ipvl_nf_hook_refcnt = 1; - } else { - vnet->ipvl_nf_hook_refcnt++; - } - - return err; -} - -static void ipvlan_unregister_nf_hook(struct net *net) -{ - struct ipvlan_netns *vnet = net_generic(net, ipvlan_netid); - - if (WARN_ON(!vnet->ipvl_nf_hook_refcnt)) - return; - - vnet->ipvl_nf_hook_refcnt--; - if (!vnet->ipvl_nf_hook_refcnt) - nf_unregister_net_hooks(net, ipvl_nfops, - ARRAY_SIZE(ipvl_nfops)); -} - static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval, struct netlink_ext_ack *extack) { struct ipvl_dev *ipvlan; - struct net_device *mdev = port->dev; unsigned int flags; int err; @@ -97,17 +34,12 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval, } if (nval == IPVLAN_MODE_L3S) { /* New mode is L3S */ - err = ipvlan_register_nf_hook(read_pnet(&port->pnet)); - if (!err) { - mdev->l3mdev_ops = &ipvl_l3mdev_ops; - mdev->priv_flags |= IFF_L3MDEV_MASTER; - } else + err = ipvlan_l3s_register(port); + if (err) goto fail; } else if (port->mode == IPVLAN_MODE_L3S) { /* Old mode was L3S */ - mdev->priv_flags &= ~IFF_L3MDEV_MASTER; - ipvlan_unregister_nf_hook(read_pnet(&port->pnet)); - mdev->l3mdev_ops = NULL; + ipvlan_l3s_unregister(port); } port->mode = nval; } @@ -166,11 +98,8 @@ static void ipvlan_port_destroy(struct net_device *dev) struct ipvl_port *port = ipvlan_port_get_rtnl(dev); struct sk_buff *skb; - if (port->mode == IPVLAN_MODE_L3S) { - dev->priv_flags &= ~IFF_L3MDEV_MASTER; - ipvlan_unregister_nf_hook(dev_net(dev)); - dev->l3mdev_ops = NULL; - } + if (port->mode == IPVLAN_MODE_L3S) + ipvlan_l3s_unregister(port); netdev_rx_handler_unregister(dev); cancel_work_sync(&port->wq); while ((skb = __skb_dequeue(&port->backlog)) != NULL) { @@ -446,6 +375,11 @@ static const struct header_ops ipvlan_header_ops = { .cache_update = eth_header_cache_update, }; +static void ipvlan_adjust_mtu(struct ipvl_dev *ipvlan, struct net_device *dev) +{ + ipvlan->dev->mtu = dev->mtu; +} + static bool netif_is_ipvlan(const struct net_device *dev) { /* both ipvlan and ipvtap devices use the same netdev_ops */ @@ -781,7 +715,6 @@ static int ipvlan_device_event(struct notifier_block *unused, case NETDEV_REGISTER: { struct net *oldnet, *newnet = dev_net(dev); - struct ipvlan_netns *old_vnet; oldnet = read_pnet(&port->pnet); if (net_eq(newnet, oldnet)) @@ -789,12 +722,7 @@ static int ipvlan_device_event(struct notifier_block *unused, write_pnet(&port->pnet, newnet); - old_vnet = net_generic(oldnet, ipvlan_netid); - if (!old_vnet->ipvl_nf_hook_refcnt) - break; - - ipvlan_register_nf_hook(newnet); - ipvlan_unregister_nf_hook(oldnet); + ipvlan_migrate_l3s_hook(oldnet, newnet); break; } case NETDEV_UNREGISTER: @@ -1068,23 +996,6 @@ static struct notifier_block ipvlan_addr6_vtor_notifier_block __read_mostly = { }; #endif -static void ipvlan_ns_exit(struct net *net) -{ - struct ipvlan_netns *vnet = net_generic(net, ipvlan_netid); - - if (WARN_ON_ONCE(vnet->ipvl_nf_hook_refcnt)) { - vnet->ipvl_nf_hook_refcnt = 0; - nf_unregister_net_hooks(net, ipvl_nfops, - ARRAY_SIZE(ipvl_nfops)); - } -} - -static struct pernet_operations ipvlan_net_ops = { - .id = &ipvlan_netid, - .size = sizeof(struct ipvlan_netns), - .exit = ipvlan_ns_exit, -}; - static int __init ipvlan_init_module(void) { int err; @@ -1099,13 +1010,13 @@ static int __init ipvlan_init_module(void) register_inetaddr_notifier(&ipvlan_addr4_notifier_block); 
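The net effect of the ipvlan changes above: everything L3S-specific moves into the new ipvlan_l3s.c, built only when CONFIG_IPVLAN_L3S is set (that is what the ipvlan-objs-$(CONFIG_IPVLAN_L3S) line in the Makefile hunk arranges), while ipvlan.h supplies inline no-op stubs for the disabled configuration so ipvlan_main.c needs no #ifdefs at the call sites. The pattern, reduced to a sketch with a hypothetical CONFIG_MYFEATURE symbol:

    /* Shared header: real prototypes when the feature is built, inert
     * inline stubs otherwise, so callers compile either way.
     */
    #ifdef CONFIG_MYFEATURE
    int myfeature_register(struct my_port *port);
    void myfeature_unregister(struct my_port *port);
    int myfeature_init(void);
    #else
    static inline int myfeature_register(struct my_port *port)
    {
            return -ENOTSUPP;       /* selecting the mode fails cleanly */
    }
    static inline void myfeature_unregister(struct my_port *port) {}
    static inline int myfeature_init(void) { return 0; }
    #endif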
register_inetaddr_validator_notifier(&ipvlan_addr4_vtor_notifier_block); - err = register_pernet_subsys(&ipvlan_net_ops); + err = ipvlan_l3s_init(); if (err < 0) goto error; err = ipvlan_link_register(&ipvlan_link_ops); if (err < 0) { - unregister_pernet_subsys(&ipvlan_net_ops); + ipvlan_l3s_cleanup(); goto error; } @@ -1126,7 +1037,7 @@ error: static void __exit ipvlan_cleanup_module(void) { rtnl_link_unregister(&ipvlan_link_ops); - unregister_pernet_subsys(&ipvlan_net_ops); + ipvlan_l3s_cleanup(); unregister_netdevice_notifier(&ipvlan_notifier_block); unregister_inetaddr_notifier(&ipvlan_addr4_notifier_block); unregister_inetaddr_validator_notifier( diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c index 172b271c8bd2..f92c43453ec6 100644 --- a/drivers/net/netdevsim/bpf.c +++ b/drivers/net/netdevsim/bpf.c @@ -248,7 +248,7 @@ static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog) static int nsim_bpf_verifier_prep(struct bpf_prog *prog) { - struct netdevsim *ns = netdev_priv(prog->aux->offload->netdev); + struct netdevsim *ns = bpf_offload_dev_priv(prog->aux->offload->offdev); if (!ns->bpf_bind_accept) return -EOPNOTSUPP; @@ -589,7 +589,8 @@ int nsim_bpf_init(struct netdevsim *ns) if (IS_ERR_OR_NULL(ns->sdev->ddir_bpf_bound_progs)) return -ENOMEM; - ns->sdev->bpf_dev = bpf_offload_dev_create(&nsim_bpf_dev_ops); + ns->sdev->bpf_dev = bpf_offload_dev_create(&nsim_bpf_dev_ops, + ns); err = PTR_ERR_OR_ZERO(ns->sdev->bpf_dev); if (err) return err; diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index 8d8e2b3f263e..75a50b59cb8f 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -22,7 +22,6 @@ #include <net/netlink.h> #include <net/pkt_cls.h> #include <net/rtnetlink.h> -#include <net/switchdev.h> #include "netdevsim.h" @@ -148,26 +147,16 @@ static struct device_type nsim_dev_type = { .release = nsim_dev_release, }; -static int -nsim_port_attr_get(struct net_device *dev, struct switchdev_attr *attr) +static int nsim_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) { struct netdevsim *ns = netdev_priv(dev); - switch (attr->id) { - case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: - attr->u.ppid.id_len = sizeof(ns->sdev->switch_id); - memcpy(&attr->u.ppid.id, &ns->sdev->switch_id, - attr->u.ppid.id_len); - return 0; - default: - return -EOPNOTSUPP; - } + ppid->id_len = sizeof(ns->sdev->switch_id); + memcpy(&ppid->id, &ns->sdev->switch_id, ppid->id_len); + return 0; } -static const struct switchdev_ops nsim_switchdev_ops = { - .switchdev_port_attr_get = nsim_port_attr_get, -}; - static int nsim_init(struct net_device *dev) { char sdev_ddir_name[10], sdev_link_name[32]; @@ -214,7 +203,6 @@ static int nsim_init(struct net_device *dev) goto err_bpf_uninit; SET_NETDEV_DEV(dev, &ns->dev); - SWITCHDEV_SET_OPS(dev, &nsim_switchdev_ops); err = nsim_devlink_setup(ns); if (err) @@ -493,6 +481,7 @@ static const struct net_device_ops nsim_netdev_ops = { .ndo_setup_tc = nsim_setup_tc, .ndo_set_features = nsim_set_features, .ndo_bpf = nsim_bpf, + .ndo_get_port_parent_id = nsim_get_port_parent_id, }; static void nsim_setup(struct net_device *dev) diff --git a/drivers/net/phy/aquantia.c b/drivers/net/phy/aquantia.c index 482004efa62d..0f772a47a18e 100644 --- a/drivers/net/phy/aquantia.c +++ b/drivers/net/phy/aquantia.c @@ -17,6 +17,7 @@ #define PHY_ID_AQR105 0x03a1b4a2 #define PHY_ID_AQR106 0x03a1b4d0 #define PHY_ID_AQR107 0x03a1b4e0 +#define PHY_ID_AQCS109 0x03a1b5c2 #define PHY_ID_AQR405 
0x03a1b4b0 #define MDIO_AN_TX_VEND_STATUS1 0xc800 @@ -203,6 +204,16 @@ static struct phy_driver aqr_driver[] = { .read_status = aqr_read_status, }, { + PHY_ID_MATCH_MODEL(PHY_ID_AQCS109), + .name = "Aquantia AQCS109", + .features = PHY_10GBIT_FULL_FEATURES, + .aneg_done = genphy_c45_aneg_done, + .config_aneg = aqr_config_aneg, + .config_intr = aqr_config_intr, + .ack_interrupt = aqr_ack_interrupt, + .read_status = aqr_read_status, +}, +{ PHY_ID_MATCH_MODEL(PHY_ID_AQR405), .name = "Aquantia AQR405", .features = PHY_10GBIT_FULL_FEATURES, @@ -222,6 +233,7 @@ static struct mdio_device_id __maybe_unused aqr_tbl[] = { { PHY_ID_MATCH_MODEL(PHY_ID_AQR105) }, { PHY_ID_MATCH_MODEL(PHY_ID_AQR106) }, { PHY_ID_MATCH_MODEL(PHY_ID_AQR107) }, + { PHY_ID_MATCH_MODEL(PHY_ID_AQCS109) }, { PHY_ID_MATCH_MODEL(PHY_ID_AQR405) }, { } }; diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 25ef483bcc24..2fe2ebaf62d1 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -885,14 +885,14 @@ static void decode_txts(struct dp83640_private *dp83640, struct phy_txts *phy_txts) { struct skb_shared_hwtstamps shhwtstamps; + struct dp83640_skb_info *skb_info; struct sk_buff *skb; - u64 ns; u8 overflow; + u64 ns; /* We must already have the skb that triggered this. */ - +again: skb = skb_dequeue(&dp83640->tx_queue); - if (!skb) { pr_debug("have timestamp but tx_queue empty\n"); return; @@ -907,6 +907,11 @@ static void decode_txts(struct dp83640_private *dp83640, } return; } + skb_info = (struct dp83640_skb_info *)skb->cb; + if (time_after(jiffies, skb_info->tmo)) { + kfree_skb(skb); + goto again; + } ns = phy2txts(phy_txts); memset(&shhwtstamps, 0, sizeof(shhwtstamps)); @@ -1459,6 +1464,7 @@ static bool dp83640_rxtstamp(struct phy_device *phydev, static void dp83640_txtstamp(struct phy_device *phydev, struct sk_buff *skb, int type) { + struct dp83640_skb_info *skb_info = (struct dp83640_skb_info *)skb->cb; struct dp83640_private *dp83640 = phydev->priv; switch (dp83640->hwts_tx_en) { @@ -1471,6 +1477,7 @@ static void dp83640_txtstamp(struct phy_device *phydev, /* fall through */ case HWTSTAMP_TX_ON: skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT; skb_queue_tail(&dp83640->tx_queue, skb); break; diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index 8a8d9f606b3e..fc09c5c1a4de 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -127,17 +127,13 @@ static int dp83867_config_port_mirroring(struct phy_device *phydev) { struct dp83867_private *dp83867 = (struct dp83867_private *)phydev->priv; - u16 val; - - val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4); if (dp83867->port_mirroring == DP83867_PORT_MIRROING_EN) - val |= DP83867_CFG4_PORT_MIRROR_EN; + phy_set_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4, + DP83867_CFG4_PORT_MIRROR_EN); else - val &= ~DP83867_CFG4_PORT_MIRROR_EN; - - phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4, val); - + phy_clear_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4, + DP83867_CFG4_PORT_MIRROR_EN); return 0; } @@ -222,11 +218,9 @@ static int dp83867_config_init(struct phy_device *phydev) } /* RX_DV/RX_CTRL strapped in mode 1 or mode 2 workaround */ - if (dp83867->rxctrl_strap_quirk) { - val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4); - val &= ~BIT(7); - phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4, val); - } + if (dp83867->rxctrl_strap_quirk) + phy_clear_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4, + BIT(7)); if 
(phy_interface_is_rgmii(phydev)) { val = phy_read(phydev, MII_DP83867_PHYCTRL); @@ -275,17 +269,11 @@ static int dp83867_config_init(struct phy_device *phydev) phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIIDCTL, delay); - if (dp83867->io_impedance >= 0) { - val = phy_read_mmd(phydev, DP83867_DEVADDR, - DP83867_IO_MUX_CFG); - - val &= ~DP83867_IO_MUX_CFG_IO_IMPEDANCE_CTRL; - val |= dp83867->io_impedance & - DP83867_IO_MUX_CFG_IO_IMPEDANCE_CTRL; - - phy_write_mmd(phydev, DP83867_DEVADDR, - DP83867_IO_MUX_CFG, val); - } + if (dp83867->io_impedance >= 0) + phy_modify_mmd(phydev, DP83867_DEVADDR, DP83867_IO_MUX_CFG, + DP83867_IO_MUX_CFG_IO_IMPEDANCE_CTRL, + dp83867->io_impedance & + DP83867_IO_MUX_CFG_IO_IMPEDANCE_CTRL); } /* Enable Interrupt output INT_OE in CFG3 register */ @@ -299,12 +287,11 @@ static int dp83867_config_init(struct phy_device *phydev) dp83867_config_port_mirroring(phydev); /* Clock output selection if muxing property is set */ - if (dp83867->clk_output_sel != DP83867_CLK_O_SEL_REF_CLK) { - val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_IO_MUX_CFG); - val &= ~DP83867_IO_MUX_CFG_CLK_O_SEL_MASK; - val |= (dp83867->clk_output_sel << DP83867_IO_MUX_CFG_CLK_O_SEL_SHIFT); - phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_IO_MUX_CFG, val); - } + if (dp83867->clk_output_sel != DP83867_CLK_O_SEL_REF_CLK) + phy_modify_mmd(phydev, DP83867_DEVADDR, DP83867_IO_MUX_CFG, + DP83867_IO_MUX_CFG_CLK_O_SEL_MASK, + dp83867->clk_output_sel << + DP83867_IO_MUX_CFG_CLK_O_SEL_SHIFT); return 0; } diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c index da13356999e5..e9704af1d239 100644 --- a/drivers/net/phy/dp83tc811.c +++ b/drivers/net/phy/dp83tc811.c @@ -144,11 +144,8 @@ static int dp83811_set_wol(struct phy_device *phydev, phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG, value); } else { - value = phy_read_mmd(phydev, DP83811_DEVADDR, - MII_DP83811_WOL_CFG); - value &= ~DP83811_WOL_EN; - phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG, - value); + phy_clear_bits_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG, + DP83811_WOL_EN); } return 0; @@ -328,14 +325,10 @@ static int dp83811_suspend(struct phy_device *phydev) static int dp83811_resume(struct phy_device *phydev) { - int value; - genphy_resume(phydev); - value = phy_read_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG); - - phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG, value | - DP83811_WOL_CLR_INDICATION); + phy_set_bits_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG, + DP83811_WOL_CLR_INDICATION); return 0; } diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c index f136a23c1a35..b0d1368c3400 100644 --- a/drivers/net/phy/fixed_phy.c +++ b/drivers/net/phy/fixed_phy.c @@ -85,11 +85,11 @@ static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num) s = read_seqcount_begin(&fp->seqcount); fp->status.link = !fp->no_carrier; /* Issue callback if user registered it. 
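The dp83867 and dp83tc811 hunks above collapse open-coded read-modify-write sequences into the phy_modify_mmd()/phy_set_bits_mmd()/phy_clear_bits_mmd() helpers, which serialize the read and write under the MDIO bus lock and propagate read errors that the old code silently folded into the value written back. Before/after as a sketch, with DEVADDR/REG/FLAG standing in for the driver's constants:

    /* Old shape: a failed phy_read_mmd() returns a negative errno that
     * simply gets OR-ed into the value written back.
     */
    static int set_flag_old(struct phy_device *phydev)
    {
            int val = phy_read_mmd(phydev, DEVADDR, REG); /* error lost */

            val |= FLAG;
            return phy_write_mmd(phydev, DEVADDR, REG, val);
    }

    /* New shape: one call, errors surface to the caller */
    static int set_flag_new(struct phy_device *phydev)
    {
            return phy_set_bits_mmd(phydev, DEVADDR, REG, FLAG);
    }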
*/ - if (fp->link_update) { + if (fp->link_update) fp->link_update(fp->phydev->attached_dev, &fp->status); - fixed_phy_update(fp); - } + /* Check the GPIO for change in status */ + fixed_phy_update(fp); state = fp->status; } while (read_seqcount_retry(&fp->seqcount, s)); @@ -229,12 +229,12 @@ static struct gpio_desc *fixed_phy_get_gpiod(struct device_node *np) } #endif -struct phy_device *fixed_phy_register(unsigned int irq, - struct fixed_phy_status *status, - struct device_node *np) +static struct phy_device *__fixed_phy_register(unsigned int irq, + struct fixed_phy_status *status, + struct device_node *np, + struct gpio_desc *gpiod) { struct fixed_mdio_bus *fmb = &platform_fmb; - struct gpio_desc *gpiod = NULL; struct phy_device *phy; int phy_addr; int ret; @@ -243,9 +243,11 @@ struct phy_device *fixed_phy_register(unsigned int irq, return ERR_PTR(-EPROBE_DEFER); /* Check if we have a GPIO associated with this fixed phy */ - gpiod = fixed_phy_get_gpiod(np); - if (IS_ERR(gpiod)) - return ERR_CAST(gpiod); + if (!gpiod) { + gpiod = fixed_phy_get_gpiod(np); + if (IS_ERR(gpiod)) + return ERR_CAST(gpiod); + } /* Get the next available PHY address, up to PHY_MAX_ADDR */ phy_addr = ida_simple_get(&phy_fixed_ida, 0, PHY_MAX_ADDR, GFP_KERNEL); @@ -308,8 +310,24 @@ struct phy_device *fixed_phy_register(unsigned int irq, return phy; } + +struct phy_device *fixed_phy_register(unsigned int irq, + struct fixed_phy_status *status, + struct device_node *np) +{ + return __fixed_phy_register(irq, status, np, NULL); +} EXPORT_SYMBOL_GPL(fixed_phy_register); +struct phy_device * +fixed_phy_register_with_gpiod(unsigned int irq, + struct fixed_phy_status *status, + struct gpio_desc *gpiod) +{ + return __fixed_phy_register(irq, status, NULL, gpiod); +} +EXPORT_SYMBOL_GPL(fixed_phy_register_with_gpiod); + void fixed_phy_unregister(struct phy_device *phy) { phy_device_remove(phy); diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 90f44ba8aca7..3ccba37bd6dd 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -842,7 +842,6 @@ static int m88e1510_config_init(struct phy_device *phydev) /* SGMII-to-Copper mode initialization */ if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { - /* Select page 18 */ err = marvell_set_page(phydev, 18); if (err < 0) @@ -865,21 +864,6 @@ static int m88e1510_config_init(struct phy_device *phydev) err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE); if (err < 0) return err; - - /* There appears to be a bug in the 88e1512 when used in - * SGMII to copper mode, where the AN advertisement register - * clears the pause bits each time a negotiation occurs. - * This means we can never be truely sure what was advertised, - * so disable Pause support. 
- */ - linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, - phydev->supported); - linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, - phydev->supported); - linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, - phydev->advertising); - linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, - phydev->advertising); } return m88e1318_config_init(phydev); diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c index 38cfc923a88a..586ae1bc5a50 100644 --- a/drivers/net/phy/marvell10g.c +++ b/drivers/net/phy/marvell10g.c @@ -58,24 +58,6 @@ struct mv3310_priv { char *hwmon_name; }; -static int mv3310_modify(struct phy_device *phydev, int devad, u16 reg, - u16 mask, u16 bits) -{ - int old, val, ret; - - old = phy_read_mmd(phydev, devad, reg); - if (old < 0) - return old; - - val = (old & ~mask) | (bits & mask); - if (val == old) - return 0; - - ret = phy_write_mmd(phydev, devad, reg, val); - - return ret < 0 ? ret : 1; -} - #ifdef CONFIG_HWMON static umode_t mv3310_hwmon_is_visible(const void *data, enum hwmon_sensor_types type, @@ -159,10 +141,9 @@ static int mv3310_hwmon_config(struct phy_device *phydev, bool enable) return ret; val = enable ? MV_V2_TEMP_CTRL_SAMPLE : MV_V2_TEMP_CTRL_DISABLE; - ret = mv3310_modify(phydev, MDIO_MMD_VEND2, MV_V2_TEMP_CTRL, - MV_V2_TEMP_CTRL_MASK, val); - return ret < 0 ? ret : 0; + return phy_modify_mmd(phydev, MDIO_MMD_VEND2, MV_V2_TEMP_CTRL, + MV_V2_TEMP_CTRL_MASK, val); } static void mv3310_hwmon_disable(void *data) @@ -252,8 +233,7 @@ static int mv3310_resume(struct phy_device *phydev) static int mv3310_config_init(struct phy_device *phydev) { - __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, }; - int val; + int ret, val; /* Check that the PHY interface type is compatible */ if (phydev->interface != PHY_INTERFACE_MODE_SGMII && @@ -262,83 +242,19 @@ static int mv3310_config_init(struct phy_device *phydev) phydev->interface != PHY_INTERFACE_MODE_10GKR) return -ENODEV; - __set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported); - __set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported); - if (phydev->c45_ids.devices_in_package & MDIO_DEVS_AN) { val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); if (val < 0) return val; if (val & MDIO_AN_STAT1_ABLE) - __set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported); - } - - val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_STAT2); - if (val < 0) - return val; - - /* Ethtool does not support the WAN mode bits */ - if (val & (MDIO_PMA_STAT2_10GBSR | MDIO_PMA_STAT2_10GBLR | - MDIO_PMA_STAT2_10GBER | MDIO_PMA_STAT2_10GBLX4 | - MDIO_PMA_STAT2_10GBSW | MDIO_PMA_STAT2_10GBLW | - MDIO_PMA_STAT2_10GBEW)) - __set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported); - if (val & MDIO_PMA_STAT2_10GBSR) - __set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, supported); - if (val & MDIO_PMA_STAT2_10GBLR) - __set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, supported); - if (val & MDIO_PMA_STAT2_10GBER) - __set_bit(ETHTOOL_LINK_MODE_10000baseER_Full_BIT, supported); - - if (val & MDIO_PMA_STAT2_EXTABLE) { - val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_EXTABLE); - if (val < 0) - return val; - - if (val & (MDIO_PMA_EXTABLE_10GBT | MDIO_PMA_EXTABLE_1000BT | - MDIO_PMA_EXTABLE_100BTX | MDIO_PMA_EXTABLE_10BT)) - __set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported); - if (val & MDIO_PMA_EXTABLE_10GBLRM) - __set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported); - if (val & (MDIO_PMA_EXTABLE_10GBKX4 | MDIO_PMA_EXTABLE_10GBKR | - MDIO_PMA_EXTABLE_1000BKX)) - __set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, supported); - if (val & MDIO_PMA_EXTABLE_10GBLRM) - 
__set_bit(ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, - supported); - if (val & MDIO_PMA_EXTABLE_10GBT) - __set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, - supported); - if (val & MDIO_PMA_EXTABLE_10GBKX4) - __set_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, - supported); - if (val & MDIO_PMA_EXTABLE_10GBKR) - __set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, - supported); - if (val & MDIO_PMA_EXTABLE_1000BT) - __set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, - supported); - if (val & MDIO_PMA_EXTABLE_1000BKX) - __set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, - supported); - if (val & MDIO_PMA_EXTABLE_100BTX) { - __set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, - supported); - __set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, - supported); - } - if (val & MDIO_PMA_EXTABLE_10BT) { - __set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, - supported); - __set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, - supported); - } + __set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + phydev->supported); } - linkmode_copy(phydev->supported, supported); - linkmode_and(phydev->advertising, phydev->advertising, - phydev->supported); + ret = genphy_c45_pma_read_abilities(phydev); + if (ret) + return ret; return 0; } @@ -352,52 +268,27 @@ static int mv3310_config_aneg(struct phy_device *phydev) /* We don't support manual MDI control */ phydev->mdix_ctrl = ETH_TP_MDI_AUTO; - if (phydev->autoneg == AUTONEG_DISABLE) { - ret = genphy_c45_pma_setup_forced(phydev); - if (ret < 0) - return ret; + if (phydev->autoneg == AUTONEG_DISABLE) + return genphy_c45_pma_setup_forced(phydev); - return genphy_c45_an_disable_aneg(phydev); - } - - linkmode_and(phydev->advertising, phydev->advertising, - phydev->supported); - - ret = mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, - ADVERTISE_ALL | ADVERTISE_100BASE4 | - ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM, - linkmode_adv_to_mii_adv_t(phydev->advertising)); + ret = genphy_c45_an_config_aneg(phydev); if (ret < 0) return ret; if (ret > 0) changed = true; + /* Clause 45 has no standardized support for 1000BaseT, therefore + * use vendor registers for this mode. + */ reg = linkmode_adv_to_mii_ctrl1000_t(phydev->advertising); - ret = mv3310_modify(phydev, MDIO_MMD_AN, MV_AN_CTRL1000, - ADVERTISE_1000FULL | ADVERTISE_1000HALF, reg); - if (ret < 0) - return ret; - if (ret > 0) - changed = true; - - /* 10G control register */ - if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, - phydev->advertising)) - reg = MDIO_AN_10GBT_CTRL_ADV10G; - else - reg = 0; - - ret = mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL, - MDIO_AN_10GBT_CTRL_ADV10G, reg); + ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MV_AN_CTRL1000, + ADVERTISE_1000FULL | ADVERTISE_1000HALF, reg); if (ret < 0) return ret; if (ret > 0) changed = true; - if (changed) - ret = genphy_c45_restart_aneg(phydev); - - return ret; + return genphy_c45_check_and_restart_aneg(phydev, changed); } static int mv3310_aneg_done(struct phy_device *phydev) @@ -446,16 +337,8 @@ static int mv3310_read_10gbr_status(struct phy_device *phydev) static int mv3310_read_status(struct phy_device *phydev) { - u32 mmd_mask = phydev->c45_ids.devices_in_package; int val; - /* The vendor devads do not report link status. Avoid the PHYXS - * instance as there are three, and its status depends on the MAC - * being appropriately configured for the negotiated speed. 
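In the mv3310_config_aneg() rework above, the standard advertisement moves into the shared genphy_c45_an_config_aneg() helper and only 1000BASE-T stays driver-specific: Clause 45 defines no standard register for 1G advertisement, so the vendor register MV_AN_CTRL1000, laid out like the Clause 22 CTRL1000 register, is programmed instead. The *_changed() variant reports whether the register was actually rewritten, which feeds the decision to restart autonegotiation. That step, isolated as a sketch:

    /* Sketch: push 1000BASE-T advertisement through the vendor AN
     * register; returns <0 on error, 0 if unchanged, 1 if changed.
     */
    static int adv_1g(struct phy_device *phydev)
    {
            u32 reg = linkmode_adv_to_mii_ctrl1000_t(phydev->advertising);

            return phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MV_AN_CTRL1000,
                                          ADVERTISE_1000FULL |
                                          ADVERTISE_1000HALF, reg);
    }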
- */ - mmd_mask &= ~(BIT(MDIO_MMD_VEND1) | BIT(MDIO_MMD_VEND2) | - BIT(MDIO_MMD_PHYXS)); - phydev->speed = SPEED_UNKNOWN; phydev->duplex = DUPLEX_UNKNOWN; linkmode_zero(phydev->lp_advertising); @@ -471,12 +354,10 @@ static int mv3310_read_status(struct phy_device *phydev) if (val & MDIO_STAT1_LSTATUS) return mv3310_read_10gbr_status(phydev); - val = genphy_c45_read_link(phydev, mmd_mask); + val = genphy_c45_read_link(phydev); if (val < 0) return val; - phydev->link = val > 0 ? 1 : 0; - val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); if (val < 0) return val; diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 3d313585c9c1..debc74c90307 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -48,11 +48,12 @@ static int mdiobus_register_gpiod(struct mdio_device *mdiodev) gpiod = fwnode_get_named_gpiod(&mdiodev->dev.of_node->fwnode, "reset-gpios", 0, GPIOD_OUT_LOW, "PHY reset"); - if (PTR_ERR(gpiod) == -ENOENT || - PTR_ERR(gpiod) == -ENOSYS) - gpiod = NULL; - else if (IS_ERR(gpiod)) - return PTR_ERR(gpiod); + if (IS_ERR(gpiod)) { + if (PTR_ERR(gpiod) == -ENOENT || PTR_ERR(gpiod) == -ENOSYS) + gpiod = NULL; + else + return PTR_ERR(gpiod); + } mdiodev->reset = gpiod; diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index 03af927fa5ad..fc3173cc078b 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -47,6 +47,16 @@ int genphy_c45_pma_setup_forced(struct phy_device *phydev) /* Assume 1000base-T */ ctrl2 |= MDIO_PMA_CTRL2_1000BT; break; + case SPEED_2500: + ctrl1 |= MDIO_CTRL1_SPEED2_5G; + /* Assume 2.5Gbase-T */ + ctrl2 |= MDIO_PMA_CTRL2_2_5GBT; + break; + case SPEED_5000: + ctrl1 |= MDIO_CTRL1_SPEED5G; + /* Assume 5Gbase-T */ + ctrl2 |= MDIO_PMA_CTRL2_5GBT; + break; case SPEED_10000: ctrl1 |= MDIO_CTRL1_SPEED10G; /* Assume 10Gbase-T */ @@ -60,11 +70,61 @@ int genphy_c45_pma_setup_forced(struct phy_device *phydev) if (ret < 0) return ret; - return phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL2, ctrl2); + ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL2, ctrl2); + if (ret < 0) + return ret; + + return genphy_c45_an_disable_aneg(phydev); } EXPORT_SYMBOL_GPL(genphy_c45_pma_setup_forced); /** + * genphy_c45_an_config_aneg - configure advertisement registers + * @phydev: target phy_device struct + * + * Configure advertisement registers based on modes set in phydev->advertising + * + * Returns negative errno code on failure, 0 if advertisement didn't change, + * or 1 if advertised modes changed. 
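
---- 8< ---- editor's example (not part of the commit) ----

A sketch of how a Clause 45 driver might consume the tri-state return value
documented above; demo_config_aneg() is hypothetical and mirrors the
mv3310_config_aneg() rewrite earlier in this diff:

#include <linux/phy.h>

static int demo_config_aneg(struct phy_device *phydev)
{
	int ret;

	if (phydev->autoneg == AUTONEG_DISABLE)
		return genphy_c45_pma_setup_forced(phydev);

	ret = genphy_c45_an_config_aneg(phydev);
	if (ret < 0)
		return ret;

	/* Restart aneg only when the advertisement changed (ret == 1),
	 * or when aneg is not enabled yet.
	 */
	return genphy_c45_check_and_restart_aneg(phydev, ret > 0);
}

---- >8 ---- end of example; diff resumes ----
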
+ */ +int genphy_c45_an_config_aneg(struct phy_device *phydev) +{ + int changed, ret; + u32 adv; + + linkmode_and(phydev->advertising, phydev->advertising, + phydev->supported); + + changed = genphy_config_eee_advert(phydev); + + adv = linkmode_adv_to_mii_adv_t(phydev->advertising); + + ret = phy_modify_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, + ADVERTISE_ALL | ADVERTISE_100BASE4 | + ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM, + adv); + if (ret < 0) + return ret; + if (ret > 0) + changed = 1; + + adv = linkmode_adv_to_mii_10gbt_adv_t(phydev->advertising); + + ret = phy_modify_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL, + MDIO_AN_10GBT_CTRL_ADV10G | + MDIO_AN_10GBT_CTRL_ADV5G | + MDIO_AN_10GBT_CTRL_ADV2_5G, + adv); + if (ret < 0) + return ret; + if (ret > 0) + changed = 1; + + return changed; +} +EXPORT_SYMBOL_GPL(genphy_c45_an_config_aneg); + +/** * genphy_c45_an_disable_aneg - disable auto-negotiation * @phydev: target phy_device struct * @@ -75,15 +135,9 @@ EXPORT_SYMBOL_GPL(genphy_c45_pma_setup_forced); */ int genphy_c45_an_disable_aneg(struct phy_device *phydev) { - int val; - - val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1); - if (val < 0) - return val; - - val &= ~(MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART); - return phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, val); + return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, + MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART); } EXPORT_SYMBOL_GPL(genphy_c45_an_disable_aneg); @@ -97,17 +151,40 @@ EXPORT_SYMBOL_GPL(genphy_c45_an_disable_aneg); */ int genphy_c45_restart_aneg(struct phy_device *phydev) { - int val; + return phy_set_bits_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, + MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART); +} +EXPORT_SYMBOL_GPL(genphy_c45_restart_aneg); - val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1); - if (val < 0) - return val; +/** + * genphy_c45_check_and_restart_aneg - Enable and restart auto-negotiation + * @phydev: target phy_device struct + * @restart: whether aneg restart is requested + * + * This assumes that the auto-negotiation MMD is present. + * + * Check, and restart auto-negotiation if needed. + */ +int genphy_c45_check_and_restart_aneg(struct phy_device *phydev, bool restart) +{ + int ret = 0; - val |= MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART; + if (!restart) { + /* Configure and restart aneg if it wasn't set before */ + ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1); + if (ret < 0) + return ret; - return phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, val); + if (!(ret & MDIO_AN_CTRL1_ENABLE)) + restart = true; + } + + if (restart) + ret = genphy_c45_restart_aneg(phydev); + + return ret; } -EXPORT_SYMBOL_GPL(genphy_c45_restart_aneg); +EXPORT_SYMBOL_GPL(genphy_c45_check_and_restart_aneg); /** * genphy_c45_aneg_done - return auto-negotiation complete status @@ -131,25 +208,40 @@ EXPORT_SYMBOL_GPL(genphy_c45_aneg_done); /** * genphy_c45_read_link - read the overall link status from the MMDs * @phydev: target phy_device struct - * @mmd_mask: MMDs to read status from * * Read the link status from the specified MMDs, and if they all indicate - * that the link is up, return positive. If an error is encountered, + * that the link is up, set phydev->link to 1. If an error is encountered, * a negative errno will be returned, otherwise zero. 
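
---- 8< ---- editor's note (not part of the commit) ----

The two rewritten helpers above lean on phy_clear_bits_mmd() and
phy_set_bits_mmd(). As far as I can tell these are thin convenience
wrappers over the phy_modify_mmd() helper added later in this series;
roughly:

#include <linux/phy.h>

static inline int demo_set_bits_mmd(struct phy_device *phydev, int devad,
				    u32 regnum, u16 val)
{
	return phy_modify_mmd(phydev, devad, regnum, 0, val);
}

static inline int demo_clear_bits_mmd(struct phy_device *phydev, int devad,
				      u32 regnum, u16 val)
{
	return phy_modify_mmd(phydev, devad, regnum, val, 0);
}

---- >8 ---- end of note; diff resumes ----
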
 */
-int genphy_c45_read_link(struct phy_device *phydev, u32 mmd_mask)
+int genphy_c45_read_link(struct phy_device *phydev)
 {
+	u32 mmd_mask = phydev->c45_ids.devices_in_package;
 	int val, devad;
 	bool link = true;
 
-	while (mmd_mask) {
+	/* The vendor devads and C22EXT do not report link status. Avoid the
+	 * PHYXS instance as its status may depend on the MAC being
+	 * appropriately configured for the negotiated speed.
+	 */
+	mmd_mask &= ~(MDIO_DEVS_VEND1 | MDIO_DEVS_VEND2 | MDIO_DEVS_C22EXT |
+		      MDIO_DEVS_PHYXS);
+
+	while (mmd_mask && link) {
 		devad = __ffs(mmd_mask);
 		mmd_mask &= ~BIT(devad);
 
 		/* The link state is latched low so that momentary link
-		 * drops can be detected. Do not double-read the status
-		 * register if the link is down.
+		 * drops can be detected. Do not double-read the status
+		 * in polling mode to detect such short link drops.
 		 */
+		if (!phy_polling_mode(phydev)) {
+			val = phy_read_mmd(phydev, devad, MDIO_STAT1);
+			if (val < 0)
+				return val;
+			else if (val & MDIO_STAT1_LSTATUS)
+				continue;
+		}
+
 		val = phy_read_mmd(phydev, devad, MDIO_STAT1);
 		if (val < 0)
 			return val;
@@ -158,7 +250,9 @@ int genphy_c45_read_link(struct phy_device *phydev, u32 mmd_mask)
 			link = false;
 	}
 
-	return link;
+	phydev->link = link;
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(genphy_c45_read_link);
 
@@ -190,9 +284,7 @@ int genphy_c45_read_lpa(struct phy_device *phydev)
 	if (val < 0)
 		return val;
 
-	if (val & MDIO_AN_10GBT_STAT_LP10G)
-		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
-				 phydev->lp_advertising);
+	mii_10gbt_stat_mod_linkmode_lpa_t(phydev->lp_advertising, val);
 
 	return 0;
 }
@@ -220,6 +312,12 @@ int genphy_c45_read_pma(struct phy_device *phydev)
 	case MDIO_PMA_CTRL1_SPEED1000:
 		phydev->speed = SPEED_1000;
 		break;
+	case MDIO_CTRL1_SPEED2_5G:
+		phydev->speed = SPEED_2500;
+		break;
+	case MDIO_CTRL1_SPEED5G:
+		phydev->speed = SPEED_5000;
+		break;
 	case MDIO_CTRL1_SPEED10G:
 		phydev->speed = SPEED_10000;
 		break;
@@ -267,6 +365,95 @@ int genphy_c45_read_mdix(struct phy_device *phydev)
 }
 EXPORT_SYMBOL_GPL(genphy_c45_read_mdix);
 
+/**
+ * genphy_c45_pma_read_abilities - read supported link modes from PMA
+ * @phydev: target phy_device struct
+ *
+ * Read the supported link modes from the PMA Status 2 (1.8) register. If bit
+ * 1.8.9 is set, the list of supported modes is built using the values in the
+ * PMA Extended Abilities (1.11) register, indicating 1000BASET and 10G related
+ * modes. If bit 1.11.14 is set, then the list is also extended with the modes
+ * in the 2.5G/5G PMA Extended register (1.21), indicating if 2.5GBASET and
+ * 5GBASET are supported.
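
---- 8< ---- editor's example (not part of the commit) ----

With this helper a C45 driver can discover its supported modes from the
PMA registers instead of a hard-coded feature mask. A hedged sketch of a
hypothetical driver using the new .get_features hook (checked in
phy_driver_register() later in this diff); the IDs are made up:

#include <linux/phy.h>

static int demo_get_features(struct phy_device *phydev)
{
	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported);

	return genphy_c45_pma_read_abilities(phydev);
}

static struct phy_driver demo_driver = {
	.phy_id		= 0x002b09aa,	/* hypothetical */
	.phy_id_mask	= 0xffffffff,
	.name		= "demo C45 PHY",
	/* .features left unset: phy_probe() calls .get_features instead */
	.get_features	= demo_get_features,
};

---- >8 ---- end of example; diff resumes ----
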
+ */ +int genphy_c45_pma_read_abilities(struct phy_device *phydev) +{ + int val; + + val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_STAT2); + if (val < 0) + return val; + + linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, + phydev->supported, + val & MDIO_PMA_STAT2_10GBSR); + + linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, + phydev->supported, + val & MDIO_PMA_STAT2_10GBLR); + + linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseER_Full_BIT, + phydev->supported, + val & MDIO_PMA_STAT2_10GBER); + + if (val & MDIO_PMA_STAT2_EXTABLE) { + val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_EXTABLE); + if (val < 0) + return val; + + linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, + phydev->supported, + val & MDIO_PMA_EXTABLE_10GBLRM); + linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + phydev->supported, + val & MDIO_PMA_EXTABLE_10GBT); + linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, + phydev->supported, + val & MDIO_PMA_EXTABLE_10GBKX4); + linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + phydev->supported, + val & MDIO_PMA_EXTABLE_10GBKR); + linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + phydev->supported, + val & MDIO_PMA_EXTABLE_1000BT); + linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + phydev->supported, + val & MDIO_PMA_EXTABLE_1000BKX); + + linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + phydev->supported, + val & MDIO_PMA_EXTABLE_100BTX); + linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, + phydev->supported, + val & MDIO_PMA_EXTABLE_100BTX); + + linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, + phydev->supported, + val & MDIO_PMA_EXTABLE_10BT); + linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, + phydev->supported, + val & MDIO_PMA_EXTABLE_10BT); + + if (val & MDIO_PMA_EXTABLE_NBT) { + val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, + MDIO_PMA_NG_EXTABLE); + if (val < 0) + return val; + + linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, + phydev->supported, + val & MDIO_PMA_NG_EXTABLE_2_5GBT); + + linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT, + phydev->supported, + val & MDIO_PMA_NG_EXTABLE_5GBT); + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(genphy_c45_pma_read_abilities); + /* The gen10g_* functions are the old Clause 45 stub */ int gen10g_config_aneg(struct phy_device *phydev) @@ -277,21 +464,11 @@ EXPORT_SYMBOL_GPL(gen10g_config_aneg); int gen10g_read_status(struct phy_device *phydev) { - u32 mmd_mask = phydev->c45_ids.devices_in_package; - int ret; - /* For now just lie and say it's 10G all the time */ phydev->speed = SPEED_10000; phydev->duplex = DUPLEX_FULL; - /* Avoid reading the vendor MMDs */ - mmd_mask &= ~(BIT(MDIO_MMD_VEND1) | BIT(MDIO_MMD_VEND2)); - - ret = genphy_c45_read_link(phydev, mmd_mask); - - phydev->link = ret > 0 ? 
1 : 0; - - return 0; + return genphy_c45_read_link(phydev); } EXPORT_SYMBOL_GPL(gen10g_read_status); diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c index 909b3344babf..5016cd5fd7c7 100644 --- a/drivers/net/phy/phy-core.c +++ b/drivers/net/phy/phy-core.c @@ -4,6 +4,7 @@ */ #include <linux/export.h> #include <linux/phy.h> +#include <linux/of.h> const char *phy_speed_to_str(int speed) { @@ -338,6 +339,77 @@ size_t phy_speeds(unsigned int *speeds, size_t size, return count; } +static int __set_phy_supported(struct phy_device *phydev, u32 max_speed) +{ + const struct phy_setting *p; + int i; + + for (i = 0, p = settings; i < ARRAY_SIZE(settings); i++, p++) { + if (p->speed > max_speed) + linkmode_clear_bit(p->bit, phydev->supported); + else + break; + } + + return 0; +} + +int phy_set_max_speed(struct phy_device *phydev, u32 max_speed) +{ + int err; + + err = __set_phy_supported(phydev, max_speed); + if (err) + return err; + + linkmode_copy(phydev->advertising, phydev->supported); + + return 0; +} +EXPORT_SYMBOL(phy_set_max_speed); + +void of_set_phy_supported(struct phy_device *phydev) +{ + struct device_node *node = phydev->mdio.dev.of_node; + u32 max_speed; + + if (!IS_ENABLED(CONFIG_OF_MDIO)) + return; + + if (!node) + return; + + if (!of_property_read_u32(node, "max-speed", &max_speed)) + __set_phy_supported(phydev, max_speed); +} + +void of_set_phy_eee_broken(struct phy_device *phydev) +{ + struct device_node *node = phydev->mdio.dev.of_node; + u32 broken = 0; + + if (!IS_ENABLED(CONFIG_OF_MDIO)) + return; + + if (!node) + return; + + if (of_property_read_bool(node, "eee-broken-100tx")) + broken |= MDIO_EEE_100TX; + if (of_property_read_bool(node, "eee-broken-1000t")) + broken |= MDIO_EEE_1000T; + if (of_property_read_bool(node, "eee-broken-10gt")) + broken |= MDIO_EEE_10GT; + if (of_property_read_bool(node, "eee-broken-1000kx")) + broken |= MDIO_EEE_1000KX; + if (of_property_read_bool(node, "eee-broken-10gkx4")) + broken |= MDIO_EEE_10GKX4; + if (of_property_read_bool(node, "eee-broken-10gkr")) + broken |= MDIO_EEE_10GKR; + + phydev->eee_broken_modes = broken; +} + /** * phy_resolve_aneg_linkmode - resolve the advertisements into phy settings * @phydev: The phy_device struct @@ -349,45 +421,16 @@ size_t phy_speeds(unsigned int *speeds, size_t size, void phy_resolve_aneg_linkmode(struct phy_device *phydev) { __ETHTOOL_DECLARE_LINK_MODE_MASK(common); + int i; linkmode_and(common, phydev->lp_advertising, phydev->advertising); - if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, common)) { - phydev->speed = SPEED_10000; - phydev->duplex = DUPLEX_FULL; - } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT, - common)) { - phydev->speed = SPEED_5000; - phydev->duplex = DUPLEX_FULL; - } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, - common)) { - phydev->speed = SPEED_2500; - phydev->duplex = DUPLEX_FULL; - } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, - common)) { - phydev->speed = SPEED_1000; - phydev->duplex = DUPLEX_FULL; - } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, - common)) { - phydev->speed = SPEED_1000; - phydev->duplex = DUPLEX_HALF; - } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, - common)) { - phydev->speed = SPEED_100; - phydev->duplex = DUPLEX_FULL; - } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, - common)) { - phydev->speed = SPEED_100; - phydev->duplex = DUPLEX_HALF; - } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, 
- common)) { - phydev->speed = SPEED_10; - phydev->duplex = DUPLEX_FULL; - } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, - common)) { - phydev->speed = SPEED_10; - phydev->duplex = DUPLEX_HALF; - } + for (i = 0; i < ARRAY_SIZE(settings); i++) + if (test_bit(settings[i].bit, common)) { + phydev->speed = settings[i].speed; + phydev->duplex = settings[i].duplex; + break; + } if (phydev->duplex == DUPLEX_FULL) { phydev->pause = linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, @@ -414,15 +457,15 @@ static void mmd_phy_indirect(struct mii_bus *bus, int phy_addr, int devad, } /** - * phy_read_mmd - Convenience function for reading a register + * __phy_read_mmd - Convenience function for reading a register * from an MMD on a given PHY. * @phydev: The phy_device struct * @devad: The MMD to read from (0..31) * @regnum: The register on the MMD to read (0..65535) * - * Same rules as for phy_read(); + * Same rules as for __phy_read(); */ -int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum) +int __phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum) { int val; @@ -434,33 +477,52 @@ int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum) } else if (phydev->is_c45) { u32 addr = MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff); - val = mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, addr); + val = __mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, addr); } else { struct mii_bus *bus = phydev->mdio.bus; int phy_addr = phydev->mdio.addr; - mutex_lock(&bus->mdio_lock); mmd_phy_indirect(bus, phy_addr, devad, regnum); /* Read the content of the MMD's selected register */ val = __mdiobus_read(bus, phy_addr, MII_MMD_DATA); - mutex_unlock(&bus->mdio_lock); } return val; } +EXPORT_SYMBOL(__phy_read_mmd); + +/** + * phy_read_mmd - Convenience function for reading a register + * from an MMD on a given PHY. + * @phydev: The phy_device struct + * @devad: The MMD to read from + * @regnum: The register on the MMD to read + * + * Same rules as for phy_read(); + */ +int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum) +{ + int ret; + + mutex_lock(&phydev->mdio.bus->mdio_lock); + ret = __phy_read_mmd(phydev, devad, regnum); + mutex_unlock(&phydev->mdio.bus->mdio_lock); + + return ret; +} EXPORT_SYMBOL(phy_read_mmd); /** - * phy_write_mmd - Convenience function for writing a register + * __phy_write_mmd - Convenience function for writing a register * on an MMD on a given PHY. 
* @phydev: The phy_device struct * @devad: The MMD to read from * @regnum: The register on the MMD to read * @val: value to write to @regnum * - * Same rules as for phy_write(); + * Same rules as for __phy_write(); */ -int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val) +int __phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val) { int ret; @@ -472,27 +534,47 @@ int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val) } else if (phydev->is_c45) { u32 addr = MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff); - ret = mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, - addr, val); + ret = __mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, + addr, val); } else { struct mii_bus *bus = phydev->mdio.bus; int phy_addr = phydev->mdio.addr; - mutex_lock(&bus->mdio_lock); mmd_phy_indirect(bus, phy_addr, devad, regnum); /* Write the data into MMD's selected register */ __mdiobus_write(bus, phy_addr, MII_MMD_DATA, val); - mutex_unlock(&bus->mdio_lock); ret = 0; } return ret; } +EXPORT_SYMBOL(__phy_write_mmd); + +/** + * phy_write_mmd - Convenience function for writing a register + * on an MMD on a given PHY. + * @phydev: The phy_device struct + * @devad: The MMD to read from + * @regnum: The register on the MMD to read + * @val: value to write to @regnum + * + * Same rules as for phy_write(); + */ +int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val) +{ + int ret; + + mutex_lock(&phydev->mdio.bus->mdio_lock); + ret = __phy_write_mmd(phydev, devad, regnum, val); + mutex_unlock(&phydev->mdio.bus->mdio_lock); + + return ret; +} EXPORT_SYMBOL(phy_write_mmd); /** - * __phy_modify() - Convenience function for modifying a PHY register + * __phy_modify_changed() - Convenience function for modifying a PHY register * @phydev: a pointer to a &struct phy_device * @regnum: register number * @mask: bit mask of bits to clear @@ -500,16 +582,69 @@ EXPORT_SYMBOL(phy_write_mmd); * * Unlocked helper function which allows a PHY register to be modified as * new register value = (old register value & ~mask) | set + * + * Returns negative errno, 0 if there was no change, and 1 in case of change */ -int __phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set) +int __phy_modify_changed(struct phy_device *phydev, u32 regnum, u16 mask, + u16 set) { - int ret; + int new, ret; ret = __phy_read(phydev, regnum); if (ret < 0) return ret; - ret = __phy_write(phydev, regnum, (ret & ~mask) | set); + new = (ret & ~mask) | set; + if (new == ret) + return 0; + + ret = __phy_write(phydev, regnum, new); + + return ret < 0 ? ret : 1; +} +EXPORT_SYMBOL_GPL(__phy_modify_changed); + +/** + * phy_modify_changed - Function for modifying a PHY register + * @phydev: the phy_device struct + * @regnum: register number to modify + * @mask: bit mask of bits to clear + * @set: new value of bits set in mask to write to @regnum + * + * NOTE: MUST NOT be called from interrupt context, + * because the bus read/write functions may wait for an interrupt + * to conclude the operation. 
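
---- 8< ---- editor's example (not part of the commit) ----

The double-underscore accessors introduced above let a driver hold the MDIO
bus lock across a sequence of operations that must not be interleaved with
other bus traffic. A sketch of an indirect (address/data) vendor register
read; the register numbers 0x8000/0x8001 are made up:

#include <linux/mdio.h>
#include <linux/phy.h>

static int demo_read_indirect(struct phy_device *phydev, u16 index)
{
	int ret;

	mutex_lock(&phydev->mdio.bus->mdio_lock);
	ret = __phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x8000, index);
	if (ret >= 0)
		ret = __phy_read_mmd(phydev, MDIO_MMD_VEND1, 0x8001);
	mutex_unlock(&phydev->mdio.bus->mdio_lock);

	return ret;
}

---- >8 ---- end of example; diff resumes ----
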
+ * + * Returns negative errno, 0 if there was no change, and 1 in case of change + */ +int phy_modify_changed(struct phy_device *phydev, u32 regnum, u16 mask, u16 set) +{ + int ret; + + mutex_lock(&phydev->mdio.bus->mdio_lock); + ret = __phy_modify_changed(phydev, regnum, mask, set); + mutex_unlock(&phydev->mdio.bus->mdio_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(phy_modify_changed); + +/** + * __phy_modify - Convenience function for modifying a PHY register + * @phydev: the phy_device struct + * @regnum: register number to modify + * @mask: bit mask of bits to clear + * @set: new value of bits set in mask to write to @regnum + * + * NOTE: MUST NOT be called from interrupt context, + * because the bus read/write functions may wait for an interrupt + * to conclude the operation. + */ +int __phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set) +{ + int ret; + + ret = __phy_modify_changed(phydev, regnum, mask, set); return ret < 0 ? ret : 0; } @@ -538,6 +673,113 @@ int phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set) } EXPORT_SYMBOL_GPL(phy_modify); +/** + * __phy_modify_mmd_changed - Function for modifying a register on MMD + * @phydev: the phy_device struct + * @devad: the MMD containing register to modify + * @regnum: register number to modify + * @mask: bit mask of bits to clear + * @set: new value of bits set in mask to write to @regnum + * + * Unlocked helper function which allows a MMD register to be modified as + * new register value = (old register value & ~mask) | set + * + * Returns negative errno, 0 if there was no change, and 1 in case of change + */ +int __phy_modify_mmd_changed(struct phy_device *phydev, int devad, u32 regnum, + u16 mask, u16 set) +{ + int new, ret; + + ret = __phy_read_mmd(phydev, devad, regnum); + if (ret < 0) + return ret; + + new = (ret & ~mask) | set; + if (new == ret) + return 0; + + ret = __phy_write_mmd(phydev, devad, regnum, new); + + return ret < 0 ? ret : 1; +} +EXPORT_SYMBOL_GPL(__phy_modify_mmd_changed); + +/** + * phy_modify_mmd_changed - Function for modifying a register on MMD + * @phydev: the phy_device struct + * @devad: the MMD containing register to modify + * @regnum: register number to modify + * @mask: bit mask of bits to clear + * @set: new value of bits set in mask to write to @regnum + * + * NOTE: MUST NOT be called from interrupt context, + * because the bus read/write functions may wait for an interrupt + * to conclude the operation. + * + * Returns negative errno, 0 if there was no change, and 1 in case of change + */ +int phy_modify_mmd_changed(struct phy_device *phydev, int devad, u32 regnum, + u16 mask, u16 set) +{ + int ret; + + mutex_lock(&phydev->mdio.bus->mdio_lock); + ret = __phy_modify_mmd_changed(phydev, devad, regnum, mask, set); + mutex_unlock(&phydev->mdio.bus->mdio_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(phy_modify_mmd_changed); + +/** + * __phy_modify_mmd - Convenience function for modifying a register on MMD + * @phydev: the phy_device struct + * @devad: the MMD containing register to modify + * @regnum: register number to modify + * @mask: bit mask of bits to clear + * @set: new value of bits set in mask to write to @regnum + * + * NOTE: MUST NOT be called from interrupt context, + * because the bus read/write functions may wait for an interrupt + * to conclude the operation. 
+ */ +int __phy_modify_mmd(struct phy_device *phydev, int devad, u32 regnum, + u16 mask, u16 set) +{ + int ret; + + ret = __phy_modify_mmd_changed(phydev, devad, regnum, mask, set); + + return ret < 0 ? ret : 0; +} +EXPORT_SYMBOL_GPL(__phy_modify_mmd); + +/** + * phy_modify_mmd - Convenience function for modifying a register on MMD + * @phydev: the phy_device struct + * @devad: the MMD containing register to modify + * @regnum: register number to modify + * @mask: bit mask of bits to clear + * @set: new value of bits set in mask to write to @regnum + * + * NOTE: MUST NOT be called from interrupt context, + * because the bus read/write functions may wait for an interrupt + * to conclude the operation. + */ +int phy_modify_mmd(struct phy_device *phydev, int devad, u32 regnum, + u16 mask, u16 set) +{ + int ret; + + mutex_lock(&phydev->mdio.bus->mdio_lock); + ret = __phy_modify_mmd(phydev, devad, regnum, mask, set); + mutex_unlock(&phydev->mdio.bus->mdio_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(phy_modify_mmd); + static int __phy_read_page(struct phy_device *phydev) { return phydev->drv->read_page(phydev); diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index d12aa512b7f5..69dc64a4dbf8 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -543,7 +543,7 @@ int phy_start_aneg(struct phy_device *phydev) if (err < 0) goto out_unlock; - if (__phy_is_started(phydev)) { + if (phy_is_started(phydev)) { if (phydev->autoneg == AUTONEG_ENABLE) { err = phy_check_link_status(phydev); } else { @@ -699,7 +699,7 @@ void phy_stop_machine(struct phy_device *phydev) cancel_delayed_work_sync(&phydev->state_queue); mutex_lock(&phydev->lock); - if (__phy_is_started(phydev)) + if (phy_is_started(phydev)) phydev->state = PHY_UP; mutex_unlock(&phydev->lock); } @@ -752,9 +752,6 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat) { struct phy_device *phydev = phy_dat; - if (!phy_is_started(phydev)) - return IRQ_NONE; /* It can't be ours. */ - if (phydev->drv->did_interrupt && !phydev->drv->did_interrupt(phydev)) return IRQ_NONE; @@ -813,15 +810,14 @@ EXPORT_SYMBOL(phy_request_interrupt); */ void phy_stop(struct phy_device *phydev) { - mutex_lock(&phydev->lock); - - if (!__phy_is_started(phydev)) { + if (!phy_is_started(phydev)) { WARN(1, "called from state %s\n", phy_state_to_str(phydev->state)); - mutex_unlock(&phydev->lock); return; } + mutex_lock(&phydev->lock); + if (phy_interrupt_is_valid(phydev)) phy_disable_interrupts(phydev); @@ -961,8 +957,10 @@ void phy_state_machine(struct work_struct *work) * state machine would be pointless and possibly error prone when * called from phy_disconnect() synchronously. */ + mutex_lock(&phydev->lock); if (phy_polling_mode(phydev) && phy_is_started(phydev)) phy_queue_state_machine(phydev, PHY_STATE_TIME); + mutex_unlock(&phydev->lock); } /** @@ -1060,17 +1058,12 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) if (!phy_check_valid(phydev->speed, phydev->duplex, common)) goto eee_exit_err; - if (clk_stop_enable) { + if (clk_stop_enable) /* Configure the PHY to stop receiving xMII * clock while it is signaling LPI. 
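
---- 8< ---- editor's example (not part of the commit) ----

For context on the phy_init_eee() hunk here: a MAC driver typically calls
it once the link is up, e.g. from its adjust_link callback. A hypothetical
sketch (the helper name and passing clk_stop_enable=true are assumptions):

#include <linux/netdevice.h>
#include <linux/phy.h>

static void demo_try_enable_eee(struct net_device *ndev)
{
	/* 0 means the negotiated link supports EEE; ask the PHY to stop
	 * the xMII clock while it signals LPI.
	 */
	if (phy_init_eee(ndev->phydev, true) == 0)
		netdev_info(ndev, "EEE active on this link\n");
}

---- >8 ---- end of example; diff resumes ----
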
*/ - int val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1); - if (val < 0) - return val; - - val |= MDIO_PCS_CTRL1_CLKSTOP_EN; - phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, val); - } + phy_set_bits_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, + MDIO_PCS_CTRL1_CLKSTOP_EN); return 0; /* EEE supported */ } diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 891e0178b97f..49fdd1ee798e 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -30,7 +30,6 @@ #include <linux/mdio.h> #include <linux/io.h> #include <linux/uaccess.h> -#include <linux/of.h> MODULE_DESCRIPTION("PHY library"); MODULE_AUTHOR("Andy Fleming"); @@ -676,13 +675,16 @@ static int get_phy_c45_devs_in_pkg(struct mii_bus *bus, int addr, int dev_addr, phy_reg = mdiobus_read(bus, addr, reg_addr); if (phy_reg < 0) return -EIO; - *devices_in_package = (phy_reg & 0xffff) << 16; + *devices_in_package = phy_reg << 16; reg_addr = MII_ADDR_C45 | dev_addr << 16 | MDIO_DEVS1; phy_reg = mdiobus_read(bus, addr, reg_addr); if (phy_reg < 0) return -EIO; - *devices_in_package |= (phy_reg & 0xffff); + *devices_in_package |= phy_reg; + + /* Bit 0 doesn't represent a device, it indicates c22 regs presence */ + *devices_in_package &= ~BIT(0); return 0; } @@ -743,13 +745,13 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id, phy_reg = mdiobus_read(bus, addr, reg_addr); if (phy_reg < 0) return -EIO; - c45_ids->device_ids[i] = (phy_reg & 0xffff) << 16; + c45_ids->device_ids[i] = phy_reg << 16; reg_addr = MII_ADDR_C45 | i << 16 | MII_PHYSID2; phy_reg = mdiobus_read(bus, addr, reg_addr); if (phy_reg < 0) return -EIO; - c45_ids->device_ids[i] |= (phy_reg & 0xffff); + c45_ids->device_ids[i] |= phy_reg; } *phy_id = 0; return 0; @@ -786,14 +788,14 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id, return (phy_reg == -EIO || phy_reg == -ENODEV) ? 
-ENODEV : -EIO;
 	}
 
-	*phy_id = (phy_reg & 0xffff) << 16;
+	*phy_id = phy_reg << 16;
 
 	/* Grab the bits from PHYIR2, and put them in the lower half */
 	phy_reg = mdiobus_read(bus, addr, MII_PHYSID2);
 	if (phy_reg < 0)
 		return -EIO;
 
-	*phy_id |= (phy_reg & 0xffff);
+	*phy_id |= phy_reg;
 
 	return 0;
 }
@@ -1513,7 +1515,7 @@ EXPORT_SYMBOL(phy_reset_after_clk_enable);
 static int genphy_config_advert(struct phy_device *phydev)
 {
 	u32 advertise;
-	int oldadv, adv, bmsr;
+	int bmsr, adv;
 	int err, changed = 0;
 
 	/* Only allow advertising what this PHY supports */
@@ -1526,22 +1528,14 @@ static int genphy_config_advert(struct phy_device *phydev)
 			phydev->advertising);
 
 	/* Setup standard advertisement */
-	adv = phy_read(phydev, MII_ADVERTISE);
-	if (adv < 0)
-		return adv;
-
-	oldadv = adv;
-	adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP |
-		 ADVERTISE_PAUSE_ASYM);
-	adv |= ethtool_adv_to_mii_adv_t(advertise);
-
-	if (adv != oldadv) {
-		err = phy_write(phydev, MII_ADVERTISE, adv);
-
-		if (err < 0)
-			return err;
+	err = phy_modify_changed(phydev, MII_ADVERTISE,
+				 ADVERTISE_ALL | ADVERTISE_100BASE4 |
+				 ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM,
+				 ethtool_adv_to_mii_adv_t(advertise));
+	if (err < 0)
+		return err;
+	if (err > 0)
 		changed = 1;
-	}
 
 	bmsr = phy_read(phydev, MII_BMSR);
 	if (bmsr < 0)
@@ -1555,25 +1549,20 @@ static int genphy_config_advert(struct phy_device *phydev)
 		return changed;
 
 	/* Configure gigabit if it's supported */
-	adv = phy_read(phydev, MII_CTRL1000);
-	if (adv < 0)
-		return adv;
-
-	oldadv = adv;
-	adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
-
+	adv = 0;
 	if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
 			      phydev->supported) ||
 	    linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
 			      phydev->supported))
-		adv |= ethtool_adv_to_mii_ctrl1000_t(advertise);
-
-	if (adv != oldadv)
-		changed = 1;
+		adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
 
-	err = phy_write(phydev, MII_CTRL1000, adv);
+	err = phy_modify_changed(phydev, MII_CTRL1000,
+				 ADVERTISE_1000FULL | ADVERTISE_1000HALF,
+				 adv);
 	if (err < 0)
 		return err;
+	if (err > 0)
+		changed = 1;
 
 	return changed;
 }
@@ -1586,34 +1575,20 @@ static int genphy_config_advert(struct phy_device *phydev)
 * efficient ethernet modes. Returns 0 if the PHY's advertisement hasn't
 * changed, and 1 if it has changed.
 */
-static int genphy_config_eee_advert(struct phy_device *phydev)
+int genphy_config_eee_advert(struct phy_device *phydev)
 {
-	int broken = phydev->eee_broken_modes;
-	int old_adv, adv;
+	int err;
 
 	/* Nothing to disable */
-	if (!broken)
+	if (!phydev->eee_broken_modes)
 		return 0;
 
-	/* If the following call fails, we assume that EEE is not
-	 * supported by the phy. If we read 0, EEE is not advertised
-	 * In both case, we don't need to continue
-	 */
-	adv = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
-	if (adv <= 0)
-		return 0;
-
-	old_adv = adv;
-	adv &= ~broken;
-
-	/* Advertising remains unchanged with the broken mask */
-	if (old_adv == adv)
-		return 0;
-
-	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv);
-
-	return 1;
+	err = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV,
+				     phydev->eee_broken_modes, 0);
+	/* If the call failed, we assume that EEE is not supported */
+	return err < 0 ?
0 : err; } +EXPORT_SYMBOL(genphy_config_eee_advert); /** * genphy_setup_forced - configures/forces speed/duplex from @phydev @@ -1729,10 +1704,19 @@ int genphy_update_link(struct phy_device *phydev) { int status; - /* Do a fake read */ - status = phy_read(phydev, MII_BMSR); - if (status < 0) - return status; + /* The link state is latched low so that momentary link + * drops can be detected. Do not double-read the status + * in polling mode to detect such short link drops. + */ + if (!phy_polling_mode(phydev)) { + status = phy_read(phydev, MII_BMSR); + if (status < 0) { + return status; + } else if (status & BMSR_LSTATUS) { + phydev->link = 1; + return 0; + } + } /* Read link and autonegotiation status */ status = phy_read(phydev, MII_BMSR); @@ -1763,8 +1747,6 @@ int genphy_read_status(struct phy_device *phydev) int err; int lpa; int lpagb = 0; - int common_adv; - int common_adv_gb = 0; /* Update the link, but return if there was an error */ err = genphy_update_link(phydev); @@ -1796,7 +1778,6 @@ int genphy_read_status(struct phy_device *phydev) mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, lpagb); - common_adv_gb = lpagb & adv << 2; } lpa = phy_read(phydev, MII_LPA); @@ -1805,35 +1786,12 @@ int genphy_read_status(struct phy_device *phydev) mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, lpa); - adv = phy_read(phydev, MII_ADVERTISE); - if (adv < 0) - return adv; - - common_adv = lpa & adv; - - phydev->speed = SPEED_10; - phydev->duplex = DUPLEX_HALF; + phydev->speed = SPEED_UNKNOWN; + phydev->duplex = DUPLEX_UNKNOWN; phydev->pause = 0; phydev->asym_pause = 0; - if (common_adv_gb & (LPA_1000FULL | LPA_1000HALF)) { - phydev->speed = SPEED_1000; - - if (common_adv_gb & LPA_1000FULL) - phydev->duplex = DUPLEX_FULL; - } else if (common_adv & (LPA_100FULL | LPA_100HALF)) { - phydev->speed = SPEED_100; - - if (common_adv & LPA_100FULL) - phydev->duplex = DUPLEX_FULL; - } else - if (common_adv & LPA_10FULL) - phydev->duplex = DUPLEX_FULL; - - if (phydev->duplex == DUPLEX_FULL) { - phydev->pause = lpa & LPA_PAUSE_CAP ? 1 : 0; - phydev->asym_pause = lpa & LPA_PAUSE_ASYM ? 
1 : 0; - } + phy_resolve_aneg_linkmode(phydev); } else { int bmcr = phy_read(phydev, MII_BMCR); @@ -1965,44 +1923,6 @@ int genphy_loopback(struct phy_device *phydev, bool enable) } EXPORT_SYMBOL(genphy_loopback); -static int __set_phy_supported(struct phy_device *phydev, u32 max_speed) -{ - switch (max_speed) { - case SPEED_10: - linkmode_clear_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, - phydev->supported); - linkmode_clear_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, - phydev->supported); - /* fall through */ - case SPEED_100: - linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, - phydev->supported); - linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, - phydev->supported); - break; - case SPEED_1000: - break; - default: - return -ENOTSUPP; - } - - return 0; -} - -int phy_set_max_speed(struct phy_device *phydev, u32 max_speed) -{ - int err; - - err = __set_phy_supported(phydev, max_speed); - if (err) - return err; - - linkmode_copy(phydev->advertising, phydev->supported); - - return 0; -} -EXPORT_SYMBOL(phy_set_max_speed); - /** * phy_remove_link_mode - Remove a supported link mode * @phydev: phy_device structure to remove link mode from @@ -2133,48 +2053,6 @@ bool phy_validate_pause(struct phy_device *phydev, } EXPORT_SYMBOL(phy_validate_pause); -static void of_set_phy_supported(struct phy_device *phydev) -{ - struct device_node *node = phydev->mdio.dev.of_node; - u32 max_speed; - - if (!IS_ENABLED(CONFIG_OF_MDIO)) - return; - - if (!node) - return; - - if (!of_property_read_u32(node, "max-speed", &max_speed)) - __set_phy_supported(phydev, max_speed); -} - -static void of_set_phy_eee_broken(struct phy_device *phydev) -{ - struct device_node *node = phydev->mdio.dev.of_node; - u32 broken = 0; - - if (!IS_ENABLED(CONFIG_OF_MDIO)) - return; - - if (!node) - return; - - if (of_property_read_bool(node, "eee-broken-100tx")) - broken |= MDIO_EEE_100TX; - if (of_property_read_bool(node, "eee-broken-1000t")) - broken |= MDIO_EEE_1000T; - if (of_property_read_bool(node, "eee-broken-10gt")) - broken |= MDIO_EEE_10GT; - if (of_property_read_bool(node, "eee-broken-1000kx")) - broken |= MDIO_EEE_1000KX; - if (of_property_read_bool(node, "eee-broken-10gkx4")) - broken |= MDIO_EEE_10GKX4; - if (of_property_read_bool(node, "eee-broken-10gkr")) - broken |= MDIO_EEE_10GKR; - - phydev->eee_broken_modes = broken; -} - static bool phy_drv_supports_irq(struct phy_driver *phydrv) { return phydrv->config_intr && phydrv->ack_interrupt; @@ -2208,11 +2086,30 @@ static int phy_probe(struct device *dev) mutex_lock(&phydev->lock); + if (phydev->drv->probe) { + /* Deassert the reset signal */ + phy_device_reset(phydev, 0); + + err = phydev->drv->probe(phydev); + if (err) { + /* Assert the reset signal */ + phy_device_reset(phydev, 1); + goto out; + } + } + /* Start out supporting everything. Eventually, * a controller will attach, and may modify one * or both of these values */ - linkmode_copy(phydev->supported, phydrv->features); + if (phydrv->features) { + linkmode_copy(phydev->supported, phydrv->features); + } else { + err = phydrv->get_features(phydev); + if (err) + goto out; + } + of_set_phy_supported(phydev); linkmode_copy(phydev->advertising, phydev->supported); @@ -2232,20 +2129,8 @@ static int phy_probe(struct device *dev) * (e.g. hardware erratum) where the driver wants to set only one * of these bits. 
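
---- 8< ---- editor's example (not part of the commit) ----

genphy_read_status() above now defers to phy_resolve_aneg_linkmode(),
which (per the phy-core.c hunk earlier in this diff) walks the sorted
settings[] table instead of an if/else ladder. The same idea in miniature,
with a hypothetical two-entry table:

#include <linux/kernel.h>
#include <linux/phy.h>

struct demo_setting {
	u32 speed;
	u8 duplex;
	u8 bit;				/* ETHTOOL_LINK_MODE_* bit */
};

/* sorted fastest-first, like drivers/net/phy/phy-core.c::settings[] */
static const struct demo_setting demo_settings[] = {
	{ SPEED_1000, DUPLEX_FULL, ETHTOOL_LINK_MODE_1000baseT_Full_BIT },
	{ SPEED_100,  DUPLEX_FULL, ETHTOOL_LINK_MODE_100baseT_Full_BIT },
};

static void demo_resolve(struct phy_device *phydev)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(common);
	int i;

	/* the best mode is the first (fastest) one both ends advertise */
	linkmode_and(common, phydev->lp_advertising, phydev->advertising);

	for (i = 0; i < ARRAY_SIZE(demo_settings); i++)
		if (linkmode_test_bit(demo_settings[i].bit, common)) {
			phydev->speed = demo_settings[i].speed;
			phydev->duplex = demo_settings[i].duplex;
			break;
		}
}

---- >8 ---- end of example; diff resumes ----
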
 */
-	if (test_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydrv->features) ||
-	    test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydrv->features)) {
-		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
-				   phydev->supported);
-		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
-				   phydev->supported);
-		if (test_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydrv->features))
-			linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
-					 phydev->supported);
-		if (test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
-			     phydrv->features))
-			linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
-					 phydev->supported);
-	} else {
+	if (!test_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported) &&
+	    !test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->supported)) {
 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
 				 phydev->supported);
 		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
@@ -2255,17 +2140,7 @@ static int phy_probe(struct device *dev)
 	/* Set the state to READY by default */
 	phydev->state = PHY_READY;
 
-	if (phydev->drv->probe) {
-		/* Deassert the reset signal */
-		phy_device_reset(phydev, 0);
-
-		err = phydev->drv->probe(phydev);
-		if (err) {
-			/* Assert the reset signal */
-			phy_device_reset(phydev, 1);
-		}
-	}
-
+out:
 	mutex_unlock(&phydev->lock);
 
 	return err;
@@ -2301,7 +2176,11 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner)
 {
 	int retval;
 
-	if (WARN_ON(!new_driver->features)) {
+	/* Either the features are hard coded, or dynamically
+	 * determined. It cannot be both or neither.
+	 */
+	if (WARN_ON((!new_driver->features && !new_driver->get_features) ||
+		    (new_driver->features && new_driver->get_features))) {
 		pr_err("%s: Driver features are missing\n", new_driver->name);
 		return -EINVAL;
 	}
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 2e21ce42e388..59d175a5ba54 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -302,6 +302,13 @@ static void phylink_mac_config(struct phylink *pl,
 	pl->ops->mac_config(pl->netdev, pl->link_an_mode, state);
 }
 
+static void phylink_mac_config_up(struct phylink *pl,
+				  const struct phylink_link_state *state)
+{
+	if (state->link)
+		phylink_mac_config(pl, state);
+}
+
 static void phylink_mac_an_restart(struct phylink *pl)
 {
 	if (pl->link_config.an_enabled &&
@@ -401,12 +408,12 @@ static void phylink_resolve(struct work_struct *w)
 	case MLO_AN_PHY:
 		link_state = pl->phy_state;
 		phylink_resolve_flow(pl, &link_state);
-		phylink_mac_config(pl, &link_state);
+		phylink_mac_config_up(pl, &link_state);
 		break;
 
 	case MLO_AN_FIXED:
 		phylink_get_fixed_state(pl, &link_state);
-		phylink_mac_config(pl, &link_state);
+		phylink_mac_config_up(pl, &link_state);
 		break;
 
 	case MLO_AN_INBAND:
@@ -471,6 +478,17 @@ static void phylink_run_resolve(struct phylink *pl)
 		queue_work(system_power_efficient_wq, &pl->resolve);
 }
 
+static void phylink_run_resolve_and_disable(struct phylink *pl, int bit)
+{
+	unsigned long state = pl->phylink_disable_state;
+
+	set_bit(bit, &pl->phylink_disable_state);
+	if (state == 0) {
+		queue_work(system_power_efficient_wq, &pl->resolve);
+		flush_work(&pl->resolve);
+	}
+}
+
 static void phylink_fixed_poll(struct timer_list *t)
 {
 	struct phylink *pl = container_of(t, struct phylink, link_poll);
@@ -920,9 +938,7 @@ void phylink_stop(struct phylink *pl)
 	if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio))
 		del_timer_sync(&pl->link_poll);
 
-	set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
-	queue_work(system_power_efficient_wq, &pl->resolve);
-	flush_work(&pl->resolve);
+	phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_STOPPED);
 }
EXPORT_SYMBOL_GPL(phylink_stop); @@ -1265,6 +1281,24 @@ int phylink_get_eee_err(struct phylink *pl) EXPORT_SYMBOL_GPL(phylink_get_eee_err); /** + * phylink_init_eee() - init and check the EEE features + * @pl: a pointer to a &struct phylink returned from phylink_create() + * @clk_stop_enable: allow PHY to stop receive clock + * + * Must be called either with RTNL held or within mac_link_up() + */ +int phylink_init_eee(struct phylink *pl, bool clk_stop_enable) +{ + int ret = -EOPNOTSUPP; + + if (pl->phydev) + ret = phy_init_eee(pl->phydev, clk_stop_enable); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_init_eee); + +/** * phylink_ethtool_get_eee() - read the energy efficient ethernet parameters * @pl: a pointer to a &struct phylink returned from phylink_create() * @eee: a pointer to a &struct ethtool_eee for the read parameters @@ -1628,9 +1662,7 @@ static void phylink_sfp_link_down(void *upstream) ASSERT_RTNL(); - set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state); - queue_work(system_power_efficient_wq, &pl->resolve); - flush_work(&pl->resolve); + phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_LINK); } static void phylink_sfp_link_up(void *upstream) diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index ad9db652874d..fef701bfad62 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -347,6 +347,7 @@ static int sfp_register_bus(struct sfp_bus *bus) return ret; } } + bus->socket_ops->attach(bus->sfp); if (bus->started) bus->socket_ops->start(bus->sfp); bus->netdev->sfp_bus = bus; @@ -362,6 +363,7 @@ static void sfp_unregister_bus(struct sfp_bus *bus) if (bus->registered) { if (bus->started) bus->socket_ops->stop(bus->sfp); + bus->socket_ops->detach(bus->sfp); if (bus->phydev && ops && ops->disconnect_phy) ops->disconnect_phy(bus->upstream); } diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 298ab7546929..d4635c2178d1 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -185,6 +185,7 @@ struct sfp { struct gpio_desc *gpio[GPIO_MAX]; + bool attached; unsigned int state; struct delayed_work poll; struct delayed_work timeout; @@ -1476,7 +1477,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event) */ switch (sfp->sm_mod_state) { default: - if (event == SFP_E_INSERT) { + if (event == SFP_E_INSERT && sfp->attached) { sfp_module_tx_disable(sfp); sfp_sm_ins_next(sfp, SFP_MOD_PROBE, T_PROBE_INIT); } @@ -1608,6 +1609,19 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event) mutex_unlock(&sfp->sm_mutex); } +static void sfp_attach(struct sfp *sfp) +{ + sfp->attached = true; + if (sfp->state & SFP_F_PRESENT) + sfp_sm_event(sfp, SFP_E_INSERT); +} + +static void sfp_detach(struct sfp *sfp) +{ + sfp->attached = false; + sfp_sm_event(sfp, SFP_E_REMOVE); +} + static void sfp_start(struct sfp *sfp) { sfp_sm_event(sfp, SFP_E_DEV_UP); @@ -1668,6 +1682,8 @@ static int sfp_module_eeprom(struct sfp *sfp, struct ethtool_eeprom *ee, } static const struct sfp_socket_ops sfp_module_ops = { + .attach = sfp_attach, + .detach = sfp_detach, .start = sfp_start, .stop = sfp_stop, .module_info = sfp_module_info, @@ -1835,10 +1851,6 @@ static int sfp_probe(struct platform_device *pdev) dev_info(sfp->dev, "Host maximum power %u.%uW\n", sfp->max_power_mW / 1000, (sfp->max_power_mW / 100) % 10); - sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops); - if (!sfp->sfp_bus) - return -ENOMEM; - /* Get the initial state, and always signal TX disable, * since the network interface will not be up. 
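
---- 8< ---- editor's example (not part of the commit) ----

A sketch of how a phylink-based MAC driver might use the new
phylink_init_eee() wrapper added above; per its kernel-doc it must run
under RTNL or from mac_link_up(), and the helper below is hypothetical:

#include <linux/netdevice.h>
#include <linux/phylink.h>

static void demo_link_up_init_eee(struct net_device *ndev,
				  struct phylink *pl)
{
	/* true: allow the PHY to stop the receive clock during LPI */
	if (phylink_init_eee(pl, true) == 0)
		netdev_info(ndev, "EEE initialized\n");
}

---- >8 ---- end of example; diff resumes ----
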
*/ @@ -1849,10 +1861,6 @@ static int sfp_probe(struct platform_device *pdev) sfp->state |= SFP_F_RATE_SELECT; sfp_set_state(sfp, sfp->state); sfp_module_tx_disable(sfp); - rtnl_lock(); - if (sfp->state & SFP_F_PRESENT) - sfp_sm_event(sfp, SFP_E_INSERT); - rtnl_unlock(); for (i = 0; i < GPIO_MAX; i++) { if (gpio_flags[i] != GPIOD_IN || !sfp->gpio[i]) @@ -1885,6 +1893,10 @@ static int sfp_probe(struct platform_device *pdev) dev_warn(sfp->dev, "No tx_disable pin: SFP modules will always be emitting.\n"); + sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops); + if (!sfp->sfp_bus) + return -ENOMEM; + return 0; } diff --git a/drivers/net/phy/sfp.h b/drivers/net/phy/sfp.h index 31b0acf337e2..64f54b0bbd8c 100644 --- a/drivers/net/phy/sfp.h +++ b/drivers/net/phy/sfp.h @@ -7,6 +7,8 @@ struct sfp; struct sfp_socket_ops { + void (*attach)(struct sfp *sfp); + void (*detach)(struct sfp *sfp); void (*start)(struct sfp *sfp); void (*stop)(struct sfp *sfp); int (*module_info)(struct sfp *sfp, struct ethtool_modinfo *modinfo); diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c index ebf419dc7307..2d1449345959 100644 --- a/drivers/net/phy/xilinx_gmii2rgmii.c +++ b/drivers/net/phy/xilinx_gmii2rgmii.c @@ -35,7 +35,10 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev) u16 val = 0; int err; - err = priv->phy_drv->read_status(phydev); + if (priv->phy_drv->read_status) + err = priv->phy_drv->read_status(phydev); + else + err = genphy_read_status(phydev); if (err < 0) return err; diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index afd9d25d1992..958f1cf67282 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -256,17 +256,6 @@ static void __team_option_inst_mark_removed_port(struct team *team, } } -static bool __team_option_inst_tmp_find(const struct list_head *opts, - const struct team_option_inst *needle) -{ - struct team_option_inst *opt_inst; - - list_for_each_entry(opt_inst, opts, tmp_list) - if (opt_inst == needle) - return true; - return false; -} - static int __team_options_register(struct team *team, const struct team_option *option, size_t option_count) @@ -2460,7 +2449,6 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) int err = 0; int i; struct nlattr *nl_option; - LIST_HEAD(opt_inst_list); rtnl_lock(); @@ -2480,6 +2468,7 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1]; struct nlattr *attr; struct nlattr *attr_data; + LIST_HEAD(opt_inst_list); enum team_option_type opt_type; int opt_port_ifindex = 0; /* != 0 for per-port options */ u32 opt_array_index = 0; @@ -2584,23 +2573,17 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) if (err) goto team_put; opt_inst->changed = true; - - /* dumb/evil user-space can send us duplicate opt, - * keep only the last one - */ - if (__team_option_inst_tmp_find(&opt_inst_list, - opt_inst)) - continue; - list_add(&opt_inst->tmp_list, &opt_inst_list); } if (!opt_found) { err = -ENOENT; goto team_put; } - } - err = team_nl_send_event_options_get(team, &opt_inst_list); + err = team_nl_send_event_options_get(team, &opt_inst_list); + if (err) + break; + } team_put: team_nl_team_put(team); diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 18656c4094b3..fed298c0cb39 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -866,8 +866,6 @@ static int tun_attach(struct tun_struct *tun, struct file *file, if 
(rtnl_dereference(tun->xdp_prog)) sock_set_flag(&tfile->sk, SOCK_XDP); - tun_set_real_num_queues(tun); - /* device is allowed to go away first, so no need to hold extra * refcnt. */ @@ -879,6 +877,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file, rcu_assign_pointer(tfile->tun, tun); rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); tun->numqueues++; + tun_set_real_num_queues(tun); out: return err; } diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c index 78b16eb9e58c..63aaae487995 100644 --- a/drivers/net/usb/cdc-phonet.c +++ b/drivers/net/usb/cdc-phonet.c @@ -361,8 +361,8 @@ static int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *i else return -EINVAL; - dev = alloc_netdev(sizeof(*pnd) + sizeof(pnd->urbs[0]) * rxq_size, - ifname, NET_NAME_UNKNOWN, usbpn_setup); + dev = alloc_netdev(struct_size(pnd, urbs, rxq_size), ifname, + NET_NAME_UNKNOWN, usbpn_setup); if (!dev) return -ENOMEM; diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index f4247b275e09..63e44e746ccc 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c @@ -1011,6 +1011,7 @@ static int pegasus_ioctl(struct net_device *net, struct ifreq *rq, int cmd) switch (cmd) { case SIOCDEVPRIVATE: data[0] = pegasus->phy; + /* fall through */ case SIOCDEVPRIVATE + 1: read_mii_word(pegasus, data[0], data[1] & 0x1f, &data[3]); res = 0; diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 735ad838e2ba..18af2f8eee96 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1201,8 +1201,8 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */ - {QMI_FIXED_INTF(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC7304/MC7354 */ - {QMI_FIXED_INTF(0x1199, 0x68c0, 10)}, /* Sierra Wireless MC7304/MC7354 */ + {QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC7304/MC7354, WP76xx */ + {QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 10)},/* Sierra Wireless MC7304/MC7354 */ {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */ {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 60dd1ec1665f..ada6baf8847a 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -557,6 +557,7 @@ enum spd_duplex { /* MAC PASSTHRU */ #define AD_MASK 0xfee0 #define BND_MASK 0x0004 +#define BD_MASK 0x0001 #define EFUSE 0xcfdb #define PASS_THRU_MASK 0x1 @@ -1176,9 +1177,9 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa) return -ENODEV; } } else { - /* test for RTL8153-BND */ + /* test for RTL8153-BND and RTL8153-BD */ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1); - if ((ocp_data & BND_MASK) == 0) { + if ((ocp_data & BND_MASK) == 0 && (ocp_data & BD_MASK)) { netif_dbg(tp, probe, tp->netdev, "Invalid variant for MAC pass through\n"); return -ENODEV; diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index 3f145e4c6c08..59dbdbb5feff 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@ -847,6 +847,7 @@ static int rtl8150_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) switch (cmd) { case SIOCDEVPRIVATE: data[0] = dev->phy; + /* fall through */ case SIOCDEVPRIVATE + 
1: read_mii_word(dev, dev->phy, (data[1] & 0x1f), &data[3]); break; diff --git a/drivers/net/veth.c b/drivers/net/veth.c index f412ea1cef18..fbf890ebbeae 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -540,8 +540,10 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq, goto xdp_xmit; default: bpf_warn_invalid_xdp_action(act); + /* fall through */ case XDP_ABORTED: trace_xdp_exception(rq->dev, xdp_prog, act); + /* fall through */ case XDP_DROP: goto err_xdp; } @@ -661,8 +663,10 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb, goto xdp_xmit; default: bpf_warn_invalid_xdp_action(act); + /* fall through */ case XDP_ABORTED: trace_xdp_exception(rq->dev, xdp_prog, act); + /* fall through */ case XDP_DROP: goto drop; } diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 2a0edd4653e3..7eb38ea9ba56 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -57,6 +57,8 @@ module_param(napi_tx, bool, 0644); #define VIRTIO_XDP_TX BIT(0) #define VIRTIO_XDP_REDIR BIT(1) +#define VIRTIO_XDP_FLAG BIT(0) + /* RX packet size EWMA. The average packet size is used to determine the packet * buffer size when refilling RX rings. As the entire RX ring may be refilled * at once, the weight is chosen so that the EWMA will be insensitive to short- @@ -252,6 +254,21 @@ struct padded_vnet_hdr { char padding[4]; }; +static bool is_xdp_frame(void *ptr) +{ + return (unsigned long)ptr & VIRTIO_XDP_FLAG; +} + +static void *xdp_to_ptr(struct xdp_frame *ptr) +{ + return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG); +} + +static struct xdp_frame *ptr_to_xdp(void *ptr) +{ + return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG); +} + /* Converting between virtqueue no. and kernel tx/rx queue no. * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq */ @@ -462,7 +479,8 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi, sg_init_one(sq->sg, xdpf->data, xdpf->len); - err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdpf, GFP_ATOMIC); + err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf), + GFP_ATOMIC); if (unlikely(err)) return -ENOSPC; /* Caller handle free/refcnt */ @@ -482,36 +500,47 @@ static int virtnet_xdp_xmit(struct net_device *dev, { struct virtnet_info *vi = netdev_priv(dev); struct receive_queue *rq = vi->rq; - struct xdp_frame *xdpf_sent; struct bpf_prog *xdp_prog; struct send_queue *sq; unsigned int len; + int packets = 0; + int bytes = 0; int drops = 0; int kicks = 0; int ret, err; + void *ptr; int i; - sq = virtnet_xdp_sq(vi); - - if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) { - ret = -EINVAL; - drops = n; - goto out; - } - /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this * indicate XDP resources have been successfully allocated. */ xdp_prog = rcu_dereference(rq->xdp_prog); - if (!xdp_prog) { - ret = -ENXIO; + if (!xdp_prog) + return -ENXIO; + + sq = virtnet_xdp_sq(vi); + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) { + ret = -EINVAL; drops = n; goto out; } /* Free up any pending old buffers before queueing new ones. 
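
---- 8< ---- editor's example (not part of the commit) ----

The is_xdp_frame()/xdp_to_ptr()/ptr_to_xdp() helpers added in the
virtio_net hunk above distinguish the two token types returned by the
virtqueue by tagging bit 0 of the pointer, which is always clear for the
aligned skb and xdp_frame allocations. The trick in self-contained form:

#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_PTR_TAG	BIT(0)

static inline void *demo_tag(void *ptr)
{
	return (void *)((unsigned long)ptr | DEMO_PTR_TAG);
}

static inline bool demo_is_tagged(void *ptr)
{
	return (unsigned long)ptr & DEMO_PTR_TAG;
}

static inline void *demo_untag(void *ptr)
{
	/* mask the tag back off before dereferencing */
	return (void *)((unsigned long)ptr & ~DEMO_PTR_TAG);
}

---- >8 ---- end of example; diff resumes ----
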
*/ - while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) - xdp_return_frame(xdpf_sent); + while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { + if (likely(is_xdp_frame(ptr))) { + struct xdp_frame *frame = ptr_to_xdp(ptr); + + bytes += frame->len; + xdp_return_frame(frame); + } else { + struct sk_buff *skb = ptr; + + bytes += skb->len; + napi_consume_skb(skb, false); + } + packets++; + } for (i = 0; i < n; i++) { struct xdp_frame *xdpf = frames[i]; @@ -530,6 +559,8 @@ static int virtnet_xdp_xmit(struct net_device *dev, } out: u64_stats_update_begin(&sq->stats.syncp); + sq->stats.bytes += bytes; + sq->stats.packets += packets; sq->stats.xdp_tx += n; sq->stats.xdp_tx_drops += drops; sq->stats.kicks += kicks; @@ -1333,18 +1364,26 @@ static int virtnet_receive(struct receive_queue *rq, int budget, static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) { - struct sk_buff *skb; unsigned int len; unsigned int packets = 0; unsigned int bytes = 0; + void *ptr; - while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) { - pr_debug("Sent skb %p\n", skb); + while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { + if (likely(!is_xdp_frame(ptr))) { + struct sk_buff *skb = ptr; - bytes += skb->len; - packets++; + pr_debug("Sent skb %p\n", skb); + + bytes += skb->len; + napi_consume_skb(skb, in_napi); + } else { + struct xdp_frame *frame = ptr_to_xdp(ptr); - napi_consume_skb(skb, in_napi); + bytes += frame->len; + xdp_return_frame(frame); + } + packets++; } /* Avoid overhead when no packets have been processed @@ -1359,6 +1398,16 @@ static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) u64_stats_update_end(&sq->stats.syncp); } +static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) +{ + if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) + return false; + else if (q < vi->curr_queue_pairs) + return true; + else + return false; +} + static void virtnet_poll_cleantx(struct receive_queue *rq) { struct virtnet_info *vi = rq->vq->vdev->priv; @@ -1366,7 +1415,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq) struct send_queue *sq = &vi->sq[index]; struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); - if (!sq->napi.weight) + if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index)) return; if (__netif_tx_trylock(txq)) { @@ -1443,8 +1492,16 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget) { struct send_queue *sq = container_of(napi, struct send_queue, napi); struct virtnet_info *vi = sq->vq->vdev->priv; - struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq)); + unsigned int index = vq2txq(sq->vq); + struct netdev_queue *txq; + if (unlikely(is_xdp_raw_buffer_queue(vi, index))) { + /* We don't need to enable cb for XDP */ + napi_complete_done(napi, 0); + return 0; + } + + txq = netdev_get_tx_queue(vi->dev, index); __netif_tx_lock(txq, raw_smp_processor_id()); free_old_xmit_skbs(sq, true); __netif_tx_unlock(txq); @@ -2396,6 +2453,10 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, return -ENOMEM; } + old_prog = rtnl_dereference(vi->rq[0].xdp_prog); + if (!prog && !old_prog) + return 0; + if (prog) { prog = bpf_prog_add(prog, vi->max_queue_pairs - 1); if (IS_ERR(prog)) @@ -2403,36 +2464,62 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, } /* Make sure NAPI is not using any XDP TX queues for RX. 
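
---- 8< ---- editor's note (not part of the commit) ----

The queue checks above rely on virtio_net's queue layout: of the
curr_queue_pairs active pairs, the last xdp_queue_pairs are reserved for
XDP transmit. That is all is_xdp_raw_buffer_queue() (moved earlier in this
patch) encodes; condensed:

#include <linux/types.h>

static bool demo_is_xdp_queue(int q, int curr_qp, int xdp_qp)
{
	/* [0, curr_qp - xdp_qp) carry skbs,
	 * [curr_qp - xdp_qp, curr_qp) carry raw XDP buffers,
	 * anything >= curr_qp is currently inactive
	 */
	return q >= curr_qp - xdp_qp && q < curr_qp;
}

---- >8 ---- end of note; diff resumes ----
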
*/ - if (netif_running(dev)) - for (i = 0; i < vi->max_queue_pairs; i++) + if (netif_running(dev)) { + for (i = 0; i < vi->max_queue_pairs; i++) { napi_disable(&vi->rq[i].napi); + virtnet_napi_tx_disable(&vi->sq[i].napi); + } + } + + if (!prog) { + for (i = 0; i < vi->max_queue_pairs; i++) { + rcu_assign_pointer(vi->rq[i].xdp_prog, prog); + if (i == 0) + virtnet_restore_guest_offloads(vi); + } + synchronize_net(); + } - netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); err = _virtnet_set_queues(vi, curr_qp + xdp_qp); if (err) goto err; + netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); vi->xdp_queue_pairs = xdp_qp; - for (i = 0; i < vi->max_queue_pairs; i++) { - old_prog = rtnl_dereference(vi->rq[i].xdp_prog); - rcu_assign_pointer(vi->rq[i].xdp_prog, prog); - if (i == 0) { - if (!old_prog) + if (prog) { + for (i = 0; i < vi->max_queue_pairs; i++) { + rcu_assign_pointer(vi->rq[i].xdp_prog, prog); + if (i == 0 && !old_prog) virtnet_clear_guest_offloads(vi); - if (!prog) - virtnet_restore_guest_offloads(vi); } + } + + for (i = 0; i < vi->max_queue_pairs; i++) { if (old_prog) bpf_prog_put(old_prog); - if (netif_running(dev)) + if (netif_running(dev)) { virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); + virtnet_napi_tx_enable(vi, vi->sq[i].vq, + &vi->sq[i].napi); + } } return 0; err: - for (i = 0; i < vi->max_queue_pairs; i++) - virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); + if (!prog) { + virtnet_clear_guest_offloads(vi); + for (i = 0; i < vi->max_queue_pairs; i++) + rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog); + } + + if (netif_running(dev)) { + for (i = 0; i < vi->max_queue_pairs; i++) { + virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); + virtnet_napi_tx_enable(vi, vi->sq[i].vq, + &vi->sq[i].napi); + } + } if (prog) bpf_prog_sub(prog, vi->max_queue_pairs - 1); return err; @@ -2614,16 +2701,6 @@ static void free_receive_page_frags(struct virtnet_info *vi) put_page(vi->rq[i].alloc_frag.page); } -static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) -{ - if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) - return false; - else if (q < vi->curr_queue_pairs) - return true; - else - return false; -} - static void free_unused_bufs(struct virtnet_info *vi) { void *buf; @@ -2632,10 +2709,10 @@ static void free_unused_bufs(struct virtnet_info *vi) for (i = 0; i < vi->max_queue_pairs; i++) { struct virtqueue *vq = vi->sq[i].vq; while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { - if (!is_xdp_raw_buffer_queue(vi, i)) + if (!is_xdp_frame(buf)) dev_kfree_skb(buf); else - put_page(virt_to_head_page(buf)); + xdp_return_frame(ptr_to_xdp(buf)); } } diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index ef45c3c925be..33edc78e818d 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -869,6 +869,14 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f, call_rcu(&f->rcu, vxlan_fdb_free); } +static void vxlan_dst_free(struct rcu_head *head) +{ + struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu); + + dst_cache_destroy(&rd->dst_cache); + kfree(rd); +} + static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan, union vxlan_addr *ip, __u16 state, __u16 flags, @@ -941,8 +949,10 @@ static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan, err_notify: if ((flags & NLM_F_REPLACE) && rc) *rd = oldrd; - else if ((flags & NLM_F_APPEND) && rc) + else if ((flags & NLM_F_APPEND) && rc) { list_del_rcu(&rd->list); + call_rcu(&rd->rcu, vxlan_dst_free); + } return err; } @@ -1013,14 +1023,6 @@ static int 
vxlan_fdb_update(struct vxlan_dev *vxlan, } } -static void vxlan_dst_free(struct rcu_head *head) -{ - struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu); - - dst_cache_destroy(&rd->dst_cache); - kfree(rd); -} - static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f, struct vxlan_rdst *rd, bool swdev_notify) { @@ -2291,7 +2293,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, struct pcpu_sw_netstats *tx_stats, *rx_stats; union vxlan_addr loopback; union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip; - struct net_device *dev = skb->dev; + struct net_device *dev; int len = skb->len; tx_stats = this_cpu_ptr(src_vxlan->dev->tstats); @@ -2311,9 +2313,15 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, #endif } + rcu_read_lock(); + dev = skb->dev; + if (unlikely(!(dev->flags & IFF_UP))) { + kfree_skb(skb); + goto drop; + } + if (dst_vxlan->cfg.flags & VXLAN_F_LEARN) - vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source, 0, - vni); + vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source, 0, vni); u64_stats_update_begin(&tx_stats->syncp); tx_stats->tx_packets++; @@ -2326,8 +2334,10 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, rx_stats->rx_bytes += len; u64_stats_update_end(&rx_stats->syncp); } else { +drop: dev->stats.rx_dropped++; } + rcu_read_unlock(); } static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev, diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c index d5dc823f781e..fa78d2b14136 100644 --- a/drivers/net/wan/dscc4.c +++ b/drivers/net/wan/dscc4.c @@ -1575,7 +1575,7 @@ try: dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; } - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); dpriv->tx_skbuff[cur] = NULL; ++dpriv->tx_dirty; } else { diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index 66d889d54e58..a08f04c3f644 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -482,7 +482,7 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv) memset(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr), 0, skb->len); - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); priv->tx_skbuff[priv->skb_dirtytx] = NULL; priv->skb_dirtytx = diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c index d573a57bc301..459ce52a0a4d 100644 --- a/drivers/net/wan/wanxl.c +++ b/drivers/net/wan/wanxl.c @@ -565,7 +565,7 @@ static int wanxl_pci_init_one(struct pci_dev *pdev, u32 plx_phy; /* PLX PCI base address */ u32 mem_phy; /* memory PCI base addr */ u8 __iomem *mem; /* memory virtual base addr */ - int i, ports, alloc_size; + int i, ports; #ifndef MODULE pr_info_once("%s\n", version); @@ -601,8 +601,7 @@ static int wanxl_pci_init_one(struct pci_dev *pdev, default: ports = 4; } - alloc_size = sizeof(struct card) + ports * sizeof(struct port); - card = kzalloc(alloc_size, GFP_KERNEL); + card = kzalloc(struct_size(card, ports, ports), GFP_KERNEL); if (card == NULL) { pci_release_regions(pdev); pci_disable_device(pdev); diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c index 0b602951ff6b..d28b96d06919 100644 --- a/drivers/net/wimax/i2400m/rx.c +++ b/drivers/net/wimax/i2400m/rx.c @@ -1260,8 +1260,8 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb) goto error_msg_hdr_check; result = -EIO; num_pls = le16_to_cpu(msg_hdr->num_pls); - pl_itr = sizeof(*msg_hdr) + /* Check payload descriptor(s) */ - num_pls * 
sizeof(msg_hdr->pld[0]); + /* Check payload descriptor(s) */ + pl_itr = struct_size(msg_hdr, pld, num_pls); pl_itr = ALIGN(pl_itr, I2400M_PL_ALIGN); if (pl_itr > skb_len) { /* got all the payload descriptors? */ dev_err(dev, "RX: HW BUG? message too short (%u bytes) for " diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c index f8eb66ef2944..73842a830645 100644 --- a/drivers/net/wimax/i2400m/usb.c +++ b/drivers/net/wimax/i2400m/usb.c @@ -210,6 +210,7 @@ retry: msleep(10); /* give the device some time */ goto retry; } + /* fall through */ case -EINVAL: /* while removing driver */ case -ENODEV: /* dev disconnect ... */ case -ENOENT: /* just ignore it */ diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 399b501f3c3c..e8891f5fc83a 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -548,7 +548,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { { .id = WCN3990_HW_1_0_DEV_VERSION, .dev_id = 0, - .bus = ATH10K_BUS_PCI, + .bus = ATH10K_BUS_SNOC, .name = "wcn3990 hw1.0", .continuous_frag_desc = true, .tx_chain_mask = 0x7, diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 2034ccc7cc72..021eb30d1fb7 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -1986,7 +1986,7 @@ static inline const char *ath10k_wmi_phymode_str(enum wmi_phy_mode mode) /* no default handler to allow compiler to check that the * enum is fully handled */ - }; + } return "<unknown>"; } diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c index 54132af70094..aa1c71a76ef7 100644 --- a/drivers/net/wireless/ath/ath6kl/init.c +++ b/drivers/net/wireless/ath/ath6kl/init.c @@ -1140,7 +1140,7 @@ static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name) len -= ie_len; data += ie_len; - }; + } ret = 0; out: diff --git a/drivers/net/wireless/broadcom/b43/debugfs.c b/drivers/net/wireless/broadcom/b43/debugfs.c index 77046384dd80..976c8ec4e992 100644 --- a/drivers/net/wireless/broadcom/b43/debugfs.c +++ b/drivers/net/wireless/broadcom/b43/debugfs.c @@ -668,15 +668,13 @@ static void b43_remove_dynamic_debug(struct b43_wldev *dev) static void b43_add_dynamic_debug(struct b43_wldev *dev) { struct b43_dfsentry *e = dev->dfsentry; - struct dentry *d; -#define add_dyn_dbg(name, id, initstate) do { \ - e->dyn_debug[id] = (initstate); \ - d = debugfs_create_bool(name, 0600, e->subdir, \ - &(e->dyn_debug[id])); \ - if (!IS_ERR(d)) \ - e->dyn_debug_dentries[id] = d; \ - } while (0) +#define add_dyn_dbg(name, id, initstate) do { \ + e->dyn_debug[id] = (initstate); \ + e->dyn_debug_dentries[id] = \ + debugfs_create_bool(name, 0600, e->subdir, \ + &(e->dyn_debug[id])); \ + } while (0) add_dyn_dbg("debug_xmitpower", B43_DBG_XMITPOWER, false); add_dyn_dbg("debug_dmaoverflow", B43_DBG_DMAOVERFLOW, false); @@ -718,19 +716,6 @@ void b43_debugfs_add_device(struct b43_wldev *dev) snprintf(devdir, sizeof(devdir), "%s", wiphy_name(dev->wl->hw->wiphy)); e->subdir = debugfs_create_dir(devdir, rootdir); - if (!e->subdir || IS_ERR(e->subdir)) { - if (e->subdir == ERR_PTR(-ENODEV)) { - b43dbg(dev->wl, "DebugFS (CONFIG_DEBUG_FS) not " - "enabled in kernel config\n"); - } else { - b43err(dev->wl, "debugfs: cannot create %s directory\n", - devdir); - } - dev->dfsentry = NULL; - kfree(log->log); - kfree(e); - return; - } e->mmio16read_next = 0xFFFF; /* invalid address */ e->mmio32read_next = 
0xFFFF; /* invalid address */ @@ -741,13 +726,10 @@ void b43_debugfs_add_device(struct b43_wldev *dev) #define ADD_FILE(name, mode) \ do { \ - struct dentry *d; \ - d = debugfs_create_file(__stringify(name), \ + e->file_##name.dentry = \ + debugfs_create_file(__stringify(name), \ mode, e->subdir, dev, \ &fops_##name.fops); \ - e->file_##name.dentry = NULL; \ - if (!IS_ERR(d)) \ - e->file_##name.dentry = d; \ } while (0) @@ -818,8 +800,6 @@ void b43_debugfs_log_txstat(struct b43_wldev *dev, void b43_debugfs_init(void) { rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL); - if (IS_ERR(rootdir)) - rootdir = NULL; } void b43_debugfs_exit(void) diff --git a/drivers/net/wireless/broadcom/b43legacy/debugfs.c b/drivers/net/wireless/broadcom/b43legacy/debugfs.c index 82ef56ed7ca1..8150adee3e34 100644 --- a/drivers/net/wireless/broadcom/b43legacy/debugfs.c +++ b/drivers/net/wireless/broadcom/b43legacy/debugfs.c @@ -361,15 +361,13 @@ static void b43legacy_remove_dynamic_debug(struct b43legacy_wldev *dev) static void b43legacy_add_dynamic_debug(struct b43legacy_wldev *dev) { struct b43legacy_dfsentry *e = dev->dfsentry; - struct dentry *d; -#define add_dyn_dbg(name, id, initstate) do { \ - e->dyn_debug[id] = (initstate); \ - d = debugfs_create_bool(name, 0600, e->subdir, \ - &(e->dyn_debug[id])); \ - if (!IS_ERR(d)) \ - e->dyn_debug_dentries[id] = d; \ - } while (0) +#define add_dyn_dbg(name, id, initstate) do { \ + e->dyn_debug[id] = (initstate); \ + e->dyn_debug_dentries[id] = \ + debugfs_create_bool(name, 0600, e->subdir, \ + &(e->dyn_debug[id])); \ + } while (0) add_dyn_dbg("debug_xmitpower", B43legacy_DBG_XMITPOWER, false); add_dyn_dbg("debug_dmaoverflow", B43legacy_DBG_DMAOVERFLOW, false); @@ -408,29 +406,14 @@ void b43legacy_debugfs_add_device(struct b43legacy_wldev *dev) snprintf(devdir, sizeof(devdir), "%s", wiphy_name(dev->wl->hw->wiphy)); e->subdir = debugfs_create_dir(devdir, rootdir); - if (!e->subdir || IS_ERR(e->subdir)) { - if (e->subdir == ERR_PTR(-ENODEV)) { - b43legacydbg(dev->wl, "DebugFS (CONFIG_DEBUG_FS) not " - "enabled in kernel config\n"); - } else { - b43legacyerr(dev->wl, "debugfs: cannot create %s directory\n", - devdir); - } - dev->dfsentry = NULL; - kfree(log->log); - kfree(e); - return; - } #define ADD_FILE(name, mode) \ do { \ - struct dentry *d; \ - d = debugfs_create_file(__stringify(name), \ + e->file_##name.dentry = \ + debugfs_create_file(__stringify(name), \ mode, e->subdir, dev, \ &fops_##name.fops); \ e->file_##name.dentry = NULL; \ - if (!IS_ERR(d)) \ - e->file_##name.dentry = d; \ } while (0) @@ -492,8 +475,6 @@ void b43legacy_debugfs_log_txstat(struct b43legacy_wldev *dev, void b43legacy_debugfs_init(void) { rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL); - if (IS_ERR(rootdir)) - rootdir = NULL; } void b43legacy_debugfs_exit(void) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile index 22fd95a736a8..f7cf3e5f4849 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile @@ -16,8 +16,8 @@ # CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
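
The wanxl and i2400m hunks just above replace open-coded allocation arithmetic such as sizeof(struct card) + ports * sizeof(struct port) with struct_size(), which computes the size of a structure plus its trailing flexible array while saturating on arithmetic overflow, so a poisoned count makes the allocation fail instead of producing a short buffer. A minimal user-space sketch of the same idea, re-derived with compiler overflow builtins (the real helper lives in include/linux/overflow.h; the struct layout here is illustrative):

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

struct port { int id; };
struct card { int nports; struct port ports[]; };

/* Overflow-checked equivalent of struct_size(card, ports, n):
 * sizeof(struct card) + n * sizeof(struct port), saturating to
 * SIZE_MAX on overflow so the allocation below fails cleanly. */
static size_t card_size(size_t n)
{
	size_t bytes;

	if (__builtin_mul_overflow(n, sizeof(struct port), &bytes) ||
	    __builtin_add_overflow(bytes, sizeof(struct card), &bytes))
		return SIZE_MAX;
	return bytes;
}

int main(void)
{
	struct card *card = calloc(1, card_size(4));

	if (!card)
		return 1;
	card->nports = 4;
	free(card);
	return 0;
}

Saturating to SIZE_MAX is the design point: no allocator will satisfy it, so the caller's existing NULL check catches the overflow with no extra code.
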
ccflags-y += \ - -Idrivers/net/wireless/broadcom/brcm80211/brcmfmac \ - -Idrivers/net/wireless/broadcom/brcm80211/include + -I $(srctree)/$(src) \ + -I $(srctree)/$(src)/../include obj-$(CONFIG_BRCMFMAC) += brcmfmac.o brcmfmac-objs += \ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index d64bf233b12c..ec129864cc9c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -315,7 +315,7 @@ static int brcmf_sdiod_skbuff_read(struct brcmf_sdio_dev *sdiodev, /* bail out as things are really fishy here */ WARN(1, "invalid sdio function number: %d\n", func->num); err = -ENOMEDIUM; - }; + } if (err == -ENOMEDIUM) brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c index 1f1e95a15a17..0ce1d8174e6d 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c @@ -149,7 +149,7 @@ static int brcmf_c_process_clm_blob(struct brcmf_if *ifp) return err; } - err = request_firmware(&clm, clm_name, bus->dev); + err = firmware_request_nowarn(&clm, clm_name, bus->dev); if (err) { brcmf_info("no clm_blob available (err=%d), device may have limited channels available\n", err); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c index 51d76ac45075..7535cb0d4ac0 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c @@ -43,6 +43,10 @@ static const struct brcmf_dmi_data meegopad_t08_data = { BRCM_CC_43340_CHIP_ID, 2, "meegopad-t08" }; +static const struct brcmf_dmi_data pov_tab_p1006w_data = { + BRCM_CC_43340_CHIP_ID, 2, "pov-tab-p1006w-data" +}; + static const struct dmi_system_id dmi_platform_data[] = { { /* Match for the GPDwin which unfortunately uses somewhat @@ -81,6 +85,17 @@ static const struct dmi_system_id dmi_platform_data[] = { }, .driver_data = (void *)&meegopad_t08_data, }, + { + /* Point of View TAB-P1006W-232 */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Insyde"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "BayTrail"), + /* Note 105b is Foxcon's USB/PCI vendor id */ + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "105B"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "0E57"), + }, + .driver_data = (void *)&pov_tab_p1006w_data, + }, {} }; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c index ffa243e2e2d0..37b403877228 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c @@ -496,6 +496,11 @@ int brcmf_pno_stop_sched_scan(struct brcmf_if *ifp, u64 reqid) brcmf_dbg(TRACE, "reqid=%llu\n", reqid); pi = ifp_to_pno(ifp); + + /* No PNO request */ + if (!pi->n_reqs) + return 0; + err = brcmf_pno_remove_request(pi, reqid); if (err) return err; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c index a4308c6e72d7..76cfaf6999c8 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c @@ -1550,6 +1550,10 @@ void brcmf_usb_exit(void) void brcmf_usb_register(void) { + int ret; + brcmf_dbg(USB, "Enter\n"); - 
usb_register(&brcmf_usbdrvr); + ret = usb_register(&brcmf_usbdrvr); + if (ret) + brcmf_err("usb_register failed %d\n", ret); } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile index ed83f33aceb7..482d7737764d 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile @@ -16,9 +16,9 @@ # CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ccflags-y := \ - -Idrivers/net/wireless/broadcom/brcm80211/brcmsmac \ - -Idrivers/net/wireless/broadcom/brcm80211/brcmsmac/phy \ - -Idrivers/net/wireless/broadcom/brcm80211/include + -I $(srctree)/$(src) \ + -I $(srctree)/$(src)/phy \ + -I $(srctree)/$(src)/../include brcmsmac-y := \ mac80211_if.o \ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.c index 3bd54f125776..6d776ef6ff54 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.c @@ -37,27 +37,18 @@ static struct dentry *root_folder; void brcms_debugfs_init(void) { root_folder = debugfs_create_dir(KBUILD_MODNAME, NULL); - if (IS_ERR(root_folder)) - root_folder = NULL; } void brcms_debugfs_exit(void) { - if (!root_folder) - return; - debugfs_remove_recursive(root_folder); root_folder = NULL; } -int brcms_debugfs_attach(struct brcms_pub *drvr) +void brcms_debugfs_attach(struct brcms_pub *drvr) { - if (!root_folder) - return -ENODEV; - drvr->dbgfs_dir = debugfs_create_dir( dev_name(&drvr->wlc->hw->d11core->dev), root_folder); - return PTR_ERR_OR_ZERO(drvr->dbgfs_dir); } void brcms_debugfs_detach(struct brcms_pub *drvr) @@ -195,7 +186,7 @@ static const struct file_operations brcms_debugfs_def_ops = { .llseek = seq_lseek }; -static int +static void brcms_debugfs_add_entry(struct brcms_pub *drvr, const char *fn, int (*read_fn)(struct seq_file *seq, void *data)) { @@ -203,27 +194,18 @@ brcms_debugfs_add_entry(struct brcms_pub *drvr, const char *fn, struct dentry *dentry = drvr->dbgfs_dir; struct brcms_debugfs_entry *entry; - if (IS_ERR_OR_NULL(dentry)) - return -ENOENT; - entry = devm_kzalloc(dev, sizeof(*entry), GFP_KERNEL); if (!entry) - return -ENOMEM; + return; entry->read = read_fn; entry->drvr = drvr; - dentry = debugfs_create_file(fn, 0444, dentry, entry, - &brcms_debugfs_def_ops); - - return PTR_ERR_OR_ZERO(dentry); + debugfs_create_file(fn, 0444, dentry, entry, &brcms_debugfs_def_ops); } void brcms_debugfs_create_files(struct brcms_pub *drvr) { - if (IS_ERR_OR_NULL(drvr->dbgfs_dir)) - return; - brcms_debugfs_add_entry(drvr, "hardware", brcms_debugfs_hardware_read); brcms_debugfs_add_entry(drvr, "macstat", brcms_debugfs_macstat_read); } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.h index 822781cf15d4..56898e6d789d 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.h @@ -68,7 +68,7 @@ void __brcms_dbg(struct device *dev, u32 level, const char *func, struct brcms_pub; void brcms_debugfs_init(void); void brcms_debugfs_exit(void); -int brcms_debugfs_attach(struct brcms_pub *drvr); +void brcms_debugfs_attach(struct brcms_pub *drvr); void brcms_debugfs_detach(struct brcms_pub *drvr); struct dentry *brcms_debugfs_get_devdir(struct brcms_pub *drvr); void brcms_debugfs_create_files(struct brcms_pub *drvr); diff --git 
a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c index e78a93a45741..c6e107f41948 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c @@ -1199,8 +1199,6 @@ wlc_lcnphy_rx_iq_est(struct brcms_phy *pi, { int wait_count = 0; bool result = true; - u8 phybw40; - phybw40 = CHSPEC_IS40(pi->radio_chanspec); mod_phy_reg(pi, 0x6da, (0x1 << 5), (1) << 5); @@ -3082,7 +3080,7 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi) u8 bbmult; struct phytbl_info tab; s32 a1, b0, b1; - s32 tssi, pwr, maxtargetpwr, mintargetpwr; + s32 tssi, pwr, mintargetpwr; bool suspend; struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro); @@ -3119,7 +3117,6 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi) b0 = pi->txpa_2g[0]; b1 = pi->txpa_2g[1]; a1 = pi->txpa_2g[2]; - maxtargetpwr = wlc_lcnphy_tssi2dbm(10, a1, b0, b1); mintargetpwr = wlc_lcnphy_tssi2dbm(125, a1, b0, b1); tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL; @@ -4212,7 +4209,7 @@ static void wlc_lcnphy_periodic_cal(struct brcms_phy *pi) s8 index; struct phytbl_info tab; s32 a1, b0, b1; - s32 tssi, pwr, maxtargetpwr, mintargetpwr; + s32 tssi, pwr, mintargetpwr; struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy; pi->phy_lastcal = pi->sh->now; @@ -4249,7 +4246,6 @@ static void wlc_lcnphy_periodic_cal(struct brcms_phy *pi) b0 = pi->txpa_2g[0]; b1 = pi->txpa_2g[1]; a1 = pi->txpa_2g[2]; - maxtargetpwr = wlc_lcnphy_tssi2dbm(10, a1, b0, b1); mintargetpwr = wlc_lcnphy_tssi2dbm(125, a1, b0, b1); tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL; @@ -4622,13 +4618,10 @@ static void wlc_lcnphy_radio_init(struct brcms_phy *pi) static void wlc_lcnphy_tbl_init(struct brcms_phy *pi) { uint idx; - u8 phybw40; struct phytbl_info tab; const struct phytbl_info *tb; u32 val; - phybw40 = CHSPEC_IS40(pi->radio_chanspec); - for (idx = 0; idx < dot11lcnphytbl_info_sz_rev0; idx++) wlc_lcnphy_write_table(pi, &dot11lcnphytbl_info_rev0[idx]); @@ -4831,9 +4824,7 @@ static void wlc_lcnphy_baseband_init(struct brcms_phy *pi) void wlc_phy_init_lcnphy(struct brcms_phy *pi) { - u8 phybw40; struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy; - phybw40 = CHSPEC_IS40(pi->radio_chanspec); pi_lcn->lcnphy_cal_counter = 0; pi_lcn->lcnphy_cal_temper = pi_lcn->lcnphy_rawtempsense; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmutil/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmutil/Makefile index 256c91f9ac4b..bb02c6220a88 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmutil/Makefile +++ b/drivers/net/wireless/broadcom/brcm80211/brcmutil/Makefile @@ -15,9 +15,7 @@ # OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN # CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
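
The b43, b43legacy, brcmsmac and iwlegacy hunks in this stretch all delete error handling around debugfs registration, on the principle that debugfs is best-effort: the create functions accept an errored parent and simply propagate it, so call sites need no branches and no goto err ladders. A stand-alone sketch of that API shape, with user-space stand-ins for the kernel's ERR_PTR()/IS_ERR() helpers and a toy create_dir() (names and the node limit are illustrative, not the kernel implementation):

#include <errno.h>
#include <stdint.h>

/* User-space stand-ins for the kernel's error-pointer helpers. */
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline int IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-4095;
}

struct dentry { const char *name; };

/* Like debugfs_create_dir(): if the parent is itself an error
 * pointer, pass it through instead of failing loudly, so callers
 * can chain calls without checking each return value. */
static struct dentry *create_dir(const char *name, struct dentry *parent)
{
	static struct dentry nodes[8];
	static int used;

	if (IS_ERR(parent))
		return parent;
	if (used >= 8)
		return ERR_PTR(-ENOMEM);
	nodes[used].name = name;
	return &nodes[used++];
}

int main(void)
{
	/* Simulate debugfs being unavailable: the root is an error. */
	struct dentry *root = create_dir("b43", ERR_PTR(-ENODEV));
	/* No check at the call site; the error just propagates. */
	struct dentry *sub = create_dir("dyn_debug", root);

	return IS_ERR(sub) ? 0 : 1;
}

Because an errored parent flows through, an entire debugfs subtree quietly degrades to no-ops when its root fails, which is exactly why the drivers above could drop their cleanup paths.
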
-ccflags-y := \ - -Idrivers/net/wireless/broadcom/brcm80211/brcmutil \ - -Idrivers/net/wireless/broadcom/brcm80211/include +ccflags-y := -I $(srctree)/$(src)/../include obj-$(CONFIG_BRCMUTIL) += brcmutil.o brcmutil-objs = utils.o d11.o diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c index 57e3b6cca234..271977f7fbb0 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c @@ -3756,10 +3756,7 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto out_remove_sysfs; - err = il_dbgfs_register(il, DRV_NAME); - if (err) - IL_ERR("failed to create debugfs files. Ignoring error: %d\n", - err); + il_dbgfs_register(il, DRV_NAME); /* Start monitoring the killswitch */ queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll, 2 * HZ); diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c index 6b4488a178a7..94222ae464ae 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c @@ -4988,10 +4988,7 @@ il4965_ucode_callback(const struct firmware *ucode_raw, void *context) if (err) goto out_unbind; - err = il_dbgfs_register(il, DRV_NAME); - if (err) - IL_ERR("failed to create debugfs files. Ignoring error: %d\n", - err); + il_dbgfs_register(il, DRV_NAME); err = sysfs_create_group(&il->pci_dev->dev.kobj, &il_attribute_group); if (err) { diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h index dc6a74a05983..b079c64ca014 100644 --- a/drivers/net/wireless/intel/iwlegacy/common.h +++ b/drivers/net/wireless/intel/iwlegacy/common.h @@ -2974,13 +2974,11 @@ il_print_hex_dump(struct il_priv *il, int level, const void *p, u32 len) #endif /* CONFIG_IWLEGACY_DEBUG */ #ifdef CONFIG_IWLEGACY_DEBUGFS -int il_dbgfs_register(struct il_priv *il, const char *name); +void il_dbgfs_register(struct il_priv *il, const char *name); void il_dbgfs_unregister(struct il_priv *il); #else -static inline int -il_dbgfs_register(struct il_priv *il, const char *name) +static inline void il_dbgfs_register(struct il_priv *il, const char *name) { - return 0; } static inline void diff --git a/drivers/net/wireless/intel/iwlegacy/debug.c b/drivers/net/wireless/intel/iwlegacy/debug.c index d76073def677..fa211412e0ac 100644 --- a/drivers/net/wireless/intel/iwlegacy/debug.c +++ b/drivers/net/wireless/intel/iwlegacy/debug.c @@ -128,23 +128,12 @@ EXPORT_SYMBOL(il_update_stats); /* create and remove of files */ #define DEBUGFS_ADD_FILE(name, parent, mode) do { \ - if (!debugfs_create_file(#name, mode, parent, il, \ - &il_dbgfs_##name##_ops)) \ - goto err; \ + debugfs_create_file(#name, mode, parent, il, \ + &il_dbgfs_##name##_ops); \ } while (0) #define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \ - struct dentry *__tmp; \ - __tmp = debugfs_create_bool(#name, 0600, parent, ptr); \ - if (IS_ERR(__tmp) || !__tmp) \ - goto err; \ -} while (0) - -#define DEBUGFS_ADD_X32(name, parent, ptr) do { \ - struct dentry *__tmp; \ - __tmp = debugfs_create_x32(#name, 0600, parent, ptr); \ - if (IS_ERR(__tmp) || !__tmp) \ - goto err; \ + debugfs_create_bool(#name, 0600, parent, ptr); \ } while (0) /* file operation */ @@ -1341,27 +1330,18 @@ DEBUGFS_WRITE_FILE_OPS(wd_timeout); * Create the debugfs files and directories * */ -int +void il_dbgfs_register(struct il_priv *il, const char *name) { struct dentry *phyd = il->hw->wiphy->debugfsdir; struct dentry 
*dir_drv, *dir_data, *dir_rf, *dir_debug; dir_drv = debugfs_create_dir(name, phyd); - if (!dir_drv) - return -ENOMEM; - il->debugfs_dir = dir_drv; dir_data = debugfs_create_dir("data", dir_drv); - if (!dir_data) - goto err; dir_rf = debugfs_create_dir("rf", dir_drv); - if (!dir_rf) - goto err; dir_debug = debugfs_create_dir("debug", dir_drv); - if (!dir_debug) - goto err; DEBUGFS_ADD_FILE(nvm, dir_data, 0400); DEBUGFS_ADD_FILE(sram, dir_data, 0600); @@ -1399,12 +1379,6 @@ il_dbgfs_register(struct il_priv *il, const char *name) DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf, &il->disable_chain_noise_cal); DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf, &il->disable_tx_power_cal); - return 0; - -err: - IL_ERR("Can't create the debugfs directory\n"); - il_dbgfs_unregister(il); - return -ENOMEM; } EXPORT_SYMBOL(il_dbgfs_register); diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig index 491ca3c8b43c..83d5bceea08f 100644 --- a/drivers/net/wireless/intel/iwlwifi/Kconfig +++ b/drivers/net/wireless/intel/iwlwifi/Kconfig @@ -1,6 +1,6 @@ config IWLWIFI tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) " - depends on PCI && HAS_IOMEM + depends on PCI && HAS_IOMEM && CFG80211 select FW_LOADER ---help--- Select to build the driver supporting the: @@ -47,6 +47,7 @@ if IWLWIFI config IWLWIFI_LEDS bool depends on LEDS_CLASS=y || LEDS_CLASS=IWLWIFI + depends on IWLMVM || IWLDVM select LEDS_TRIGGERS select MAC80211_LEDS default y diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index 7e65073834b7..eb93711d474b 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -83,6 +83,7 @@ #define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-" #define IWL_22000_SU_Z0_FW_PRE "iwlwifi-su-z0-" #define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-" +#define IWL_CC_A_FW_PRE "iwlwifi-cc-a0-" #define IWL_22000_HR_MODULE_FIRMWARE(api) \ IWL_22000_HR_FW_PRE __stringify(api) ".ucode" @@ -104,6 +105,8 @@ IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode" #define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \ IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode" +#define IWL_CC_A_MODULE_FIRMWARE(api) \ + IWL_CC_A_FW_PRE __stringify(api) ".ucode" static const struct iwl_base_params iwl_22000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, @@ -195,8 +198,8 @@ const struct iwl_cfg iwl22000_2ac_cfg_jf = { IWL_DEVICE_22500, }; -const struct iwl_cfg iwl22000_2ax_cfg_hr = { - .name = "Intel(R) Dual Band Wireless AX 22000", +const struct iwl_cfg iwl22560_2ax_cfg_hr = { + .name = "Intel(R) Wireless-AX 22560", .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE, IWL_DEVICE_22500, /* @@ -207,6 +210,42 @@ const struct iwl_cfg iwl22000_2ax_cfg_hr = { .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; +const struct iwl_cfg iwl22260_2ax_cfg = { + .name = "Intel(R) Wireless-AX 22260", + .fw_name_pre = IWL_CC_A_FW_PRE, + IWL_DEVICE_22500, + /* + * This device doesn't support receiving BlockAck with a large bitmap + * so we need to restrict the size of transmitted aggregation to the + * HT size; mac80211 would otherwise pick the HE max (256) by default. 
+ */ + .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, +}; + +const struct iwl_cfg killer1650x_2ax_cfg = { + .name = "Killer(R) Wireless-AX 1650x Wireless Network Adapter (22260NGW)", + .fw_name_pre = IWL_CC_A_FW_PRE, + IWL_DEVICE_22500, + /* + * This device doesn't support receiving BlockAck with a large bitmap + * so we need to restrict the size of transmitted aggregation to the + * HT size; mac80211 would otherwise pick the HE max (256) by default. + */ + .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, +}; + +const struct iwl_cfg killer1650w_2ax_cfg = { + .name = "Killer(R) Wireless-AX 1650w Wireless Network Adapter (22260D2W)", + .fw_name_pre = IWL_CC_A_FW_PRE, + IWL_DEVICE_22500, + /* + * This device doesn't support receiving BlockAck with a large bitmap + * so we need to restrict the size of transmitted aggregation to the + * HT size; mac80211 would otherwise pick the HE max (256) by default. + */ + .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, +}; + /* * All JF radio modules are part of the 9000 series, but the MAC part * looks more like 22000. That's why this device is here, but called @@ -230,6 +269,12 @@ const struct iwl_cfg iwl9560_2ac_cfg_qu_b0_jf_b0 = { IWL_DEVICE_22500, }; +const struct iwl_cfg iwl9560_2ac_160_cfg_qu_b0_jf_b0 = { + .name = "Intel(R) Wireless-AC 9560 160MHz", + .fw_name_pre = IWL_QU_B_JF_B_FW_PRE, + IWL_DEVICE_22500, +}; + const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0 = { .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)", .fw_name_pre = IWL_QU_B_JF_B_FW_PRE, @@ -242,6 +287,30 @@ const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0 = { IWL_DEVICE_22500, }; +const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = { + .name = "Killer(R) Wireless-AX 1650i Wireless Network Adapter (22560NGW)", + .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE, + IWL_DEVICE_22500, + /* + * This device doesn't support receiving BlockAck with a large bitmap + * so we need to restrict the size of transmitted aggregation to the + * HT size; mac80211 would otherwise pick the HE max (256) by default. + */ + .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, +}; + +const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = { + .name = "Killer(R) Wireless-AX 1650s Wireless Network Adapter (22560D2W)", + .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE, + IWL_DEVICE_22500, + /* + * This device doesn't support receiving BlockAck with a large bitmap + * so we need to restrict the size of transmitted aggregation to the + * HT size; mac80211 would otherwise pick the HE max (256) by default. 
+ */ + .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, +}; + const struct iwl_cfg iwl22000_2ax_cfg_jf = { .name = "Intel(R) Dual Band Wireless AX 22000", .fw_name_pre = IWL_QU_B_JF_B_FW_PRE, @@ -324,3 +393,4 @@ MODULE_FIRMWARE(IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_CC_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c index f2114137c13f..113bcf7735a0 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c @@ -73,21 +73,12 @@ #define IWL9000_SMEM_OFFSET 0x400000 #define IWL9000_SMEM_LEN 0x68000 -#define IWL9000A_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-" -#define IWL9000B_FW_PRE "iwlwifi-9000-pu-b0-jf-b0-" -#define IWL9000RFB_FW_PRE "iwlwifi-9000-pu-a0-jf-b0-" -#define IWL9260A_FW_PRE "iwlwifi-9260-th-a0-jf-a0-" -#define IWL9260B_FW_PRE "iwlwifi-9260-th-b0-jf-b0-" -#define IWL9000A_MODULE_FIRMWARE(api) \ - IWL9000A_FW_PRE __stringify(api) ".ucode" -#define IWL9000B_MODULE_FIRMWARE(api) \ - IWL9000B_FW_PRE __stringify(api) ".ucode" -#define IWL9000RFB_MODULE_FIRMWARE(api) \ - IWL9000RFB_FW_PRE __stringify(api) ".ucode" -#define IWL9260A_MODULE_FIRMWARE(api) \ - IWL9260A_FW_PRE __stringify(api) ".ucode" -#define IWL9260B_MODULE_FIRMWARE(api) \ - IWL9260B_FW_PRE __stringify(api) ".ucode" +#define IWL9000_FW_PRE "iwlwifi-9000-pu-b0-jf-b0-" +#define IWL9260_FW_PRE "iwlwifi-9260-th-b0-jf-b0-" +#define IWL9000_MODULE_FIRMWARE(api) \ + IWL9000_FW_PRE __stringify(api) ".ucode" +#define IWL9260_MODULE_FIRMWARE(api) \ + IWL9260_FW_PRE __stringify(api) ".ucode" static const struct iwl_base_params iwl9000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, @@ -162,81 +153,87 @@ static const struct iwl_tt_params iwl9000_tt_params = { const struct iwl_cfg iwl9160_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 9160", - .fw_name_pre = IWL9260A_FW_PRE, - .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE, + .fw_name_pre = IWL9260_FW_PRE, IWL_DEVICE_9000, }; const struct iwl_cfg iwl9260_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 9260", - .fw_name_pre = IWL9260A_FW_PRE, - .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE, + .fw_name_pre = IWL9260_FW_PRE, + IWL_DEVICE_9000, +}; + +const struct iwl_cfg iwl9260_2ac_160_cfg = { + .name = "Intel(R) Wireless-AC 9260 160MHz", + .fw_name_pre = IWL9260_FW_PRE, IWL_DEVICE_9000, }; const struct iwl_cfg iwl9260_killer_2ac_cfg = { .name = "Killer (R) Wireless-AC 1550 Wireless Network Adapter (9260NGW)", - .fw_name_pre = IWL9260A_FW_PRE, - .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE, + .fw_name_pre = IWL9260_FW_PRE, IWL_DEVICE_9000, }; const struct iwl_cfg iwl9270_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 9270", - .fw_name_pre = IWL9260A_FW_PRE, - .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE, + .fw_name_pre = IWL9260_FW_PRE, IWL_DEVICE_9000, }; const struct iwl_cfg iwl9460_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 9460", - .fw_name_pre = IWL9260A_FW_PRE, - .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE, + .fw_name_pre = IWL9260_FW_PRE, IWL_DEVICE_9000, }; const struct iwl_cfg iwl9460_2ac_cfg_soc = { .name = "Intel(R) Dual Band Wireless AC 9460", - .fw_name_pre = IWL9000A_FW_PRE, - .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, - 
.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + .fw_name_pre = IWL9000_FW_PRE, IWL_DEVICE_9000, .integrated = true, .soc_latency = 5000, }; const struct iwl_cfg iwl9461_2ac_cfg_soc = { - .name = "Intel(R) Dual Band Wireless AC 9461", - .fw_name_pre = IWL9000A_FW_PRE, - .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, - .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, - IWL_DEVICE_9000, - .integrated = true, - .soc_latency = 5000, + .name = "Intel(R) Dual Band Wireless AC 9461", + .fw_name_pre = IWL9000_FW_PRE, + IWL_DEVICE_9000, + .integrated = true, + .soc_latency = 5000, }; const struct iwl_cfg iwl9462_2ac_cfg_soc = { - .name = "Intel(R) Dual Band Wireless AC 9462", - .fw_name_pre = IWL9000A_FW_PRE, - .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, - .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, - IWL_DEVICE_9000, - .integrated = true, - .soc_latency = 5000, + .name = "Intel(R) Dual Band Wireless AC 9462", + .fw_name_pre = IWL9000_FW_PRE, + IWL_DEVICE_9000, + .integrated = true, + .soc_latency = 5000, }; const struct iwl_cfg iwl9560_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 9560", - .fw_name_pre = IWL9260A_FW_PRE, - .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE, + .fw_name_pre = IWL9260_FW_PRE, + IWL_DEVICE_9000, +}; + +const struct iwl_cfg iwl9560_2ac_160_cfg = { + .name = "Intel(R) Wireless-AC 9560 160MHz", + .fw_name_pre = IWL9260_FW_PRE, IWL_DEVICE_9000, }; const struct iwl_cfg iwl9560_2ac_cfg_soc = { .name = "Intel(R) Dual Band Wireless AC 9560", - .fw_name_pre = IWL9000A_FW_PRE, - .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, - .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + .fw_name_pre = IWL9000_FW_PRE, + IWL_DEVICE_9000, + .integrated = true, + .soc_latency = 5000, +}; + +const struct iwl_cfg iwl9560_2ac_160_cfg_soc = { + .name = "Intel(R) Wireless-AC 9560 160MHz", + .fw_name_pre = IWL9000_FW_PRE, IWL_DEVICE_9000, .integrated = true, .soc_latency = 5000, @@ -244,9 +241,7 @@ const struct iwl_cfg iwl9560_2ac_cfg_soc = { const struct iwl_cfg iwl9560_killer_2ac_cfg_soc = { .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)", - .fw_name_pre = IWL9000A_FW_PRE, - .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, - .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + .fw_name_pre = IWL9000_FW_PRE, IWL_DEVICE_9000, .integrated = true, .soc_latency = 5000, @@ -254,9 +249,7 @@ const struct iwl_cfg iwl9560_killer_2ac_cfg_soc = { const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc = { .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)", - .fw_name_pre = IWL9000A_FW_PRE, - .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, - .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + .fw_name_pre = IWL9000_FW_PRE, IWL_DEVICE_9000, .integrated = true, .soc_latency = 5000, @@ -264,9 +257,7 @@ const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc = { const struct iwl_cfg iwl9460_2ac_cfg_shared_clk = { .name = "Intel(R) Dual Band Wireless AC 9460", - .fw_name_pre = IWL9000A_FW_PRE, - .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, - .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + .fw_name_pre = IWL9000_FW_PRE, IWL_DEVICE_9000, .integrated = true, .soc_latency = 5000, @@ -275,9 +266,7 @@ const struct iwl_cfg iwl9460_2ac_cfg_shared_clk = { const struct iwl_cfg iwl9461_2ac_cfg_shared_clk = { .name = "Intel(R) Dual Band Wireless AC 9461", - .fw_name_pre = IWL9000A_FW_PRE, - .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, - .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + .fw_name_pre = IWL9000_FW_PRE, IWL_DEVICE_9000, .integrated = true, .soc_latency = 5000, @@ -286,9 +275,7 @@ const struct iwl_cfg 
iwl9461_2ac_cfg_shared_clk = { const struct iwl_cfg iwl9462_2ac_cfg_shared_clk = { .name = "Intel(R) Dual Band Wireless AC 9462", - .fw_name_pre = IWL9000A_FW_PRE, - .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, - .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + .fw_name_pre = IWL9000_FW_PRE, IWL_DEVICE_9000, .integrated = true, .soc_latency = 5000, @@ -297,9 +284,16 @@ const struct iwl_cfg iwl9462_2ac_cfg_shared_clk = { const struct iwl_cfg iwl9560_2ac_cfg_shared_clk = { .name = "Intel(R) Dual Band Wireless AC 9560", - .fw_name_pre = IWL9000A_FW_PRE, - .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, - .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + .fw_name_pre = IWL9000_FW_PRE, + IWL_DEVICE_9000, + .integrated = true, + .soc_latency = 5000, + .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK +}; + +const struct iwl_cfg iwl9560_2ac_160_cfg_shared_clk = { + .name = "Intel(R) Wireless-AC 9560 160MHz", + .fw_name_pre = IWL9000_FW_PRE, IWL_DEVICE_9000, .integrated = true, .soc_latency = 5000, @@ -308,9 +302,7 @@ const struct iwl_cfg iwl9560_2ac_cfg_shared_clk = { const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk = { .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)", - .fw_name_pre = IWL9000A_FW_PRE, - .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, - .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + .fw_name_pre = IWL9000_FW_PRE, IWL_DEVICE_9000, .integrated = true, .soc_latency = 5000, @@ -319,17 +311,12 @@ const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk = { const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk = { .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)", - .fw_name_pre = IWL9000A_FW_PRE, - .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, - .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + .fw_name_pre = IWL9000_FW_PRE, IWL_DEVICE_9000, .integrated = true, .soc_latency = 5000, .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK }; -MODULE_FIRMWARE(IWL9000A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL9000B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL9000RFB_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL9260A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL9260B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL9260_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/Makefile b/drivers/net/wireless/intel/iwlwifi/dvm/Makefile index 702d42b2d452..0486b17d7c41 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/dvm/Makefile @@ -11,4 +11,4 @@ iwldvm-objs += rxon.o devices.o iwldvm-$(CONFIG_IWLWIFI_LEDS) += led.o iwldvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o -ccflags-y += -I$(src)/../ +ccflags-y += -I $(srctree)/$(src)/../ diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c index 49b71dbf8490..54b759cec8b3 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c @@ -1,6 +1,7 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * Copyright (C) 2018 Intel Corporation * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. 
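
A note on the 9000-series consolidation above: each firmware file name is glued together at compile time from a prefix, a stringified API number, and the ".ucode" suffix, which is why collapsing five FW_PRE defines into two removes so much duplication. A compact sketch of the preprocessor pattern (__stringify mirrors include/linux/stringify.h; the API number 46 is only an illustrative value):

#include <stdio.h>

/* Two-level expansion so that a macro argument such as
 * IWL9000_UCODE_API_MAX is expanded before being stringified. */
#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)

#define IWL9000_FW_PRE		"iwlwifi-9000-pu-b0-jf-b0-"
#define IWL9000_UCODE_API_MAX	46	/* illustrative value */
#define IWL9000_MODULE_FIRMWARE(api) \
	IWL9000_FW_PRE __stringify(api) ".ucode"

int main(void)
{
	/* Concatenates to "iwlwifi-9000-pu-b0-jf-b0-46.ucode"
	 * entirely at compile time. */
	puts(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
	return 0;
}
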
@@ -710,24 +711,6 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, return ret; } -static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg) -{ - if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) - return false; - return true; -} - -static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg) -{ - if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) - return false; - if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG) - return true; - - /* disabled by default */ - return false; -} - static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_ampdu_params *params) @@ -752,7 +735,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, switch (action) { case IEEE80211_AMPDU_RX_START: - if (!iwl_enable_rx_ampdu(priv->cfg)) + if (!iwl_enable_rx_ampdu()) break; IWL_DEBUG_HT(priv, "start Rx\n"); ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn); @@ -764,7 +747,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, case IEEE80211_AMPDU_TX_START: if (!priv->trans->ops->txq_enable) break; - if (!iwl_enable_tx_ampdu(priv->cfg)) + if (!iwl_enable_tx_ampdu()) break; IWL_DEBUG_HT(priv, "start Tx\n"); ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c index c219bca5cff4..bd3c3b921d4c 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c @@ -1054,7 +1054,7 @@ static void iwl_bg_restart(struct work_struct *data) ieee80211_restart_hw(priv->hw); else IWL_ERR(priv, - "Cannot request restart before registrating with mac80211\n"); + "Cannot request restart before registering with mac80211\n"); } else { WARN_ON(1); } diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tt.c b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c index 4de2727ac63e..a156dcf5b7d9 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c @@ -1,6 +1,7 @@ /****************************************************************************** * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * Copyright (C) 2018 Intel Corporation * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. 
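
The dvm/mac80211.c hunk above deletes the driver-local iwl_enable_rx_ampdu()/iwl_enable_tx_ampdu() copies; their only logic is testing bits of the shared disable_11n module parameter, so the common helpers can drop the unused cfg argument. A self-contained sketch of that bitmask gate (the bit assignments follow the pattern of the removed code but should be treated as illustrative):

#include <stdbool.h>
#include <stdio.h>

#define IWL_DISABLE_HT_TXAGG	0x02	/* illustrative bit values */
#define IWL_DISABLE_HT_RXAGG	0x04
#define IWL_ENABLE_HT_TXAGG	0x08

static unsigned int disable_11n;	/* the module parameter */

static bool iwl_enable_rx_ampdu(void)
{
	return !(disable_11n & IWL_DISABLE_HT_RXAGG);
}

static bool iwl_enable_tx_ampdu(void)
{
	if (disable_11n & IWL_DISABLE_HT_TXAGG)
		return false;
	if (disable_11n & IWL_ENABLE_HT_TXAGG)
		return true;
	return false;	/* TX aggregation is off by default */
}

int main(void)
{
	disable_11n = IWL_ENABLE_HT_TXAGG;
	printf("rx=%d tx=%d\n", iwl_enable_rx_ampdu(),
	       iwl_enable_tx_ampdu());
	return 0;
}
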
@@ -325,9 +326,9 @@ static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp, bool force) iwl_prepare_ct_kill_task(priv); tt->state = old_state; } - } else if (old_state == IWL_TI_CT_KILL && - tt->state != IWL_TI_CT_KILL) + } else if (old_state == IWL_TI_CT_KILL) { iwl_perform_ct_kill_task(priv, false); + } IWL_DEBUG_TEMP(priv, "Temperature state changed %u\n", tt->state); IWL_DEBUG_TEMP(priv, "Power Index change to %u\n", diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h index 8b4922bbe139..0290b333d860 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h @@ -77,7 +77,8 @@ * @DATA_PATH_GROUP: data path group, uses command IDs from * &enum iwl_data_path_subcmd_ids * @NAN_GROUP: NAN group, uses command IDs from &enum iwl_nan_subcmd_ids - * @TOF_GROUP: TOF group, uses command IDs from &enum iwl_tof_subcmd_ids + * @LOCATION_GROUP: location group, uses command IDs from + * &enum iwl_location_subcmd_ids * @PROT_OFFLOAD_GROUP: protocol offload group, uses command IDs from * &enum iwl_prot_offload_subcmd_ids * @REGULATORY_AND_NVM_GROUP: regulatory/NVM group, uses command IDs from @@ -92,7 +93,7 @@ enum iwl_mvm_command_groups { PHY_OPS_GROUP = 0x4, DATA_PATH_GROUP = 0x5, NAN_GROUP = 0x7, - TOF_GROUP = 0x8, + LOCATION_GROUP = 0x8, PROT_OFFLOAD_GROUP = 0xb, REGULATORY_AND_NVM_GROUP = 0xc, DEBUG_GROUP = 0xf, @@ -353,16 +354,6 @@ enum iwl_legacy_cmds { PHY_DB_CMD = 0x6c, /** - * @TOF_CMD: &struct iwl_tof_config_cmd - */ - TOF_CMD = 0x10, - - /** - * @TOF_NOTIFICATION: &struct iwl_tof_gen_resp_cmd - */ - TOF_NOTIFICATION = 0x11, - - /** * @POWER_TABLE_CMD: &struct iwl_device_power_cmd */ POWER_TABLE_CMD = 0x77, @@ -415,7 +406,11 @@ enum iwl_legacy_cmds { TX_ANT_CONFIGURATION_CMD = 0x98, /** - * @STATISTICS_CMD: &struct iwl_statistics_cmd + * @STATISTICS_CMD: + * one of &struct iwl_statistics_cmd, + * &struct iwl_notif_statistics_v11, + * &struct iwl_notif_statistics_v10, + * &struct iwl_notif_statistics */ STATISTICS_CMD = 0x9c, @@ -423,7 +418,7 @@ enum iwl_legacy_cmds { * @STATISTICS_NOTIFICATION: * one of &struct iwl_notif_statistics_v10, * &struct iwl_notif_statistics_v11, - * &struct iwl_notif_statistics_cdb + * &struct iwl_notif_statistics */ STATISTICS_NOTIFICATION = 0x9d, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h index 6fae02fa4cad..86ea0784e1a3 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h @@ -224,8 +224,18 @@ struct iwl_wowlan_pattern { #define IWL_WOWLAN_MAX_PATTERNS 20 +/** + * struct iwl_wowlan_patterns_cmd - WoWLAN wakeup patterns + */ struct iwl_wowlan_patterns_cmd { + /** + * @n_patterns: number of patterns + */ __le32 n_patterns; + + /** + * @patterns: the patterns, array length in @n_patterns + */ struct iwl_wowlan_pattern patterns[]; } __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_1 */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h index fdc54a5dc9de..93c06e6c1ced 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h @@ -105,6 +105,12 @@ enum iwl_data_path_subcmd_ids { HE_AIR_SNIFFER_CONFIG_CMD = 0x13, /** + * @CHEST_COLLECTOR_FILTER_CONFIG_CMD: Configure the CSI + * matrix collection, uses &struct iwl_channel_estimation_cfg + */ + 
CHEST_COLLECTOR_FILTER_CONFIG_CMD = 0x14, + + /** * @RX_NO_DATA_NOTIF: &struct iwl_rx_no_data */ RX_NO_DATA_NOTIF = 0xF5, @@ -156,4 +162,53 @@ struct iwl_mu_group_mgmt_notif { __le32 user_position[4]; } __packed; /* MU_GROUP_MNG_NTFY_API_S_VER_1 */ +enum iwl_channel_estimation_flags { + IWL_CHANNEL_ESTIMATION_ENABLE = BIT(0), + IWL_CHANNEL_ESTIMATION_TIMER = BIT(1), + IWL_CHANNEL_ESTIMATION_COUNTER = BIT(2), +}; + +/** + * struct iwl_channel_estimation_cfg - channel estimation reporting config + */ +struct iwl_channel_estimation_cfg { + /** + * @flags: flags, see &enum iwl_channel_estimation_flags + */ + __le32 flags; + /** + * @timer: if enabled via flags, automatically disable after this many + * microseconds + */ + __le32 timer; + /** + * @count: if enabled via flags, automatically disable after this many + * frames with channel estimation matrix were captured + */ + __le32 count; + /** + * @rate_n_flags_mask: only try to record the channel estimation matrix + * if the rate_n_flags value for the received frame (let's call + * that rx_rnf) matches the mask/value given here like this: + * (rx_rnf & rate_n_flags_mask) == rate_n_flags_val. + */ + __le32 rate_n_flags_mask; + /** + * @rate_n_flags_val: see @rate_n_flags_mask + */ + __le32 rate_n_flags_val; + /** + * @reserved: reserved (for alignment) + */ + __le32 reserved; + /** + * @frame_types: bitmap of frame types to capture, the received frame's + * subtype|type takes 6 bits in the frame and the corresponding bit + * in this field must be set to 1 to capture channel estimation for + * that frame type. Set to all-ones to enable capturing for all + * frame types. + */ + __le64 frame_types; +} __packed; /* CHEST_COLLECTOR_FILTER_CMD_API_S_VER_1 */ + #endif /* __iwl_fw_api_datapath_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h index dc1fa377087a..988584973aba 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h @@ -335,29 +335,11 @@ struct iwl_dbg_mem_access_rsp { __le32 data[]; } __packed; /* DEBUG_(U|L)MAC_RD_WR_RSP_API_S_VER_1 */ -#define CONT_REC_COMMAND_SIZE 80 -#define ENABLE_CONT_RECORDING 0x15 -#define DISABLE_CONT_RECORDING 0x16 +#define LDBG_CFG_COMMAND_SIZE 80 #define BUFFER_ALLOCATION 0x27 #define START_DEBUG_RECORDING 0x29 #define STOP_DEBUG_RECORDING 0x2A -/* - * struct iwl_continuous_record_mode - recording mode - */ -struct iwl_continuous_record_mode { - __le16 enable_recording; -} __packed; - -/* - * struct iwl_continuous_record_cmd - enable/disable continuous recording - */ -struct iwl_continuous_record_cmd { - struct iwl_continuous_record_mode record_mode; - u8 pad[CONT_REC_COMMAND_SIZE - - sizeof(struct iwl_continuous_record_mode)]; -} __packed; - /* maximum fragments to be allocated per target of allocationId */ #define IWL_BUFFER_LOCATION_MAX_FRAGS 2 @@ -385,4 +367,17 @@ struct iwl_buffer_allocation_cmd { struct iwl_fragment_data fragments[IWL_BUFFER_LOCATION_MAX_FRAGS]; } __packed; /* BUFFER_ALLOCATION_CMD_API_S_VER_1 */ +/** + * struct iwl_ldbg_config_cmd - LDBG config command + * @type: configuration type + * @pad: reserved space for type-dependent data + */ +struct iwl_ldbg_config_cmd { + __le32 type; + union { + u8 pad[LDBG_CFG_COMMAND_SIZE - sizeof(__le32)]; + struct iwl_buffer_allocation_cmd buffer_allocation; + }; /* LDBG_CFG_BODY_API_U_VER_2 (partially) */ +} __packed; /* LDBG_CFG_CMD_API_S_VER_2 */ + #endif /* __iwl_fw_api_debug_h__ */ diff --git 
a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h new file mode 100644 index 000000000000..6da91ec0df55 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h @@ -0,0 +1,711 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless <linuxwifi@intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ +#ifndef __iwl_fw_api_location_h__ +#define __iwl_fw_api_location_h__ + +/** + * enum iwl_location_subcmd_ids - location group command IDs + */ +enum iwl_location_subcmd_ids { + /** + * @TOF_RANGE_REQ_CMD: TOF ranging request, + * uses &struct iwl_tof_range_req_cmd + */ + TOF_RANGE_REQ_CMD = 0x0, + /** + * @TOF_CONFIG_CMD: TOF configuration, uses &struct iwl_tof_config_cmd + */ + TOF_CONFIG_CMD = 0x1, + /** + * @TOF_RANGE_ABORT_CMD: abort ongoing ranging, uses + * &struct iwl_tof_range_abort_cmd + */ + TOF_RANGE_ABORT_CMD = 0x2, + /** + * @TOF_RANGE_REQ_EXT_CMD: TOF extended ranging config, + * uses &struct iwl_tof_range_request_ext_cmd + */ + TOF_RANGE_REQ_EXT_CMD = 0x3, + /** + * @TOF_RESPONDER_CONFIG_CMD: FTM responder configuration, + * uses &struct iwl_tof_responder_config_cmd + */ + TOF_RESPONDER_CONFIG_CMD = 0x4, + /** + * @TOF_RESPONDER_DYN_CONFIG_CMD: FTM dynamic configuration, + * uses &struct iwl_tof_responder_dyn_config_cmd + */ + TOF_RESPONDER_DYN_CONFIG_CMD = 0x5, + /** + * @CSI_HEADER_NOTIFICATION: CSI header + */ + CSI_HEADER_NOTIFICATION = 0xFA, + /** + * @CSI_CHUNKS_NOTIFICATION: CSI chunk, + * uses &struct iwl_csi_chunk_notification + */ + CSI_CHUNKS_NOTIFICATION = 0xFB, + /** + * @TOF_LC_NOTIF: used for LCI/civic location, contains just + * the action frame + */ + TOF_LC_NOTIF = 0xFC, + /** + * @TOF_RESPONDER_STATS: FTM responder statistics notification, + * uses &struct iwl_ftm_responder_stats + */ + TOF_RESPONDER_STATS = 0xFD, + /** + * @TOF_MCSI_DEBUG_NOTIF: MCSI debug notification, uses + * &struct iwl_tof_mcsi_notif + */ + TOF_MCSI_DEBUG_NOTIF = 0xFE, + /** + * @TOF_RANGE_RESPONSE_NOTIF: ranging response, using + * &struct iwl_tof_range_rsp_ntfy + */ + TOF_RANGE_RESPONSE_NOTIF = 0xFF, +}; + +/** + * struct iwl_tof_config_cmd - ToF configuration + * @tof_disabled: indicates if ToF is disabled (or not) + * @one_sided_disabled: indicates if one-sided is disabled (or not) + * @is_debug_mode: indiciates if debug mode is active + * @is_buf_required: indicates if channel estimation buffer is required + */ +struct iwl_tof_config_cmd { + u8 tof_disabled; + u8 one_sided_disabled; + u8 is_debug_mode; + u8 is_buf_required; +} __packed; + +/** + * enum iwl_tof_bandwidth - values for iwl_tof_range_req_ap_entry.bandwidth + * @IWL_TOF_BW_20_LEGACY: 20 MHz non-HT + * @IWL_TOF_BW_20_HT: 20 MHz HT + * @IWL_TOF_BW_40: 40 MHz + * @IWL_TOF_BW_80: 80 MHz + * @IWL_TOF_BW_160: 160 MHz + */ +enum iwl_tof_bandwidth { + IWL_TOF_BW_20_LEGACY, + IWL_TOF_BW_20_HT, + IWL_TOF_BW_40, + IWL_TOF_BW_80, + IWL_TOF_BW_160, +}; /* LOCAT_BW_TYPE_E */ + +/* + * enum iwl_tof_algo_type - Algorithym type for range measurement request + */ +enum iwl_tof_algo_type { + IWL_TOF_ALGO_TYPE_MAX_LIKE = 0, + IWL_TOF_ALGO_TYPE_LINEAR_REG = 1, + IWL_TOF_ALGO_TYPE_FFT = 2, + + /* Keep last */ + IWL_TOF_ALGO_TYPE_INVALID, +}; /* ALGO_TYPE_E */ + +/* + * enum iwl_tof_mcsi_ntfy - Enable/Disable MCSI notifications + */ +enum iwl_tof_mcsi_enable { + IWL_TOF_MCSI_DISABLED = 0, + IWL_TOF_MCSI_ENABLED = 1, +}; /* MCSI_ENABLE_E */ + +/** + * enum iwl_tof_responder_cmd_valid_field - valid fields in the responder cfg + * @IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO: channel info is valid + * @IWL_TOF_RESPONDER_CMD_VALID_TOA_OFFSET: ToA offset is valid + * @IWL_TOF_RESPONDER_CMD_VALID_COMMON_CALIB: common calibration mode is valid + * @IWL_TOF_RESPONDER_CMD_VALID_SPECIFIC_CALIB: spefici calibration mode is + * valid + * @IWL_TOF_RESPONDER_CMD_VALID_BSSID: 
BSSID is valid + * @IWL_TOF_RESPONDER_CMD_VALID_TX_ANT: TX antenna is valid + * @IWL_TOF_RESPONDER_CMD_VALID_ALGO_TYPE: algorithm type is valid + * @IWL_TOF_RESPONDER_CMD_VALID_NON_ASAP_SUPPORT: non-ASAP support is valid + * @IWL_TOF_RESPONDER_CMD_VALID_STATISTICS_REPORT_SUPPORT: statistics report + * support is valid + * @IWL_TOF_RESPONDER_CMD_VALID_MCSI_NOTIF_SUPPORT: MCSI notification support + * is valid + * @IWL_TOF_RESPONDER_CMD_VALID_FAST_ALGO_SUPPORT: fast algorithm support + * is valid + * @IWL_TOF_RESPONDER_CMD_VALID_RETRY_ON_ALGO_FAIL: retry on algorithm failure + * is valid + * @IWL_TOF_RESPONDER_CMD_VALID_STA_ID: station ID is valid + */ +enum iwl_tof_responder_cmd_valid_field { + IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO = BIT(0), + IWL_TOF_RESPONDER_CMD_VALID_TOA_OFFSET = BIT(1), + IWL_TOF_RESPONDER_CMD_VALID_COMMON_CALIB = BIT(2), + IWL_TOF_RESPONDER_CMD_VALID_SPECIFIC_CALIB = BIT(3), + IWL_TOF_RESPONDER_CMD_VALID_BSSID = BIT(4), + IWL_TOF_RESPONDER_CMD_VALID_TX_ANT = BIT(5), + IWL_TOF_RESPONDER_CMD_VALID_ALGO_TYPE = BIT(6), + IWL_TOF_RESPONDER_CMD_VALID_NON_ASAP_SUPPORT = BIT(7), + IWL_TOF_RESPONDER_CMD_VALID_STATISTICS_REPORT_SUPPORT = BIT(8), + IWL_TOF_RESPONDER_CMD_VALID_MCSI_NOTIF_SUPPORT = BIT(9), + IWL_TOF_RESPONDER_CMD_VALID_FAST_ALGO_SUPPORT = BIT(10), + IWL_TOF_RESPONDER_CMD_VALID_RETRY_ON_ALGO_FAIL = BIT(11), + IWL_TOF_RESPONDER_CMD_VALID_STA_ID = BIT(12), +}; + +/** + * enum iwl_tof_responder_cfg_flags - responder configuration flags + * @IWL_TOF_RESPONDER_FLAGS_NON_ASAP_SUPPORT: non-ASAP support + * @IWL_TOF_RESPONDER_FLAGS_REPORT_STATISTICS: report statistics + * @IWL_TOF_RESPONDER_FLAGS_REPORT_MCSI: report MCSI + * @IWL_TOF_RESPONDER_FLAGS_ALGO_TYPE: algorithm type + * @IWL_TOF_RESPONDER_FLAGS_TOA_OFFSET_MODE: ToA offset mode + * @IWL_TOF_RESPONDER_FLAGS_COMMON_CALIB_MODE: common calibration mode + * @IWL_TOF_RESPONDER_FLAGS_SPECIFIC_CALIB_MODE: specific calibration mode + * @IWL_TOF_RESPONDER_FLAGS_FAST_ALGO_SUPPORT: fast algorithm support + * @IWL_TOF_RESPONDER_FLAGS_RETRY_ON_ALGO_FAIL: retry on algorithm fail + * @IWL_TOF_RESPONDER_FLAGS_FTM_TX_ANT: TX antenna mask + */ +enum iwl_tof_responder_cfg_flags { + IWL_TOF_RESPONDER_FLAGS_NON_ASAP_SUPPORT = BIT(0), + IWL_TOF_RESPONDER_FLAGS_REPORT_STATISTICS = BIT(1), + IWL_TOF_RESPONDER_FLAGS_REPORT_MCSI = BIT(2), + IWL_TOF_RESPONDER_FLAGS_ALGO_TYPE = BIT(3) | BIT(4) | BIT(5), + IWL_TOF_RESPONDER_FLAGS_TOA_OFFSET_MODE = BIT(6), + IWL_TOF_RESPONDER_FLAGS_COMMON_CALIB_MODE = BIT(7), + IWL_TOF_RESPONDER_FLAGS_SPECIFIC_CALIB_MODE = BIT(8), + IWL_TOF_RESPONDER_FLAGS_FAST_ALGO_SUPPORT = BIT(9), + IWL_TOF_RESPONDER_FLAGS_RETRY_ON_ALGO_FAIL = BIT(10), + IWL_TOF_RESPONDER_FLAGS_FTM_TX_ANT = RATE_MCS_ANT_ABC_MSK, +}; + +/** + * struct iwl_tof_responder_config_cmd - ToF AP mode (for debug) + * @cmd_valid_fields: &iwl_tof_responder_cmd_valid_field + * @responder_cfg_flags: &iwl_tof_responder_cfg_flags + * @bandwidth: current AP Bandwidth: &enum iwl_tof_bandwidth + * @rate: current AP rate + * @channel_num: current AP Channel + * @ctrl_ch_position: coding of the control channel position relative to + * the center frequency, see iwl_mvm_get_ctrl_pos() + * @sta_id: index of the AP STA when in AP mode + * @reserved1: reserved + * @toa_offset: Artificial addition [pSec] for the ToA - to be used for debug + * purposes, simulating station movement by adding various values + * to this field + * @common_calib: XVT: common calibration value + * @specific_calib: XVT: specific calibration value + * @bssid: Current AP BSSID + * @reserved2: 
reserved + */ +struct iwl_tof_responder_config_cmd { + __le32 cmd_valid_fields; + __le32 responder_cfg_flags; + u8 bandwidth; + u8 rate; + u8 channel_num; + u8 ctrl_ch_position; + u8 sta_id; + u8 reserved1; + __le16 toa_offset; + __le16 common_calib; + __le16 specific_calib; + u8 bssid[ETH_ALEN]; + __le16 reserved2; +} __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_6 */ + +#define IWL_LCI_CIVIC_IE_MAX_SIZE 400 + +/** + * struct iwl_tof_responder_dyn_config_cmd - Dynamic responder settings + * @lci_len: The length of the 1st (LCI) part in the @lci_civic buffer + * @civic_len: The length of the 2nd (CIVIC) part in the @lci_civic buffer + * @lci_civic: The LCI/CIVIC buffer. LCI data (if exists) comes first, then, if + * needed, 0-padding such that the next part is dword-aligned, then CIVIC + * data (if exists) follows, and then 0-padding again to complete a + * 4-multiple long buffer. + */ +struct iwl_tof_responder_dyn_config_cmd { + __le32 lci_len; + __le32 civic_len; + u8 lci_civic[]; +} __packed; /* TOF_RESPONDER_DYN_CONFIG_CMD_API_S_VER_2 */ + +/** + * struct iwl_tof_range_request_ext_cmd - extended range req for WLS + * @tsf_timer_offset_msec: the recommended time offset (mSec) from the AP's TSF + * @reserved: reserved + * @min_delta_ftm: Minimal time between two consecutive measurements, + * in units of 100us. 0 means no preference by station + * @ftm_format_and_bw20M: FTM Channel Spacing/Format for 20MHz: recommended + * value be sent to the AP + * @ftm_format_and_bw40M: FTM Channel Spacing/Format for 40MHz: recommended + * value to be sent to the AP + * @ftm_format_and_bw80M: FTM Channel Spacing/Format for 80MHz: recommended + * value to be sent to the AP + */ +struct iwl_tof_range_req_ext_cmd { + __le16 tsf_timer_offset_msec; + __le16 reserved; + u8 min_delta_ftm; + u8 ftm_format_and_bw20M; + u8 ftm_format_and_bw40M; + u8 ftm_format_and_bw80M; +} __packed; + +/** + * enum iwl_tof_location_query - values for query bitmap + * @IWL_TOF_LOC_LCI: query LCI + * @IWL_TOF_LOC_CIVIC: query civic + */ +enum iwl_tof_location_query { + IWL_TOF_LOC_LCI = 0x01, + IWL_TOF_LOC_CIVIC = 0x02, +}; + + /** + * struct iwl_tof_range_req_ap_entry - AP configuration parameters + * @channel_num: Current AP Channel + * @bandwidth: Current AP Bandwidth. One of iwl_tof_bandwidth. + * @tsf_delta_direction: TSF relatively to the subject AP + * @ctrl_ch_position: Coding of the control channel position relative to the + * center frequency, see iwl_mvm_get_ctrl_pos(). + * @bssid: AP's BSSID + * @measure_type: Measurement type: 0 - two sided, 1 - One sided + * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of the + * number of measurement iterations (min 2^0 = 1, max 2^14) + * @burst_period: Recommended value to be sent to the AP. Measurement + * periodicity In units of 100ms. ignored if num_of_bursts = 0 + * @samples_per_burst: 2-sided: the number of FTMs pairs in single Burst (1-31); + * 1-sided: how many rts/cts pairs should be used per burst. + * @retries_per_sample: Max number of retries that the LMAC should send + * in case of no replies by the AP. + * @tsf_delta: TSF Delta in units of microseconds. + * The difference between the AP TSF and the device local clock. + * @location_req: Location Request Bit[0] LCI should be sent in the FTMR; + * Bit[1] Civic should be sent in the FTMR + * @asap_mode: 0 - non asap mode, 1 - asap mode (not relevant for one sided) + * @enable_dyn_ack: Enable Dynamic ACK BW. 
+ *	0: Initiator interact with regular AP;
+ *	1: Initiator interact with Responder machine: need to send the
+ *	Initiator Acks with HT 40MHz / 80MHz, since the Responder should
+ *	use it for its ch est measurement (this flag will be set when we
+ *	configure the opposite machine to be Responder).
+ * @rssi: Last received value
+ *	legal values: -128-0 (0x7f). Values above 0x0 indicate an invalid value.
+ * @algo_type: &enum iwl_tof_algo_type
+ * @notify_mcsi: &enum iwl_tof_mcsi_enable.
+ * @reserved: For alignment and future use
+ */
+struct iwl_tof_range_req_ap_entry {
+	u8 channel_num;
+	u8 bandwidth;
+	u8 tsf_delta_direction;
+	u8 ctrl_ch_position;
+	u8 bssid[ETH_ALEN];
+	u8 measure_type;
+	u8 num_of_bursts;
+	__le16 burst_period;
+	u8 samples_per_burst;
+	u8 retries_per_sample;
+	__le32 tsf_delta;
+	u8 location_req;
+	u8 asap_mode;
+	u8 enable_dyn_ack;
+	s8 rssi;
+	u8 algo_type;
+	u8 notify_mcsi;
+	__le16 reserved;
+} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_3 */
+
+/**
+ * enum iwl_tof_response_mode
+ * @IWL_MVM_TOF_RESPONSE_ASAP: report each AP measurement separately as soon as
+ *	possible (not supported for this release)
+ * @IWL_MVM_TOF_RESPONSE_TIMEOUT: report all AP measurements as a batch upon
+ *	timeout expiration
+ * @IWL_MVM_TOF_RESPONSE_COMPLETE: report all AP measurements as a batch at the
+ *	earlier of: measurements completion / timeout
+ *	expiration.
+ */
+enum iwl_tof_response_mode {
+	IWL_MVM_TOF_RESPONSE_ASAP,
+	IWL_MVM_TOF_RESPONSE_TIMEOUT,
+	IWL_MVM_TOF_RESPONSE_COMPLETE,
+};
+
+/**
+ * enum iwl_tof_initiator_flags
+ *
+ * @IWL_TOF_INITIATOR_FLAGS_FAST_ALGO_DISABLED: disable fast algo, meaning run
+ *	the algo on ant A+B, instead of only one of them.
+ * @IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_A: open RX antenna A for FTMs RX
+ * @IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_B: open RX antenna B for FTMs RX
+ * @IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_C: open RX antenna C for FTMs RX
+ * @IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_A: use antenna A for TX ACKs during FTM
+ * @IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_B: use antenna B for TX ACKs during FTM
+ * @IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_C: use antenna C for TX ACKs during FTM
+ * @IWL_TOF_INITIATOR_FLAGS_MINDELTA_NO_PREF: no preference for minDeltaFTM
+ */
+enum iwl_tof_initiator_flags {
+	IWL_TOF_INITIATOR_FLAGS_FAST_ALGO_DISABLED = BIT(0),
+	IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_A = BIT(1),
+	IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_B = BIT(2),
+	IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_C = BIT(3),
+	IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_A = BIT(4),
+	IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_B = BIT(5),
+	IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_C = BIT(6),
+	IWL_TOF_INITIATOR_FLAGS_MINDELTA_NO_PREF = BIT(7),
+}; /* LOCATION_RANGE_REQ_CMD_API_S_VER_5 */
+
+#define IWL_MVM_TOF_MAX_APS 5
+#define IWL_MVM_TOF_MAX_TWO_SIDED_APS 5
+
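
As an aside, the dword-padding rule documented for &struct iwl_tof_responder_dyn_config_cmd above is easy to get wrong. The following is a minimal userspace sketch, not part of this patch, of how a caller might lay out the @lci_civic buffer; dword_align() and build_lci_civic() are invented names for illustration.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Round a length up to the next 4-byte boundary. */
static size_t dword_align(size_t len)
{
	return (len + 3) & ~(size_t)3;
}

/*
 * LCI data first, 0-padding up to a dword boundary, then CIVIC data,
 * then 0-padding again so the total length is a multiple of 4.
 * Returns NULL on allocation failure.
 */
static uint8_t *build_lci_civic(const uint8_t *lci, size_t lci_len,
				const uint8_t *civic, size_t civic_len,
				size_t *total_len)
{
	size_t total = dword_align(lci_len) + dword_align(civic_len);
	uint8_t *buf = calloc(1, total);	/* calloc supplies the 0-padding */

	if (!buf)
		return NULL;
	if (lci_len)
		memcpy(buf, lci, lci_len);
	if (civic_len)
		memcpy(buf + dword_align(lci_len), civic, civic_len);
	*total_len = total;
	return buf;
}

The resulting buffer, with @lci_len and @civic_len set to the unpadded lengths, is what would follow the two length fields of the command.
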
+/**
+ * struct iwl_tof_range_req_cmd - start measurement cmd
+ * @initiator_flags: see &enum iwl_tof_initiator_flags
+ * @request_id: A Token incremented per request. The same Token will be
+ *	sent back in the range response
+ * @initiator: 0- NW initiated, 1 - Client Initiated
+ * @one_sided_los_disable: '0'- run ML-Algo for both ToF/OneSided,
+ *	'1' - run ML-Algo for ToF only
+ * @req_timeout: Requested timeout of the response in units of 100ms.
+ *	This is equivalent to the session time configured to the
+ *	LMAC in Initiator Request
+ * @report_policy: Supported partially for this release: For current release -
+ *	the range report will be uploaded as a batch when ready or
+ *	when the session is done (successfully / partially).
+ *	one of iwl_tof_response_mode.
+ * @reserved0: reserved
+ * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @macaddr_random: '0' Use default source MAC address (i.e. p2_p),
+ *	'1' Use MAC Address randomization according to the below
+ * @range_req_bssid: ranging request BSSID
+ * @macaddr_template: MAC address template to use for non-randomized bits
+ * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
+ *	Bits set to 1 shall be randomized by the UMAC
+ * @ftm_rx_chains: Rx chain to open to receive Responder's FTMs (XVT)
+ * @ftm_tx_chains: Tx chain to send the ack to the Responder FTM (XVT)
+ * @common_calib: The common calib value to inject to this measurement calc
+ * @specific_calib: The specific calib value to inject to this measurement calc
+ * @ap: per-AP request data
+ */
+struct iwl_tof_range_req_cmd {
+	__le32 initiator_flags;
+	u8 request_id;
+	u8 initiator;
+	u8 one_sided_los_disable;
+	u8 req_timeout;
+	u8 report_policy;
+	u8 reserved0;
+	u8 num_of_ap;
+	u8 macaddr_random;
+	u8 range_req_bssid[ETH_ALEN];
+	u8 macaddr_template[ETH_ALEN];
+	u8 macaddr_mask[ETH_ALEN];
+	u8 ftm_rx_chains;
+	u8 ftm_tx_chains;
+	__le16 common_calib;
+	__le16 specific_calib;
+	struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS];
+} __packed;
+/* LOCATION_RANGE_REQ_CMD_API_S_VER_5 */
+
+/*
+ * enum iwl_tof_range_request_status - status of the sent request
+ * @IWL_TOF_RANGE_REQUEST_STATUS_SUCCESS - FW successfully received the
+ *	request
+ * @IWL_TOF_RANGE_REQUEST_STATUS_BUSY - FW is busy with a previous request, the
+ *	sent request will not be handled
+ */
+enum iwl_tof_range_request_status {
+	IWL_TOF_RANGE_REQUEST_STATUS_SUCCESS,
+	IWL_TOF_RANGE_REQUEST_STATUS_BUSY,
+};
+
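
The @macaddr_template/@macaddr_mask rule documented above is applied by the UMAC in firmware; the sketch below is illustrative userspace C only, not driver code, and rand() merely stands in for the firmware's randomization source. It models the documented semantics: mask bits of 0 take the template bit, mask bits of 1 are randomized.

#include <stdint.h>
#include <stdlib.h>

#define ETH_ALEN 6

/*
 * Bits set to 0 in the mask are copied from the template; bits set to 1
 * are randomized.
 */
static void model_macaddr_random(uint8_t out[ETH_ALEN],
				 const uint8_t tmpl[ETH_ALEN],
				 const uint8_t mask[ETH_ALEN])
{
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		out[i] = (tmpl[i] & ~mask[i]) | ((uint8_t)rand() & mask[i]);
}
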
+/**
+ * enum iwl_tof_entry_status
+ *
+ * @IWL_TOF_ENTRY_SUCCESS: successful measurement.
+ * @IWL_TOF_ENTRY_GENERAL_FAILURE: General failure.
+ * @IWL_TOF_ENTRY_NO_RESPONSE: Responder didn't reply to the request.
+ * @IWL_TOF_ENTRY_REQUEST_REJECTED: Responder rejected the request.
+ * @IWL_TOF_ENTRY_NOT_SCHEDULED: Time event was scheduled but not called yet.
+ * @IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT: Time event triggered but no
+ *	measurement was completed.
+ * @IWL_TOF_ENTRY_TARGET_DIFF_CH_CANNOT_CHANGE: No range due to inability to
+ *	switch from the primary channel.
+ * @IWL_TOF_ENTRY_RANGE_NOT_SUPPORTED: Device doesn't support FTM.
+ * @IWL_TOF_ENTRY_REQUEST_ABORT_UNKNOWN_REASON: Request aborted due to unknown
+ *	reason.
+ * @IWL_TOF_ENTRY_LOCATION_INVALID_T1_T4_TIME_STAMP: Failure due to invalid
+ *	T1/T4.
+ * @IWL_TOF_ENTRY_11MC_PROTOCOL_FAILURE: Failure due to invalid FTM frame
+ *	structure.
+ * @IWL_TOF_ENTRY_REQUEST_CANNOT_SCHED: Request cannot be scheduled.
+ * @IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE: Responder cannot serve the
+ *	initiator for some period, period supplied in @refusal_period.
+ * @IWL_TOF_ENTRY_BAD_REQUEST_ARGS: Bad request arguments.
+ * @IWL_TOF_ENTRY_WIFI_NOT_ENABLED: Wifi not enabled.
+ * @IWL_TOF_ENTRY_RESPONDER_OVERRIDE_PARAMS: Responder overrode the original
+ *	parameters within the current session.
+ */
+enum iwl_tof_entry_status {
+	IWL_TOF_ENTRY_SUCCESS = 0,
+	IWL_TOF_ENTRY_GENERAL_FAILURE = 1,
+	IWL_TOF_ENTRY_NO_RESPONSE = 2,
+	IWL_TOF_ENTRY_REQUEST_REJECTED = 3,
+	IWL_TOF_ENTRY_NOT_SCHEDULED = 4,
+	IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT = 5,
+	IWL_TOF_ENTRY_TARGET_DIFF_CH_CANNOT_CHANGE = 6,
+	IWL_TOF_ENTRY_RANGE_NOT_SUPPORTED = 7,
+	IWL_TOF_ENTRY_REQUEST_ABORT_UNKNOWN_REASON = 8,
+	IWL_TOF_ENTRY_LOCATION_INVALID_T1_T4_TIME_STAMP = 9,
+	IWL_TOF_ENTRY_11MC_PROTOCOL_FAILURE = 10,
+	IWL_TOF_ENTRY_REQUEST_CANNOT_SCHED = 11,
+	IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE = 12,
+	IWL_TOF_ENTRY_BAD_REQUEST_ARGS = 13,
+	IWL_TOF_ENTRY_WIFI_NOT_ENABLED = 14,
+	IWL_TOF_ENTRY_RESPONDER_OVERRIDE_PARAMS = 15,
+}; /* LOCATION_RANGE_RSP_AP_ENTRY_NTFY_API_S_VER_2 */
+
+/**
+ * struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response)
+ * @bssid: BSSID of the AP
+ * @measure_status: current APs measurement status, one of
+ *	&enum iwl_tof_entry_status.
+ * @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
+ * @rtt: The Round Trip Time that took for the last measurement for
+ *	current AP [pSec]
+ * @rtt_variance: The Variance of the RTT values measured for current AP
+ * @rtt_spread: The Difference between the maximum and the minimum RTT
+ *	values measured for current AP in the current session [pSec]
+ * @rssi: RSSI as uploaded in the Channel Estimation notification
+ * @rssi_spread: The Difference between the maximum and the minimum RSSI values
+ *	measured for current AP in the current session
+ * @reserved: reserved
+ * @refusal_period: refusal period in case of
+ *	@IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE [sec]
+ * @range: Measured range [cm]
+ * @range_variance: Measured range variance [cm]
+ * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was
+ *	uploaded by the LMAC
+ * @t2t3_initiator: as calculated from the algo in the initiator
+ * @t1t4_responder: as calculated from the algo in the responder
+ * @common_calib: Calib val that was used for this AP measurement
+ * @specific_calib: val that was used for this AP measurement
+ * @papd_calib_output: The result of the tof papd calibration that was injected
+ *	into the algorithm.
+ */
+struct iwl_tof_range_rsp_ap_entry_ntfy {
+	u8 bssid[ETH_ALEN];
+	u8 measure_status;
+	u8 measure_bw;
+	__le32 rtt;
+	__le32 rtt_variance;
+	__le32 rtt_spread;
+	s8 rssi;
+	u8 rssi_spread;
+	u8 reserved;
+	u8 refusal_period;
+	__le32 range;
+	__le32 range_variance;
+	__le32 timestamp;
+	__le32 t2t3_initiator;
+	__le32 t1t4_responder;
+	__le16 common_calib;
+	__le16 specific_calib;
+	__le32 papd_calib_output;
+} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_3 */
+
+/**
+ * enum iwl_tof_response_status - tof response status
+ *
+ * @IWL_TOF_RESPONSE_SUCCESS: successful range.
+ * @IWL_TOF_RESPONSE_TIMEOUT: request aborted due to timeout expiration.
+ *	partial result of ranges done so far is included in the response.
+ * @IWL_TOF_RESPONSE_ABORTED: Measurement aborted by command.
+ * @IWL_TOF_RESPONSE_FAILED: Measurement request command failed.
+ */
+enum iwl_tof_response_status {
+	IWL_TOF_RESPONSE_SUCCESS = 0,
+	IWL_TOF_RESPONSE_TIMEOUT = 1,
+	IWL_TOF_RESPONSE_ABORTED = 4,
+	IWL_TOF_RESPONSE_FAILED = 5,
+}; /* LOCATION_RNG_RSP_STATUS */
+
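
For context, a consumer of the per-AP entries defined above might walk a batch as in the sketch below. This is illustrative only, assuming the structures and enums from this header are visible, and using le32toh() from userspace <endian.h> in place of the kernel's le32_to_cpu(); dump_ranges() is an invented name.

#include <endian.h>
#include <stdio.h>

/* Print the entries that completed successfully, skipping the rest. */
static void dump_ranges(const struct iwl_tof_range_rsp_ap_entry_ntfy *ap,
			int num_of_aps)
{
	int i;

	for (i = 0; i < num_of_aps; i++) {
		if (ap[i].measure_status != IWL_TOF_ENTRY_SUCCESS)
			continue;
		printf("AP %d: range %u cm (variance %u cm)\n", i,
		       le32toh(ap[i].range), le32toh(ap[i].range_variance));
	}
}
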
+/**
+ * struct iwl_tof_range_rsp_ntfy - ranging response notification
+ * @request_id: A Token ID of the corresponding Range request
+ * @request_status: status of current measurement session, one of
+ *	&enum iwl_tof_response_status.
+ * @last_in_batch: report policy (when not all responses are uploaded at once)
+ * @num_of_aps: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @ap: per-AP data
+ */
+struct iwl_tof_range_rsp_ntfy {
+	u8 request_id;
+	u8 request_status;
+	u8 last_in_batch;
+	u8 num_of_aps;
+	struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_MVM_TOF_MAX_APS];
+} __packed;
+
+#define IWL_MVM_TOF_MCSI_BUF_SIZE (245)
+/**
+ * struct iwl_tof_mcsi_notif - used for debug
+ * @token: token ID for the current session
+ * @role: '0' - initiator, '1' - responder
+ * @reserved: reserved
+ * @initiator_bssid: initiator machine
+ * @responder_bssid: responder machine
+ * @mcsi_buffer: debug data
+ */
+struct iwl_tof_mcsi_notif {
+	u8 token;
+	u8 role;
+	__le16 reserved;
+	u8 initiator_bssid[ETH_ALEN];
+	u8 responder_bssid[ETH_ALEN];
+	u8 mcsi_buffer[IWL_MVM_TOF_MCSI_BUF_SIZE * 4];
+} __packed;
+
+/**
+ * struct iwl_tof_range_abort_cmd
+ * @request_id: corresponds to a range request
+ * @reserved: reserved
+ */
+struct iwl_tof_range_abort_cmd {
+	u8 request_id;
+	u8 reserved[3];
+} __packed;
+
+enum ftm_responder_stats_flags {
+	FTM_RESP_STAT_NON_ASAP_STARTED = BIT(0),
+	FTM_RESP_STAT_NON_ASAP_IN_WIN = BIT(1),
+	FTM_RESP_STAT_NON_ASAP_OUT_WIN = BIT(2),
+	FTM_RESP_STAT_TRIGGER_DUP = BIT(3),
+	FTM_RESP_STAT_DUP = BIT(4),
+	FTM_RESP_STAT_DUP_IN_WIN = BIT(5),
+	FTM_RESP_STAT_DUP_OUT_WIN = BIT(6),
+	FTM_RESP_STAT_SCHED_SUCCESS = BIT(7),
+	FTM_RESP_STAT_ASAP_REQ = BIT(8),
+	FTM_RESP_STAT_NON_ASAP_REQ = BIT(9),
+	FTM_RESP_STAT_ASAP_RESP = BIT(10),
+	FTM_RESP_STAT_NON_ASAP_RESP = BIT(11),
+	FTM_RESP_STAT_FAIL_INITIATOR_INACTIVE = BIT(12),
+	FTM_RESP_STAT_FAIL_INITIATOR_OUT_WIN = BIT(13),
+	FTM_RESP_STAT_FAIL_INITIATOR_RETRY_LIM = BIT(14),
+	FTM_RESP_STAT_FAIL_NEXT_SERVED = BIT(15),
+	FTM_RESP_STAT_FAIL_TRIGGER_ERR = BIT(16),
+	FTM_RESP_STAT_FAIL_GC = BIT(17),
+	FTM_RESP_STAT_SUCCESS = BIT(18),
+	FTM_RESP_STAT_INTEL_IE = BIT(19),
+	FTM_RESP_STAT_INITIATOR_ACTIVE = BIT(20),
+	FTM_RESP_STAT_MEASUREMENTS_AVAILABLE = BIT(21),
+	FTM_RESP_STAT_TRIGGER_UNKNOWN = BIT(22),
+	FTM_RESP_STAT_PROCESS_FAIL = BIT(23),
+	FTM_RESP_STAT_ACK = BIT(24),
+	FTM_RESP_STAT_NACK = BIT(25),
+	FTM_RESP_STAT_INVALID_INITIATOR_ID = BIT(26),
+	FTM_RESP_STAT_TIMER_MIN_DELTA = BIT(27),
+	FTM_RESP_STAT_INITIATOR_REMOVED = BIT(28),
+	FTM_RESP_STAT_INITIATOR_ADDED = BIT(29),
+	FTM_RESP_STAT_ERR_LIST_FULL = BIT(30),
+	FTM_RESP_STAT_INITIATOR_SCHED_NOW = BIT(31),
+}; /* RESP_IND_E */
+
+/**
+ * struct iwl_ftm_responder_stats - FTM responder statistics
+ * @addr: initiator address
+ * @success_ftm: number of successful ftm frames
+ * @ftm_per_burst: num of FTM frames that were received
+ * @flags: &enum ftm_responder_stats_flags
+ * @duration: actual duration of FTM
+ * @allocated_duration: time that was allocated for this FTM session
+ * @bw: FTM request bandwidth
+ * @rate: FTM request rate
+ * @reserved: for alignment and future use
+ */
+struct iwl_ftm_responder_stats {
+	u8 addr[ETH_ALEN];
+	u8 success_ftm;
+	u8 ftm_per_burst;
+	__le32 flags;
+	__le32 duration;
+	__le32 allocated_duration;
+	u8 bw;
+	u8 rate;
+	__le16 reserved;
+} __packed; /* TOF_RESPONDER_STATISTICS_NTFY_S_VER_2 */
+
+#define IWL_CSI_CHUNK_CTL_NUM_MASK	0x3
+#define IWL_CSI_CHUNK_CTL_IDX_MASK	0xc
+
+struct iwl_csi_chunk_notification {
+	__le32 token;
+	__le16 seq;
+	__le16 ctl;
+	__le32 size;
+	u8 data[];
+} __packed; /* CSI_CHUNKS_HDR_NTFY_API_S_VER_1 */
+
+#endif /* __iwl_fw_api_location_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h 
b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h index 45f61c6af14e..b833b80ea3d6 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,6 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -95,17 +97,36 @@ #define PHY_VHT_CTRL_POS_4_ABOVE (0x7) /* + * struct iwl_fw_channel_info_v1 - channel information + * * @band: PHY_BAND_* * @channel: channel number * @width: PHY_[VHT|LEGACY]_CHANNEL_* * @ctrl channel: PHY_[VHT|LEGACY]_CTRL_* */ -struct iwl_fw_channel_info { +struct iwl_fw_channel_info_v1 { u8 band; u8 channel; u8 width; u8 ctrl_pos; -} __packed; +} __packed; /* CHANNEL_CONFIG_API_S_VER_1 */ + +/* + * struct iwl_fw_channel_info - channel information + * + * @channel: channel number + * @band: PHY_BAND_* + * @width: PHY_[VHT|LEGACY]_CHANNEL_* + * @ctrl channel: PHY_[VHT|LEGACY]_CTRL_* + * @reserved: for future use and alignment + */ +struct iwl_fw_channel_info { + __le32 channel; + u8 band; + u8 width; + u8 ctrl_pos; + u8 reserved; +} __packed; /*CHANNEL_CONFIG_API_S_VER_2 */ #define PHY_RX_CHAIN_DRIVER_FORCE_POS (0) #define PHY_RX_CHAIN_DRIVER_FORCE_MSK \ @@ -134,6 +155,22 @@ struct iwl_fw_channel_info { /* TODO: complete missing documentation */ /** + * struct iwl_phy_context_cmd_tail - tail of iwl_phy_ctx_cmd for alignment with + * various channel structures. + * + * @txchain_info: ??? + * @rxchain_info: ??? + * @acquisition_data: ??? + * @dsp_cfg_flags: set to 0 + */ +struct iwl_phy_context_cmd_tail { + __le32 txchain_info; + __le32 rxchain_info; + __le32 acquisition_data; + __le32 dsp_cfg_flags; +} __packed; + +/** * struct iwl_phy_context_cmd - config of the PHY context * ( PHY_CONTEXT_CMD = 0x8 ) * @id_and_color: ID and color of the relevant Binding @@ -142,10 +179,7 @@ struct iwl_fw_channel_info { * other value means apply new params after X usecs * @tx_param_color: ??? * @ci: channel info - * @txchain_info: ??? - * @rxchain_info: ??? - * @acquisition_data: ??? 
- * @dsp_cfg_flags: set to 0 + * @tail: command tail */ struct iwl_phy_context_cmd { /* COMMON_INDEX_HDR_API_S_VER_1 */ @@ -155,10 +189,7 @@ struct iwl_phy_context_cmd { __le32 apply_time; __le32 tx_param_color; struct iwl_fw_channel_info ci; - __le32 txchain_info; - __le32 rxchain_info; - __le32 acquisition_data; - __le32 dsp_cfg_flags; + struct iwl_phy_context_cmd_tail tail; } __packed; /* PHY_CONTEXT_CMD_API_VER_1 */ #endif /* __iwl_fw_api_phy_ctxt_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h index 0791a854fc8f..6e8224ce8906 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h @@ -209,8 +209,6 @@ enum iwl_rx_phy_flags { * @RX_MPDU_RES_STATUS_CSUM_OK: checksum found no errors * @RX_MPDU_RES_STATUS_STA_ID_MSK: station ID mask * @RX_MDPU_RES_STATUS_STA_ID_SHIFT: station ID bit shift - * @RX_MPDU_RES_STATUS_FILTERING_MSK: filter status - * @RX_MPDU_RES_STATUS2_FILTERING_MSK: filter status 2 */ enum iwl_mvm_rx_status { RX_MPDU_RES_STATUS_CRC_OK = BIT(0), @@ -238,8 +236,6 @@ enum iwl_mvm_rx_status { RX_MPDU_RES_STATUS_CSUM_OK = BIT(17), RX_MDPU_RES_STATUS_STA_ID_SHIFT = 24, RX_MPDU_RES_STATUS_STA_ID_MSK = 0x1f << RX_MDPU_RES_STATUS_STA_ID_SHIFT, - RX_MPDU_RES_STATUS_FILTERING_MSK = (0xc00000), - RX_MPDU_RES_STATUS2_FILTERING_MSK = (0xc0000000), }; /* 9000 series API */ @@ -337,6 +333,8 @@ enum iwl_rx_mpdu_phy_info { IWL_RX_MPDU_PHY_AMPDU = BIT(5), IWL_RX_MPDU_PHY_AMPDU_TOGGLE = BIT(6), IWL_RX_MPDU_PHY_SHORT_PREAMBLE = BIT(7), + /* short preamble is only for CCK, for non-CCK overridden by this */ + IWL_RX_MPDU_PHY_NCCK_ADDTL_NTFY = BIT(7), IWL_RX_MPDU_PHY_TSF_OVERLOAD = BIT(8), }; @@ -723,6 +721,9 @@ struct iwl_rx_mpdu_desc { #define RX_NO_DATA_FRAME_TIME_POS 0 #define RX_NO_DATA_FRAME_TIME_MSK (0xfffff << RX_NO_DATA_FRAME_TIME_POS) +#define RX_NO_DATA_RX_VEC0_HE_NSTS_MSK 0x03800000 +#define RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK 0x38000000 + /** * struct iwl_rx_no_data - RX no data descriptor * @info: 7:0 frame type, 15:8 RX error type @@ -743,7 +744,7 @@ struct iwl_rx_no_data { __le32 fr_time; __le32 rate; __le32 phy_info[2]; - __le32 rx_vec[3]; + __le32 rx_vec[2]; } __packed; /* RX_NO_DATA_NTFY_API_S_VER_1 */ /** diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h index 53cb622aa9ab..318843138490 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -29,6 +30,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright (C) 2018 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -363,14 +365,7 @@ struct mvm_statistics_general_v8 { u8 reserved[4 - (NUM_MAC_INDEX % 4)]; } __packed; /* STATISTICS_GENERAL_API_S_VER_8 */ -struct mvm_statistics_general_cdb_v9 { - struct mvm_statistics_general_common_v19 common; - __le32 beacon_counter[NUM_MAC_INDEX_CDB]; - u8 beacon_average_energy[NUM_MAC_INDEX_CDB]; - u8 reserved[4 - (NUM_MAC_INDEX_CDB % 4)]; -} __packed; /* STATISTICS_GENERAL_API_S_VER_9 */ - -struct mvm_statistics_general_cdb { +struct mvm_statistics_general { struct mvm_statistics_general_common common; __le32 beacon_counter[MAC_INDEX_AUX]; u8 beacon_average_energy[MAC_INDEX_AUX]; @@ -435,11 +430,11 @@ struct iwl_notif_statistics_v11 { struct mvm_statistics_load_v1 load_stats; } __packed; /* STATISTICS_NTFY_API_S_VER_11 */ -struct iwl_notif_statistics_cdb { +struct iwl_notif_statistics { __le32 flag; struct mvm_statistics_rx rx; struct mvm_statistics_tx tx; - struct mvm_statistics_general_cdb general; + struct mvm_statistics_general general; struct mvm_statistics_load load_stats; } __packed; /* STATISTICS_NTFY_API_S_VER_13 */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h index 7c6c2462d0e8..b089285ac466 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,6 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. 
 *
 * Redistribution and use in source and binary forms, with or without
@@ -111,6 +113,17 @@ struct iwl_tdls_channel_switch_frame {
 } __packed; /* TDLS_STA_CHANNEL_SWITCH_FRAME_API_S_VER_1 */
 
 /**
+ * struct iwl_tdls_channel_switch_cmd_tail - tail of iwl_tdls_channel_switch_cmd
+ *
+ * @timing: timing related data for command
+ * @frame: channel-switch request/response template, depending on switch_type
+ */
+struct iwl_tdls_channel_switch_cmd_tail {
+	struct iwl_tdls_channel_switch_timing timing;
+	struct iwl_tdls_channel_switch_frame frame;
+} __packed;
+
+/**
  * struct iwl_tdls_channel_switch_cmd - TDLS channel switch command
  *
  * The command is sent to initiate a channel switch and also in response to
@@ -119,15 +132,13 @@ struct iwl_tdls_channel_switch_frame {
  * @switch_type: see &enum iwl_tdls_channel_switch_type
  * @peer_sta_id: station id of TDLS peer
  * @ci: channel we switch to
- * @timing: timing related data for command
- * @frame: channel-switch request/response template, depending to switch_type
+ * @tail: command tail
  */
 struct iwl_tdls_channel_switch_cmd {
 	u8 switch_type;
 	__le32 peer_sta_id;
 	struct iwl_fw_channel_info ci;
-	struct iwl_tdls_channel_switch_timing timing;
-	struct iwl_tdls_channel_switch_frame frame;
+	struct iwl_tdls_channel_switch_cmd_tail tail;
 } __packed; /* TDLS_STA_CHANNEL_SWITCH_CMD_API_S_VER_1 */
 
 /**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
index f824bebceb06..4621ef93a2cf 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
@@ -8,6 +8,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -318,6 +320,25 @@ struct iwl_time_event_notif {
 } __packed; /* MAC_TIME_EVENT_NTFY_API_S_VER_1 */
 
 /*
+ * struct iwl_hs20_roc_req_tail - tail of iwl_hs20_roc_req
+ *
+ * @node_addr: Our MAC Address
+ * @reserved: reserved for alignment
+ * @apply_time: GP2 value to start (should always be the current GP2 value)
+ * @apply_time_max_delay: Maximum apply time delay value in TU. Defines max
+ *	time by which start of the event is allowed to be postponed.
+ * @duration: event duration in TU. To calculate event duration:
+ *	timeEventDuration = min(duration, remainingQuota)
+ */
+struct iwl_hs20_roc_req_tail {
+	u8 node_addr[ETH_ALEN];
+	__le16 reserved;
+	__le32 apply_time;
+	__le32 apply_time_max_delay;
+	__le32 duration;
+} __packed;
+
+/*
  * Aux ROC command
  *
  * Command requests the firmware to create a time event for a certain duration
@@ -336,13 +357,6 @@ struct iwl_time_event_notif {
  * @sta_id_and_color: station id and color, resumed during "Remain On Channel"
  *	activity.
* @channel_info: channel info - * @node_addr: Our MAC Address - * @reserved: reserved for alignment - * @apply_time: GP2 value to start (should always be the current GP2 value) - * @apply_time_max_delay: Maximum apply time delay value in TU. Defines max - * time by which start of the event is allowed to be postponed. - * @duration: event duration in TU To calculate event duration: - * timeEventDuration = min(duration, remainingQuota) */ struct iwl_hs20_roc_req { /* COMMON_INDEX_HDR_API_S_VER_1 hdr */ @@ -351,11 +365,7 @@ struct iwl_hs20_roc_req { __le32 event_unique_id; __le32 sta_id_and_color; struct iwl_fw_channel_info channel_info; - u8 node_addr[ETH_ALEN]; - __le16 reserved; - __le32 apply_time; - __le32 apply_time_max_delay; - __le32 duration; + struct iwl_hs20_roc_req_tail tail; } __packed; /* HOT_SPOT_CMD_API_S_VER_1 */ /* diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tof.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tof.h deleted file mode 100644 index 7328a1606146..000000000000 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/tof.h +++ /dev/null @@ -1,393 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ -#ifndef __iwl_fw_api_tof_h__ -#define __iwl_fw_api_tof_h__ - -/* ToF sub-group command IDs */ -enum iwl_mvm_tof_sub_grp_ids { - TOF_RANGE_REQ_CMD = 0x1, - TOF_CONFIG_CMD = 0x2, - TOF_RANGE_ABORT_CMD = 0x3, - TOF_RANGE_REQ_EXT_CMD = 0x4, - TOF_RESPONDER_CONFIG_CMD = 0x5, - TOF_NW_INITIATED_RES_SEND_CMD = 0x6, - TOF_NEIGHBOR_REPORT_REQ_CMD = 0x7, - TOF_NEIGHBOR_REPORT_RSP_NOTIF = 0xFC, - TOF_NW_INITIATED_REQ_RCVD_NOTIF = 0xFD, - TOF_RANGE_RESPONSE_NOTIF = 0xFE, - TOF_MCSI_DEBUG_NOTIF = 0xFB, -}; - -/** - * struct iwl_tof_config_cmd - ToF configuration - * @tof_disabled: 0 enabled, 1 - disabled - * @one_sided_disabled: 0 enabled, 1 - disabled - * @is_debug_mode: 1 debug mode, 0 - otherwise - * @is_buf_required: 1 channel estimation buffer required, 0 - otherwise - */ -struct iwl_tof_config_cmd { - __le32 sub_grp_cmd_id; - u8 tof_disabled; - u8 one_sided_disabled; - u8 is_debug_mode; - u8 is_buf_required; -} __packed; - -/** - * struct iwl_tof_responder_config_cmd - ToF AP mode (for debug) - * @burst_period: future use: (currently hard coded in the LMAC) - * The interval between two sequential bursts. - * @min_delta_ftm: future use: (currently hard coded in the LMAC) - * The minimum delay between two sequential FTM Responses - * in the same burst. - * @burst_duration: future use: (currently hard coded in the LMAC) - * The total time for all FTMs handshake in the same burst. - * Affect the time events duration in the LMAC. - * @num_of_burst_exp: future use: (currently hard coded in the LMAC) - * The number of bursts for the current ToF request. Affect - * the number of events allocations in the current iteration. - * @get_ch_est: for xVT only, NA for driver - * @abort_responder: when set to '1' - Responder will terminate its activity - * (all other fields in the command are ignored) - * @recv_sta_req_params: 1 - Responder will ignore the other Responder's - * params and use the recomended Initiator params. 
- * 0 - otherwise - * @channel_num: current AP Channel - * @bandwidth: current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz - * @rate: current AP rate - * @ctrl_ch_position: coding of the control channel position relative to - * the center frequency: - * - * 40 MHz - * 0 below center, 1 above center - * - * 80 MHz - * bits [0..1] - * * 0 the near 20MHz to the center, - * * 1 the far 20MHz to the center - * bit[2] - * as above 40MHz - * @ftm_per_burst: FTMs per Burst - * @ftm_resp_ts_avail: '0' - we don't measure over the Initial FTM Response, - * '1' - we measure over the Initial FTM Response - * @asap_mode: ASAP / Non ASAP mode for the current WLS station - * @sta_id: index of the AP STA when in AP mode - * @tsf_timer_offset_msecs: The dictated time offset (mSec) from the AP's TSF - * @toa_offset: Artificial addition [0.1nsec] for the ToA - to be used for debug - * purposes, simulating station movement by adding various values - * to this field - * @bssid: Current AP BSSID - */ -struct iwl_tof_responder_config_cmd { - __le32 sub_grp_cmd_id; - __le16 burst_period; - u8 min_delta_ftm; - u8 burst_duration; - u8 num_of_burst_exp; - u8 get_ch_est; - u8 abort_responder; - u8 recv_sta_req_params; - u8 channel_num; - u8 bandwidth; - u8 rate; - u8 ctrl_ch_position; - u8 ftm_per_burst; - u8 ftm_resp_ts_avail; - u8 asap_mode; - u8 sta_id; - __le16 tsf_timer_offset_msecs; - __le16 toa_offset; - u8 bssid[ETH_ALEN]; -} __packed; - -/** - * struct iwl_tof_range_request_ext_cmd - extended range req for WLS - * @tsf_timer_offset_msec: the recommended time offset (mSec) from the AP's TSF - * @reserved: reserved - * @min_delta_ftm: Minimal time between two consecutive measurements, - * in units of 100us. 0 means no preference by station - * @ftm_format_and_bw20M: FTM Channel Spacing/Format for 20MHz: recommended - * value be sent to the AP - * @ftm_format_and_bw40M: FTM Channel Spacing/Format for 40MHz: recommended - * value to be sent to the AP - * @ftm_format_and_bw80M: FTM Channel Spacing/Format for 80MHz: recommended - * value to be sent to the AP - */ -struct iwl_tof_range_req_ext_cmd { - __le32 sub_grp_cmd_id; - __le16 tsf_timer_offset_msec; - __le16 reserved; - u8 min_delta_ftm; - u8 ftm_format_and_bw20M; - u8 ftm_format_and_bw40M; - u8 ftm_format_and_bw80M; -} __packed; - -#define IWL_MVM_TOF_MAX_APS 21 - -/** - * struct iwl_tof_range_req_ap_entry - AP configuration parameters - * @channel_num: Current AP Channel - * @bandwidth: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz - * @tsf_delta_direction: TSF relatively to the subject AP - * @ctrl_ch_position: Coding of the control channel position relative to the - * center frequency. - * 40MHz 0 below center, 1 above center - * 80MHz bits [0..1]: 0 the near 20MHz to the center, - * 1 the far 20MHz to the center - * bit[2] as above 40MHz - * @bssid: AP's bss id - * @measure_type: Measurement type: 0 - two sided, 1 - One sided - * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of the - * number of measurement iterations (min 2^0 = 1, max 2^14) - * @burst_period: Recommended value to be sent to the AP. Measurement - * periodicity In units of 100ms. ignored if num_of_bursts = 0 - * @samples_per_burst: 2-sided: the number of FTMs pairs in single Burst (1-31) - * 1-sided: how many rts/cts pairs should be used per burst. - * @retries_per_sample: Max number of retries that the LMAC should send - * in case of no replies by the AP. - * @tsf_delta: TSF Delta in units of microseconds. 
- * The difference between the AP TSF and the device local clock. - * @location_req: Location Request Bit[0] LCI should be sent in the FTMR - * Bit[1] Civic should be sent in the FTMR - * @asap_mode: 0 - non asap mode, 1 - asap mode (not relevant for one sided) - * @enable_dyn_ack: Enable Dynamic ACK BW. - * 0 Initiator interact with regular AP - * 1 Initiator interact with Responder machine: need to send the - * Initiator Acks with HT 40MHz / 80MHz, since the Responder should - * use it for its ch est measurement (this flag will be set when we - * configure the opposite machine to be Responder). - * @rssi: Last received value - * leagal values: -128-0 (0x7f). above 0x0 indicating an invalid value. - */ -struct iwl_tof_range_req_ap_entry { - u8 channel_num; - u8 bandwidth; - u8 tsf_delta_direction; - u8 ctrl_ch_position; - u8 bssid[ETH_ALEN]; - u8 measure_type; - u8 num_of_bursts; - __le16 burst_period; - u8 samples_per_burst; - u8 retries_per_sample; - __le32 tsf_delta; - u8 location_req; - u8 asap_mode; - u8 enable_dyn_ack; - s8 rssi; -} __packed; - -/** - * enum iwl_tof_response_mode - * @IWL_MVM_TOF_RESPOSE_ASAP: report each AP measurement separately as soon as - * possible (not supported for this release) - * @IWL_MVM_TOF_RESPOSE_TIMEOUT: report all AP measurements as a batch upon - * timeout expiration - * @IWL_MVM_TOF_RESPOSE_COMPLETE: report all AP measurements as a batch at the - * earlier of: measurements completion / timeout - * expiration. - */ -enum iwl_tof_response_mode { - IWL_MVM_TOF_RESPOSE_ASAP = 1, - IWL_MVM_TOF_RESPOSE_TIMEOUT, - IWL_MVM_TOF_RESPOSE_COMPLETE, -}; - -/** - * struct iwl_tof_range_req_cmd - start measurement cmd - * @request_id: A Token incremented per request. The same Token will be - * sent back in the range response - * @initiator: 0- NW initiated, 1 - Client Initiated - * @one_sided_los_disable: '0'- run ML-Algo for both ToF/OneSided, - * '1' - run ML-Algo for ToF only - * @req_timeout: Requested timeout of the response in units of 100ms. - * This is equivalent to the session time configured to the - * LMAC in Initiator Request - * @report_policy: Supported partially for this release: For current release - - * the range report will be uploaded as a batch when ready or - * when the session is done (successfully / partially). - * one of iwl_tof_response_mode. - * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) - * @macaddr_random: '0' Use default source MAC address (i.e. p2_p), - * '1' Use MAC Address randomization according to the below - * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template. - * Bits set to 1 shall be randomized by the UMAC - * @ap: per-AP request data - */ -struct iwl_tof_range_req_cmd { - __le32 sub_grp_cmd_id; - u8 request_id; - u8 initiator; - u8 one_sided_los_disable; - u8 req_timeout; - u8 report_policy; - u8 los_det_disable; - u8 num_of_ap; - u8 macaddr_random; - u8 macaddr_template[ETH_ALEN]; - u8 macaddr_mask[ETH_ALEN]; - struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS]; -} __packed; - -/** - * struct iwl_tof_gen_resp_cmd - generic ToF response - */ -struct iwl_tof_gen_resp_cmd { - __le32 sub_grp_cmd_id; - u8 data[]; -} __packed; - -/** - * struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response) - * @bssid: BSSID of the AP - * @measure_status: current APs measurement status, one of - * &enum iwl_tof_entry_status. 
- * @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz - * @rtt: The Round Trip Time that took for the last measurement for - * current AP [nSec] - * @rtt_variance: The Variance of the RTT values measured for current AP - * @rtt_spread: The Difference between the maximum and the minimum RTT - * values measured for current AP in the current session [nsec] - * @rssi: RSSI as uploaded in the Channel Estimation notification - * @rssi_spread: The Difference between the maximum and the minimum RSSI values - * measured for current AP in the current session - * @reserved: reserved - * @range: Measured range [cm] - * @range_variance: Measured range variance [cm] - * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was - * uploaded by the LMAC - */ -struct iwl_tof_range_rsp_ap_entry_ntfy { - u8 bssid[ETH_ALEN]; - u8 measure_status; - u8 measure_bw; - __le32 rtt; - __le32 rtt_variance; - __le32 rtt_spread; - s8 rssi; - u8 rssi_spread; - __le16 reserved; - __le32 range; - __le32 range_variance; - __le32 timestamp; -} __packed; - -/** - * struct iwl_tof_range_rsp_ntfy - - * @request_id: A Token ID of the corresponding Range request - * @request_status: status of current measurement session - * @last_in_batch: reprot policy (when not all responses are uploaded at once) - * @num_of_aps: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) - * @ap: per-AP data - */ -struct iwl_tof_range_rsp_ntfy { - u8 request_id; - u8 request_status; - u8 last_in_batch; - u8 num_of_aps; - struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_MVM_TOF_MAX_APS]; -} __packed; - -#define IWL_MVM_TOF_MCSI_BUF_SIZE (245) -/** - * struct iwl_tof_mcsi_notif - used for debug - * @token: token ID for the current session - * @role: '0' - initiator, '1' - responder - * @reserved: reserved - * @initiator_bssid: initiator machine - * @responder_bssid: responder machine - * @mcsi_buffer: debug data - */ -struct iwl_tof_mcsi_notif { - u8 token; - u8 role; - __le16 reserved; - u8 initiator_bssid[ETH_ALEN]; - u8 responder_bssid[ETH_ALEN]; - u8 mcsi_buffer[IWL_MVM_TOF_MCSI_BUF_SIZE * 4]; -} __packed; - -/** - * struct iwl_tof_neighbor_report_notif - * @bssid: BSSID of the AP which sent the report - * @request_token: same token as the corresponding request - * @status: - * @report_ie_len: the length of the response frame starting from the Element ID - * @data: the IEs - */ -struct iwl_tof_neighbor_report { - u8 bssid[ETH_ALEN]; - u8 request_token; - u8 status; - __le16 report_ie_len; - u8 data[]; -} __packed; - -/** - * struct iwl_tof_range_abort_cmd - * @request_id: corresponds to a range request - * @reserved: reserved - */ -struct iwl_tof_range_abort_cmd { - __le32 sub_grp_cmd_id; - u8 request_id; - u8 reserved[3]; -} __packed; - -#endif diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index 2a19b178c5e8..22efd94da6d3 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -469,6 +469,93 @@ static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = { { .start = 0x00a02400, .end = 0x00a02758 }, }; +static const struct iwl_prph_range iwl_prph_dump_addr_22000[] = { + { .start = 0x00a00000, .end = 0x00a00000 }, + { .start = 0x00a0000c, .end = 0x00a00024 }, + { .start = 0x00a0002c, .end = 0x00a00034 }, + { .start = 0x00a0003c, .end = 0x00a0003c }, + { .start = 0x00a00410, .end = 0x00a00418 }, + { .start = 0x00a00420, .end = 0x00a00420 }, + { .start = 0x00a00428, .end = 0x00a00428 }, + { .start = 
0x00a00430, .end = 0x00a0043c }, + { .start = 0x00a00444, .end = 0x00a00444 }, + { .start = 0x00a00840, .end = 0x00a00840 }, + { .start = 0x00a00850, .end = 0x00a00858 }, + { .start = 0x00a01004, .end = 0x00a01008 }, + { .start = 0x00a01010, .end = 0x00a01010 }, + { .start = 0x00a01018, .end = 0x00a01018 }, + { .start = 0x00a01024, .end = 0x00a01024 }, + { .start = 0x00a0102c, .end = 0x00a01034 }, + { .start = 0x00a0103c, .end = 0x00a01040 }, + { .start = 0x00a01048, .end = 0x00a01050 }, + { .start = 0x00a01058, .end = 0x00a01058 }, + { .start = 0x00a01060, .end = 0x00a01070 }, + { .start = 0x00a0108c, .end = 0x00a0108c }, + { .start = 0x00a01c20, .end = 0x00a01c28 }, + { .start = 0x00a01d10, .end = 0x00a01d10 }, + { .start = 0x00a01e28, .end = 0x00a01e2c }, + { .start = 0x00a01e60, .end = 0x00a01e60 }, + { .start = 0x00a01e80, .end = 0x00a01e80 }, + { .start = 0x00a01ea0, .end = 0x00a01ea0 }, + { .start = 0x00a02000, .end = 0x00a0201c }, + { .start = 0x00a02024, .end = 0x00a02024 }, + { .start = 0x00a02040, .end = 0x00a02048 }, + { .start = 0x00a020c0, .end = 0x00a020e0 }, + { .start = 0x00a02400, .end = 0x00a02404 }, + { .start = 0x00a0240c, .end = 0x00a02414 }, + { .start = 0x00a0241c, .end = 0x00a0243c }, + { .start = 0x00a02448, .end = 0x00a024bc }, + { .start = 0x00a024c4, .end = 0x00a024cc }, + { .start = 0x00a02508, .end = 0x00a02508 }, + { .start = 0x00a02510, .end = 0x00a02514 }, + { .start = 0x00a0251c, .end = 0x00a0251c }, + { .start = 0x00a0252c, .end = 0x00a0255c }, + { .start = 0x00a02564, .end = 0x00a025a0 }, + { .start = 0x00a025a8, .end = 0x00a025b4 }, + { .start = 0x00a025c0, .end = 0x00a025c0 }, + { .start = 0x00a025e8, .end = 0x00a025f4 }, + { .start = 0x00a02c08, .end = 0x00a02c18 }, + { .start = 0x00a02c2c, .end = 0x00a02c38 }, + { .start = 0x00a02c68, .end = 0x00a02c78 }, + { .start = 0x00a03000, .end = 0x00a03000 }, + { .start = 0x00a03010, .end = 0x00a03014 }, + { .start = 0x00a0301c, .end = 0x00a0302c }, + { .start = 0x00a03034, .end = 0x00a03038 }, + { .start = 0x00a03040, .end = 0x00a03044 }, + { .start = 0x00a03060, .end = 0x00a03068 }, + { .start = 0x00a03070, .end = 0x00a03070 }, + { .start = 0x00a0307c, .end = 0x00a03084 }, + { .start = 0x00a0308c, .end = 0x00a03090 }, + { .start = 0x00a03098, .end = 0x00a03098 }, + { .start = 0x00a030a0, .end = 0x00a030a0 }, + { .start = 0x00a030a8, .end = 0x00a030b4 }, + { .start = 0x00a030bc, .end = 0x00a030c0 }, + { .start = 0x00a030c8, .end = 0x00a030f4 }, + { .start = 0x00a03100, .end = 0x00a0312c }, + { .start = 0x00a03c00, .end = 0x00a03c5c }, + { .start = 0x00a04400, .end = 0x00a04454 }, + { .start = 0x00a04460, .end = 0x00a04474 }, + { .start = 0x00a044c0, .end = 0x00a044ec }, + { .start = 0x00a04500, .end = 0x00a04504 }, + { .start = 0x00a04510, .end = 0x00a04538 }, + { .start = 0x00a04540, .end = 0x00a04548 }, + { .start = 0x00a04560, .end = 0x00a04560 }, + { .start = 0x00a04570, .end = 0x00a0457c }, + { .start = 0x00a04590, .end = 0x00a04590 }, + { .start = 0x00a04598, .end = 0x00a04598 }, + { .start = 0x00a045c0, .end = 0x00a045f4 }, + { .start = 0x00a0c000, .end = 0x00a0c018 }, + { .start = 0x00a0c020, .end = 0x00a0c028 }, + { .start = 0x00a0c038, .end = 0x00a0c094 }, + { .start = 0x00a0c0c0, .end = 0x00a0c104 }, + { .start = 0x00a0c10c, .end = 0x00a0c118 }, + { .start = 0x00a0c150, .end = 0x00a0c174 }, + { .start = 0x00a0c17c, .end = 0x00a0c188 }, + { .start = 0x00a0c190, .end = 0x00a0c198 }, + { .start = 0x00a0c1a0, .end = 0x00a0c1a8 }, + { .start = 0x00a0c1b0, .end = 0x00a0c1b8 }, +}; + static void 
iwl_read_prph_block(struct iwl_trans *trans, u32 start, u32 len_bytes, __le32 *data) { @@ -478,15 +565,20 @@ static void iwl_read_prph_block(struct iwl_trans *trans, u32 start, *data++ = cpu_to_le32(iwl_read_prph_no_grab(trans, start + i)); } -static void iwl_dump_prph(struct iwl_trans *trans, - struct iwl_fw_error_dump_data **data, +static void iwl_dump_prph(struct iwl_fw_runtime *fwrt, const struct iwl_prph_range *iwl_prph_dump_addr, - u32 range_len) + u32 range_len, void *ptr) { struct iwl_fw_error_dump_prph *prph; + struct iwl_trans *trans = fwrt->trans; + struct iwl_fw_error_dump_data **data = + (struct iwl_fw_error_dump_data **)ptr; unsigned long flags; u32 i; + if (!data) + return; + IWL_DEBUG_INFO(trans, "WRT PRPH dump\n"); if (!iwl_trans_grab_nic_access(trans, &flags)) @@ -552,37 +644,47 @@ static struct scatterlist *alloc_sgtable(int size) return table; } -static int iwl_fw_get_prph_len(struct iwl_fw_runtime *fwrt) +static void iwl_fw_get_prph_len(struct iwl_fw_runtime *fwrt, + const struct iwl_prph_range *iwl_prph_dump_addr, + u32 range_len, void *ptr) { - u32 prph_len = 0; - int i; + u32 *prph_len = (u32 *)ptr; + int i, num_bytes_in_chunk; + + if (!prph_len) + return; - for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm); - i++) { + for (i = 0; i < range_len; i++) { /* The range includes both boundaries */ - int num_bytes_in_chunk = - iwl_prph_dump_addr_comm[i].end - - iwl_prph_dump_addr_comm[i].start + 4; + num_bytes_in_chunk = + iwl_prph_dump_addr[i].end - + iwl_prph_dump_addr[i].start + 4; - prph_len += sizeof(struct iwl_fw_error_dump_data) + + *prph_len += sizeof(struct iwl_fw_error_dump_data) + sizeof(struct iwl_fw_error_dump_prph) + num_bytes_in_chunk; } +} - if (fwrt->trans->cfg->mq_rx_supported) { - for (i = 0; i < - ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) { - /* The range includes both boundaries */ - int num_bytes_in_chunk = - iwl_prph_dump_addr_9000[i].end - - iwl_prph_dump_addr_9000[i].start + 4; - - prph_len += sizeof(struct iwl_fw_error_dump_data) + - sizeof(struct iwl_fw_error_dump_prph) + - num_bytes_in_chunk; +static void iwl_fw_prph_handler(struct iwl_fw_runtime *fwrt, void *ptr, + void (*handler)(struct iwl_fw_runtime *, + const struct iwl_prph_range *, + u32, void *)) +{ + u32 range_len; + + if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) { + range_len = ARRAY_SIZE(iwl_prph_dump_addr_22000); + handler(fwrt, iwl_prph_dump_addr_22000, range_len, ptr); + } else { + range_len = ARRAY_SIZE(iwl_prph_dump_addr_comm); + handler(fwrt, iwl_prph_dump_addr_comm, range_len, ptr); + + if (fwrt->trans->cfg->mq_rx_supported) { + range_len = ARRAY_SIZE(iwl_prph_dump_addr_9000); + handler(fwrt, iwl_prph_dump_addr_9000, range_len, ptr); } } - return prph_len; } static void iwl_fw_dump_mem(struct iwl_fw_runtime *fwrt, @@ -646,6 +748,9 @@ static int iwl_fw_rxf_len(struct iwl_fw_runtime *fwrt, ADD_LEN(fifo_len, mem_cfg->rxfifo2_size, hdr_len); /* Count RXF1 sizes */ + if (WARN_ON(mem_cfg->num_lmacs > MAX_NUM_LMAC)) + mem_cfg->num_lmacs = MAX_NUM_LMAC; + for (i = 0; i < mem_cfg->num_lmacs; i++) ADD_LEN(fifo_len, mem_cfg->lmac[i].rxfifo1_size, hdr_len); @@ -664,6 +769,9 @@ static int iwl_fw_txf_len(struct iwl_fw_runtime *fwrt, goto dump_internal_txf; /* Count TXF sizes */ + if (WARN_ON(mem_cfg->num_lmacs > MAX_NUM_LMAC)) + mem_cfg->num_lmacs = MAX_NUM_LMAC; + for (i = 0; i < mem_cfg->num_lmacs; i++) { int j; @@ -733,6 +841,8 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt, if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) { const struct fw_img 
*img; + if (fwrt->cur_fw_img >= IWL_UCODE_TYPE_MAX) + return NULL; img = &fwrt->fw->img[fwrt->cur_fw_img]; sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset; sram_len = img->sec[IWL_UCODE_SECTION_DATA].len; @@ -747,9 +857,9 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt, fifo_len += iwl_fw_txf_len(fwrt, mem_cfg); /* Make room for PRPH registers */ - if (!fwrt->trans->cfg->gen2 && - iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PRPH)) - prph_len += iwl_fw_get_prph_len(fwrt); + if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PRPH)) + iwl_fw_prph_handler(fwrt, &prph_len, + iwl_fw_get_prph_len); if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 && iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RADIO_REG)) @@ -828,7 +938,13 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt, sizeof(dump_info->dev_human_readable) - 1); strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name, sizeof(dump_info->bus_human_readable) - 1); - dump_info->rt_status = cpu_to_le32(fwrt->dump.rt_status); + dump_info->num_of_lmacs = fwrt->smem_cfg.num_lmacs; + dump_info->lmac_err_id[0] = + cpu_to_le32(fwrt->dump.lmac_err_id[0]); + if (fwrt->smem_cfg.num_lmacs > 1) + dump_info->lmac_err_id[1] = + cpu_to_le32(fwrt->dump.lmac_err_id[1]); + dump_info->umac_err_id = cpu_to_le32(fwrt->dump.umac_err_id); dump_data = iwl_fw_error_next_data(dump_data); } @@ -935,16 +1051,8 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt, if (iwl_fw_dbg_is_paging_enabled(fwrt)) iwl_dump_paging(fwrt, &dump_data); - if (prph_len) { - iwl_dump_prph(fwrt->trans, &dump_data, - iwl_prph_dump_addr_comm, - ARRAY_SIZE(iwl_prph_dump_addr_comm)); - - if (fwrt->trans->cfg->mq_rx_supported) - iwl_dump_prph(fwrt->trans, &dump_data, - iwl_prph_dump_addr_9000, - ARRAY_SIZE(iwl_prph_dump_addr_9000)); - } + if (prph_len) + iwl_fw_prph_handler(fwrt, &dump_data, iwl_dump_prph); out: dump_file->file_len = cpu_to_le32(file_len); @@ -1108,13 +1216,13 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt, iwl_dump_prph_ini(fwrt->trans, data, reg); break; case IWL_FW_INI_REGION_DRAM_BUFFER: - *dump_mask |= IWL_FW_ERROR_DUMP_FW_MONITOR; + *dump_mask |= BIT(IWL_FW_ERROR_DUMP_FW_MONITOR); break; case IWL_FW_INI_REGION_PAGING: if (iwl_fw_dbg_is_paging_enabled(fwrt)) iwl_dump_paging(fwrt, data); else - *dump_mask |= IWL_FW_ERROR_DUMP_PAGING; + *dump_mask |= BIT(IWL_FW_ERROR_DUMP_PAGING); break; case IWL_FW_INI_REGION_TXF: iwl_fw_dump_txf(fwrt, data); @@ -1146,10 +1254,6 @@ _iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt, if (id == FW_DBG_TRIGGER_FW_ASSERT) id = IWL_FW_TRIGGER_ID_FW_ASSERT; - else if (id == FW_DBG_TRIGGER_USER) - id = IWL_FW_TRIGGER_ID_USER_TRIGGER; - else if (id < FW_DBG_TRIGGER_MAX) - return NULL; if (WARN_ON(id >= ARRAY_SIZE(fwrt->dump.active_trigs))) return NULL; @@ -1385,7 +1489,7 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, if (WARN_ON(!fwrt->dump.active_trigs[id].active)) return -EINVAL; - delay = le32_to_cpu(fwrt->dump.active_trigs[id].conf->ignore_consec); + delay = le32_to_cpu(fwrt->dump.active_trigs[id].conf->dump_delay); occur = le32_to_cpu(fwrt->dump.active_trigs[id].conf->occurrences); if (!occur) return 0; @@ -1570,27 +1674,29 @@ iwl_fw_dbg_buffer_allocation(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_allocation_tlv *alloc) { struct iwl_trans *trans = fwrt->trans; - struct iwl_continuous_record_cmd cont_rec = {}; - struct iwl_buffer_allocation_cmd *cmd = (void *)&cont_rec.pad[0]; + struct iwl_ldbg_config_cmd ldbg_cmd = { + .type = cpu_to_le32(BUFFER_ALLOCATION), + }; + struct iwl_buffer_allocation_cmd *cmd = 
&ldbg_cmd.buffer_allocation; struct iwl_host_cmd hcmd = { .id = LDBG_CONFIG_CMD, .flags = CMD_ASYNC, - .data[0] = &cont_rec, - .len[0] = sizeof(cont_rec), + .data[0] = &ldbg_cmd, + .len[0] = sizeof(ldbg_cmd), }; void *virtual_addr = NULL; u32 size = le32_to_cpu(alloc->size); dma_addr_t phys_addr; - cont_rec.record_mode.enable_recording = cpu_to_le16(BUFFER_ALLOCATION); - if (!trans->num_blocks && le32_to_cpu(alloc->buffer_location) != IWL_FW_INI_LOCATION_DRAM_PATH) return; - virtual_addr = dma_alloc_coherent(fwrt->trans->dev, size, - &phys_addr, GFP_KERNEL); + virtual_addr = + dma_alloc_coherent(fwrt->trans->dev, size, &phys_addr, + GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO | + __GFP_COMP); /* TODO: alloc fragments if needed */ if (!virtual_addr) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h index 6aabbdd72326..330229d2a61d 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h @@ -102,7 +102,10 @@ static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt) if (fwrt->dump.desc != &iwl_dump_desc_assert) kfree(fwrt->dump.desc); fwrt->dump.desc = NULL; - fwrt->dump.rt_status = 0; + fwrt->dump.lmac_err_id[0] = 0; + if (fwrt->smem_cfg.num_lmacs > 1) + fwrt->dump.lmac_err_id[1] = 0; + fwrt->dump.umac_err_id = 0; } void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt); @@ -266,20 +269,20 @@ _iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt, iwl_fw_dbg_get_trigger((fwrt)->fw,\ (trig))) -static int iwl_fw_dbg_start_stop_hcmd(struct iwl_fw_runtime *fwrt, bool start) +static inline int +iwl_fw_dbg_start_stop_hcmd(struct iwl_fw_runtime *fwrt, bool start) { - struct iwl_continuous_record_cmd cont_rec = {}; + struct iwl_ldbg_config_cmd cmd = { + .type = start ? cpu_to_le32(START_DEBUG_RECORDING) : + cpu_to_le32(STOP_DEBUG_RECORDING), + }; struct iwl_host_cmd hcmd = { .id = LDBG_CONFIG_CMD, .flags = CMD_ASYNC, - .data[0] = &cont_rec, - .len[0] = sizeof(cont_rec), + .data[0] = &cmd, + .len[0] = sizeof(cmd), }; - cont_rec.record_mode.enable_recording = start ? 
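The buffer-allocation rework above replaces the old trick of stuffing an iwl_buffer_allocation_cmd into the pad bytes of iwl_continuous_record_cmd with a properly typed iwl_ldbg_config_cmd carrying a type discriminator (BUFFER_ALLOCATION, START/STOP_DEBUG_RECORDING) plus the matching body. A hedged sketch of that tagged-command shape; all names and the layout are illustrative, not the driver's real wire format:

#include <stdint.h>

typedef uint32_t le32;
/* identity on a little-endian host; the real code uses cpu_to_le32() */
static inline le32 to_le32(uint32_t v) { return v; }

enum ldbg_type { START_RECORDING = 1, STOP_RECORDING = 2, BUFFER_ALLOC = 3 };

struct buffer_alloc_body {
        le32 addr_lo, addr_hi, size;
};

struct ldbg_cmd {
        le32 type;                      /* discriminator, always first */
        union {
                struct buffer_alloc_body alloc;
                uint8_t pad[32];        /* keeps the wire size fixed */
        };
};

static struct ldbg_cmd make_alloc_cmd(uint32_t size)
{
        struct ldbg_cmd cmd = {
                .type = to_le32(BUFFER_ALLOC),
                .alloc = { .size = to_le32(size) },
        };
        return cmd;
}

The payoff is visible in the hcmd setup: .data[0]/.len[0] now point at a struct whose declared type matches what the firmware actually parses.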
- cpu_to_le16(START_DEBUG_RECORDING) : - cpu_to_le16(STOP_DEBUG_RECORDING); - return iwl_trans_send_cmd(fwrt->trans, &hcmd); } @@ -378,6 +381,7 @@ static inline bool iwl_fw_dbg_is_paging_enabled(struct iwl_fw_runtime *fwrt) { return iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PAGING) && !fwrt->trans->cfg->gen2 && + fwrt->cur_fw_img < IWL_UCODE_TYPE_MAX && fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size && fwrt->fw_paging_db[0].fw_paging_block; } diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h index 65faecf552cd..c02425a1e64f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h @@ -180,6 +180,8 @@ enum iwl_fw_error_dump_family { IWL_FW_ERROR_DUMP_FAMILY_8 = 8, }; +#define MAX_NUM_LMAC 2 + /** * struct iwl_fw_error_dump_info - info on the device / firmware * @device_family: the family of the device (7 / 8) @@ -187,7 +189,10 @@ enum iwl_fw_error_dump_family { * @fw_human_readable: human readable FW version * @dev_human_readable: name of the device * @bus_human_readable: name of the bus used - * @rt_status: the error_id/rt_status that that triggered the latest dump + * @num_of_lmacs: the number of lmacs + * @lmac_err_id: the lmac 0/1 error_id/rt_status that triggered the latest dump + * if the dump collection was not initiated by an assert, the value is 0 + * @umac_err_id: the umac error_id/rt_status that triggered the latest dump * if the dump collection was not initiated by an assert, the value is 0 */ struct iwl_fw_error_dump_info { @@ -196,7 +201,9 @@ struct iwl_fw_error_dump_info { u8 fw_human_readable[FW_VER_HUMAN_READABLE_SZ]; u8 dev_human_readable[64]; u8 bus_human_readable[8]; - __le32 rt_status; + u8 num_of_lmacs; + __le32 umac_err_id; + __le32 lmac_err_id[MAX_NUM_LMAC]; } __packed; /** diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index 81f557c0b58d..e8b00b795cbb 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -303,7 +303,6 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t; * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT: supports Location Aware Regulatory * @IWL_UCODE_TLV_CAPA_UMAC_SCAN: supports UMAC scan. * @IWL_UCODE_TLV_CAPA_BEAMFORMER: supports Beamformer - * @IWL_UCODE_TLV_CAPA_TOF_SUPPORT: supports Time of Flight (802.11mc FTM) * @IWL_UCODE_TLV_CAPA_TDLS_SUPPORT: support basic TDLS functionality * @IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current * tx power value into TPC Report action frame and Link Measurement Report @@ -334,6 +333,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t; * @IWL_UCODE_TLV_CAPA_TLC_OFFLOAD: firmware implements rate scaling algorithm * @IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA: firmware implements quota related * @IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2: firmware implements Coex Schema 2 + * @IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS: firmware supports ultra high band + * (6 GHz). * @IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement * @IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts * @IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT @@ -357,10 +358,13 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t; * @IWL_UCODE_TLV_CAPA_TX_POWER_ACK: reduced TX power API has larger * command size (command version 4) that supports toggling ACK TX * power reduction. 
- * @IWL_UCODE_TLV_CAPA_MLME_OFFLOAD: supports MLME offload * @IWL_UCODE_TLV_CAPA_D3_DEBUG: supports debug recording during D3 * @IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT: MCC response support 11ax * capability. + * @IWL_UCODE_TLV_CAPA_CSI_REPORTING: firmware is capable of being configured + * to report the CSI information with (certain) RX frames + * + * @IWL_UCODE_TLV_CAPA_MLME_OFFLOAD: supports MLME offload * * @NUM_IWL_UCODE_TLV_CAPA: number of bits used */ @@ -369,7 +373,6 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_LAR_SUPPORT = (__force iwl_ucode_tlv_capa_t)1, IWL_UCODE_TLV_CAPA_UMAC_SCAN = (__force iwl_ucode_tlv_capa_t)2, IWL_UCODE_TLV_CAPA_BEAMFORMER = (__force iwl_ucode_tlv_capa_t)3, - IWL_UCODE_TLV_CAPA_TOF_SUPPORT = (__force iwl_ucode_tlv_capa_t)5, IWL_UCODE_TLV_CAPA_TDLS_SUPPORT = (__force iwl_ucode_tlv_capa_t)6, IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = (__force iwl_ucode_tlv_capa_t)8, IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = (__force iwl_ucode_tlv_capa_t)9, @@ -394,6 +397,7 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_TLC_OFFLOAD = (__force iwl_ucode_tlv_capa_t)43, IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA = (__force iwl_ucode_tlv_capa_t)44, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2 = (__force iwl_ucode_tlv_capa_t)45, + IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS = (__force iwl_ucode_tlv_capa_t)48, IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64, IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS = (__force iwl_ucode_tlv_capa_t)65, IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT = (__force iwl_ucode_tlv_capa_t)67, @@ -412,6 +416,8 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_D3_DEBUG = (__force iwl_ucode_tlv_capa_t)87, IWL_UCODE_TLV_CAPA_LED_CMD_SUPPORT = (__force iwl_ucode_tlv_capa_t)88, IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT = (__force iwl_ucode_tlv_capa_t)89, + IWL_UCODE_TLV_CAPA_CSI_REPORTING = (__force iwl_ucode_tlv_capa_t)90, + IWL_UCODE_TLV_CAPA_MLME_OFFLOAD = (__force iwl_ucode_tlv_capa_t)96, NUM_IWL_UCODE_TLV_CAPA diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index 4f7090f88cb0..a0fcbb28114b 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -142,7 +142,8 @@ struct iwl_fw_runtime { u32 *d3_debug_data; struct iwl_fw_ini_active_regs active_regs[IWL_FW_INI_MAX_REGION_ID]; struct iwl_fw_ini_active_triggers active_trigs[IWL_FW_TRIGGER_ID_NUM]; - u32 rt_status; + u32 lmac_err_id[MAX_NUM_LMAC]; + u32 umac_err_id; } dump; #ifdef CONFIG_IWLWIFI_DEBUGFS struct { diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 91861a9cbe57..ff942532fac3 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -335,10 +335,6 @@ struct iwl_csr_params { * @fw_name_pre: Firmware filename prefix. The api version and extension * (.ucode) will be added to filename before loading from disk. The * filename is constructed as fw_name_pre<api>.ucode. - * @fw_name_pre_b_or_c_step: same as @fw_name_pre, only for b or c steps - * (if supported) - * @fw_name_pre_rf_next_step: same as @fw_name_pre_b_or_c_step, only for rf - * next step. Supported only in integrated solutions. * @ucode_api_max: Highest version of uCode API supported by driver. * @ucode_api_min: Lowest version of uCode API supported by driver. 
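Each capability in this enum is cast through (__force iwl_ucode_tlv_capa_t) so that sparse treats the bit indices as a distinct __bitwise type and flags accidental mixing with plain integers. A small sketch of that annotation pattern; under a normal compiler the attributes expand to nothing and only matter when sparse runs (__CHECKER__ defined):

#ifdef __CHECKER__
#define __bitwise __attribute__((bitwise))
#define __force   __attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned int __bitwise capa_t;

enum {
        CAPA_LAR = (__force capa_t)1,
        CAPA_UMAC_SCAN = (__force capa_t)2,
};

/* capa_t c = 5;  <- sparse would warn here: plain integer assigned
 * to a bitwise type without a __force cast */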
* @max_inst_size: The maximal length of the fw inst section (only DVM) @@ -392,8 +388,6 @@ struct iwl_cfg { /* params specific to an individual device within a device family */ const char *name; const char *fw_name_pre; - const char *fw_name_pre_b_or_c_step; - const char *fw_name_pre_rf_next_step; /* params not likely to change within a device family */ const struct iwl_base_params *base_params; /* params likely to change within a device family */ @@ -551,29 +545,40 @@ extern const struct iwl_cfg iwl8275_2ac_cfg; extern const struct iwl_cfg iwl4165_2ac_cfg; extern const struct iwl_cfg iwl9160_2ac_cfg; extern const struct iwl_cfg iwl9260_2ac_cfg; +extern const struct iwl_cfg iwl9260_2ac_160_cfg; extern const struct iwl_cfg iwl9260_killer_2ac_cfg; extern const struct iwl_cfg iwl9270_2ac_cfg; extern const struct iwl_cfg iwl9460_2ac_cfg; extern const struct iwl_cfg iwl9560_2ac_cfg; +extern const struct iwl_cfg iwl9560_2ac_160_cfg; extern const struct iwl_cfg iwl9460_2ac_cfg_soc; extern const struct iwl_cfg iwl9461_2ac_cfg_soc; extern const struct iwl_cfg iwl9462_2ac_cfg_soc; extern const struct iwl_cfg iwl9560_2ac_cfg_soc; +extern const struct iwl_cfg iwl9560_2ac_160_cfg_soc; extern const struct iwl_cfg iwl9560_killer_2ac_cfg_soc; extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc; extern const struct iwl_cfg iwl9460_2ac_cfg_shared_clk; extern const struct iwl_cfg iwl9461_2ac_cfg_shared_clk; extern const struct iwl_cfg iwl9462_2ac_cfg_shared_clk; extern const struct iwl_cfg iwl9560_2ac_cfg_shared_clk; +extern const struct iwl_cfg iwl9560_2ac_160_cfg_shared_clk; extern const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk; extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk; extern const struct iwl_cfg iwl22000_2ac_cfg_hr; extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb; extern const struct iwl_cfg iwl22000_2ac_cfg_jf; +extern const struct iwl_cfg iwl22560_2ax_cfg_hr; extern const struct iwl_cfg iwl22000_2ax_cfg_hr; +extern const struct iwl_cfg iwl22260_2ax_cfg; +extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0; +extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0; +extern const struct iwl_cfg killer1650x_2ax_cfg; +extern const struct iwl_cfg killer1650w_2ax_cfg; extern const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0; extern const struct iwl_cfg iwl9462_2ac_cfg_qu_b0_jf_b0; extern const struct iwl_cfg iwl9560_2ac_cfg_qu_b0_jf_b0; +extern const struct iwl_cfg iwl9560_2ac_160_cfg_qu_b0_jf_b0; extern const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0; extern const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0; extern const struct iwl_cfg iwl22000_2ax_cfg_jf; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index caa5806acd81..42af421bbc3c 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -325,6 +325,7 @@ enum { #define CSR_HW_REV_TYPE_7265D (0x0000210) #define CSR_HW_REV_TYPE_NONE (0x00001F0) #define CSR_HW_REV_TYPE_QNJ (0x0000360) +#define CSR_HW_REV_TYPE_QNJ_B0 (0x0000364) #define CSR_HW_REV_TYPE_HR_CDB (0x0000340) /* RF_ID value */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index bf1be985f36b..2efa1dfe9b4c 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -210,18 +210,15 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first) { const struct iwl_cfg *cfg = drv->trans->cfg; char tag[8]; - const char 
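The @fw_name_pre comment documents the naming scheme the loader relies on: build "fw_name_pre<api>.ucode" starting from ucode_api_max and count down to ucode_api_min until a file is found. A condensed userspace sketch of that fallback loop (the real driver spreads it across async request_firmware() callbacks, and the prefix and version numbers here are just examples):

#include <stdio.h>

static int request_firmware_stub(const char *name)
{
        printf("trying %s\n", name);
        return -1;      /* pretend the file is missing */
}

int main(void)
{
        const char *fw_name_pre = "iwlwifi-9260-th-b0-jf-b0-";
        int api_max = 46, api_min = 43;
        char name[64];

        for (int api = api_max; api >= api_min; api--) {
                snprintf(name, sizeof(name), "%s%d.ucode", fw_name_pre, api);
                if (request_firmware_stub(name) == 0)
                        return 0;
        }
        fprintf(stderr, "no suitable firmware found!\n");
        return 1;
}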
*fw_pre_name; if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_9000 && - (CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_B_STEP || - CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_C_STEP)) - fw_pre_name = cfg->fw_name_pre_b_or_c_step; - else if (drv->trans->cfg->integrated && - CSR_HW_RFID_STEP(drv->trans->hw_rf_id) == SILICON_B_STEP && - cfg->fw_name_pre_rf_next_step) - fw_pre_name = cfg->fw_name_pre_rf_next_step; - else - fw_pre_name = cfg->fw_name_pre; + (CSR_HW_REV_STEP(drv->trans->hw_rev) != SILICON_B_STEP && + CSR_HW_REV_STEP(drv->trans->hw_rev) != SILICON_C_STEP)) { + IWL_ERR(drv, + "Only HW steps B and C are currently supported (0x%0x)\n", + drv->trans->hw_rev); + return -EINVAL; + } if (first) { drv->fw_index = cfg->ucode_api_max; @@ -235,15 +232,13 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first) IWL_ERR(drv, "no suitable firmware found!\n"); if (cfg->ucode_api_min == cfg->ucode_api_max) { - IWL_ERR(drv, "%s%d is required\n", fw_pre_name, + IWL_ERR(drv, "%s%d is required\n", cfg->fw_name_pre, cfg->ucode_api_max); } else { IWL_ERR(drv, "minimum version required: %s%d\n", - fw_pre_name, - cfg->ucode_api_min); + cfg->fw_name_pre, cfg->ucode_api_min); IWL_ERR(drv, "maximum version supported: %s%d\n", - fw_pre_name, - cfg->ucode_api_max); + cfg->fw_name_pre, cfg->ucode_api_max); } IWL_ERR(drv, @@ -252,7 +247,7 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first) } snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode", - fw_pre_name, tag); + cfg->fw_name_pre, tag); IWL_DEBUG_INFO(drv, "attempting to load firmware '%s'\n", drv->firmware_name); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c index 4f10914f6048..ffd1e649bfa0 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c @@ -1,10 +1,13 @@ /****************************************************************************** * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * - * Portions of this file are derived from the ipw3945 project. - * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -15,12 +18,44 @@ * more details. * * The full GNU General Public License is included in this distribution in the - * file called LICENSE. + * file called COPYING. * * Contact Information: * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * + * BSD LICENSE + * + * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * *****************************************************************************/ #include <linux/delay.h> #include <linux/device.h> diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.h b/drivers/net/wireless/intel/iwlwifi/iwl-io.h index 38085850a2d3..61477e58352d 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-io.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.h @@ -1,8 +1,9 @@ /****************************************************************************** * - * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. * - * Portions of this file are derived from the ipw3945 project. + * GPL LICENSE SUMMARY * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -14,14 +15,43 @@ * more details. * * The full GNU General Public License is included in this distribution in the - * file called LICENSE. + * file called COPYING. * * Contact Information: * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * + * BSD LICENSE + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * *****************************************************************************/ - #ifndef __iwl_io_h__ #define __iwl_io_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h index 73b1c46f1158..0cae2ef9b9df 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h @@ -152,4 +152,22 @@ struct iwl_mod_params { bool enable_ini; }; +static inline bool iwl_enable_rx_ampdu(void) +{ + if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) + return false; + return true; +} + +static inline bool iwl_enable_tx_ampdu(void) +{ + if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) + return false; + if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG) + return true; + + /* enabled by default */ + return true; +} + #endif /* #__iwl_modparams_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index e1178b09c4d5..63c081e5c63a 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -558,8 +558,7 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = { .has_he = true, .he_cap_elem = { .mac_cap_info[0] = - IEEE80211_HE_MAC_CAP0_HTC_HE | - IEEE80211_HE_MAC_CAP0_TWT_RES, + IEEE80211_HE_MAC_CAP0_HTC_HE, .mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, @@ -1180,14 +1179,12 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, regd_to_copy = sizeof(struct ieee80211_regdomain) + valid_rules * sizeof(struct ieee80211_reg_rule); - copy_rd = kzalloc(regd_to_copy, GFP_KERNEL); + copy_rd = kmemdup(regd, regd_to_copy, GFP_KERNEL); if (!copy_rd) { copy_rd = ERR_PTR(-ENOMEM); goto out; } - memcpy(copy_rd, regd, regd_to_copy); - out: kfree(regdb_ptrs); kfree(regd); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h index 9d89b7d7f9fa..3aaa5f06461c 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h @@ -358,12 +358,12 @@ /* FW monitor */ #define MON_BUFF_SAMPLE_CTL (0xa03c00) -#define MON_BUFF_BASE_ADDR (0xa03c3c) +#define MON_BUFF_BASE_ADDR (0xa03c1c) #define MON_BUFF_END_ADDR (0xa03c40) #define MON_BUFF_WRPTR (0xa03c44) #define MON_BUFF_CYCLE_CNT (0xa03c48) /* FW monitor family 8000 and on */ -#define MON_BUFF_BASE_ADDR_VER2 (0xa03c3c) +#define MON_BUFF_BASE_ADDR_VER2 (0xa03c1c) #define MON_BUFF_END_ADDR_VER2 (0xa03c20) #define MON_BUFF_WRPTR_VER2 (0xa03c24) #define MON_BUFF_CYCLE_CNT_VER2 (0xa03c28) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile index 9ffd21918b5a..30cbd981efbd 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile @@ -7,7 +7,6 @@ iwlmvm-y += power.o coex.o iwlmvm-y += 
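The iwl-nvm-parse.c hunk above is a pure simplification: kmemdup() is exactly allocate-then-copy, so the separate kzalloc()+memcpy() pair (which also zeroed memory that was about to be overwritten anyway) collapses into one call. A userspace stand-in for the helper:

#include <stdlib.h>
#include <string.h>

/* before: p = kzalloc(len, GFP_KERNEL); if (p) memcpy(p, src, len);
 * after:  p = kmemdup(src, len, GFP_KERNEL);
 */
static void *kmemdup_sketch(const void *src, size_t len)
{
        void *p = malloc(len);

        if (p)
                memcpy(p, src, len);
        return p;
}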
tt.o offloading.o tdls.o iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o -iwlmvm-y += tof.o iwlmvm-$(CONFIG_PM) += d3.o -ccflags-y += -I$(src)/../ +ccflags-y += -I $(srctree)/$(src)/../ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 01b5338201d6..36ed7d6fc971 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -2125,7 +2125,6 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file) file->private_data = inode->i_private; - ieee80211_stop_queues(mvm->hw); synchronize_net(); mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3; @@ -2140,10 +2139,9 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file) rtnl_unlock(); if (err > 0) err = -EINVAL; - if (err) { - ieee80211_wake_queues(mvm->hw); + if (err) return err; - } + mvm->d3_test_active = true; mvm->keep_vif = NULL; return 0; @@ -2223,8 +2221,6 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file) mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif); - ieee80211_wake_queues(mvm->hw); - return 0; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c index 33b0af24a537..2453ceabf00d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c @@ -60,7 +60,6 @@ * *****************************************************************************/ #include "mvm.h" -#include "fw/api/tof.h" #include "debugfs.h" static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm, @@ -523,753 +522,30 @@ static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file, return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } -static ssize_t iwl_dbgfs_tof_enable_write(struct ieee80211_vif *vif, - char *buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - u32 value; - int ret = -EINVAL; - char *data; - - mutex_lock(&mvm->mutex); - - data = iwl_dbgfs_is_match("tof_disabled=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.tof_cfg.tof_disabled = value; - goto out; - } - - data = iwl_dbgfs_is_match("one_sided_disabled=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.tof_cfg.one_sided_disabled = value; - goto out; - } - - data = iwl_dbgfs_is_match("is_debug_mode=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.tof_cfg.is_debug_mode = value; - goto out; - } - - data = iwl_dbgfs_is_match("is_buf=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.tof_cfg.is_buf_required = value; - goto out; - } - - data = iwl_dbgfs_is_match("send_tof_cfg=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0 && value) { - ret = iwl_mvm_tof_config_cmd(mvm); - goto out; - } - } - -out: - mutex_unlock(&mvm->mutex); - - return ret ?: count; -} - -static ssize_t iwl_dbgfs_tof_enable_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ieee80211_vif *vif = file->private_data; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - char buf[256]; - int pos = 0; - const size_t bufsz = sizeof(buf); - struct iwl_tof_config_cmd *cmd; - - cmd = 
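All of the ToF debugfs write handlers being deleted below share one parsing idiom: iwl_dbgfs_is_match() peels a "key=" prefix off the user buffer and returns a pointer to the value, which is then fed to kstrtou32(). A self-contained sketch of that idiom; is_match() here is a hypothetical stand-in, not the driver's helper:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *is_match(const char *prefix, char *buf)
{
        size_t n = strlen(prefix);

        return strncmp(buf, prefix, n) ? NULL : buf + n;
}

int main(void)
{
        char buf[] = "burst_period=32";
        char *data = is_match("burst_period=", buf);

        if (data)       /* kernel code would use kstrtou32() here */
                printf("burst_period -> %lu\n", strtoul(data, NULL, 10));
        return 0;
}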
&mvm->tof_data.tof_cfg; - - mutex_lock(&mvm->mutex); - - pos += scnprintf(buf + pos, bufsz - pos, "tof_disabled = %d\n", - cmd->tof_disabled); - pos += scnprintf(buf + pos, bufsz - pos, "one_sided_disabled = %d\n", - cmd->one_sided_disabled); - pos += scnprintf(buf + pos, bufsz - pos, "is_debug_mode = %d\n", - cmd->is_debug_mode); - pos += scnprintf(buf + pos, bufsz - pos, "is_buf_required = %d\n", - cmd->is_buf_required); - - mutex_unlock(&mvm->mutex); - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_dbgfs_tof_responder_params_write(struct ieee80211_vif *vif, - char *buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - u32 value; - int ret = 0; - char *data; - - mutex_lock(&mvm->mutex); - - data = iwl_dbgfs_is_match("burst_period=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (!ret) - mvm->tof_data.responder_cfg.burst_period = - cpu_to_le16(value); - goto out; - } - - data = iwl_dbgfs_is_match("min_delta_ftm=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.min_delta_ftm = value; - goto out; - } - - data = iwl_dbgfs_is_match("burst_duration=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.burst_duration = value; - goto out; - } - - data = iwl_dbgfs_is_match("num_of_burst_exp=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.num_of_burst_exp = value; - goto out; - } - - data = iwl_dbgfs_is_match("abort_responder=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.abort_responder = value; - goto out; - } - - data = iwl_dbgfs_is_match("get_ch_est=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.get_ch_est = value; - goto out; - } - - data = iwl_dbgfs_is_match("recv_sta_req_params=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.recv_sta_req_params = value; - goto out; - } - - data = iwl_dbgfs_is_match("channel_num=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.channel_num = value; - goto out; - } - - data = iwl_dbgfs_is_match("bandwidth=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.bandwidth = value; - goto out; - } - - data = iwl_dbgfs_is_match("rate=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.rate = value; - goto out; - } - - data = iwl_dbgfs_is_match("bssid=", buf); - if (data) { - u8 *mac = mvm->tof_data.responder_cfg.bssid; - - if (!mac_pton(data, mac)) { - ret = -EINVAL; - goto out; - } - } - - data = iwl_dbgfs_is_match("tsf_timer_offset_msecs=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.tsf_timer_offset_msecs = - cpu_to_le16(value); - goto out; - } - - data = iwl_dbgfs_is_match("toa_offset=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.toa_offset = - cpu_to_le16(value); - goto out; - } - - data = iwl_dbgfs_is_match("center_freq=", buf); - if (data) { - struct iwl_tof_responder_config_cmd *cmd = - &mvm->tof_data.responder_cfg; - - ret = kstrtou32(data, 10, &value); - if (ret == 0 && value) { - enum nl80211_band 
band = (cmd->channel_num <= 14) ? - NL80211_BAND_2GHZ : - NL80211_BAND_5GHZ; - struct ieee80211_channel chn = { - .band = band, - .center_freq = ieee80211_channel_to_frequency( - cmd->channel_num, band), - }; - struct cfg80211_chan_def chandef = { - .chan = &chn, - .center_freq1 = - ieee80211_channel_to_frequency(value, - band), - }; - - cmd->ctrl_ch_position = iwl_mvm_get_ctrl_pos(&chandef); - } - goto out; - } - - data = iwl_dbgfs_is_match("ftm_per_burst=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.ftm_per_burst = value; - goto out; - } - - data = iwl_dbgfs_is_match("ftm_resp_ts_avail=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.ftm_resp_ts_avail = value; - goto out; - } - - data = iwl_dbgfs_is_match("asap_mode=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.asap_mode = value; - goto out; - } - - data = iwl_dbgfs_is_match("send_responder_cfg=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0 && value) { - ret = iwl_mvm_tof_responder_cmd(mvm, vif); - goto out; - } - } - -out: - mutex_unlock(&mvm->mutex); - - return ret ?: count; -} - -static ssize_t iwl_dbgfs_tof_responder_params_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ieee80211_vif *vif = file->private_data; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - char buf[256]; - int pos = 0; - const size_t bufsz = sizeof(buf); - struct iwl_tof_responder_config_cmd *cmd; - - cmd = &mvm->tof_data.responder_cfg; - - mutex_lock(&mvm->mutex); - - pos += scnprintf(buf + pos, bufsz - pos, "burst_period = %d\n", - le16_to_cpu(cmd->burst_period)); - pos += scnprintf(buf + pos, bufsz - pos, "burst_duration = %d\n", - cmd->burst_duration); - pos += scnprintf(buf + pos, bufsz - pos, "bandwidth = %d\n", - cmd->bandwidth); - pos += scnprintf(buf + pos, bufsz - pos, "channel_num = %d\n", - cmd->channel_num); - pos += scnprintf(buf + pos, bufsz - pos, "ctrl_ch_position = 0x%x\n", - cmd->ctrl_ch_position); - pos += scnprintf(buf + pos, bufsz - pos, "bssid = %pM\n", - cmd->bssid); - pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %d\n", - cmd->min_delta_ftm); - pos += scnprintf(buf + pos, bufsz - pos, "num_of_burst_exp = %d\n", - cmd->num_of_burst_exp); - pos += scnprintf(buf + pos, bufsz - pos, "rate = %d\n", cmd->rate); - pos += scnprintf(buf + pos, bufsz - pos, "abort_responder = %d\n", - cmd->abort_responder); - pos += scnprintf(buf + pos, bufsz - pos, "get_ch_est = %d\n", - cmd->get_ch_est); - pos += scnprintf(buf + pos, bufsz - pos, "recv_sta_req_params = %d\n", - cmd->recv_sta_req_params); - pos += scnprintf(buf + pos, bufsz - pos, "ftm_per_burst = %d\n", - cmd->ftm_per_burst); - pos += scnprintf(buf + pos, bufsz - pos, "ftm_resp_ts_avail = %d\n", - cmd->ftm_resp_ts_avail); - pos += scnprintf(buf + pos, bufsz - pos, "asap_mode = %d\n", - cmd->asap_mode); - pos += scnprintf(buf + pos, bufsz - pos, - "tsf_timer_offset_msecs = %d\n", - le16_to_cpu(cmd->tsf_timer_offset_msecs)); - pos += scnprintf(buf + pos, bufsz - pos, "toa_offset = %d\n", - le16_to_cpu(cmd->toa_offset)); - - mutex_unlock(&mvm->mutex); - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_dbgfs_tof_range_request_write(struct ieee80211_vif *vif, - char *buf, size_t count, - loff_t *ppos) -{ - struct iwl_mvm_vif *mvmvif = 
iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - u32 value; - int ret = 0; - char *data; - - mutex_lock(&mvm->mutex); - - data = iwl_dbgfs_is_match("request_id=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.range_req.request_id = value; - goto out; - } - - data = iwl_dbgfs_is_match("initiator=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.range_req.initiator = value; - goto out; - } - - data = iwl_dbgfs_is_match("one_sided_los_disable=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.range_req.one_sided_los_disable = value; - goto out; - } - - data = iwl_dbgfs_is_match("req_timeout=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.range_req.req_timeout = value; - goto out; - } - - data = iwl_dbgfs_is_match("report_policy=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.range_req.report_policy = value; - goto out; - } - - data = iwl_dbgfs_is_match("macaddr_random=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.range_req.macaddr_random = value; - goto out; - } - - data = iwl_dbgfs_is_match("num_of_ap=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.range_req.num_of_ap = value; - goto out; - } - - data = iwl_dbgfs_is_match("macaddr_template=", buf); - if (data) { - u8 mac[ETH_ALEN]; - - if (!mac_pton(data, mac)) { - ret = -EINVAL; - goto out; - } - memcpy(mvm->tof_data.range_req.macaddr_template, mac, ETH_ALEN); - goto out; - } - - data = iwl_dbgfs_is_match("macaddr_mask=", buf); - if (data) { - u8 mac[ETH_ALEN]; - - if (!mac_pton(data, mac)) { - ret = -EINVAL; - goto out; - } - memcpy(mvm->tof_data.range_req.macaddr_mask, mac, ETH_ALEN); - goto out; - } - - data = iwl_dbgfs_is_match("ap=", buf); - if (data) { - struct iwl_tof_range_req_ap_entry ap = {}; - int size = sizeof(struct iwl_tof_range_req_ap_entry); - u16 burst_period; - u8 *mac = ap.bssid; - unsigned int i; - - if (sscanf(data, "%u %hhd %hhd %hhd" - "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx" - "%hhd %hhd %hd" - "%hhd %hhd %d" - "%hhx %hhd %hhd %hhd", - &i, &ap.channel_num, &ap.bandwidth, - &ap.ctrl_ch_position, - mac, mac + 1, mac + 2, mac + 3, mac + 4, mac + 5, - &ap.measure_type, &ap.num_of_bursts, - &burst_period, - &ap.samples_per_burst, &ap.retries_per_sample, - &ap.tsf_delta, &ap.location_req, &ap.asap_mode, - &ap.enable_dyn_ack, &ap.rssi) != 20) { - ret = -EINVAL; - goto out; - } - if (i >= IWL_MVM_TOF_MAX_APS) { - IWL_ERR(mvm, "Invalid AP index %d\n", i); - ret = -EINVAL; - goto out; - } - - ap.burst_period = cpu_to_le16(burst_period); - - memcpy(&mvm->tof_data.range_req.ap[i], &ap, size); - goto out; - } - - data = iwl_dbgfs_is_match("send_range_request=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0 && value) - ret = iwl_mvm_tof_range_request_cmd(mvm, vif); - goto out; - } - - ret = -EINVAL; -out: - mutex_unlock(&mvm->mutex); - return ret ?: count; -} - -static ssize_t iwl_dbgfs_tof_range_request_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ieee80211_vif *vif = file->private_data; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - char buf[512]; - int pos = 0; - const size_t bufsz = sizeof(buf); - struct iwl_tof_range_req_cmd *cmd; - int i; - - cmd = &mvm->tof_data.range_req; - - 
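The matching read handlers all follow the standard debugfs shape: accumulate lines into a bounded buffer with scnprintf(), then hand the result to simple_read_from_buffer(). scnprintf() matters because it returns the number of bytes actually written (unlike snprintf(), which returns what it would have written), so pos can never run past bufsz. A userspace approximation:

#include <stdio.h>

int main(void)
{
        char buf[256];
        int pos = 0;
        const size_t bufsz = sizeof(buf);

        /* snprintf stands in for scnprintf; safe here only because
         * the buffer is comfortably larger than the output */
        pos += snprintf(buf + pos, bufsz - pos, "request_id = %d\n", 7);
        pos += snprintf(buf + pos, bufsz - pos, "num_of_ap = %d\n", 2);

        /* the kernel handler copies [buf, buf+pos) to userspace here */
        fwrite(buf, 1, pos, stdout);
        return 0;
}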
mutex_lock(&mvm->mutex); - - pos += scnprintf(buf + pos, bufsz - pos, "request_id= %d\n", - cmd->request_id); - pos += scnprintf(buf + pos, bufsz - pos, "initiator= %d\n", - cmd->initiator); - pos += scnprintf(buf + pos, bufsz - pos, "one_sided_los_disable = %d\n", - cmd->one_sided_los_disable); - pos += scnprintf(buf + pos, bufsz - pos, "req_timeout= %d\n", - cmd->req_timeout); - pos += scnprintf(buf + pos, bufsz - pos, "report_policy= %d\n", - cmd->report_policy); - pos += scnprintf(buf + pos, bufsz - pos, "macaddr_random= %d\n", - cmd->macaddr_random); - pos += scnprintf(buf + pos, bufsz - pos, "macaddr_template= %pM\n", - cmd->macaddr_template); - pos += scnprintf(buf + pos, bufsz - pos, "macaddr_mask= %pM\n", - cmd->macaddr_mask); - pos += scnprintf(buf + pos, bufsz - pos, "num_of_ap= %d\n", - cmd->num_of_ap); - for (i = 0; i < cmd->num_of_ap; i++) { - struct iwl_tof_range_req_ap_entry *ap = &cmd->ap[i]; - - pos += scnprintf(buf + pos, bufsz - pos, - "ap %.2d: channel_num=%hhd bw=%hhd" - " control=%hhd bssid=%pM type=%hhd" - " num_of_bursts=%hhd burst_period=%hd ftm=%hhd" - " retries=%hhd tsf_delta=%d" - " tsf_delta_direction=%hhd location_req=0x%hhx " - " asap=%hhd enable=%hhd rssi=%hhd\n", - i, ap->channel_num, ap->bandwidth, - ap->ctrl_ch_position, ap->bssid, - ap->measure_type, ap->num_of_bursts, - ap->burst_period, ap->samples_per_burst, - ap->retries_per_sample, ap->tsf_delta, - ap->tsf_delta_direction, - ap->location_req, ap->asap_mode, - ap->enable_dyn_ack, ap->rssi); - } - - mutex_unlock(&mvm->mutex); - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_dbgfs_tof_range_req_ext_write(struct ieee80211_vif *vif, - char *buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - u32 value; - int ret = 0; - char *data; - - mutex_lock(&mvm->mutex); - - data = iwl_dbgfs_is_match("tsf_timer_offset_msec=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.range_req_ext.tsf_timer_offset_msec = - cpu_to_le16(value); - goto out; - } - - data = iwl_dbgfs_is_match("min_delta_ftm=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.range_req_ext.min_delta_ftm = value; - goto out; - } - - data = iwl_dbgfs_is_match("ftm_format_and_bw20M=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.range_req_ext.ftm_format_and_bw20M = - value; - goto out; - } - - data = iwl_dbgfs_is_match("ftm_format_and_bw40M=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.range_req_ext.ftm_format_and_bw40M = - value; - goto out; - } - - data = iwl_dbgfs_is_match("ftm_format_and_bw80M=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.range_req_ext.ftm_format_and_bw80M = - value; - goto out; - } - - data = iwl_dbgfs_is_match("send_range_req_ext=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0 && value) - ret = iwl_mvm_tof_range_request_ext_cmd(mvm, vif); - goto out; - } - - ret = -EINVAL; -out: - mutex_unlock(&mvm->mutex); - return ret ?: count; -} - -static ssize_t iwl_dbgfs_tof_range_req_ext_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ieee80211_vif *vif = file->private_data; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - char buf[256]; - int pos = 0; - const size_t bufsz = 
sizeof(buf); - struct iwl_tof_range_req_ext_cmd *cmd; - - cmd = &mvm->tof_data.range_req_ext; - - mutex_lock(&mvm->mutex); - - pos += scnprintf(buf + pos, bufsz - pos, - "tsf_timer_offset_msec = %hd\n", - cmd->tsf_timer_offset_msec); - pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %hhd\n", - cmd->min_delta_ftm); - pos += scnprintf(buf + pos, bufsz - pos, - "ftm_format_and_bw20M = %hhd\n", - cmd->ftm_format_and_bw20M); - pos += scnprintf(buf + pos, bufsz - pos, - "ftm_format_and_bw40M = %hhd\n", - cmd->ftm_format_and_bw40M); - pos += scnprintf(buf + pos, bufsz - pos, - "ftm_format_and_bw80M = %hhd\n", - cmd->ftm_format_and_bw80M); - - mutex_unlock(&mvm->mutex); - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_dbgfs_tof_range_abort_write(struct ieee80211_vif *vif, - char *buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - u32 value; - int abort_id, ret = 0; - char *data; - - mutex_lock(&mvm->mutex); - - data = iwl_dbgfs_is_match("abort_id=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.last_abort_id = value; - goto out; - } - - data = iwl_dbgfs_is_match("send_range_abort=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0 && value) { - abort_id = mvm->tof_data.last_abort_id; - ret = iwl_mvm_tof_range_abort_cmd(mvm, abort_id); - goto out; - } - } - -out: - mutex_unlock(&mvm->mutex); - return ret ?: count; -} - -static ssize_t iwl_dbgfs_tof_range_abort_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ieee80211_vif *vif = file->private_data; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - char buf[32]; - int pos = 0; - const size_t bufsz = sizeof(buf); - int last_abort_id; - - mutex_lock(&mvm->mutex); - last_abort_id = mvm->tof_data.last_abort_id; - mutex_unlock(&mvm->mutex); - - pos += scnprintf(buf + pos, bufsz - pos, "last_abort_id = %d\n", - last_abort_id); - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_dbgfs_tof_range_response_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) +static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf, + size_t count, loff_t *ppos) { - struct ieee80211_vif *vif = file->private_data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->mvm; - char *buf; - int pos = 0; - const size_t bufsz = sizeof(struct iwl_tof_range_rsp_ntfy) + 256; - struct iwl_tof_range_rsp_ntfy *cmd; - int i, ret; + u8 value; + int ret; - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; + ret = kstrtou8(buf, 0, &value); + if (ret) + return ret; + if (value > 1) + return -EINVAL; mutex_lock(&mvm->mutex); - cmd = &mvm->tof_data.range_resp; - - pos += scnprintf(buf + pos, bufsz - pos, "request_id = %d\n", - cmd->request_id); - pos += scnprintf(buf + pos, bufsz - pos, "status = %d\n", - cmd->request_status); - pos += scnprintf(buf + pos, bufsz - pos, "last_in_batch = %d\n", - cmd->last_in_batch); - pos += scnprintf(buf + pos, bufsz - pos, "num_of_aps = %d\n", - cmd->num_of_aps); - for (i = 0; i < cmd->num_of_aps; i++) { - struct iwl_tof_range_rsp_ap_entry_ntfy *ap = &cmd->ap[i]; - - pos += scnprintf(buf + pos, bufsz - pos, - "ap %.2d: bssid=%pM status=%hhd bw=%hhd" - " rtt=%d rtt_var=%d rtt_spread=%d" - " rssi=%hhd rssi_spread=%hhd" - " 
range=%d range_var=%d" - " time_stamp=%d\n", - i, ap->bssid, ap->measure_status, - ap->measure_bw, - ap->rtt, ap->rtt_variance, ap->rtt_spread, - ap->rssi, ap->rssi_spread, ap->range, - ap->range_variance, ap->timestamp); - } + iwl_mvm_update_low_latency(mvm, vif, value, LOW_LATENCY_DEBUGFS); mutex_unlock(&mvm->mutex); - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; + return count; } -static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf, - size_t count, loff_t *ppos) +static ssize_t +iwl_dbgfs_low_latency_force_write(struct ieee80211_vif *vif, char *buf, + size_t count, loff_t *ppos) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->mvm; @@ -1279,13 +555,24 @@ static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf, ret = kstrtou8(buf, 0, &value); if (ret) return ret; - if (value > 1) + + if (value > NUM_LOW_LATENCY_FORCE) return -EINVAL; mutex_lock(&mvm->mutex); - iwl_mvm_update_low_latency(mvm, vif, value, LOW_LATENCY_DEBUGFS); + if (value == LOW_LATENCY_FORCE_UNSET) { + iwl_mvm_update_low_latency(mvm, vif, false, + LOW_LATENCY_DEBUGFS_FORCE); + iwl_mvm_update_low_latency(mvm, vif, false, + LOW_LATENCY_DEBUGFS_FORCE_ENABLE); + } else { + iwl_mvm_update_low_latency(mvm, vif, + value == LOW_LATENCY_FORCE_ON, + LOW_LATENCY_DEBUGFS_FORCE); + iwl_mvm_update_low_latency(mvm, vif, true, + LOW_LATENCY_DEBUGFS_FORCE_ENABLE); + } mutex_unlock(&mvm->mutex); - return count; } @@ -1295,15 +582,25 @@ static ssize_t iwl_dbgfs_low_latency_read(struct file *file, { struct ieee80211_vif *vif = file->private_data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - char buf[30] = {}; + char format[] = "traffic=%d\ndbgfs=%d\nvcmd=%d\nvif_type=%d\n" + "dbgfs_force_enable=%d\ndbgfs_force=%d\nactual=%d\n"; + + /* + * all values in format are boolean so the size of format is enough + * for holding the result string + */ + char buf[sizeof(format) + 1] = {}; int len; - len = scnprintf(buf, sizeof(buf) - 1, - "traffic=%d\ndbgfs=%d\nvcmd=%d\nvif_type=%d\n", + len = scnprintf(buf, sizeof(buf) - 1, format, !!(mvmvif->low_latency & LOW_LATENCY_TRAFFIC), !!(mvmvif->low_latency & LOW_LATENCY_DEBUGFS), !!(mvmvif->low_latency & LOW_LATENCY_VCMD), - !!(mvmvif->low_latency & LOW_LATENCY_VIF_TYPE)); + !!(mvmvif->low_latency & LOW_LATENCY_VIF_TYPE), + !!(mvmvif->low_latency & + LOW_LATENCY_DEBUGFS_FORCE_ENABLE), + !!(mvmvif->low_latency & LOW_LATENCY_DEBUGFS_FORCE), + !!(mvmvif->low_latency_actual)); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } @@ -1456,14 +753,9 @@ MVM_DEBUGFS_READ_FILE_OPS(tx_pwr_lmt); MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32); MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256); MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10); +MVM_DEBUGFS_WRITE_FILE_OPS(low_latency_force, 10); MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20); MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10); -MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_enable, 32); -MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_request, 512); -MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_req_ext, 32); -MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_abort, 32); -MVM_DEBUGFS_READ_FILE_OPS(tof_range_response); -MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32); MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32); MVM_DEBUGFS_READ_FILE_OPS(os_device_timediff); @@ -1497,6 +789,7 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) MVM_DEBUGFS_ADD_FILE_VIF(tx_pwr_lmt, 
mvmvif->dbgfs_dir, 0400); MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, 0400); MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir, 0600); + MVM_DEBUGFS_ADD_FILE_VIF(low_latency_force, mvmvif->dbgfs_dir, 0600); MVM_DEBUGFS_ADD_FILE_VIF(uapsd_misbehaving, mvmvif->dbgfs_dir, 0600); MVM_DEBUGFS_ADD_FILE_VIF(rx_phyinfo, mvmvif->dbgfs_dir, 0600); MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir, 0600); @@ -1506,24 +799,6 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) mvmvif == mvm->bf_allowed_vif) MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir, 0600); - if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT) && - !vif->p2p && (vif->type != NL80211_IFTYPE_P2P_DEVICE)) { - if (IWL_MVM_TOF_IS_RESPONDER && vif->type == NL80211_IFTYPE_AP) - MVM_DEBUGFS_ADD_FILE_VIF(tof_responder_params, - mvmvif->dbgfs_dir, 0600); - - MVM_DEBUGFS_ADD_FILE_VIF(tof_range_request, mvmvif->dbgfs_dir, - 0600); - MVM_DEBUGFS_ADD_FILE_VIF(tof_range_req_ext, mvmvif->dbgfs_dir, - 0600); - MVM_DEBUGFS_ADD_FILE_VIF(tof_enable, mvmvif->dbgfs_dir, - 0600); - MVM_DEBUGFS_ADD_FILE_VIF(tof_range_abort, mvmvif->dbgfs_dir, - 0600); - MVM_DEBUGFS_ADD_FILE_VIF(tof_range_response, mvmvif->dbgfs_dir, - 0400); - } - /* * Create symlink for convenience pointing to interface specific * debugfs entries for the driver. For example, under diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 52c361a6124c..e136475a34f6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -69,6 +69,7 @@ #include "sta.h" #include "iwl-io.h" #include "debugfs.h" +#include "iwl-modparams.h" #include "fw/error-dump.h" static ssize_t iwl_dbgfs_ctdp_budget_read(struct file *file, @@ -1206,47 +1207,6 @@ static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file, return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } -/* - * Enable / Disable continuous recording. - * Cause the FW to start continuous recording, by sending the relevant hcmd. - * Enable: input of every integer larger than 0, ENABLE_CONT_RECORDING. - * Disable: for 0 as input, DISABLE_CONT_RECORDING. - */ -static ssize_t iwl_dbgfs_cont_recording_write(struct iwl_mvm *mvm, - char *buf, size_t count, - loff_t *ppos) -{ - struct iwl_trans *trans = mvm->trans; - const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg_dest_tlv; - struct iwl_continuous_record_cmd cont_rec = {}; - int ret, rec_mode; - - if (!iwl_mvm_firmware_running(mvm)) - return -EIO; - - if (!dest) - return -EOPNOTSUPP; - - if (dest->monitor_mode != SMEM_MODE || - trans->cfg->device_family < IWL_DEVICE_FAMILY_8000) - return -EOPNOTSUPP; - - ret = kstrtoint(buf, 0, &rec_mode); - if (ret) - return ret; - - cont_rec.record_mode.enable_recording = rec_mode ? 
- cpu_to_le16(ENABLE_CONT_RECORDING) : - cpu_to_le16(DISABLE_CONT_RECORDING); - - mutex_lock(&mvm->mutex); - ret = iwl_mvm_send_cmd_pdu(mvm, LDBG_CONFIG_CMD, 0, - sizeof(cont_rec), &cont_rec); - mutex_unlock(&mvm->mutex); - - return ret ?: count; -} - static ssize_t iwl_dbgfs_fw_dbg_conf_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) @@ -1722,11 +1682,33 @@ iwl_dbgfs_send_echo_cmd_write(struct iwl_mvm *mvm, char *buf, return ret ?: count; } +struct iwl_mvm_sniffer_apply { + struct iwl_mvm *mvm; + u16 aid; +}; + +static bool iwl_mvm_sniffer_apply(struct iwl_notif_wait_data *notif_data, + struct iwl_rx_packet *pkt, void *data) +{ + struct iwl_mvm_sniffer_apply *apply = data; + + apply->mvm->cur_aid = cpu_to_le16(apply->aid); + + return true; +} + static ssize_t iwl_dbgfs_he_sniffer_params_write(struct iwl_mvm *mvm, char *buf, - size_t count, loff_t *ppos) + size_t count, loff_t *ppos) { + struct iwl_notification_wait wait; struct iwl_he_monitor_cmd he_mon_cmd = {}; + struct iwl_mvm_sniffer_apply apply = { + .mvm = mvm, + }; + u16 wait_cmds[] = { + iwl_cmd_id(HE_AIR_SNIFFER_CONFIG_CMD, DATA_PATH_GROUP, 0), + }; u32 aid; int ret; @@ -1742,10 +1724,30 @@ iwl_dbgfs_he_sniffer_params_write(struct iwl_mvm *mvm, char *buf, he_mon_cmd.aid = cpu_to_le16(aid); + apply.aid = aid; + mutex_lock(&mvm->mutex); + + /* + * Use the notification waiter to get our function triggered + * in sequence with other RX. This ensures that frames we get + * on the RX queue _before_ the new configuration is applied + * still have mvm->cur_aid pointing to the old AID, and that + * frames on the RX queue _after_ the firmware processed the + * new configuration (and sent the response, synchronously) + * get mvm->cur_aid correctly set to the new AID. + */ + iwl_init_notification_wait(&mvm->notif_wait, &wait, + wait_cmds, ARRAY_SIZE(wait_cmds), + iwl_mvm_sniffer_apply, &apply); + ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(HE_AIR_SNIFFER_CONFIG_CMD, DATA_PATH_GROUP, 0), 0, sizeof(he_mon_cmd), &he_mon_cmd); + + /* no need to really wait, we already did anyway */ + iwl_remove_notification(&mvm->notif_wait, &wait); + mutex_unlock(&mvm->mutex); return ret ?: count; @@ -1800,7 +1802,6 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8); MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8); MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8); MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64); -MVM_DEBUGFS_WRITE_FILE_OPS(cont_recording, 8); MVM_DEBUGFS_WRITE_FILE_OPS(max_amsdu_len, 8); MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl, (IWL_RSS_INDIRECTION_TABLE_SIZE * 2)); @@ -2004,7 +2005,6 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(max_amsdu_len, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, 0200); - MVM_DEBUGFS_ADD_FILE(cont_recording, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(indirection_tbl, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(inject_packet, mvm->debugfs_dir, 0200); #ifdef CONFIG_ACPI @@ -2071,6 +2071,9 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) if (!debugfs_create_blob("nvm_phy_sku", 0400, mvm->debugfs_dir, &mvm->nvm_phy_sku_blob)) goto err; + if (!debugfs_create_blob("nvm_reg", S_IRUSR, + mvm->debugfs_dir, &mvm->nvm_reg_blob)) + goto err; debugfs_create_file("mem", 0600, dbgfs_dir, mvm, &iwl_dbgfs_mem_ops); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index 
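The he_sniffer change above is the most subtle one in this section: instead of updating mvm->cur_aid directly, it registers a notification waiter whose callback fires on the RX path when the firmware's synchronous response to HE_AIR_SNIFFER_CONFIG_CMD arrives, so frames queued before the response still see the old AID and frames after it see the new one. A sketch of just the callback shape; the types are illustrative stand-ins, not the driver's real ones:

#include <stdbool.h>
#include <stdint.h>

struct mvm_state { uint16_t cur_aid; };
struct rx_packet { int cmd_id; };

struct sniffer_apply {
        struct mvm_state *mvm;
        uint16_t aid;
};

/* Runs in sequence with other RX processing: everything received
 * before the firmware's response is handled with the old cur_aid,
 * everything after it with the new one. */
static bool sniffer_apply_cb(struct rx_packet *pkt, void *data)
{
        struct sniffer_apply *apply = data;

        (void)pkt;
        apply->mvm->cur_aid = apply->aid;
        return true;    /* satisfied: remove the waiter */
}

Since the command is sent synchronously, the code never actually sleeps on the waiter; iwl_remove_notification() just unregisters it, as the "no need to really wait" comment says.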
143c7fcaea41..e3eb812e0248 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,6 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -89,7 +91,7 @@ #include "fw/api/sf.h" #include "fw/api/sta.h" #include "fw/api/stats.h" -#include "fw/api/tof.h" +#include "fw/api/location.h" #include "fw/api/tx.h" #endif /* __fw_api_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 0d6c313b6669..d3dc9d276e0f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -293,9 +293,9 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, enum iwl_ucode_type ucode_type) { struct iwl_notification_wait alive_wait; - struct iwl_mvm_alive_data alive_data; + struct iwl_mvm_alive_data alive_data = {}; const struct fw_img *fw; - int ret, i; + int ret; enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img; static const u16 alive_cmd[] = { MVM_ALIVE }; @@ -373,9 +373,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].tid_bitmap = BIT(IWL_MAX_TID_COUNT + 2); - for (i = 0; i < IEEE80211_MAX_QUEUES; i++) - atomic_set(&mvm->mac80211_queue_stop_count[i], 0); - set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); #ifdef CONFIG_IWLWIFI_DEBUGFS iwl_fw_set_dbg_rec_on(&mvm->fwrt); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 7779951a9533..7cfdd07d8736 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -97,11 +97,6 @@ struct iwl_mvm_mac_iface_iterator_data { bool found_vif; }; -struct iwl_mvm_hw_queues_iface_iterator_data { - struct ieee80211_vif *exclude_vif; - unsigned long used_hw_queues; -}; - static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) { @@ -208,61 +203,6 @@ static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac, data->preferred_tsf = NUM_TSF_IDS; } -/* - * Get the mask of the queues used by the vif - */ -u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif) -{ - u32 qmask = 0, ac; - - if (vif->type == NL80211_IFTYPE_P2P_DEVICE) - return BIT(IWL_MVM_OFFCHANNEL_QUEUE); - - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { - if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE) - qmask |= BIT(vif->hw_queue[ac]); - } - - if (vif->type == NL80211_IFTYPE_AP || - vif->type == NL80211_IFTYPE_ADHOC) - qmask |= BIT(vif->cab_queue); - - return qmask; -} - -static void iwl_mvm_iface_hw_queues_iter(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct iwl_mvm_hw_queues_iface_iterator_data *data = _data; - - /* exclude the given vif */ - if (vif == data->exclude_vif) - return; - - data->used_hw_queues |= iwl_mvm_mac_get_queues_mask(vif); -} - -unsigned long 
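Two small robustness changes ride along in fw.c: alive_data is now zero-initialized, so any read of its fields before the alive notification fills them in is well defined, and the per-queue mac80211_queue_stop_count reset disappears along with the rest of the queue-mask machinery. A sketch of why the empty initializer matters (the "= {}" spelling is the GNU/C23 empty initializer used in the hunk):

#include <stdio.h>

struct alive_data { int valid; unsigned int scd_base_addr; };

int main(void)
{
        struct alive_data alive = {};   /* every field starts at zero */

        /* imagine the wait timed out and nothing filled the struct in */
        if (!alive.valid)
                printf("firmware never reported alive\n");
        return 0;
}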
iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm, - struct ieee80211_vif *exclude_vif) -{ - struct iwl_mvm_hw_queues_iface_iterator_data data = { - .exclude_vif = exclude_vif, - .used_hw_queues = - BIT(IWL_MVM_OFFCHANNEL_QUEUE) | - BIT(mvm->aux_queue) | - BIT(IWL_MVM_DQA_GCAST_QUEUE), - }; - - lockdep_assert_held(&mvm->mutex); - - /* mark all VIF used hw queues */ - ieee80211_iterate_active_interfaces_atomic( - mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, - iwl_mvm_iface_hw_queues_iter, &data); - - return data.used_hw_queues; -} - static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { @@ -360,8 +300,6 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, iwl_mvm_mac_iface_iterator, &data); - used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, vif); - /* * In the case we're getting here during resume, it's similar to * firmware restart, and with RESUME_ALL the iterator will find @@ -416,9 +354,6 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) * the ones here - no real limit */ queue_limit = IEEE80211_MAX_QUEUES; - BUILD_BUG_ON(IEEE80211_MAX_QUEUES > - BITS_PER_BYTE * - sizeof(mvm->hw_queue_to_mac80211[0])); /* * Find available queues, and allocate them to the ACs. When in @@ -446,9 +381,6 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) * queue value (when queue is enabled). */ mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; - vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; - } else { - vif->cab_queue = IEEE80211_INVAL_HW_QUEUE; } mvmvif->bcast_sta.sta_id = IWL_MVM_INVALID_STA; @@ -462,8 +394,6 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) exit_fail: memset(mvmvif, 0, sizeof(struct iwl_mvm_vif)); - memset(vif->hw_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(vif->hw_queue)); - vif->cab_queue = IEEE80211_INVAL_HW_QUEUE; return ret; } @@ -778,27 +708,9 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, if (vif->bss_conf.assoc && vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax) { - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - u8 sta_id = mvmvif->ap_sta_id; - cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX); - if (sta_id != IWL_MVM_INVALID_STA) { - struct ieee80211_sta *sta; - - sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], - lockdep_is_held(&mvm->mutex)); - - /* - * TODO: we should check the ext cap IE but it is - * unclear why the spec requires two bits (one in HE - * cap IE, and one in the ext cap IE). In the meantime - * rely on the HE cap IE only. 
- */ - if (sta && (sta->he_cap.he_cap_elem.mac_cap_info[0] & - IEEE80211_HE_MAC_CAP0_TWT_RES)) - ctxt_sta->data_policy |= - cpu_to_le32(TWT_SUPPORTED); - } + if (vif->bss_conf.twt_requester) + ctxt_sta->data_policy |= cpu_to_le32(TWT_SUPPORTED); } @@ -881,8 +793,6 @@ static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm, iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); - cmd.protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT); - /* Override the filter flags to accept only probe requests */ cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST); @@ -1203,7 +1113,7 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm, if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) - ctxt_ap->mcast_qid = cpu_to_le32(vif->cab_queue); + ctxt_ap->mcast_qid = cpu_to_le32(mvmvif->cab_queue); /* * Only set the beacon time when the MAC is being added, when we @@ -1435,7 +1345,7 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, agg_status = iwl_mvm_get_agg_status(mvm, beacon_notify_hdr); status = le16_to_cpu(agg_status->status) & TX_STATUS_MSK; IWL_DEBUG_RX(mvm, - "beacon status %#x retries:%d tsf:0x%16llX gp2:0x%X rate:%d\n", + "beacon status %#x retries:%d tsf:0x%016llX gp2:0x%X rate:%d\n", status, beacon_notify_hdr->failure_frame, le64_to_cpu(beacon->tsf), mvm->ap_last_beacon_gp2, @@ -1472,35 +1382,48 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, } } -static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) +void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { - struct iwl_missed_beacons_notif *missed_beacons = _data; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_missed_beacons_notif *mb = (void *)pkt->data; struct iwl_fw_dbg_trigger_missed_bcon *bcon_trig; struct iwl_fw_dbg_trigger_tlv *trigger; u32 stop_trig_missed_bcon, stop_trig_missed_bcon_since_rx; u32 rx_missed_bcon, rx_missed_bcon_since_rx; + struct ieee80211_vif *vif; + u32 id = le32_to_cpu(mb->mac_id); - if (mvmvif->id != (u16)le32_to_cpu(missed_beacons->mac_id)) - return; + IWL_DEBUG_INFO(mvm, + "missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n", + le32_to_cpu(mb->mac_id), + le32_to_cpu(mb->consec_missed_beacons), + le32_to_cpu(mb->consec_missed_beacons_since_last_rx), + le32_to_cpu(mb->num_recvd_beacons), + le32_to_cpu(mb->num_expected_beacons)); + + rcu_read_lock(); + + vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true); + if (!vif) + goto out; - rx_missed_bcon = le32_to_cpu(missed_beacons->consec_missed_beacons); + rx_missed_bcon = le32_to_cpu(mb->consec_missed_beacons); rx_missed_bcon_since_rx = - le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx); + le32_to_cpu(mb->consec_missed_beacons_since_last_rx); /* * TODO: the threshold should be adjusted based on latency conditions, * and/or in case of a CS flow on one of the other AP vifs. 
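 * As a worked example of the two branches below, using the mvm.h defaults (IWL_MVM_MISSED_BEACONS_THRESHOLD = 8, IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG = 16): more than 16 consecutive missed beacons declare the connection lost right away, while more than 8 missed since the last received frame only report beacon loss and leave it to mac80211 to probe the AP before giving up.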
*/ - if (le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx) > - IWL_MVM_MISSED_BEACONS_THRESHOLD) + if (rx_missed_bcon > IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG) + iwl_mvm_connection_loss(mvm, vif, "missed beacons"); + else if (rx_missed_bcon_since_rx > IWL_MVM_MISSED_BEACONS_THRESHOLD) ieee80211_beacon_loss(vif); trigger = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_MISSED_BEACONS); if (!trigger) - return; + goto out; bcon_trig = (void *)trigger->data; stop_trig_missed_bcon = le32_to_cpu(bcon_trig->stop_consec_missed_bcon); @@ -1512,28 +1435,11 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac, if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx || rx_missed_bcon >= stop_trig_missed_bcon) iwl_fw_dbg_collect_trig(&mvm->fwrt, trigger, NULL); -} - -void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_missed_beacons_notif *mb = (void *)pkt->data; - - IWL_DEBUG_INFO(mvm, - "missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n", - le32_to_cpu(mb->mac_id), - le32_to_cpu(mb->consec_missed_beacons), - le32_to_cpu(mb->consec_missed_beacons_since_last_rx), - le32_to_cpu(mb->num_recvd_beacons), - le32_to_cpu(mb->num_expected_beacons)); - - ieee80211_iterate_active_interfaces_atomic(mvm->hw, - IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_beacon_loss_iterator, - mb); iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_MISSED_BEACONS); + +out: + rcu_read_unlock(); } void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, @@ -1575,16 +1481,29 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, ieee80211_rx_napi(mvm->hw, NULL, skb, NULL); } -static void iwl_mvm_probe_resp_data_iter(void *_data, u8 *mac, - struct ieee80211_vif *vif) +void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { - struct iwl_probe_resp_data_notif *notif = _data; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_probe_resp_data_notif *notif = (void *)pkt->data; struct iwl_probe_resp_data *old_data, *new_data; + int len = iwl_rx_packet_payload_len(pkt); + u32 id = le32_to_cpu(notif->mac_id); + struct ieee80211_vif *vif; + struct iwl_mvm_vif *mvmvif; - if (mvmvif->id != (u16)le32_to_cpu(notif->mac_id)) + if (WARN_ON_ONCE(len < sizeof(*notif))) return; + IWL_DEBUG_INFO(mvm, "Probe response data notif: noa %d, csa %d\n", + notif->noa_active, notif->csa_counter); + + vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, false); + if (!vif) + return; + + mvmvif = iwl_mvm_vif_from_mac80211(vif); + new_data = kzalloc(sizeof(*new_data), GFP_KERNEL); if (!new_data) return; @@ -1615,25 +1534,6 @@ static void iwl_mvm_probe_resp_data_iter(void *_data, u8 *mac, ieee80211_csa_set_counter(vif, notif->csa_counter); } -void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_probe_resp_data_notif *notif = (void *)pkt->data; - int len = iwl_rx_packet_payload_len(pkt); - - if (WARN_ON_ONCE(len < sizeof(*notif))) - return; - - IWL_DEBUG_INFO(mvm, "Probe response data notif: noa %d, csa %d\n", - notif->noa_active, notif->csa_counter); - - ieee80211_iterate_active_interfaces(mvm->hw, - IEEE80211_IFACE_ITER_ACTIVE, - iwl_mvm_probe_resp_data_iter, - notif); -} - void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { diff --git 
a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 47d65adfa3e0..e098884dd915 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -395,6 +395,21 @@ int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm) return ret; } +const static u8 he_if_types_ext_capa_sta[] = { + [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, + [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, + [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT, +}; + +const static struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = { + { + .iftype = NL80211_IFTYPE_STATION, + .extended_capabilities = he_if_types_ext_capa_sta, + .extended_capabilities_mask = he_if_types_ext_capa_sta, + .extended_capabilities_len = sizeof(he_if_types_ext_capa_sta), + }, +}; + int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) { struct ieee80211_hw *hw = mvm->hw; @@ -410,7 +425,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) ieee80211_hw_set(hw, SIGNAL_DBM); ieee80211_hw_set(hw, SPECTRUM_MGMT); ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); - ieee80211_hw_set(hw, QUEUE_CONTROL); ieee80211_hw_set(hw, WANT_MONITOR_VIF); ieee80211_hw_set(hw, SUPPORTS_PS); ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); @@ -424,6 +438,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR); ieee80211_hw_set(hw, DEAUTH_NEED_MGD_TX_PREP); ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW); + ieee80211_hw_set(hw, BUFF_MMPDU_TXQ); + ieee80211_hw_set(hw, STA_MMPDU_TXQ); + ieee80211_hw_set(hw, TX_AMSDU); + ieee80211_hw_set(hw, TX_FRAG_LIST); if (iwl_mvm_has_tlc_offload(mvm)) { ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW); @@ -469,6 +487,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES; hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP; + hw->max_tx_fragments = mvm->trans->max_skb_frags; BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 6); memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers)); @@ -534,6 +553,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->sta_data_size = sizeof(struct iwl_mvm_sta); hw->vif_data_size = sizeof(struct iwl_mvm_vif); hw->chanctx_data_size = sizeof(u16); + hw->txq_data_size = sizeof(struct iwl_mvm_txq); hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_P2P_CLIENT) | @@ -673,6 +693,13 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE); } + if (mvm->nvm_data->sku_cap_11ax_enable && + !iwlwifi_mod_params.disable_11ax) { + hw->wiphy->iftype_ext_capab = he_iftypes_ext_capa; + hw->wiphy->num_iftype_ext_capab = + ARRAY_SIZE(he_iftypes_ext_capa); + } + mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; #ifdef CONFIG_PM_SLEEP @@ -776,7 +803,6 @@ static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm, goto out; __skb_queue_tail(&mvm->d0i3_tx, skb); - ieee80211_stop_queues(mvm->hw); /* trigger wakeup */ iwl_mvm_ref(mvm, IWL_MVM_REF_TX); @@ -796,13 +822,15 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, struct ieee80211_sta *sta = control->sta; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (void *)skb->data; + bool offchannel = IEEE80211_SKB_CB(skb)->flags & + IEEE80211_TX_CTL_TX_OFFCHAN; if (iwl_mvm_is_radio_killed(mvm)) { IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n"); goto drop; } - if (info->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE && + if (offchannel && !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) && 
!test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) goto drop; @@ -815,8 +843,8 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, sta = NULL; /* If there is no sta, and it's not offchannel - send through AP */ - if (info->control.vif->type == NL80211_IFTYPE_STATION && - info->hw_queue != IWL_MVM_OFFCHANNEL_QUEUE && !sta) { + if (!sta && info->control.vif->type == NL80211_IFTYPE_STATION && + !offchannel) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(info->control.vif); u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id); @@ -844,22 +872,95 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, ieee80211_free_txskb(hw, skb); } -static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg) +void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq) { - if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) - return false; - return true; + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq); + struct sk_buff *skb = NULL; + + /* + * No need for threads to be pending here; they can leave all the + * work to the first taker. + * + * mvmtxq->tx_request logic: + * + * If 0, no one is currently TXing, set to 1 to indicate the current + * thread will now start TX and other threads should quit. + * + * If 1, another thread is currently TXing, set to 2 to indicate to + * that thread that there was another request. Since that request may + * have raced with the check whether the queue is empty, the TXing + * thread should check the queue's status one more time before leaving. + * This check is done in order to not leave any TX hanging in the queue + * until the next TX invocation (which may not even happen). + * + * If 2, another thread is currently TXing, and it will already double + * check the queue, so do nothing. + */ + if (atomic_fetch_add_unless(&mvmtxq->tx_request, 1, 2)) + return; + + rcu_read_lock(); + do { + while (likely(!mvmtxq->stopped && + (mvm->trans->system_pm_mode == + IWL_PLAT_PM_MODE_DISABLED))) { + skb = ieee80211_tx_dequeue(hw, txq); + + if (!skb) + break; + + if (!txq->sta) + iwl_mvm_tx_skb_non_sta(mvm, skb); + else + iwl_mvm_tx_skb(mvm, skb, txq->sta); + } + } while (atomic_dec_return(&mvmtxq->tx_request)); + rcu_read_unlock(); } -static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg) +static void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw, + struct ieee80211_txq *txq) { - if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) - return false; - if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG) - return true; + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq); - /* enabled by default */ - return true; + /* + * Please note that racing is handled very carefully here: + * mvmtxq->txq_id is updated during allocation, and mvmtxq->list is + * deleted afterwards. + * This means that if: + * mvmtxq->txq_id != INVALID_QUEUE && list_empty(&mvmtxq->list): + * queue is allocated and we can TX. + * mvmtxq->txq_id != INVALID_QUEUE && !list_empty(&mvmtxq->list): + * a race, should defer the frame. + * mvmtxq->txq_id == INVALID_QUEUE && list_empty(&mvmtxq->list): + * need to allocate the queue and defer the frame. + * mvmtxq->txq_id == INVALID_QUEUE && !list_empty(&mvmtxq->list): + * queue is already scheduled for allocation, no need to allocate, + * should defer the frame. + */ + + /* If the queue is allocated, TX and return. 
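+	 * Per the state table above this is the txq_id != INVALID_QUEUE && list_empty() case; the list check below only guards the race where txq_id was already assigned but the allocation work has not yet removed the queue from the list.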
*/ + if (!txq->sta || mvmtxq->txq_id != IWL_MVM_INVALID_QUEUE) { + /* + * Check that list is empty to avoid a race where txq_id is + * already updated, but the queue allocation work wasn't + * finished + */ + if (unlikely(txq->sta && !list_empty(&mvmtxq->list))) + return; + + iwl_mvm_mac_itxq_xmit(hw, txq); + return; + } + + /* The list is being deleted only after the queue is fully allocated. */ + if (!list_empty(&mvmtxq->list)) + return; + + list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs); + schedule_work(&mvm->add_stream_wk); } #define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) \ @@ -974,7 +1075,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, mvmvif = iwl_mvm_vif_from_mac80211(vif); cancel_delayed_work(&mvmvif->uapsd_nonagg_detected_wk); } - if (!iwl_enable_rx_ampdu(mvm->cfg)) { + if (!iwl_enable_rx_ampdu()) { ret = -EINVAL; break; } @@ -986,7 +1087,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, timeout); break; case IEEE80211_AMPDU_TX_START: - if (!iwl_enable_tx_ampdu(mvm->cfg)) { + if (!iwl_enable_tx_ampdu()) { ret = -EINVAL; break; } @@ -1066,6 +1167,8 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) iwl_mvm_stop_device(mvm); + mvm->cur_aid = 0; + mvm->scan_status = 0; mvm->ps_disabled = false; mvm->calibrating = false; @@ -1085,7 +1188,6 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) iwl_mvm_reset_phy_ctxts(mvm); memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); - memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames)); memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd)); @@ -1391,6 +1493,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, if (ret) goto out_unlock; + rcu_assign_pointer(mvm->vif_id_to_mac[mvmvif->id], vif); + /* Counting number of interfaces is needed for legacy PM */ if (vif->type != NL80211_IFTYPE_P2P_DEVICE) mvm->vif_count++; @@ -1582,6 +1686,8 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, iwl_mvm_power_update_mac(mvm); iwl_mvm_mac_ctxt_remove(mvm, vif); + RCU_INIT_POINTER(mvm->vif_id_to_mac[mvmvif->id], NULL); + if (vif->type == NL80211_IFTYPE_MONITOR) mvm->monitor_on = false; @@ -2186,6 +2292,12 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); } + /* Update MU EDCA params */ + if (changes & BSS_CHANGED_QOS && mvmvif->associated && + bss_conf->assoc && vif->bss_conf.he_support && + !iwlwifi_mod_params.disable_11ax) + iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id); + /* * If we're not associated yet, take the (new) BSSID before associating * so the firmware knows. 
If we're already associated, then use the old @@ -2712,7 +2824,7 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, return; spin_lock_bh(&mvmsta->lock); - for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { + for (tid = 0; tid < ARRAY_SIZE(mvmsta->tid_data); tid++) { struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE) @@ -2901,32 +3013,6 @@ iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm, peer_addr, action); } -static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm, - struct iwl_mvm_sta *mvm_sta) -{ - struct iwl_mvm_tid_data *tid_data; - struct sk_buff *skb; - int i; - - spin_lock_bh(&mvm_sta->lock); - for (i = 0; i <= IWL_MAX_TID_COUNT; i++) { - tid_data = &mvm_sta->tid_data[i]; - - while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames))) { - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - - /* - * The first deferred frame should've stopped the MAC - * queues, so we should never get a second deferred - * frame for the RA/TID. - */ - iwl_mvm_start_mac_queues(mvm, BIT(info->hw_queue)); - ieee80211_free_txskb(mvm->hw, skb); - } - } - spin_unlock_bh(&mvm_sta->lock); -} - static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -2960,7 +3046,6 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, */ if (old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_NOTEXIST) { - iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta); flush_work(&mvm->add_stream_wk); /* @@ -3007,6 +3092,8 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, NL80211_TDLS_SETUP); } + + sta->max_rc_amsdu_len = 1; } else if (old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_AUTH) { /* @@ -3076,6 +3163,16 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, NL80211_TDLS_DISABLE_LINK); } + + /* Remove STA key if this is an AP using WEP */ + if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key) { + int rm_ret = iwl_mvm_remove_sta_key(mvm, vif, sta, + mvmvif->ap_wep_key); + + if (!ret) + ret = rm_ret; + } + } else { ret = -EIO; } @@ -3471,14 +3568,20 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, .id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)), .sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id), - /* Set the channel info data */ - .channel_info.band = (channel->band == NL80211_BAND_2GHZ) ? - PHY_BAND_24 : PHY_BAND_5, - .channel_info.channel = channel->hw_value, - .channel_info.width = PHY_VHT_CHANNEL_MODE20, - /* Set the time and duration */ - .apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg)), - }; + }; + struct iwl_hs20_roc_req_tail *tail = iwl_mvm_chan_info_cmd_tail(mvm, + &aux_roc_req.channel_info); + u16 len = sizeof(aux_roc_req) - iwl_mvm_chan_info_padding(mvm); + + /* Set the channel info data */ + iwl_mvm_set_chan_info(mvm, &aux_roc_req.channel_info, channel->hw_value, + (channel->band == NL80211_BAND_2GHZ) ? 
+ PHY_BAND_24 : PHY_BAND_5, + PHY_VHT_CHANNEL_MODE20, + 0); + + /* Set the time and duration */ + tail->apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg)); delay = AUX_ROC_MIN_DELAY; req_dur = MSEC_TO_TU(duration); @@ -3503,15 +3606,15 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, } } - aux_roc_req.duration = cpu_to_le32(req_dur); - aux_roc_req.apply_time_max_delay = cpu_to_le32(delay); + tail->duration = cpu_to_le32(req_dur); + tail->apply_time_max_delay = cpu_to_le32(delay); IWL_DEBUG_TE(mvm, "ROC: Requesting to remain on channel %u for %ums (requested = %ums, max_delay = %ums, dtim_interval = %ums)\n", channel->hw_value, req_dur, duration, delay, dtim_interval); /* Set the node address */ - memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN); + memcpy(tail->node_addr, vif->addr, ETH_ALEN); lockdep_assert_held(&mvm->mutex); @@ -3542,7 +3645,7 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, ARRAY_SIZE(time_event_response), iwl_mvm_rx_aux_roc, te_data); - res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, sizeof(aux_roc_req), + res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, len, &aux_roc_req); if (res) { @@ -4696,8 +4799,35 @@ static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw) mutex_unlock(&mvm->mutex); } +static bool iwl_mvm_can_hw_csum(struct sk_buff *skb) +{ + u8 protocol = ip_hdr(skb)->protocol; + + if (!IS_ENABLED(CONFIG_INET)) + return false; + + return protocol == IPPROTO_TCP || protocol == IPPROTO_UDP; +} + +static bool iwl_mvm_mac_can_aggregate(struct ieee80211_hw *hw, + struct sk_buff *head, + struct sk_buff *skb) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + /* For now, don't aggregate IPv6 in AMSDU */ + if (skb->protocol != htons(ETH_P_IP)) + return false; + + if (!iwl_mvm_is_csum_supported(mvm)) + return true; + + return iwl_mvm_can_hw_csum(skb) == iwl_mvm_can_hw_csum(head); +} + const struct ieee80211_ops iwl_mvm_hw_ops = { .tx = iwl_mvm_mac_tx, + .wake_tx_queue = iwl_mvm_mac_wake_tx_queue, .ampdu_action = iwl_mvm_mac_ampdu_action, .start = iwl_mvm_mac_start, .reconfig_complete = iwl_mvm_mac_reconfig_complete, @@ -4771,6 +4901,7 @@ const struct ieee80211_ops iwl_mvm_hw_ops = { #endif .get_survey = iwl_mvm_mac_get_survey, .sta_statistics = iwl_mvm_mac_sta_statistics, + .can_aggregate_in_amsdu = iwl_mvm_mac_can_aggregate, #ifdef CONFIG_IWLWIFI_DEBUGFS .sta_add_debugfs = iwl_mvm_sta_add_debugfs, #endif diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 1aa690e081ff..12e9ecc3ee27 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -83,7 +83,6 @@ #include "sta.h" #include "fw-api.h" #include "constants.h" -#include "tof.h" #include "fw/runtime.h" #include "fw/dbg.h" #include "fw/acpi.h" @@ -95,6 +94,8 @@ /* RSSI offset for WkP */ #define IWL_RSSI_OFFSET 50 #define IWL_MVM_MISSED_BEACONS_THRESHOLD 8 +#define IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG 16 + /* A TimeUnit is 1024 microseconds */ #define MSEC_TO_TU(_msec) (_msec*1000/1024) @@ -299,17 +300,38 @@ enum iwl_bt_force_ant_mode { }; /** + * enum iwl_mvm_low_latency_force - low latency force mode set by debugfs + * @LOW_LATENCY_FORCE_UNSET: unset force mode + * @LOW_LATENCY_FORCE_ON: force low latency on + * @LOW_LATENCY_FORCE_OFF: force low latency off + * @NUM_LOW_LATENCY_FORCE: number of force modes + */ +enum iwl_mvm_low_latency_force { + LOW_LATENCY_FORCE_UNSET, + LOW_LATENCY_FORCE_ON, + LOW_LATENCY_FORCE_OFF, + NUM_LOW_LATENCY_FORCE +}; + +/** * enum 
iwl_mvm_low_latency_cause - low latency set causes * @LOW_LATENCY_TRAFFIC: indicates low latency traffic was detected * @LOW_LATENCY_DEBUGFS: low latency mode set from debugfs * @LOW_LATENCY_VCMD: low latency mode set from vendor command * @LOW_LATENCY_VIF_TYPE: low latency mode set because of vif type (ap) + * @LOW_LATENCY_DEBUGFS_FORCE_ENABLE: indicates that force mode is enabled; + * the actual set/unset is done with LOW_LATENCY_DEBUGFS_FORCE + * @LOW_LATENCY_DEBUGFS_FORCE: low latency force mode from debugfs; + * set this together with the LOW_LATENCY_DEBUGFS_FORCE_ENABLE flag + * in low_latency. */ enum iwl_mvm_low_latency_cause { LOW_LATENCY_TRAFFIC = BIT(0), LOW_LATENCY_DEBUGFS = BIT(1), LOW_LATENCY_VCMD = BIT(2), LOW_LATENCY_VIF_TYPE = BIT(3), + LOW_LATENCY_DEBUGFS_FORCE_ENABLE = BIT(4), + LOW_LATENCY_DEBUGFS_FORCE = BIT(5), }; /** @@ -360,8 +382,10 @@ struct iwl_probe_resp_data { * @pm_enabled - Indicate if MAC power management is allowed * @monitor_active: indicates that monitor context is configured, and that the * interface should get quota etc. - * @low_latency: indicates low latency is set, see - * enum &iwl_mvm_low_latency_cause for causes. + * @low_latency: bit flags for low latency; + * see enum &iwl_mvm_low_latency_cause for causes. + * @low_latency_actual: boolean, indicates low latency is set, + * as a result of the low_latency bit flags, taking force into account. * @ps_disabled: indicates that this interface requires PS to be disabled * @queue_params: QoS params for this MAC * @bcast_sta: station used for broadcast packets. Used by the following @@ -393,7 +417,8 @@ struct iwl_mvm_vif { bool ap_ibss_active; bool pm_enabled; bool monitor_active; - u8 low_latency; + u8 low_latency: 6; + u8 low_latency_actual: 1; bool ps_disabled; struct iwl_mvm_vif_bf_data bf_data; @@ -778,6 +803,39 @@ struct iwl_mvm_geo_profile { u8 values[ACPI_GEO_TABLE_SIZE]; }; +struct iwl_mvm_txq { + struct list_head list; + u16 txq_id; + atomic_t tx_request; + bool stopped; +}; + +static inline struct iwl_mvm_txq * +iwl_mvm_txq_from_mac80211(struct ieee80211_txq *txq) +{ + return (void *)txq->drv_priv; +} + +static inline struct iwl_mvm_txq * +iwl_mvm_txq_from_tid(struct ieee80211_sta *sta, u8 tid) +{ + if (tid == IWL_MAX_TID_COUNT) + tid = IEEE80211_NUM_TIDS; + + return (void *)sta->txq[tid]->drv_priv; +} + +/** + * struct iwl_mvm_tvqm_txq_info - maps TVQM hw queue to tid + * + * @sta_id: sta id + * @txq_tid: txq tid + */ +struct iwl_mvm_tvqm_txq_info { + u8 sta_id; + u8 txq_tid; +}; + struct iwl_mvm_dqa_txq_info { u8 ra_sta_id; /* The RA this queue is mapped to, if exists */ bool reserved; /* Is this the TXQ reserved for a STA */ @@ -843,13 +901,13 @@ struct iwl_mvm { u64 on_time_scan; } radio_stats, accu_radio_stats; - u16 hw_queue_to_mac80211[IWL_MAX_TVQM_QUEUES]; - - struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES]; + struct list_head add_stream_txqs; + union { + struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES]; + struct iwl_mvm_tvqm_txq_info tvqm_info[IWL_MAX_TVQM_QUEUES]; + }; struct work_struct add_stream_wk; /* To add streams to queues */ - atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES]; - const char *nvm_file_name; struct iwl_nvm_data *nvm_data; /* NVM sections */ @@ -863,7 +921,6 @@ struct iwl_mvm { /* data related to data path */ struct iwl_rx_phy_info last_phy_info; struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT]; - unsigned long sta_deferred_frames[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)]; u8 rx_ba_sessions; /* configured by mac80211 */ @@ -932,6 +989,7 @@ struct 
iwl_mvm { struct debugfs_blob_wrapper nvm_calib_blob; struct debugfs_blob_wrapper nvm_prod_blob; struct debugfs_blob_wrapper nvm_phy_sku_blob; + struct debugfs_blob_wrapper nvm_reg_blob; struct iwl_mvm_frame_stats drv_rx_stats; spinlock_t drv_stats_lock; @@ -955,6 +1013,7 @@ struct iwl_mvm { u8 refs[IWL_MVM_REF_COUNT]; u8 vif_count; + struct ieee80211_vif __rcu *vif_id_to_mac[NUM_MAC_INDEX_DRIVER]; /* -1 for always, 0 for never, >0 for that many times */ s8 fw_restart; @@ -1090,7 +1149,6 @@ struct iwl_mvm { u32 ciphers[IWL_MVM_NUM_CIPHERS]; struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS]; - struct iwl_mvm_tof_data tof_data; struct ieee80211_vif *nan_vif; #define IWL_MAX_BAID 32 @@ -1106,6 +1164,10 @@ struct iwl_mvm { /* does a monitor vif exist (only one can exist hence bool) */ bool monitor_on; + + /* sniffer data to include in radiotap */ + __le16 cur_aid; + #ifdef CONFIG_ACPI struct iwl_mvm_sar_profile sar_profiles[ACPI_SAR_PROFILE_NUM]; struct iwl_mvm_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES]; @@ -1150,7 +1212,6 @@ enum iwl_mvm_init_status { IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE = BIT(0), IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE = BIT(1), IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE = BIT(2), - IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE = BIT(3), }; static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm) @@ -1207,6 +1268,19 @@ iwl_mvm_sta_from_staid_protected(struct iwl_mvm *mvm, u8 sta_id) return iwl_mvm_sta_from_mac80211(sta); } +static inline struct ieee80211_vif * +iwl_mvm_rcu_dereference_vif_id(struct iwl_mvm *mvm, u8 vif_id, bool rcu) +{ + if (WARN_ON(vif_id >= ARRAY_SIZE(mvm->vif_id_to_mac))) + return NULL; + + if (rcu) + return rcu_dereference(mvm->vif_id_to_mac[vif_id]); + + return rcu_dereference_protected(mvm->vif_id_to_mac[vif_id], + lockdep_is_held(&mvm->mutex)); +} + static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm) { return !iwlwifi_mod_params.d0i3_disable && @@ -1470,6 +1544,11 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, __le16 fc); +void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq); +unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + unsigned int tid); + #ifdef CONFIG_IWLWIFI_DEBUG const char *iwl_mvm_get_tx_fail_reason(u32 status); #else @@ -1599,7 +1678,6 @@ int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool force_assoc_off, const u8 *bssid_override); int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif); -u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif); int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, @@ -1615,8 +1693,6 @@ void iwl_mvm_window_status_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm, struct ieee80211_vif *vif); -unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm, - struct ieee80211_vif *exclude_vif); void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm, @@ -1870,17 +1946,43 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif) * binding, so this has no real impact. 
For now, just return * the current desired low-latency state. */ - return mvmvif->low_latency; + return mvmvif->low_latency_actual; } static inline void iwl_mvm_vif_set_low_latency(struct iwl_mvm_vif *mvmvif, bool set, enum iwl_mvm_low_latency_cause cause) { + u8 new_state; + if (set) mvmvif->low_latency |= cause; else mvmvif->low_latency &= ~cause; + + /* + * if LOW_LATENCY_DEBUGFS_FORCE_ENABLE is enabled no changes are + * allowed to actual mode. + */ + if (mvmvif->low_latency & LOW_LATENCY_DEBUGFS_FORCE_ENABLE && + cause != LOW_LATENCY_DEBUGFS_FORCE_ENABLE) + return; + + if (cause == LOW_LATENCY_DEBUGFS_FORCE_ENABLE && set) + /* + * We enter force state + */ + new_state = !!(mvmvif->low_latency & + LOW_LATENCY_DEBUGFS_FORCE); + else + /* + * Check if any other one set low latency + */ + new_state = !!(mvmvif->low_latency & + ~(LOW_LATENCY_DEBUGFS_FORCE_ENABLE | + LOW_LATENCY_DEBUGFS_FORCE)); + + mvmvif->low_latency_actual = new_state; } /* Return a bitmask with all the hw supported queues, except for the @@ -1895,6 +1997,16 @@ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm) static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm) { lockdep_assert_held(&mvm->mutex); + /* If IWL_MVM_STATUS_HW_RESTART_REQUESTED bit is set then we received + * an assert. Since we failed to bring the interface up, mac80211 + * will not attempt to reconfig the device, + * which handles the dump collection in assert flow, + * so trigger dump collection here. + */ + if (test_and_clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, + &mvm->status)) + iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert, + false, 0); /* calling this function without using dump_start/end since at this * point we already hold the op mode mutex */ @@ -1906,10 +2018,6 @@ static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm) iwl_trans_stop_device(mvm->trans); } -/* Stop/start all mac queues in a given bitmap */ -void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq); -void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq); - /* Re-configure the SCD for a queue that has already been configured */ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id, int tid, int frame_limit, u16 ssn); @@ -2015,4 +2123,59 @@ void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw, struct dentry *dir); #endif +/* Channel info utils */ +static inline bool iwl_mvm_has_ultra_hb_channel(struct iwl_mvm *mvm) +{ + return fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS); +} + +static inline void *iwl_mvm_chan_info_cmd_tail(struct iwl_mvm *mvm, + struct iwl_fw_channel_info *ci) +{ + return (u8 *)ci + (iwl_mvm_has_ultra_hb_channel(mvm) ? + sizeof(struct iwl_fw_channel_info) : + sizeof(struct iwl_fw_channel_info_v1)); +} + +static inline size_t iwl_mvm_chan_info_padding(struct iwl_mvm *mvm) +{ + return iwl_mvm_has_ultra_hb_channel(mvm) ? 
0 : + sizeof(struct iwl_fw_channel_info) - + sizeof(struct iwl_fw_channel_info_v1); +} + +static inline void iwl_mvm_set_chan_info(struct iwl_mvm *mvm, + struct iwl_fw_channel_info *ci, + u32 chan, u8 band, u8 width, + u8 ctrl_pos) +{ + if (iwl_mvm_has_ultra_hb_channel(mvm)) { + ci->channel = cpu_to_le32(chan); + ci->band = band; + ci->width = width; + ci->ctrl_pos = ctrl_pos; + } else { + struct iwl_fw_channel_info_v1 *ci_v1 = + (struct iwl_fw_channel_info_v1 *)ci; + + ci_v1->channel = chan; + ci_v1->band = band; + ci_v1->width = width; + ci_v1->ctrl_pos = ctrl_pos; + } +} + +static inline void +iwl_mvm_set_chan_info_chandef(struct iwl_mvm *mvm, + struct iwl_fw_channel_info *ci, + struct cfg80211_chan_def *chandef) +{ + iwl_mvm_set_chan_info(mvm, ci, chandef->chan->hw_value, + (chandef->chan->band == NL80211_BAND_2GHZ ? + PHY_BAND_24 : PHY_BAND_5), + iwl_mvm_get_channel_width(chandef), + iwl_mvm_get_ctrl_pos(chandef)); +} + #endif /* __IWL_MVM_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index 6fc5cc1f2b5b..7bdbd010ae6b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -179,7 +179,7 @@ static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section, IWL_DEBUG_EEPROM(mvm->trans->dev, "NVM access command failed with status %d (device: %s)\n", ret, mvm->cfg->name); - ret = -EIO; + ret = -ENODATA; } goto exit; } @@ -380,8 +380,12 @@ int iwl_nvm_init(struct iwl_mvm *mvm) /* we override the constness for initial read */ ret = iwl_nvm_read_section(mvm, section, nvm_buffer, size_read); - if (ret < 0) + if (ret == -ENODATA) { + ret = 0; continue; + } + if (ret < 0) + break; size_read += ret; temp = kmemdup(nvm_buffer, ret, GFP_KERNEL); if (!temp) { @@ -412,6 +416,11 @@ int iwl_nvm_init(struct iwl_mvm *mvm) mvm->nvm_phy_sku_blob.data = temp; mvm->nvm_phy_sku_blob.size = ret; break; + case NVM_SECTION_TYPE_REGULATORY_SDP: + case NVM_SECTION_TYPE_REGULATORY: + mvm->nvm_reg_blob.data = temp; + mvm->nvm_reg_blob.size = ret; + break; default: if (section == mvm->cfg->nvm_hw_section_num) { mvm->nvm_hw_blob.data = temp; @@ -454,7 +463,7 @@ int iwl_nvm_init(struct iwl_mvm *mvm) IWL_DEBUG_EEPROM(mvm->trans->dev, "nvm version = %x\n", mvm->nvm_data->nvm_version); - return 0; + return ret < 0 ? 
ret : 0; } struct iwl_mcc_update_resp * diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 30c5127034a0..5e4f8b767d10 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -301,8 +301,6 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { RX_HANDLER_ASYNC_LOCKED), RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif, RX_HANDLER_SYNC), - RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler, - RX_HANDLER_ASYNC_LOCKED), RX_HANDLER_GRP(DEBUG_GROUP, MFU_ASSERT_DUMP_NTF, iwl_mvm_mfu_assert_dump_notif, RX_HANDLER_SYNC), RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF, @@ -329,8 +327,6 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { HCMD_NAME(SCAN_REQ_UMAC), HCMD_NAME(SCAN_ABORT_UMAC), HCMD_NAME(SCAN_COMPLETE_UMAC), - HCMD_NAME(TOF_CMD), - HCMD_NAME(TOF_NOTIFICATION), HCMD_NAME(BA_WINDOW_STATUS_NOTIFICATION_ID), HCMD_NAME(ADD_STA_KEY), HCMD_NAME(ADD_STA), @@ -449,6 +445,7 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = { HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD), HCMD_NAME(STA_HE_CTXT_CMD), HCMD_NAME(RFH_QUEUE_CONFIG_CMD), + HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD), HCMD_NAME(STA_PM_NOTIF), HCMD_NAME(MU_GROUP_MGMT_NOTIF), HCMD_NAME(RX_QUEUES_NOTIFICATION), @@ -464,6 +461,22 @@ static const struct iwl_hcmd_names iwl_mvm_debug_names[] = { /* Please keep this array *SORTED* by hex value. * Access is done through binary search */ +static const struct iwl_hcmd_names iwl_mvm_location_names[] = { + HCMD_NAME(TOF_RANGE_REQ_CMD), + HCMD_NAME(TOF_CONFIG_CMD), + HCMD_NAME(TOF_RANGE_ABORT_CMD), + HCMD_NAME(TOF_RANGE_REQ_EXT_CMD), + HCMD_NAME(TOF_RESPONDER_CONFIG_CMD), + HCMD_NAME(TOF_RESPONDER_DYN_CONFIG_CMD), + HCMD_NAME(TOF_LC_NOTIF), + HCMD_NAME(TOF_RESPONDER_STATS), + HCMD_NAME(TOF_MCSI_DEBUG_NOTIF), + HCMD_NAME(TOF_RANGE_RESPONSE_NOTIF), +}; + +/* Please keep this array *SORTED* by hex value. 
+ * Access is done through binary search + */ static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = { HCMD_NAME(STORED_BEACON_NTF), }; @@ -483,6 +496,7 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = { [MAC_CONF_GROUP] = HCMD_ARR(iwl_mvm_mac_conf_names), [PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names), [DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names), + [LOCATION_GROUP] = HCMD_ARR(iwl_mvm_location_names), [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names), [REGULATORY_AND_NVM_GROUP] = HCMD_ARR(iwl_mvm_regulatory_and_nvm_names), @@ -685,6 +699,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work); INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk); INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk); + INIT_LIST_HEAD(&mvm->add_stream_txqs); spin_lock_init(&mvm->d0i3_tx_lock); spin_lock_init(&mvm->refs_lock); @@ -736,6 +751,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, trans_cfg.rx_buf_size = rb_size_default; } + BUILD_BUG_ON(sizeof(struct iwl_ldbg_config_cmd) != + LDBG_CFG_COMMAND_SIZE); + trans->wide_cmd_header = true; trans_cfg.bc_table_dword = mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_22560; @@ -842,8 +860,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, if (iwl_mvm_is_d0i3_supported(mvm)) iwl_trans_unref(mvm->trans); - iwl_mvm_tof_init(mvm); - iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx); return op_mode; @@ -909,8 +925,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) cancel_delayed_work_sync(&mvm->tcm.work); - iwl_mvm_tof_clean(mvm); - iwl_fw_runtime_free(&mvm->fwrt); mutex_destroy(&mvm->mutex); mutex_destroy(&mvm->d0i3_suspend_mutex); @@ -1079,24 +1093,6 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode, iwl_mvm_rx_common(mvm, rxb, pkt); } -void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq) -{ - int q; - - if (WARN_ON_ONCE(!mq)) - return; - - for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) { - if (atomic_inc_return(&mvm->mac80211_queue_stop_count[q]) > 1) { - IWL_DEBUG_TX_QUEUES(mvm, - "mac80211 %d already stopped\n", q); - continue; - } - - ieee80211_stop_queue(mvm->hw, q); - } -} - static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode, const struct iwl_device_cmd *cmd) { @@ -1109,38 +1105,66 @@ static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode, iwl_trans_block_txq_ptrs(mvm->trans, false); } -static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue) +static void iwl_mvm_queue_state_change(struct iwl_op_mode *op_mode, + int hw_queue, bool start) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - unsigned long mq = mvm->hw_queue_to_mac80211[hw_queue]; + struct ieee80211_sta *sta; + struct ieee80211_txq *txq; + struct iwl_mvm_txq *mvmtxq; + int i; + unsigned long tid_bitmap; + struct iwl_mvm_sta *mvmsta; + u8 sta_id; - iwl_mvm_stop_mac_queues(mvm, mq); -} + sta_id = iwl_mvm_has_new_tx_api(mvm) ? 
+ mvm->tvqm_info[hw_queue].sta_id : + mvm->queue_info[hw_queue].ra_sta_id; -void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq) -{ - int q; - - if (WARN_ON_ONCE(!mq)) + if (WARN_ON_ONCE(sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) return; - for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) { - if (atomic_dec_return(&mvm->mac80211_queue_stop_count[q]) > 0) { - IWL_DEBUG_TX_QUEUES(mvm, - "mac80211 %d still stopped\n", q); - continue; - } + rcu_read_lock(); + + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); + if (IS_ERR_OR_NULL(sta)) + goto out; + mvmsta = iwl_mvm_sta_from_mac80211(sta); - ieee80211_wake_queue(mvm->hw, q); + if (iwl_mvm_has_new_tx_api(mvm)) { + int tid = mvm->tvqm_info[hw_queue].txq_tid; + + tid_bitmap = BIT(tid); + } else { + tid_bitmap = mvm->queue_info[hw_queue].tid_bitmap; } + + for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { + int tid = i; + + if (tid == IWL_MAX_TID_COUNT) + tid = IEEE80211_NUM_TIDS; + + txq = sta->txq[tid]; + mvmtxq = iwl_mvm_txq_from_mac80211(txq); + mvmtxq->stopped = !start; + + if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST) + iwl_mvm_mac_itxq_xmit(mvm->hw, txq); + } + +out: + rcu_read_unlock(); } -static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue) +static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue) { - struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - unsigned long mq = mvm->hw_queue_to_mac80211[hw_queue]; + iwl_mvm_queue_state_change(op_mode, hw_queue, false); +} - iwl_mvm_start_mac_queues(mvm, mq); +static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue) +{ + iwl_mvm_queue_state_change(op_mode, hw_queue, true); } static void iwl_mvm_set_rfkill_state(struct iwl_mvm *mvm) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c index 7f5434b34d0d..f369173db11c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c @@ -143,14 +143,11 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm, u8 chains_static, u8 chains_dynamic) { u8 active_cnt, idle_cnt; + struct iwl_phy_context_cmd_tail *tail = + iwl_mvm_chan_info_cmd_tail(mvm, &cmd->ci); /* Set the channel info data */ - cmd->ci.band = (chandef->chan->band == NL80211_BAND_2GHZ ? 
- PHY_BAND_24 : PHY_BAND_5); - - cmd->ci.channel = chandef->chan->hw_value; - cmd->ci.width = iwl_mvm_get_channel_width(chandef); - cmd->ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef); + iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef); /* Set rx the chains */ idle_cnt = chains_static; @@ -168,17 +165,17 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm, active_cnt = 2; } - cmd->rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) << + tail->rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) << PHY_RX_CHAIN_VALID_POS); - cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS); - cmd->rxchain_info |= cpu_to_le32(active_cnt << + tail->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS); + tail->rxchain_info |= cpu_to_le32(active_cnt << PHY_RX_CHAIN_MIMO_CNT_POS); #ifdef CONFIG_IWLWIFI_DEBUGFS if (unlikely(mvm->dbgfs_rx_phyinfo)) - cmd->rxchain_info = cpu_to_le32(mvm->dbgfs_rx_phyinfo); + tail->rxchain_info = cpu_to_le32(mvm->dbgfs_rx_phyinfo); #endif - cmd->txchain_info = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm)); + tail->txchain_info = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm)); } /* @@ -195,6 +192,7 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm, { struct iwl_phy_context_cmd cmd; int ret; + u16 len = sizeof(cmd) - iwl_mvm_chan_info_padding(mvm); /* Set the command header fields */ iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, action, apply_time); @@ -203,9 +201,7 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm, iwl_mvm_phy_ctxt_cmd_data(mvm, &cmd, chandef, chains_static, chains_dynamic); - ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, 0, - sizeof(struct iwl_phy_context_cmd), - &cmd); + ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, 0, len, &cmd); if (ret) IWL_ERR(mvm, "PHY ctxt cmd error. ret=%d\n", ret); return ret; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c index dabbc04853ac..a28283ff7295 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c @@ -149,14 +149,9 @@ static u16 rs_fw_set_config_flags(struct iwl_mvm *mvm, if (he_cap && he_cap->has_he && (he_cap->he_cap_elem.phy_cap_info[3] & - IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK)) { + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK)) flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK; - if (he_cap->he_cap_elem.phy_cap_info[3] & - IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2) - flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_2_MSK; - } - return flags; } @@ -320,12 +315,26 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, if (flags & IWL_TLC_NOTIF_FLAG_AMSDU) { u16 size = le32_to_cpu(notif->amsdu_size); + int i; if (WARN_ON(sta->max_amsdu_len < size)) goto out; mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled); mvmsta->max_amsdu_len = size; + sta->max_rc_amsdu_len = mvmsta->max_amsdu_len; + + for (i = 0; i < IWL_MAX_TID_COUNT; i++) { + if (mvmsta->amsdu_enabled & BIT(i)) + sta->max_tid_amsdu_len[i] = + iwl_mvm_max_amsdu_size(mvm, sta, i); + else + /* + * Not so elegant, but this will effectively + * prevent AMSDU on this TID + */ + sta->max_tid_amsdu_len[i] = 1; + } IWL_DEBUG_RATE(mvm, "AMSDU update. 
AMSDU size: %d, AMSDU selected size: %d, AMSDU TID bitmap 0x%X\n", diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 089972280daa..09866f50857a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -1744,6 +1744,7 @@ static void rs_set_amsdu_len(struct iwl_mvm *mvm, struct ieee80211_sta *sta, enum rs_action scale_action) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + int i; /* * In case TLC offload is not active amsdu_enabled is either 0xFFFF @@ -1757,6 +1758,19 @@ static void rs_set_amsdu_len(struct iwl_mvm *mvm, struct ieee80211_sta *sta, mvmsta->amsdu_enabled = 0xFFFF; mvmsta->max_amsdu_len = sta->max_amsdu_len; + sta->max_rc_amsdu_len = mvmsta->max_amsdu_len; + + for (i = 0; i < IWL_MAX_TID_COUNT; i++) { + if (mvmsta->amsdu_enabled) + sta->max_tid_amsdu_len[i] = + iwl_mvm_max_amsdu_size(mvm, sta, i); + else + /* + * Not so elegant, but this will effectively + * prevent AMSDU on this TID + */ + sta->max_tid_amsdu_len[i] = 1; + } } /* @@ -3332,12 +3346,12 @@ static void rs_fill_rates_for_column(struct iwl_mvm *mvm, /* Building the rate table is non trivial. When we're in MIMO2/VHT/80Mhz/SGI * column the rate table should look like this: * - * rate[0] 0x400D019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI - * rate[1] 0x400D019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI - * rate[2] 0x400D018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI - * rate[3] 0x400D018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI - * rate[4] 0x400D017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI - * rate[5] 0x400D017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI + * rate[0] 0x400F019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI + * rate[1] 0x400F019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI + * rate[2] 0x400F018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI + * rate[3] 0x400F018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI + * rate[4] 0x400F017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI + * rate[5] 0x400F017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI * rate[6] 0x4005007 VHT | ANT: A BW: 80Mhz MCS: 7 NSS: 1 NGI * rate[7] 0x4009006 VHT | ANT: B BW: 80Mhz MCS: 6 NSS: 1 NGI * rate[8] 0x4005005 VHT | ANT: A BW: 80Mhz MCS: 5 NSS: 1 NGI diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index 6653a238f32e..235ab26ca429 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -599,8 +599,8 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac, * data copied into the "data" struct, but rather the data from * the notification directly. 
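 * (With the new RX stats API this is struct mvm_statistics_general; firmware without that API still uses the v10/v11 layouts that iwl_mvm_handle_rx_statistics() sizes against further down.)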
*/ - if (iwl_mvm_is_cdb_supported(mvm)) { - struct mvm_statistics_general_cdb *general = + if (iwl_mvm_has_new_rx_stats_api(mvm)) { + struct mvm_statistics_general *general = data->general; mvmvif->beacon_stats.num_beacons = @@ -723,7 +723,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, else expected_size = sizeof(struct iwl_notif_statistics_v10); } else { - expected_size = sizeof(struct iwl_notif_statistics_cdb); + expected_size = sizeof(struct iwl_notif_statistics); } if (WARN_ONCE(iwl_rx_packet_payload_len(pkt) != expected_size, @@ -753,7 +753,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, flags = stats->flag; } else { - struct iwl_notif_statistics_cdb *stats = (void *)&pkt->data; + struct iwl_notif_statistics *stats = (void *)&pkt->data; data.mac_id = stats->rx.general.mac_id; data.beacon_filter_average_energy = @@ -792,7 +792,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, bytes = (void *)&v11->load_stats.byte_count; air_time = (void *)&v11->load_stats.air_time; } else { - struct iwl_notif_statistics_cdb *stats = (void *)&pkt->data; + struct iwl_notif_statistics *stats = (void *)&pkt->data; energy = (void *)&stats->load_stats.avg_energy; bytes = (void *)&stats->load_stats.byte_count; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 7bd8676508f5..2c56f73d688e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -192,27 +192,48 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr, } } +static void iwl_mvm_add_rtap_sniffer_config(struct iwl_mvm *mvm, + struct sk_buff *skb) +{ + struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); + struct ieee80211_vendor_radiotap *radiotap; + int size = sizeof(*radiotap) + sizeof(__le16); + + if (!mvm->cur_aid) + return; + + radiotap = skb_put(skb, size); + radiotap->align = 1; + /* Intel OUI */ + radiotap->oui[0] = 0xf6; + radiotap->oui[1] = 0x54; + radiotap->oui[2] = 0x25; + /* radiotap sniffer config sub-namespace */ + radiotap->subns = 1; + radiotap->present = 0x1; + radiotap->len = size - sizeof(*radiotap); + radiotap->pad = 0; + + /* fill the data now */ + memcpy(radiotap->data, &mvm->cur_aid, sizeof(mvm->cur_aid)); + + rx_status->flag |= RX_FLAG_RADIOTAP_VENDOR_DATA; +} + /* iwl_mvm_pass_packet_to_mac80211 - passes the packet for mac80211 */ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, struct napi_struct *napi, struct sk_buff *skb, int queue, - struct ieee80211_sta *sta) + struct ieee80211_sta *sta, + bool csi) { struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); if (!(rx_status->flag & RX_FLAG_NO_PSDU) && - iwl_mvm_check_pn(mvm, skb, queue, sta)) { + iwl_mvm_check_pn(mvm, skb, queue, sta)) kfree_skb(skb); - } else { - unsigned int radiotap_len = 0; - - if (rx_status->flag & RX_FLAG_RADIOTAP_HE) - radiotap_len += sizeof(struct ieee80211_radiotap_he); - if (rx_status->flag & RX_FLAG_RADIOTAP_HE_MU) - radiotap_len += sizeof(struct ieee80211_radiotap_he_mu); - __skb_push(skb, radiotap_len); + else ieee80211_rx_napi(mvm->hw, sta, skb, napi); - } } static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, @@ -473,7 +494,7 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm, while ((skb = __skb_dequeue(skb_list))) { iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, reorder_buf->queue, - sta); + sta, false); reorder_buf->num_stored--; } } @@ -666,6 +687,8 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, * This 
also covers the case of receiving a Block Ack Request * outside a BA session; we'll pass it to mac80211 and that * then sends a delBA action frame. + * This also covers pure monitor mode, in which case we won't + * have any BA sessions. */ if (baid == IWL_RX_REORDER_DATA_INVALID_BAID) return false; @@ -1158,14 +1181,12 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb, /* temporarily hide the radiotap data */ __skb_pull(skb, radiotap_len); - if (phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_SU) { - /* report the AMPDU-EOF bit on single frames */ - if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) { - rx_status->flag |= RX_FLAG_AMPDU_DETAILS; - rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; - if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_HE_DELIM_EOF)) - rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT; - } + /* report the AMPDU-EOF bit on single frames */ + if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) { + rx_status->flag |= RX_FLAG_AMPDU_DETAILS; + rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; + if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_HE_DELIM_EOF)) + rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT; } if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) @@ -1178,9 +1199,7 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb, bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE; /* toggle is switched whenever new aggregation starts */ - if (toggle_bit != mvm->ampdu_toggle && - (he_type == RATE_MCS_HE_TYPE_MU || - he_type == RATE_MCS_HE_TYPE_SU)) { + if (toggle_bit != mvm->ampdu_toggle) { rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_HE_DELIM_EOF)) rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT; @@ -1314,6 +1333,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, .d4 = desc->phy_data4, .info_type = IWL_RX_PHY_INFO_TYPE_NONE, }; + bool csi = false; if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))) return; @@ -1412,7 +1432,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; } /* set the preamble flag if appropriate */ - if (phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE) + if (rate_n_flags & RATE_MCS_CCK_MSK && + phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE) rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE; if (likely(!(phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) { @@ -1441,14 +1462,23 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE; rx_status->flag |= RX_FLAG_AMPDU_DETAILS; - rx_status->ampdu_reference = mvm->ampdu_ref; - /* toggle is switched whenever new aggregation starts */ + /* + * Toggle is switched whenever new aggregation starts. Make + * sure ampdu_reference is never 0 so we can later use it to + * see if the frame was really part of an A-MPDU or not. 
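+	 * For example, the reference advances 1, 2, 3, ... and when the counter wraps to 0 it is bumped straight back to 1, so a reference of 0 can always be read as "not part of an A-MPDU".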
+ */ if (toggle_bit != mvm->ampdu_toggle) { mvm->ampdu_ref++; + if (mvm->ampdu_ref == 0) + mvm->ampdu_ref++; mvm->ampdu_toggle = toggle_bit; } + rx_status->ampdu_reference = mvm->ampdu_ref; } + if (unlikely(mvm->monitor_on)) + iwl_mvm_add_rtap_sniffer_config(mvm, skb); + rcu_read_lock(); if (desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) { @@ -1602,7 +1632,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb); if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc)) - iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta); + iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, + sta, csi); out: rcu_read_unlock(); } @@ -1705,15 +1736,24 @@ void iwl_mvm_rx_monitor_ndp(struct iwl_mvm *mvm, struct napi_struct *napi, } else if (rate_n_flags & RATE_MCS_VHT_MSK) { u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS; - rx_status->nss = - ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> - RATE_VHT_MCS_NSS_POS) + 1; rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK; rx_status->encoding = RX_ENC_VHT; rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; if (rate_n_flags & RATE_MCS_BF_MSK) rx_status->enc_flags |= RX_ENC_FLAG_BF; - } else if (!(rate_n_flags & RATE_MCS_HE_MSK)) { + /* + * take the nss from the rx_vec since the rate_n_flags has + * only 2 bits for the nss which gives a max of 4 ss but + * there may be up to 8 spatial streams + */ + rx_status->nss = + le32_get_bits(desc->rx_vec[0], + RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK) + 1; + } else if (rate_n_flags & RATE_MCS_HE_MSK) { + rx_status->nss = + le32_get_bits(desc->rx_vec[0], + RX_NO_DATA_RX_VEC0_HE_NSTS_MSK) + 1; + } else { int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, rx_status->band); @@ -1726,7 +1766,7 @@ void iwl_mvm_rx_monitor_ndp(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status->rate_idx = rate; } - iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta); + iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta, false); out: rcu_read_unlock(); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c index d1d76bb9a750..9da0dae78510 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c @@ -7,6 +7,7 @@ * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -28,6 +29,7 @@ * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright (C) 2018 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -64,7 +66,7 @@ struct iwl_mvm_active_iface_iterator_data { struct ieee80211_vif *ignore_vif; u8 sta_vif_ap_sta_id; enum iwl_sf_state sta_vif_state; - int num_active_macs; + u32 num_active_macs; }; /* diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index e28009832da0..c5a01470a3bc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -356,24 +356,16 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue, return ret; } -static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, - int mac80211_queue, u8 tid, u8 flags) +static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + int queue, u8 tid, u8 flags) { struct iwl_scd_txq_cfg_cmd cmd = { .scd_queue = queue, .action = SCD_CFG_DISABLE_QUEUE, }; - bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE; int ret; - if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES)) - return -EINVAL; - if (iwl_mvm_has_new_tx_api(mvm)) { - if (remove_mac_queue) - mvm->hw_queue_to_mac80211[queue] &= - ~BIT(mac80211_queue); - iwl_trans_txq_free(mvm->trans, queue); return 0; @@ -384,36 +376,15 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); - /* - * If there is another TID with the same AC - don't remove the MAC queue - * from the mapping - */ - if (tid < IWL_MAX_TID_COUNT) { - unsigned long tid_bitmap = - mvm->queue_info[queue].tid_bitmap; - int ac = tid_to_mac80211_ac[tid]; - int i; - - for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) { - if (tid_to_mac80211_ac[i] == ac) - remove_mac_queue = false; - } - } - - if (remove_mac_queue) - mvm->hw_queue_to_mac80211[queue] &= - ~BIT(mac80211_queue); - cmd.action = mvm->queue_info[queue].tid_bitmap ? 
SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE; if (cmd.action == SCD_CFG_DISABLE_QUEUE) mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE; IWL_DEBUG_TX_QUEUES(mvm, - "Disabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n", + "Disabling TXQ #%d tids=0x%x\n", queue, - mvm->queue_info[queue].tid_bitmap, - mvm->hw_queue_to_mac80211[queue]); + mvm->queue_info[queue].tid_bitmap); /* If the queue is still enabled - nothing left to do in this func */ if (cmd.action == SCD_CFG_ENABLE_QUEUE) @@ -423,15 +394,19 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, cmd.tid = mvm->queue_info[queue].txq_tid; /* Make sure queue info is correct even though we overwrite it */ - WARN(mvm->queue_info[queue].tid_bitmap || - mvm->hw_queue_to_mac80211[queue], - "TXQ #%d info out-of-sync - mac map=0x%x, tids=0x%x\n", - queue, mvm->hw_queue_to_mac80211[queue], - mvm->queue_info[queue].tid_bitmap); + WARN(mvm->queue_info[queue].tid_bitmap, + "TXQ #%d info out-of-sync - tids=0x%x\n", + queue, mvm->queue_info[queue].tid_bitmap); /* If we are here - the queue is freed and we can zero out these vals */ mvm->queue_info[queue].tid_bitmap = 0; - mvm->hw_queue_to_mac80211[queue] = 0; + + if (sta) { + struct iwl_mvm_txq *mvmtxq = + iwl_mvm_txq_from_tid(sta, tid); + + mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; + } /* Regardless if this is a reserved TXQ for a STA - mark it as false */ mvm->queue_info[queue].reserved = false; @@ -517,9 +492,14 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue) spin_lock_bh(&mvmsta->lock); /* Unmap MAC queues and TIDs from this queue */ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { + struct iwl_mvm_txq *mvmtxq = + iwl_mvm_txq_from_tid(sta, tid); + if (mvmsta->tid_data[tid].state == IWL_AGG_ON) disable_agg_tids |= BIT(tid); mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE; + + mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; } mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */ @@ -541,10 +521,11 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue) } static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue, + struct ieee80211_sta *old_sta, u8 new_sta_id) { struct iwl_mvm_sta *mvmsta; - u8 txq_curr_ac, sta_id, tid; + u8 sta_id, tid; unsigned long disable_agg_tids = 0; bool same_sta; int ret; @@ -554,7 +535,6 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue, if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -EINVAL; - txq_curr_ac = mvm->queue_info[queue].mac80211_ac; sta_id = mvm->queue_info[queue].ra_sta_id; tid = mvm->queue_info[queue].txq_tid; @@ -570,9 +550,7 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue, iwl_mvm_invalidate_sta_queue(mvm, queue, disable_agg_tids, false); - ret = iwl_mvm_disable_txq(mvm, queue, - mvmsta->vif->hw_queue[txq_curr_ac], - tid, 0); + ret = iwl_mvm_disable_txq(mvm, old_sta, queue, tid, 0); if (ret) { IWL_ERR(mvm, "Failed to free inactive queue %d (ret=%d)\n", @@ -662,16 +640,15 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm, * in such a case, otherwise - if no redirection required - it does nothing, * unless the %force param is true. 
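
Condensed, the redirect helper documented here is a stop-drain-reconfigure-resume sequence. A sketch of that control flow with the transport specifics stubbed out (the struct and both helpers are hypothetical):

	#include <stdbool.h>

	struct txq_state {
		bool stopped;	/* gate checked by the TX path */
	};

	extern int drain_hw_queue(int queue);
	extern int configure_fifo(int queue, int fifo);

	static int redirect_queue(struct txq_state *txq, int queue, int new_fifo)
	{
		int ret;

		txq->stopped = true;		/* park new TX for this queue */
		ret = drain_hw_queue(queue);	/* wait out in-flight frames */
		if (!ret)
			ret = configure_fifo(queue, new_fifo);
		txq->stopped = false;		/* resume even if we failed */
		return ret;
	}
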
*/ -static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, - int ac, int ssn, unsigned int wdg_timeout, - bool force) +static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid, + int ac, int ssn, unsigned int wdg_timeout, + bool force, struct iwl_mvm_txq *txq) { struct iwl_scd_txq_cfg_cmd cmd = { .scd_queue = queue, .action = SCD_CFG_DISABLE_QUEUE, }; bool shared_queue; - unsigned long mq; int ret; if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) @@ -695,14 +672,14 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, cmd.sta_id = mvm->queue_info[queue].ra_sta_id; cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac]; cmd.tid = mvm->queue_info[queue].txq_tid; - mq = mvm->hw_queue_to_mac80211[queue]; shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1; IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n", queue, iwl_mvm_ac_to_tx_fifo[ac]); - /* Stop MAC queues and wait for this queue to empty */ - iwl_mvm_stop_mac_queues(mvm, mq); + /* Stop the queue and wait for it to empty */ + txq->stopped = true; + ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue)); if (ret) { IWL_ERR(mvm, "Error draining queue %d before reconfig\n", @@ -743,8 +720,8 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, iwl_trans_txq_set_shared_mode(mvm->trans, queue, true); out: - /* Continue using the MAC queues */ - iwl_mvm_start_mac_queues(mvm, mq); + /* Continue using the queue */ + txq->stopped = false; return ret; } @@ -769,7 +746,7 @@ static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, return -ENOSPC; } -static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue, +static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, u8 sta_id, u8 tid, unsigned int timeout) { int queue, size = IWL_DEFAULT_QUEUE_SIZE; @@ -792,10 +769,7 @@ static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue, IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n", queue, sta_id, tid); - mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue); - IWL_DEBUG_TX_QUEUES(mvm, - "Enabling TXQ #%d (mac80211 map:0x%x)\n", - queue, mvm->hw_queue_to_mac80211[queue]); + IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d\n", queue); return queue; } @@ -805,9 +779,10 @@ static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm, int tid) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_txq *mvmtxq = + iwl_mvm_txq_from_tid(sta, tid); unsigned int wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); - u8 mac_queue = mvmsta->vif->hw_queue[ac]; int queue = -1; lockdep_assert_held(&mvm->mutex); @@ -815,11 +790,16 @@ static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm, IWL_DEBUG_TX_QUEUES(mvm, "Allocating queue for sta %d on tid %d\n", mvmsta->sta_id, tid); - queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid, - wdg_timeout); + queue = iwl_mvm_tvqm_enable_txq(mvm, mvmsta->sta_id, tid, wdg_timeout); if (queue < 0) return queue; + if (sta) { + mvmtxq->txq_id = queue; + mvm->tvqm_info[queue].txq_tid = tid; + mvm->tvqm_info[queue].sta_id = mvmsta->sta_id; + } + IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue); spin_lock_bh(&mvmsta->lock); @@ -829,8 +809,9 @@ static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm, return 0; } -static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue, - int mac80211_queue, u8 sta_id, u8 tid) +static bool iwl_mvm_update_txq_mapping(struct iwl_mvm 
*mvm, + struct ieee80211_sta *sta, + int queue, u8 sta_id, u8 tid) { bool enable_queue = true; @@ -845,14 +826,6 @@ static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue, if (mvm->queue_info[queue].tid_bitmap) enable_queue = false; - if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) { - WARN(mac80211_queue >= - BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]), - "cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n", - mac80211_queue, queue, sta_id, tid); - mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue); - } - mvm->queue_info[queue].tid_bitmap |= BIT(tid); mvm->queue_info[queue].ra_sta_id = sta_id; @@ -866,16 +839,22 @@ static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue, mvm->queue_info[queue].txq_tid = tid; } + if (sta) { + struct iwl_mvm_txq *mvmtxq = + iwl_mvm_txq_from_tid(sta, tid); + + mvmtxq->txq_id = queue; + } + IWL_DEBUG_TX_QUEUES(mvm, - "Enabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n", - queue, mvm->queue_info[queue].tid_bitmap, - mvm->hw_queue_to_mac80211[queue]); + "Enabling TXQ #%d tids=0x%x\n", + queue, mvm->queue_info[queue].tid_bitmap); return enable_queue; } -static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, - int mac80211_queue, u16 ssn, +static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + int queue, u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg, unsigned int wdg_timeout) { @@ -895,8 +874,7 @@ static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, return false; /* Send the enabling command if we need to */ - if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue, - cfg->sta_id, cfg->tid)) + if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid)) return false; inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, @@ -989,9 +967,10 @@ static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue) ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number); - ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, - tid_to_mac80211_ac[tid], ssn, - wdg_timeout, true); + ret = iwl_mvm_redirect_queue(mvm, queue, tid, + tid_to_mac80211_ac[tid], ssn, + wdg_timeout, true, + iwl_mvm_txq_from_tid(sta, tid)); if (ret) { IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue); return; @@ -1068,11 +1047,9 @@ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm, * Remove the ones that did. 
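
The removal loop that follows uses a common kernel idiom: walk a snapshot of the TID bitmap with for_each_set_bit while clearing bits in the live mapping, so the walk is not disturbed by its own updates. In isolation (the struct, bound and field names are made up for the sketch):

	#include <linux/bitops.h>

	#define MAX_TID 8

	struct hw_queue {
		unsigned long inactive_tids;
		unsigned long tid_bitmap;
	};

	static void prune_inactive_tids(struct hw_queue *q)
	{
		unsigned long inactive = q->inactive_tids;	/* snapshot */
		int tid;

		for_each_set_bit(tid, &inactive, MAX_TID + 1)
			q->tid_bitmap &= ~BIT(tid);	/* unmap from live map */
	}
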
*/ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { - int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]; u16 tid_bitmap; mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE; - mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue); mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); tid_bitmap = mvm->queue_info[queue].tid_bitmap; @@ -1105,10 +1082,6 @@ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm, * sure all TIDs have existing corresponding mac queues enabled */ tid_bitmap = mvm->queue_info[queue].tid_bitmap; - for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { - mvm->hw_queue_to_mac80211[queue] |= - BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]); - } /* If the queue is marked as shared - "unshare" it */ if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 && @@ -1136,6 +1109,7 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta) unsigned long unshare_queues = 0; unsigned long changetid_queues = 0; int i, ret, free_queue = -ENOSPC; + struct ieee80211_sta *queue_owner = NULL; lockdep_assert_held(&mvm->mutex); @@ -1201,13 +1175,14 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta) inactive_tid_bitmap, &unshare_queues, &changetid_queues); - if (ret >= 0 && free_queue < 0) + if (ret >= 0 && free_queue < 0) { + queue_owner = sta; free_queue = ret; + } /* only unlock sta lock - we still need the queue info lock */ spin_unlock_bh(&mvmsta->lock); } - rcu_read_unlock(); /* Reconfigure queues requiring reconfiguation */ for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES) @@ -1216,18 +1191,21 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta) iwl_mvm_change_queue_tid(mvm, i); if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) { - ret = iwl_mvm_free_inactive_queue(mvm, free_queue, + ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner, alloc_for_sta); - if (ret) + if (ret) { + rcu_read_unlock(); return ret; + } } + rcu_read_unlock(); + return free_queue; } static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, - struct ieee80211_sta *sta, u8 ac, int tid, - struct ieee80211_hdr *hdr) + struct ieee80211_sta *sta, u8 ac, int tid) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_trans_txq_scd_cfg cfg = { @@ -1238,7 +1216,6 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, }; unsigned int wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); - u8 mac_queue = mvmsta->vif->hw_queue[ac]; int queue = -1; unsigned long disable_agg_tids = 0; enum iwl_mvm_agg_state queue_state; @@ -1257,12 +1234,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number); spin_unlock_bh(&mvmsta->lock); - /* - * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one - * exists - */ - if (!ieee80211_is_data_qos(hdr->frame_control) || - ieee80211_is_qos_nullfunc(hdr->frame_control)) { + if (tid == IWL_MAX_TID_COUNT) { queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, IWL_MVM_DQA_MIN_MGMT_QUEUE, IWL_MVM_DQA_MAX_MGMT_QUEUE); @@ -1341,8 +1313,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, } } - inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue, - ssn, &cfg, wdg_timeout); + inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout); /* * Mark queue as shared in transport if shared @@ -1384,8 +1355,9 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, } } else { /* Redirect queue, if needed */ - ret = 
iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn, - wdg_timeout, false); + ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn, + wdg_timeout, false, + iwl_mvm_txq_from_tid(sta, tid)); if (ret) goto out_err; } @@ -1393,7 +1365,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, return 0; out_err: - iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0); + iwl_mvm_disable_txq(mvm, sta, queue, tid, 0); return ret; } @@ -1406,87 +1378,32 @@ static inline u8 iwl_mvm_tid_to_ac_queue(int tid) return tid_to_mac80211_ac[tid]; } -static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm, - struct ieee80211_sta *sta, int tid) -{ - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; - struct sk_buff *skb; - struct ieee80211_hdr *hdr; - struct sk_buff_head deferred_tx; - u8 mac_queue; - bool no_queue = false; /* Marks if there is a problem with the queue */ - u8 ac; - - lockdep_assert_held(&mvm->mutex); - - skb = skb_peek(&tid_data->deferred_tx_frames); - if (!skb) - return; - hdr = (void *)skb->data; - - ac = iwl_mvm_tid_to_ac_queue(tid); - mac_queue = IEEE80211_SKB_CB(skb)->hw_queue; - - if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE && - iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) { - IWL_ERR(mvm, - "Can't alloc TXQ for sta %d tid %d - dropping frame\n", - mvmsta->sta_id, tid); - - /* - * Mark queue as problematic so later the deferred traffic is - * freed, as we can do nothing with it - */ - no_queue = true; - } - - __skb_queue_head_init(&deferred_tx); - - /* Disable bottom-halves when entering TX path */ - local_bh_disable(); - spin_lock(&mvmsta->lock); - skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx); - mvmsta->deferred_traffic_tid_map &= ~BIT(tid); - spin_unlock(&mvmsta->lock); - - while ((skb = __skb_dequeue(&deferred_tx))) - if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta)) - ieee80211_free_txskb(mvm->hw, skb); - local_bh_enable(); - - /* Wake queue */ - iwl_mvm_start_mac_queues(mvm, BIT(mac_queue)); -} - void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk) { struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, add_stream_wk); - struct ieee80211_sta *sta; - struct iwl_mvm_sta *mvmsta; - unsigned long deferred_tid_traffic; - int sta_id, tid; mutex_lock(&mvm->mutex); iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA); - /* Go over all stations with deferred traffic */ - for_each_set_bit(sta_id, mvm->sta_deferred_frames, - IWL_MVM_STATION_COUNT) { - clear_bit(sta_id, mvm->sta_deferred_frames); - sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], - lockdep_is_held(&mvm->mutex)); - if (IS_ERR_OR_NULL(sta)) - continue; + while (!list_empty(&mvm->add_stream_txqs)) { + struct iwl_mvm_txq *mvmtxq; + struct ieee80211_txq *txq; + u8 tid; - mvmsta = iwl_mvm_sta_from_mac80211(sta); - deferred_tid_traffic = mvmsta->deferred_traffic_tid_map; + mvmtxq = list_first_entry(&mvm->add_stream_txqs, + struct iwl_mvm_txq, list); + + txq = container_of((void *)mvmtxq, struct ieee80211_txq, + drv_priv); + tid = txq->tid; + if (tid == IEEE80211_NUM_TIDS) + tid = IWL_MAX_TID_COUNT; - for_each_set_bit(tid, &deferred_tid_traffic, - IWL_MAX_TID_COUNT + 1) - iwl_mvm_tx_deferred_stream(mvm, sta, tid); + iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid); + list_del_init(&mvmtxq->list); + iwl_mvm_mac_itxq_xmit(mvm->hw, txq); } mutex_unlock(&mvm->mutex); @@ -1542,10 +1459,11 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, * Note that re-enabling aggregations isn't done in this function. 
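
Earlier in this file the add-stream worker changed from scanning a station bitmap to draining a list of TXQs that are waiting for a hardware queue. The drain idiom at its core, reduced (the helpers are hypothetical, and the real loop runs under the driver mutex):

	#include <linux/list.h>

	struct txq_priv {
		struct list_head list;
	};

	extern void allocate_hw_queue(struct txq_priv *t);	/* may fail quietly */
	extern void kick_tx(struct txq_priv *t);

	static void drain_pending_txqs(struct list_head *pending)
	{
		while (!list_empty(pending)) {
			struct txq_priv *t =
				list_first_entry(pending, struct txq_priv, list);

			allocate_hw_queue(t);
			list_del_init(&t->list);	/* _init: re-queueable later */
			kick_tx(t);
		}
	}
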
*/ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm, - struct iwl_mvm_sta *mvm_sta) + struct ieee80211_sta *sta) { - unsigned int wdg_timeout = - iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false); + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + unsigned int wdg = + iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false); int i; struct iwl_trans_txq_scd_cfg cfg = { .sta_id = mvm_sta->sta_id, @@ -1561,23 +1479,18 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i]; int txq_id = tid_data->txq_id; int ac; - u8 mac_queue; if (txq_id == IWL_MVM_INVALID_QUEUE) continue; - skb_queue_head_init(&tid_data->deferred_tx_frames); - ac = tid_to_mac80211_ac[i]; - mac_queue = mvm_sta->vif->hw_queue[ac]; if (iwl_mvm_has_new_tx_api(mvm)) { IWL_DEBUG_TX_QUEUES(mvm, "Re-mapping sta %d tid %d\n", mvm_sta->sta_id, i); - txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, - mvm_sta->sta_id, - i, wdg_timeout); + txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id, + i, wdg); tid_data->txq_id = txq_id; /* @@ -1600,8 +1513,7 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm, "Re-mapping sta %d tid %d to queue %d\n", mvm_sta->sta_id, i, txq_id); - iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg, - wdg_timeout); + iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg); mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY; } } @@ -1691,7 +1603,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, if (ret) goto err; - iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta); + iwl_mvm_realloc_queues_after_restart(mvm, sta); sta_update = true; sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES; goto update_fw; @@ -1724,9 +1636,17 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, * frames until the queue is allocated */ mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE; - skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames); } - mvm_sta->deferred_traffic_tid_map = 0; + + for (i = 0; i < ARRAY_SIZE(sta->txq); i++) { + struct iwl_mvm_txq *mvmtxq = + iwl_mvm_txq_from_mac80211(sta->txq[i]); + + mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; + INIT_LIST_HEAD(&mvmtxq->list); + atomic_set(&mvmtxq->tx_request, 0); + } + mvm_sta->agg_tids = 0; if (iwl_mvm_has_new_rx_api(mvm) && @@ -1861,9 +1781,9 @@ static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id) static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct iwl_mvm_sta *mvm_sta) + struct ieee80211_sta *sta) { - int ac; + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); int i; lockdep_assert_held(&mvm->mutex); @@ -1872,11 +1792,17 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm, if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE) continue; - ac = iwl_mvm_tid_to_ac_queue(i); - iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id, - vif->hw_queue[ac], i, 0); + iwl_mvm_disable_txq(mvm, sta, mvm_sta->tid_data[i].txq_id, i, + 0); mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE; } + + for (i = 0; i < ARRAY_SIZE(sta->txq); i++) { + struct iwl_mvm_txq *mvmtxq = + iwl_mvm_txq_from_mac80211(sta->txq[i]); + + mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; + } } int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm, @@ -1938,7 +1864,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, ret = iwl_mvm_drain_sta(mvm, mvm_sta, false); - iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta); + iwl_mvm_disable_sta_queues(mvm, vif, sta); /* If there is a TXQ still marked as reserved - free it */ 
if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) { @@ -2044,7 +1970,7 @@ static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue, if (iwl_mvm_has_new_tx_api(mvm)) { int tvqm_queue = - iwl_mvm_tvqm_enable_txq(mvm, *queue, sta_id, + iwl_mvm_tvqm_enable_txq(mvm, sta_id, IWL_MAX_TID_COUNT, wdg_timeout); *queue = tvqm_queue; @@ -2057,7 +1983,7 @@ static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue, .frame_limit = IWL_FRAME_LIMIT, }; - iwl_mvm_enable_txq(mvm, *queue, *queue, 0, &cfg, wdg_timeout); + iwl_mvm_enable_txq(mvm, NULL, *queue, 0, &cfg, wdg_timeout); } } @@ -2135,8 +2061,7 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) lockdep_assert_held(&mvm->mutex); - iwl_mvm_disable_txq(mvm, mvm->snif_queue, mvm->snif_queue, - IWL_MAX_TID_COUNT, 0); + iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0); ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id); if (ret) IWL_WARN(mvm, "Failed sending remove station\n"); @@ -2195,8 +2120,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) bsta->tfd_queue_msk |= BIT(queue); - iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, - &cfg, wdg_timeout); + iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout); } if (vif->type == NL80211_IFTYPE_ADHOC) @@ -2215,8 +2139,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) * to firmware so enable queue here - after the station was added */ if (iwl_mvm_has_new_tx_api(mvm)) { - queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0], - bsta->sta_id, + queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id, IWL_MAX_TID_COUNT, wdg_timeout); @@ -2254,7 +2177,7 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm, return; } - iwl_mvm_disable_txq(mvm, queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0); + iwl_mvm_disable_txq(mvm, NULL, queue, IWL_MAX_TID_COUNT, 0); if (iwl_mvm_has_new_tx_api(mvm)) return; @@ -2377,10 +2300,8 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) * Note that this is done here as we want to avoid making DQA * changes in mac80211 layer. */ - if (vif->type == NL80211_IFTYPE_ADHOC) { - vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; - mvmvif->cab_queue = vif->cab_queue; - } + if (vif->type == NL80211_IFTYPE_ADHOC) + mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; /* * While in previous FWs we had to exclude cab queue from TFD queue @@ -2388,9 +2309,9 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) */ if (!iwl_mvm_has_new_tx_api(mvm) && fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) { - iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0, - &cfg, timeout); - msta->tfd_queue_msk |= BIT(vif->cab_queue); + iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg, + timeout); + msta->tfd_queue_msk |= BIT(mvmvif->cab_queue); } ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr, mvmvif->id, mvmvif->color); @@ -2407,15 +2328,14 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) * tfd_queue_mask. 
*/ if (iwl_mvm_has_new_tx_api(mvm)) { - int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue, - msta->sta_id, + int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id, 0, timeout); mvmvif->cab_queue = queue; } else if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) - iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0, - &cfg, timeout); + iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg, + timeout); if (mvmvif->ap_wep_key) { u8 key_offset = iwl_mvm_set_fw_key_idx(mvm); @@ -2446,8 +2366,7 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0); - iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue, - 0, 0); + iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0); ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id); if (ret) @@ -2781,7 +2700,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_tid_data *tid_data; u16 normalized_ssn; - int txq_id; + u16 txq_id; int ret; if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) @@ -2823,17 +2742,24 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, */ txq_id = mvmsta->tid_data[tid].txq_id; if (txq_id == IWL_MVM_INVALID_QUEUE) { - txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, - IWL_MVM_DQA_MIN_DATA_QUEUE, - IWL_MVM_DQA_MAX_DATA_QUEUE); - if (txq_id < 0) { - ret = txq_id; + ret = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, + IWL_MVM_DQA_MIN_DATA_QUEUE, + IWL_MVM_DQA_MAX_DATA_QUEUE); + if (ret < 0) { IWL_ERR(mvm, "Failed to allocate agg queue\n"); goto out; } + txq_id = ret; + /* TXQ hasn't yet been enabled, so mark it only as reserved */ mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED; + } else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) { + ret = -ENXIO; + IWL_ERR(mvm, "tid_id %d out of range (0, %d)!\n", + tid, IWL_MAX_HW_QUEUES - 1); + goto out; + } else if (unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED)) { ret = -ENXIO; @@ -2976,8 +2902,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, } if (alloc_queue) - iwl_mvm_enable_txq(mvm, queue, - vif->hw_queue[tid_to_mac80211_ac[tid]], ssn, + iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout); /* Send ADD_STA command to enable aggs only if the queue isn't shared */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index d52cd888f77d..0614296244b3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -297,7 +297,6 @@ enum iwl_mvm_agg_state { /** * struct iwl_mvm_tid_data - holds the states for each RA / TID - * @deferred_tx_frames: deferred TX frames for this RA/TID * @seq_number: the next WiFi sequence number to use * @next_reclaimed: the WiFi sequence number of the next packet to be acked. * This is basically (last acked packet++). 
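
The sta.h hunks here and below delete the last traces of the deferred-frame scheme; its replacement state lives in the driver-private area that mac80211 reserves at the end of each struct ieee80211_txq. A sketch of that overlay and the two conversions the new code relies on (the struct is reduced; the real iwl_mvm_txq carries more fields):

	#include <linux/list.h>
	#include <net/mac80211.h>

	struct txq_priv {
		struct list_head list;	/* pending-allocation linkage */
		atomic_t tx_request;
		u16 txq_id;		/* HW queue, invalid until allocated */
		bool stopped;		/* parked while being redirected */
	};

	static inline struct txq_priv *txq_to_priv(struct ieee80211_txq *txq)
	{
		return (void *)txq->drv_priv;
	}

	static inline struct ieee80211_txq *priv_to_txq(struct txq_priv *priv)
	{
		return container_of((void *)priv, struct ieee80211_txq,
				    drv_priv);
	}

mac80211 sizes drv_priv from hw->txq_data_size, so the overlay is safe as long as the driver registers the sizeof() of its private struct there.
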
@@ -318,7 +317,6 @@ enum iwl_mvm_agg_state { * tpt_meas_start */ struct iwl_mvm_tid_data { - struct sk_buff_head deferred_tx_frames; u16 seq_number; u16 next_reclaimed; /* The rest is Tx AGG related */ @@ -427,8 +425,6 @@ struct iwl_mvm_sta { struct iwl_mvm_key_pn __rcu *ptk_pn[4]; struct iwl_mvm_rxq_dup_data *dup_data; - u16 deferred_traffic_tid_map; - u8 reserved_queue; /* Temporary, until the new TLC will control the Tx protection */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c index e02f4eb20359..859aa5a4e6b5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c @@ -399,6 +399,9 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm, struct ieee80211_tx_info *info; struct ieee80211_hdr *hdr; struct iwl_tdls_channel_switch_cmd cmd = {0}; + struct iwl_tdls_channel_switch_cmd_tail *tail = + iwl_mvm_chan_info_cmd_tail(mvm, &cmd.ci); + u16 len = sizeof(cmd) - iwl_mvm_chan_info_padding(mvm); int ret; lockdep_assert_held(&mvm->mutex); @@ -414,9 +417,9 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm, } cmd.switch_type = type; - cmd.timing.frame_timestamp = cpu_to_le32(timestamp); - cmd.timing.switch_time = cpu_to_le32(switch_time); - cmd.timing.switch_timeout = cpu_to_le32(switch_timeout); + tail->timing.frame_timestamp = cpu_to_le32(timestamp); + tail->timing.switch_time = cpu_to_le32(switch_time); + tail->timing.switch_timeout = cpu_to_le32(switch_timeout); rcu_read_lock(); sta = ieee80211_find_sta(vif, peer); @@ -448,21 +451,16 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm, } } - if (chandef) { - cmd.ci.band = (chandef->chan->band == NL80211_BAND_2GHZ ? - PHY_BAND_24 : PHY_BAND_5); - cmd.ci.channel = chandef->chan->hw_value; - cmd.ci.width = iwl_mvm_get_channel_width(chandef); - cmd.ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef); - } + if (chandef) + iwl_mvm_set_chan_info_chandef(mvm, &cmd.ci, chandef); /* keep quota calculation simple for now - 50% of DTIM for TDLS */ - cmd.timing.max_offchan_duration = + tail->timing.max_offchan_duration = cpu_to_le32(TU_TO_US(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int) / 2); /* Switch time is the first element in the switch-timing IE. 
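
This tdls.c change (time-event.c below gets the same treatment) turns a command that embeds channel info into a variable-length PDU: a "tail" whose offset, and a command length, both shrink when the firmware expects the unpadded channel-info layout. Reduced to the arithmetic (all struct shapes here are assumptions for illustration):

	#include <linux/types.h>

	struct chan_info {
		u32 band, channel, width, ctrl_pos;	/* may end in padding */
	};

	struct cs_tail {
		u32 timing[4];		/* stand-in for timing + frame data */
	};

	struct cs_cmd {
		u32 switch_type;
		struct chan_info ci;
		struct cs_tail tail;
	};

	static struct cs_tail *cmd_tail(struct cs_cmd *cmd, size_t padding)
	{
		/* older FW: everything after 'ci' moves up by 'padding' */
		return (void *)((u8 *)&cmd->tail - padding);
	}

	static u16 cmd_len(size_t padding)
	{
		return sizeof(struct cs_cmd) - padding;	/* shorter PDU */
	}
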
*/ - cmd.frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2); + tail->frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2); info = IEEE80211_SKB_CB(skb); hdr = (void *)skb->data; @@ -472,20 +470,19 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm, ret = -EINVAL; goto out; } - iwl_mvm_set_tx_cmd_ccmp(info, &cmd.frame.tx_cmd); + iwl_mvm_set_tx_cmd_ccmp(info, &tail->frame.tx_cmd); } - iwl_mvm_set_tx_cmd(mvm, skb, &cmd.frame.tx_cmd, info, + iwl_mvm_set_tx_cmd(mvm, skb, &tail->frame.tx_cmd, info, mvmsta->sta_id); - iwl_mvm_set_tx_cmd_rate(mvm, &cmd.frame.tx_cmd, info, sta, + iwl_mvm_set_tx_cmd_rate(mvm, &tail->frame.tx_cmd, info, sta, hdr->frame_control); rcu_read_unlock(); - memcpy(cmd.frame.data, skb->data, skb->len); + memcpy(tail->frame.data, skb->data, skb->len); - ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0, - sizeof(cmd), &cmd); + ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0, len, &cmd); if (ret) { IWL_ERR(mvm, "Failed to send TDLS_CHANNEL_SWITCH cmd: %d\n", ret); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index e1a6f4e22253..5b34100e9099 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -334,6 +334,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, switch (te_data->vif->type) { case NL80211_IFTYPE_P2P_DEVICE: ieee80211_remain_on_channel_expired(mvm->hw); + set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status); iwl_mvm_roc_finished(mvm); break; case NL80211_IFTYPE_STATION: @@ -686,6 +687,8 @@ static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm *mvm, struct iwl_mvm_time_event_data *te_data) { struct iwl_hs20_roc_req aux_cmd = {}; + u16 len = sizeof(aux_cmd) - iwl_mvm_chan_info_padding(mvm); + u32 uid; int ret; @@ -699,7 +702,7 @@ static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm *mvm, IWL_DEBUG_TE(mvm, "Removing BSS AUX ROC TE 0x%x\n", le32_to_cpu(aux_cmd.event_unique_id)); ret = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, - sizeof(aux_cmd), &aux_cmd); + len, &aux_cmd); if (WARN_ON(ret)) return; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tof.c b/drivers/net/wireless/intel/iwlwifi/mvm/tof.c deleted file mode 100644 index 01e0a999063b..000000000000 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tof.c +++ /dev/null @@ -1,305 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2015 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2015 Intel Deutschland GmbH - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ -#include "mvm.h" -#include "fw/api/tof.h" - -#define IWL_MVM_TOF_RANGE_REQ_MAX_ID 256 - -void iwl_mvm_tof_init(struct iwl_mvm *mvm) -{ - struct iwl_mvm_tof_data *tof_data = &mvm->tof_data; - - if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) - return; - - memset(tof_data, 0, sizeof(*tof_data)); - - tof_data->tof_cfg.sub_grp_cmd_id = cpu_to_le32(TOF_CONFIG_CMD); - -#ifdef CONFIG_IWLWIFI_DEBUGFS - if (IWL_MVM_TOF_IS_RESPONDER) { - tof_data->responder_cfg.sub_grp_cmd_id = - cpu_to_le32(TOF_RESPONDER_CONFIG_CMD); - tof_data->responder_cfg.sta_id = IWL_MVM_INVALID_STA; - } -#endif - - tof_data->range_req.sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_REQ_CMD); - tof_data->range_req.req_timeout = 1; - tof_data->range_req.initiator = 1; - tof_data->range_req.report_policy = 3; - - tof_data->range_req_ext.sub_grp_cmd_id = - cpu_to_le32(TOF_RANGE_REQ_EXT_CMD); - - mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID; - mvm->init_status |= IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE; -} - -void iwl_mvm_tof_clean(struct iwl_mvm *mvm) -{ - struct iwl_mvm_tof_data *tof_data = &mvm->tof_data; - - if (!fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_TOF_SUPPORT) || - !(mvm->init_status & IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE)) - return; - - memset(tof_data, 0, sizeof(*tof_data)); - mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID; - mvm->init_status &= ~IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE; -} - -static void iwl_tof_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - bool *enabled = _data; - - /* non bss vif exists */ - if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) - *enabled = false; -} - -int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm) -{ - struct iwl_tof_config_cmd *cmd = &mvm->tof_data.tof_cfg; - bool enabled; - - lockdep_assert_held(&mvm->mutex); - - if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) - return -EINVAL; - - 
ieee80211_iterate_active_interfaces_atomic(mvm->hw, - IEEE80211_IFACE_ITER_NORMAL, - iwl_tof_iterator, &enabled); - if (!enabled) { - IWL_DEBUG_INFO(mvm, "ToF is not supported (non bss vif)\n"); - return -EINVAL; - } - - mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID; - return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD, - IWL_ALWAYS_LONG_GROUP, 0), - 0, sizeof(*cmd), cmd); -} - -int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id) -{ - struct iwl_tof_range_abort_cmd cmd = { - .sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_ABORT_CMD), - .request_id = id, - }; - - lockdep_assert_held(&mvm->mutex); - - if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) - return -EINVAL; - - if (id != mvm->tof_data.active_range_request) { - IWL_ERR(mvm, "Invalid range request id %d (active %d)\n", - id, mvm->tof_data.active_range_request); - return -EINVAL; - } - - /* after abort is sent there's no active request anymore */ - mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID; - - return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD, - IWL_ALWAYS_LONG_GROUP, 0), - 0, sizeof(cmd), &cmd); -} - -#ifdef CONFIG_IWLWIFI_DEBUGFS -int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) -{ - struct iwl_tof_responder_config_cmd *cmd = &mvm->tof_data.responder_cfg; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - - lockdep_assert_held(&mvm->mutex); - - if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) - return -EINVAL; - - if (vif->p2p || vif->type != NL80211_IFTYPE_AP || - !mvmvif->ap_ibss_active) { - IWL_ERR(mvm, "Cannot start responder, not in AP mode\n"); - return -EIO; - } - - cmd->sta_id = mvmvif->bcast_sta.sta_id; - memcpy(cmd->bssid, vif->addr, ETH_ALEN); - return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD, - IWL_ALWAYS_LONG_GROUP, 0), - 0, sizeof(*cmd), cmd); -} -#endif - -int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) -{ - struct iwl_host_cmd cmd = { - .id = iwl_cmd_id(TOF_CMD, IWL_ALWAYS_LONG_GROUP, 0), - .len = { sizeof(mvm->tof_data.range_req), }, - /* no copy because of the command size */ - .dataflags = { IWL_HCMD_DFL_NOCOPY, }, - }; - - lockdep_assert_held(&mvm->mutex); - - if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) - return -EINVAL; - - if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) { - IWL_ERR(mvm, "Cannot send range request, not STA mode\n"); - return -EIO; - } - - /* nesting of range requests is not supported in FW */ - if (mvm->tof_data.active_range_request != - IWL_MVM_TOF_RANGE_REQ_MAX_ID) { - IWL_ERR(mvm, "Cannot send range req, already active req %d\n", - mvm->tof_data.active_range_request); - return -EIO; - } - - mvm->tof_data.active_range_request = mvm->tof_data.range_req.request_id; - - cmd.data[0] = &mvm->tof_data.range_req; - return iwl_mvm_send_cmd(mvm, &cmd); -} - -int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) -{ - lockdep_assert_held(&mvm->mutex); - - if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) - return -EINVAL; - - if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) { - IWL_ERR(mvm, "Cannot send ext range req, not in STA mode\n"); - return -EIO; - } - - return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD, - IWL_ALWAYS_LONG_GROUP, 0), - 0, sizeof(mvm->tof_data.range_req_ext), - &mvm->tof_data.range_req_ext); -} - -static int iwl_mvm_tof_range_resp(struct iwl_mvm *mvm, void *data) -{ - struct iwl_tof_range_rsp_ntfy 
*resp = (void *)data; - - if (resp->request_id != mvm->tof_data.active_range_request) { - IWL_ERR(mvm, "Request id mismatch, got %d, active %d\n", - resp->request_id, mvm->tof_data.active_range_request); - return -EIO; - } - - memcpy(&mvm->tof_data.range_resp, resp, - sizeof(struct iwl_tof_range_rsp_ntfy)); - mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID; - - return 0; -} - -static int iwl_mvm_tof_mcsi_notif(struct iwl_mvm *mvm, void *data) -{ - struct iwl_tof_mcsi_notif *resp = (struct iwl_tof_mcsi_notif *)data; - - IWL_DEBUG_INFO(mvm, "MCSI notification, token %d\n", resp->token); - return 0; -} - -static int iwl_mvm_tof_nb_report_notif(struct iwl_mvm *mvm, void *data) -{ - struct iwl_tof_neighbor_report *report = - (struct iwl_tof_neighbor_report *)data; - - IWL_DEBUG_INFO(mvm, "NB report, bssid %pM, token %d, status 0x%x\n", - report->bssid, report->request_token, report->status); - return 0; -} - -void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_tof_gen_resp_cmd *resp = (void *)pkt->data; - - lockdep_assert_held(&mvm->mutex); - - switch (le32_to_cpu(resp->sub_grp_cmd_id)) { - case TOF_RANGE_RESPONSE_NOTIF: - iwl_mvm_tof_range_resp(mvm, resp->data); - break; - case TOF_MCSI_DEBUG_NOTIF: - iwl_mvm_tof_mcsi_notif(mvm, resp->data); - break; - case TOF_NEIGHBOR_REPORT_RSP_NOTIF: - iwl_mvm_tof_nb_report_notif(mvm, resp->data); - break; - default: - IWL_ERR(mvm, "Unknown sub-group command 0x%x\n", - resp->sub_grp_cmd_id); - break; - } -} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tof.h b/drivers/net/wireless/intel/iwlwifi/mvm/tof.h deleted file mode 100644 index 8138d0606c52..000000000000 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tof.h +++ /dev/null @@ -1,89 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2015 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2015 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ -#ifndef __tof_h__ -#define __tof_h__ - -#include "fw/api/tof.h" - -struct iwl_mvm_tof_data { - struct iwl_tof_config_cmd tof_cfg; - struct iwl_tof_range_req_cmd range_req; - struct iwl_tof_range_req_ext_cmd range_req_ext; -#ifdef CONFIG_IWLWIFI_DEBUGFS - struct iwl_tof_responder_config_cmd responder_cfg; -#endif - struct iwl_tof_range_rsp_ntfy range_resp; - u8 last_abort_id; - u16 active_range_request; -}; - -void iwl_mvm_tof_init(struct iwl_mvm *mvm); -void iwl_mvm_tof_clean(struct iwl_mvm *mvm); -int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm); -int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id); -int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm, - struct ieee80211_vif *vif); -void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb); -int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm, - struct ieee80211_vif *vif); -#ifdef CONFIG_IWLWIFI_DEBUGFS -int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm, - struct ieee80211_vif *vif); -#endif -#endif /* __tof_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 995fe2a6abbb..ac62eb8c4b36 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -533,10 +533,11 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, /* * For data packets rate info comes from the fw. Only - * set rate/antenna during connection establishment. + * set rate/antenna during connection establishment or in case + * no station is given. 
*/ - if (sta && (!ieee80211_is_data(hdr->frame_control) || - mvmsta->sta_state < IEEE80211_STA_AUTHORIZED)) { + if (!sta || !ieee80211_is_data(hdr->frame_control) || + mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) { flags |= IWL_TX_FLAGS_CMD_RATE; rate_n_flags = iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, @@ -602,11 +603,12 @@ static void iwl_mvm_skb_prepare_status(struct sk_buff *skb, } static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm, - struct ieee80211_tx_info *info, __le16 fc) + struct ieee80211_tx_info *info, + struct ieee80211_hdr *hdr) { - struct iwl_mvm_vif *mvmvif; - - mvmvif = iwl_mvm_vif_from_mac80211(info->control.vif); + struct iwl_mvm_vif *mvmvif = + iwl_mvm_vif_from_mac80211(info->control.vif); + __le16 fc = hdr->frame_control; switch (info->control.vif->type) { case NL80211_IFTYPE_AP: @@ -625,7 +627,9 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm, (!ieee80211_is_bufferable_mmpdu(fc) || ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc))) return mvm->probe_queue; - if (info->hw_queue == info->control.vif->cab_queue) + + if (!ieee80211_has_order(fc) && !ieee80211_is_probe_req(fc) && + is_multicast_ether_addr(hdr->addr1)) return mvmvif->cab_queue; WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC, @@ -634,8 +638,6 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm, case NL80211_IFTYPE_P2P_DEVICE: if (ieee80211_is_mgmt(fc)) return mvm->p2p_dev_queue; - if (info->hw_queue == info->control.vif->cab_queue) - return mvmvif->cab_queue; WARN_ON_ONCE(1); return mvm->p2p_dev_queue; @@ -713,6 +715,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) u8 sta_id; int hdrlen = ieee80211_hdrlen(hdr->frame_control); __le16 fc = hdr->frame_control; + bool offchannel = IEEE80211_SKB_CB(skb)->flags & + IEEE80211_TX_CTL_TX_OFFCHAN; int queue = -1; memcpy(&info, skb->cb, sizeof(info)); @@ -720,11 +724,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU)) return -1; - if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM && - (!info.control.vif || - info.hw_queue != info.control.vif->cab_queue))) - return -1; - if (info.control.vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(info.control.vif); @@ -737,14 +736,12 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) else sta_id = mvmvif->mcast_sta.sta_id; - queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, - hdr->frame_control); - + queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, hdr); } else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) { queue = mvm->snif_queue; sta_id = mvm->snif_sta.sta_id; } else if (info.control.vif->type == NL80211_IFTYPE_STATION && - info.hw_queue == IWL_MVM_OFFCHANNEL_QUEUE) { + offchannel) { /* * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets * that can be used in 2 different types of vifs, P2P & @@ -758,8 +755,10 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) } } - if (queue < 0) + if (queue < 0) { + IWL_ERR(mvm, "No queue was found. 
Dropping TX\n"); return -1; + } if (unlikely(ieee80211_is_probe_resp(fc))) iwl_mvm_probe_resp_set_noa(mvm, skb); @@ -781,6 +780,35 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) return 0; } +unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, unsigned int tid) +{ + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + enum nl80211_band band = mvmsta->vif->bss_conf.chandef.chan->band; + u8 ac = tid_to_mac80211_ac[tid]; + unsigned int txf; + int lmac = IWL_LMAC_24G_INDEX; + + if (iwl_mvm_is_cdb_supported(mvm) && + band == NL80211_BAND_5GHZ) + lmac = IWL_LMAC_5G_INDEX; + + /* For HE redirect to trigger based fifos */ + if (sta->he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm))) + ac += 4; + + txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac); + + /* + * Don't send an AMSDU that will be longer than the TXF. + * Add a security margin of 256 for the TX command + headers. + * We also want to have the start of the next packet inside the + * fifo to be able to send bursts. + */ + return min_t(unsigned int, mvmsta->max_amsdu_len, + mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256); +} + #ifdef CONFIG_INET static int @@ -850,36 +878,6 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes, return 0; } -static unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm, - struct ieee80211_sta *sta, - unsigned int tid) -{ - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - enum nl80211_band band = mvmsta->vif->bss_conf.chandef.chan->band; - u8 ac = tid_to_mac80211_ac[tid]; - unsigned int txf; - int lmac = IWL_LMAC_24G_INDEX; - - if (iwl_mvm_is_cdb_supported(mvm) && - band == NL80211_BAND_5GHZ) - lmac = IWL_LMAC_5G_INDEX; - - /* For HE redirect to trigger based fifos */ - if (sta->he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm))) - ac += 4; - - txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac); - - /* - * Don't send an AMSDU that will be longer than the TXF. - * Add a security margin of 256 for the TX command + headers. - * We also want to have the start of the next packet inside the - * fifo to be able to send bursts. - */ - return min_t(unsigned int, mvmsta->max_amsdu_len, - mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256); -} - static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, @@ -1002,34 +1000,6 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, } #endif -static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm, - struct iwl_mvm_sta *mvm_sta, u8 tid, - struct sk_buff *skb) -{ - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - u8 mac_queue = info->hw_queue; - struct sk_buff_head *deferred_tx_frames; - - lockdep_assert_held(&mvm_sta->lock); - - mvm_sta->deferred_traffic_tid_map |= BIT(tid); - set_bit(mvm_sta->sta_id, mvm->sta_deferred_frames); - - deferred_tx_frames = &mvm_sta->tid_data[tid].deferred_tx_frames; - - skb_queue_tail(deferred_tx_frames, skb); - - /* - * The first deferred frame should've stopped the MAC queues, so we - * should never get a second deferred frame for the RA/TID. - * In case of GSO the first packet may have been split, so don't warn. 
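
A worked instance of the A-MSDU cap computed by iwl_mvm_max_amsdu_size, now non-static above: assuming a station A-MSDU limit of 7935 bytes and a hypothetical 5120-byte TX FIFO, the cap is min(7935, 5120 - 256) = 4864 bytes, keeping the 256-byte margin for the TX command plus headers so the start of the next packet still fits in the FIFO.
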
- */ - if (skb_queue_len(deferred_tx_frames) == 1) { - iwl_mvm_stop_mac_queues(mvm, BIT(mac_queue)); - schedule_work(&mvm->add_stream_wk); - } -} - /* Check if there are any timed-out TIDs on a given shared TXQ */ static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id) { @@ -1054,7 +1024,12 @@ static void iwl_mvm_tx_airtime(struct iwl_mvm *mvm, int airtime) { int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK; - struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac]; + struct iwl_mvm_tcm_mac *mdata; + + if (mac >= NUM_MAC_INDEX_DRIVER) + return; + + mdata = &mvm->tcm.data[mac]; if (mvm->tcm.paused) return; @@ -1065,14 +1040,21 @@ static void iwl_mvm_tx_airtime(struct iwl_mvm *mvm, mdata->tx.airtime += airtime; } -static void iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm, - struct iwl_mvm_sta *mvmsta, int tid) +static int iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm, + struct iwl_mvm_sta *mvmsta, int tid) { u32 ac = tid_to_mac80211_ac[tid]; int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK; - struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac]; + struct iwl_mvm_tcm_mac *mdata; + + if (mac >= NUM_MAC_INDEX_DRIVER) + return -EINVAL; + + mdata = &mvm->tcm.data[mac]; mdata->tx.pkts[ac]++; + + return 0; } /* @@ -1088,7 +1070,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, __le16 fc; u16 seq_number = 0; u8 tid = IWL_MAX_TID_COUNT; - u16 txq_id = info->hw_queue; + u16 txq_id; bool is_ampdu = false; int hdrlen; @@ -1152,14 +1134,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM); - /* Check if TXQ needs to be allocated or re-activated */ - if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE)) { - iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb); - - /* - * The frame is now deferred, and the worker scheduled - * will re-allocate it, so we can free it for now. - */ + if (WARN_ON_ONCE(txq_id == IWL_MVM_INVALID_QUEUE)) { iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); spin_unlock(&mvmsta->lock); return 0; @@ -1199,7 +1174,9 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, spin_unlock(&mvmsta->lock); - iwl_mvm_tx_pkt_queued(mvm, mvmsta, tid == IWL_MAX_TID_COUNT ? 0 : tid); + if (iwl_mvm_tx_pkt_queued(mvm, mvmsta, + tid == IWL_MAX_TID_COUNT ? 
0 : tid)) + goto drop; return 0; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index d116c6ae18ff..211c4638d690 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -248,7 +248,7 @@ void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n", le16_to_cpu(err_resp->bad_cmd_seq_num), le32_to_cpu(err_resp->error_service)); - IWL_ERR(mvm, "FW Error notification: timestamp 0x%16llX\n", + IWL_ERR(mvm, "FW Error notification: timestamp 0x%016llX\n", le64_to_cpu(err_resp->timestamp)); } @@ -463,6 +463,9 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm) iwl_trans_read_mem_bytes(trans, mvm->umac_error_event_table, &table, sizeof(table)); + if (table.valid) + mvm->fwrt.dump.umac_err_id = table.error_id; + if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { IWL_ERR(trans, "Start IWL Error Log Dump:\n"); IWL_ERR(trans, "Status: 0x%08lX, count: %d\n", @@ -486,11 +489,11 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm) IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref); } -static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base) +static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u8 lmac_num) { struct iwl_trans *trans = mvm->trans; struct iwl_error_event_table table; - u32 val; + u32 val, base = mvm->error_event_table[lmac_num]; if (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) { if (!base) @@ -541,7 +544,7 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base) iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table)); if (table.valid) - mvm->fwrt.dump.rt_status = table.error_id; + mvm->fwrt.dump.lmac_err_id[lmac_num] = table.error_id; if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { IWL_ERR(trans, "Start IWL Error Log Dump:\n"); @@ -598,10 +601,10 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) return; } - iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[0]); + iwl_mvm_dump_lmac_error_log(mvm, 0); if (mvm->error_event_table[1]) - iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[1]); + iwl_mvm_dump_lmac_error_log(mvm, 1); iwl_mvm_dump_umac_error_log(mvm); } @@ -1133,19 +1136,14 @@ static void iwl_mvm_tcm_uapsd_nonagg_detected_wk(struct work_struct *wk) "AP isn't using AMPDU with uAPSD enabled"); } -static void iwl_mvm_uapsd_agg_disconnect_iter(void *data, u8 *mac, - struct ieee80211_vif *vif) +static void iwl_mvm_uapsd_agg_disconnect(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - int *mac_id = data; if (vif->type != NL80211_IFTYPE_STATION) return; - if (mvmvif->id != *mac_id) - return; - if (!vif->bss_conf.assoc) return; @@ -1155,10 +1153,10 @@ static void iwl_mvm_uapsd_agg_disconnect_iter(void *data, u8 *mac, !mvmvif->queue_params[IEEE80211_AC_BK].uapsd) return; - if (mvm->tcm.data[*mac_id].uapsd_nonagg_detect.detected) + if (mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected) return; - mvm->tcm.data[*mac_id].uapsd_nonagg_detect.detected = true; + mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected = true; IWL_INFO(mvm, "detected AP should do aggregation but isn't, likely due to U-APSD\n"); schedule_delayed_work(&mvmvif->uapsd_nonagg_detected_wk, 15 * HZ); @@ -1171,6 +1169,7 @@ static void iwl_mvm_check_uapsd_agg_expected_tpt(struct iwl_mvm *mvm, u64 bytes = 
mvm->tcm.data[mac].uapsd_nonagg_detect.rx_bytes; u64 tpt; unsigned long rate; + struct ieee80211_vif *vif; rate = ewma_rate_read(&mvm->tcm.data[mac].uapsd_nonagg_detect.rate); @@ -1199,9 +1198,11 @@ static void iwl_mvm_check_uapsd_agg_expected_tpt(struct iwl_mvm *mvm, return; } - ieee80211_iterate_active_interfaces_atomic( - mvm->hw, IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_uapsd_agg_disconnect_iter, &mac); + rcu_read_lock(); + vif = rcu_dereference(mvm->vif_id_to_mac[mac]); + if (vif) + iwl_mvm_uapsd_agg_disconnect(mvm, vif); + rcu_read_unlock(); } static void iwl_mvm_tcm_iterator(void *_data, u8 *mac, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 353581ccc01e..a22e47639a4e 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -513,10 +513,10 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)}, /* 9000 Series */ - {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_soc)}, @@ -531,17 +531,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x06F0, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x003C, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x0060, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x0064, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x00A0, iwl9462_2ac_cfg_soc)}, @@ -556,20 +556,21 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x06F0, 0x02A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x06F0, 0x2030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x06F0, 0x2034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x06F0, 0x4030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x06F0, 0x4034, iwl9560_2ac_cfg_soc)}, + 
{IWL_PCI_DEVICE(0x06F0, 0x2030, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x2034, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x4030, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x4034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x40A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x4234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x42A4, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_160_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_160_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_160_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x001C, iwl9260_2ac_160_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_160_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0034, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x003C, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_160_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x003C, iwl9560_2ac_160_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0064, iwl9460_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9460_2ac_cfg)}, @@ -593,24 +594,26 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x2526, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_160_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x401C, iwl9260_2ac_160_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_160_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x4034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_160_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x8010, iwl9260_2ac_160_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_160_cfg)}, {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)}, {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)}, {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)}, {IWL_PCI_DEVICE(0x271B, 0x0214, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x271C, 0x0214, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_160_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_160_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_160_cfg)}, {IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_soc)}, @@ -628,17 +631,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x2720, 0x1210, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2720, 
0x1551, iwl9560_killer_s_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x1552, iwl9560_killer_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2720, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_160_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x4034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x4234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x42A4, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x0034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x30DC, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x003C, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_soc)}, @@ -656,17 +659,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x30DC, 0x1210, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x30DC, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x1552, iwl9560_killer_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x30DC, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x4034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x40A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x4234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x42A4, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_160_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_shared_clk)}, - {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg_shared_clk)}, - {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_160_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_160_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x0064, iwl9461_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x00A0, iwl9462_2ac_cfg_shared_clk)}, @@ -684,18 +687,18 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x31DC, 0x1210, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x31DC, 0x1551, iwl9560_killer_s_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x1552, iwl9560_killer_2ac_cfg_shared_clk)}, - {IWL_PCI_DEVICE(0x31DC, 0x2030, iwl9560_2ac_cfg_shared_clk)}, - {IWL_PCI_DEVICE(0x31DC, 0x2034, iwl9560_2ac_cfg_shared_clk)}, - {IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_cfg_shared_clk)}, - {IWL_PCI_DEVICE(0x31DC, 0x4034, iwl9560_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x2030, iwl9560_2ac_160_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x2034, iwl9560_2ac_160_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x4030, 
iwl9560_2ac_160_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x4034, iwl9560_2ac_160_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x40A4, iwl9462_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x4234, iwl9560_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x42A4, iwl9462_2ac_cfg_shared_clk)}, - {IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)}, - {IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_cfg_qu_b0_jf_b0)}, - {IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)}, @@ -710,18 +713,18 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)}, - {IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_qu_b0_jf_b0)}, - {IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_qu_b0_jf_b0)}, - {IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_qu_b0_jf_b0)}, - {IWL_PCI_DEVICE(0x34F0, 0x4034, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)}, - {IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x3DF0, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x003C, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x0060, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x0064, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x00A0, iwl9462_2ac_cfg_soc)}, @@ -739,17 +742,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x3DF0, 0x1210, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x3DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x3DF0, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x4034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x40A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x4234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x42A4, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x0030, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 
0x0034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x0060, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x0064, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x00A0, iwl9462_2ac_cfg_soc)}, @@ -767,19 +770,19 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x43F0, 0x1210, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x43F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x4034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x40A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x4234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x42A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x0034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x0064, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x00A0, iwl9462_2ac_cfg_soc)}, @@ -805,18 +808,18 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x9DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x9DF0, 0x2030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x9DF0, 0x2034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x2030, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x2034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x9DF0, 0x4030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x9DF0, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x4030, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x4034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x40A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x4234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x42A4, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA0F0, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0030, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x0034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x0060, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x0064, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x00A0, iwl9462_2ac_cfg_soc)}, @@ -834,17 +837,17 @@ 
static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0xA0F0, 0x1210, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0xA0F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA0F0, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x4034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x40A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x4234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x42A4, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x0064, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x00A0, iwl9462_2ac_cfg_soc)}, @@ -862,41 +865,88 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0xA370, 0x1210, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0xA370, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x1552, iwl9560_killer_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA370, 0x2030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA370, 0x2034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA370, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x2030, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x2034, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x4034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x40A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x4234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x42A4, iwl9462_2ac_cfg_soc)}, /* 22000 Series */ - {IWL_PCI_DEVICE(0x2720, 0x0000, iwl22000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0x2720, 0x0040, iwl22000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0x2720, 0x0078, iwl22000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x02F0, 0x0070, iwl22560_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x02F0, 0x0074, iwl22560_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x02F0, 0x0078, iwl22560_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x02F0, 0x007C, iwl22560_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x02F0, 0x0310, iwl22560_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x02F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)}, + {IWL_PCI_DEVICE(0x02F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)}, + {IWL_PCI_DEVICE(0x02F0, 0x4070, iwl22560_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x06F0, 0x0070, iwl22560_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x06F0, 0x0074, iwl22560_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x06F0, 0x0078, iwl22560_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x06F0, 0x007C, iwl22560_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x06F0, 0x0310, iwl22560_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x06F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)}, + {IWL_PCI_DEVICE(0x06F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)}, + {IWL_PCI_DEVICE(0x06F0, 0x4070, iwl22560_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x2720, 0x0000, iwl22560_2ax_cfg_hr)}, + 
{IWL_PCI_DEVICE(0x2720, 0x0030, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x0040, iwl22560_2ax_cfg_hr)}, {IWL_PCI_DEVICE(0x2720, 0x0070, iwl22000_2ac_cfg_hr_cdb)}, - {IWL_PCI_DEVICE(0x2720, 0x0030, iwl22000_2ac_cfg_hr_cdb)}, - {IWL_PCI_DEVICE(0x2720, 0x1080, iwl22000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x2720, 0x0074, iwl22560_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x2720, 0x0078, iwl22560_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x2720, 0x007C, iwl22560_2ax_cfg_hr)}, {IWL_PCI_DEVICE(0x2720, 0x0090, iwl22000_2ac_cfg_hr_cdb)}, {IWL_PCI_DEVICE(0x2720, 0x0310, iwl22000_2ac_cfg_hr_cdb)}, - {IWL_PCI_DEVICE(0x34F0, 0x0040, iwl22000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0x34F0, 0x0078, iwl22000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x2720, 0x0A10, iwl22000_2ac_cfg_hr_cdb)}, + {IWL_PCI_DEVICE(0x2720, 0x1080, iwl22560_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x2720, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)}, + {IWL_PCI_DEVICE(0x2720, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)}, + {IWL_PCI_DEVICE(0x2720, 0x4070, iwl22560_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x34F0, 0x0040, iwl22560_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22560_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x34F0, 0x0074, iwl22560_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x34F0, 0x0078, iwl22560_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x34F0, 0x007C, iwl22560_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22560_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x34F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x4070, iwl22560_2ax_cfg_hr)}, {IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22560_2ax_cfg_su_cdb)}, {IWL_PCI_DEVICE(0x40C0, 0x0010, iwl22560_2ax_cfg_su_cdb)}, {IWL_PCI_DEVICE(0x40c0, 0x0090, iwl22560_2ax_cfg_su_cdb)}, {IWL_PCI_DEVICE(0x40C0, 0x0310, iwl22560_2ax_cfg_su_cdb)}, {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwl22560_2ax_cfg_su_cdb)}, - {IWL_PCI_DEVICE(0x43F0, 0x0040, iwl22000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0x43F0, 0x0070, iwl22000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0x43F0, 0x0078, iwl22000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0xA0F0, 0x0000, iwl22000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0xA0F0, 0x0040, iwl22000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0xA0F0, 0x0070, iwl22000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0xA0F0, 0x0078, iwl22000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0xA0F0, 0x00B0, iwl22000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0xA0F0, 0x0A10, iwl22000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x43F0, 0x0040, iwl22560_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x43F0, 0x0070, iwl22560_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x43F0, 0x0074, iwl22560_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x43F0, 0x0078, iwl22560_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x43F0, 0x007C, iwl22560_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)}, + {IWL_PCI_DEVICE(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)}, + {IWL_PCI_DEVICE(0x43F0, 0x4070, iwl22560_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0000, iwl22560_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0040, iwl22560_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0070, iwl22560_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0074, iwl22560_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0078, iwl22560_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0xA0F0, 0x007C, iwl22560_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0xA0F0, 0x00B0, iwl22560_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0A10, iwl22560_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0xA0F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)}, + {IWL_PCI_DEVICE(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)}, + 
{IWL_PCI_DEVICE(0xA0F0, 0x4070, iwl22560_2ax_cfg_hr)}, + + {IWL_PCI_DEVICE(0x2723, 0x0080, iwl22260_2ax_cfg)}, + {IWL_PCI_DEVICE(0x2723, 0x0084, iwl22260_2ax_cfg)}, + {IWL_PCI_DEVICE(0x2723, 0x0088, iwl22260_2ax_cfg)}, + {IWL_PCI_DEVICE(0x2723, 0x008C, iwl22260_2ax_cfg)}, + {IWL_PCI_DEVICE(0x2723, 0x4080, iwl22260_2ax_cfg)}, + {IWL_PCI_DEVICE(0x2723, 0x4088, iwl22260_2ax_cfg)}, + + {IWL_PCI_DEVICE(0x1a56, 0x1653, killer1650w_2ax_cfg)}, + {IWL_PCI_DEVICE(0x1a56, 0x1654, killer1650x_2ax_cfg)}, #endif /* CONFIG_IWLMVM */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index d6fc6ce73e0a..0d16bcc3141f 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -1,13 +1,15 @@ /****************************************************************************** * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * - * Portions of this file are derived from the ipw3945 project, as well - * as portions of the ieee80211 subsystem header files. - * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,12 +20,46 @@ * more details. * * The full GNU General Public License is included in this distribution in the - * file called LICENSE. + * file called COPYING. * * Contact Information: * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * + * BSD LICENSE + * + * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * *****************************************************************************/ #ifndef __iwl_trans_int_pcie_h__ #define __iwl_trans_int_pcie_h__ @@ -1029,8 +1065,6 @@ static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans); int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans); -void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable); - void iwl_pcie_rx_allocator_work(struct work_struct *data); /* common functions that are used by gen2 transport */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 9e850c25877b..c260d1251b5f 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -1,13 +1,15 @@ /****************************************************************************** * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * - * Portions of this file are derived from the ipw3945 project, as well - * as portions of the ieee80211 subsystem header files. - * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,12 +20,46 @@ * more details. * * The full GNU General Public License is included in this distribution in the - * file called LICENSE. + * file called COPYING. * * Contact Information: * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * + * BSD LICENSE + * + * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * *****************************************************************************/ #include <linux/sched.h> #include <linux/wait.h> @@ -256,6 +292,9 @@ static void iwl_pcie_restock_bd(struct iwl_trans *trans, bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid); } + + IWL_DEBUG_RX(trans, "Assigned virtual RB ID %u to queue %d index %d\n", + (u32)rxb->vid, rxq->id, rxq->write); } /* @@ -860,30 +899,6 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq) iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE); } -void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable) -{ - if (trans->cfg->device_family != IWL_DEVICE_FAMILY_9000) - return; - - if (CSR_HW_REV_STEP(trans->hw_rev) != SILICON_A_STEP) - return; - - if (!trans->cfg->integrated) - return; - - /* - * Turn on the chicken-bits that cause MAC wakeup for RX-related - * values. - * This costs some power, but needed for W/A 9000 integrated A-step - * bug where shadow registers are not in the retention list and their - * value is lost when NIC powers down - */ - iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, - CSR_MAC_SHADOW_REG_CTRL_RX_WAKE); - iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTL2, - CSR_MAC_SHADOW_REG_CTL2_RX_WAKE); -} - static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -971,8 +986,6 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans) /* Set interrupt coalescing timer to default (2048 usecs) */ iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); - - iwl_pcie_enable_rx_wake(trans, true); } void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) @@ -1360,6 +1373,8 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans, if (rxb->invalid) goto out_err; + IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid); + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) rxb->size = le32_to_cpu(rxq->cd[i].size) & IWL_RX_CD_SIZE; @@ -1411,11 +1426,12 @@ restart: emergency = true; } + IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i); + rxb = iwl_pcie_get_rxb(trans, rxq, i); if (!rxb) goto out; - IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i); iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i); i = (i + 1) & (rxq->queue_size - 1); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index f97aea5ffc44..f74281508197 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1530,8 +1530,6 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, iwl_clear_bit(trans, CSR_GP_CNTRL, 
BIT(trans->cfg->csr->flag_init_done)); - iwl_pcie_enable_rx_wake(trans, false); - if (reset) { /* * reset TX queues -- some of their registers reset during S3 @@ -1558,8 +1556,6 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, return 0; } - iwl_pcie_enable_rx_wake(trans, true); - iwl_set_bit(trans, CSR_GP_CNTRL, BIT(trans->cfg->csr->flag_mac_access_req)); iwl_set_bit(trans, CSR_GP_CNTRL, @@ -1968,7 +1964,7 @@ static void iwl_trans_pcie_removal_wk(struct work_struct *wk) struct iwl_trans_pcie_removal *removal = container_of(wk, struct iwl_trans_pcie_removal, work); struct pci_dev *pdev = removal->pdev; - char *prop[] = {"EVENT=INACCESSIBLE", NULL}; + static char *prop[] = {"EVENT=INACCESSIBLE", NULL}; dev_err(&pdev->dev, "Device gone - attempting removal\n"); kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop); @@ -3118,7 +3114,7 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans, return len; } -static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, int *len) +static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len) { if (trans->num_blocks) { *len += sizeof(struct iwl_fw_error_dump_data) + @@ -3173,8 +3169,7 @@ static struct iwl_trans_dump_data struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue]; struct iwl_fw_error_dump_txcmd *txcmd; struct iwl_trans_dump_data *dump_data; - u32 len, num_rbs = 0; - u32 monitor_len; + u32 len, num_rbs = 0, monitor_len = 0; int i, ptr; bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) && !trans->cfg->mq_rx_supported && @@ -3191,19 +3186,8 @@ static struct iwl_trans_dump_data cmdq->n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE); /* FW monitor */ - monitor_len = iwl_trans_get_fw_monitor_len(trans, &len); - - if (dump_mask == BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)) { - dump_data = vzalloc(len); - if (!dump_data) - return NULL; - - data = (void *)dump_data->data; - len = iwl_trans_pcie_dump_monitor(trans, &data, monitor_len); - dump_data->len = len; - - return dump_data; - } + if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)) + monitor_len = iwl_trans_get_fw_monitor_len(trans, &len); /* CSR registers */ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR)) @@ -3569,24 +3553,15 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, } } - /* - * 9000-series integrated A-step has a problem with suspend/resume - * and sometimes even causes the whole platform to get stuck. This - * workaround makes the hardware not go into the problematic state. 
- */ - if (trans->cfg->integrated && - trans->cfg->device_family == IWL_DEVICE_FAMILY_9000 && - CSR_HW_REV_STEP(trans->hw_rev) == SILICON_A_STEP) - iwl_set_bit(trans, CSR_HOST_CHICKEN, - CSR_HOST_CHICKEN_PM_IDLE_SRC_DIS_SB_PME); + IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", trans->hw_rev); #if IS_ENABLED(CONFIG_IWLMVM) trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID); - if (cfg == &iwl22000_2ax_cfg_hr) { + if (cfg == &iwl22560_2ax_cfg_hr) { if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) { - trans->cfg = &iwl22000_2ax_cfg_hr; + trans->cfg = &iwl22560_2ax_cfg_hr; } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) { trans->cfg = &iwl22000_2ax_cfg_jf; @@ -3602,7 +3577,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, goto out_no_pci; } } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == - CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) { + CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) && + (trans->cfg != &iwl22260_2ax_cfg || + trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) { u32 hw_status; hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index 156ca1b1f621..f3d2e8fe920b 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -214,7 +214,11 @@ static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans, { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd); - struct iwl_tfh_tb *tb = &tfd->tbs[idx]; + struct iwl_tfh_tb *tb; + + if (WARN_ON(idx >= IWL_TFH_NUM_TBS)) + return -EINVAL; + tb = &tfd->tbs[idx]; /* Each TFD can point to a maximum max_tbs Tx buffers */ if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->max_tbs) { @@ -408,7 +412,7 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans, goto out_err; /* building the A-MSDU might have changed this data, memcpy it now */ - memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE); + memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); return tfd; out_err: @@ -469,7 +473,7 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans, tb_phys = iwl_pcie_get_first_tb_dma(txq, idx); /* The first TB points to bi-directional DMA data */ - memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE); + memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); @@ -834,14 +838,14 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, /* start the TFD with the minimum copy bytes */ tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE); - memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size); + memcpy(&txq->first_tb_bufs[idx], out_cmd, tb0_size); iwl_pcie_gen2_set_tb(trans, tfd, iwl_pcie_get_first_tb_dma(txq, idx), tb0_size); /* map first command fragment, if any remains */ if (copy_size > tb0_size) { phys_addr = dma_map_single(trans->dev, - ((u8 *)&out_cmd->hdr) + tb0_size, + (u8 *)out_cmd + tb0_size, copy_size - tb0_size, DMA_TO_DEVICE); if (dma_mapping_error(trans->dev, phys_addr)) { diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index ee990a7a5411..07395502f419 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -1,13 +1,15 @@ 
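The iwl_pcie_gen2_set_tb() hunk above validates the TB index before it is used to address tfd->tbs[], failing with -EINVAL rather than writing past the descriptor array. A minimal standalone sketch of the same guard pattern; NUM_TBS, struct tfh_tb and set_tb() are illustrative stand-ins, not the driver's real definitions:

#include <errno.h>
#include <stdint.h>

#define NUM_TBS 25	/* assumed stand-in for IWL_TFH_NUM_TBS */

struct tfh_tb  { uint16_t tb_len; uint64_t addr; };
struct tfh_tfd { uint16_t num_tbs; struct tfh_tb tbs[NUM_TBS]; };

/* Reject an out-of-range index before touching tbs[idx]. */
static int set_tb(struct tfh_tfd *tfd, int idx, uint64_t addr, uint16_t len)
{
	if (idx < 0 || idx >= NUM_TBS)
		return -EINVAL;

	tfd->tbs[idx].addr = addr;
	tfd->tbs[idx].tb_len = len;
	tfd->num_tbs = idx + 1;

	return idx;
}
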
/****************************************************************************** * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * - * Portions of this file are derived from the ipw3945 project, as well - * as portions of the ieee80211 subsystem header files. - * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,12 +20,46 @@ * more details. * * The full GNU General Public License is included in this distribution in the - * file called LICENSE. + * file called COPYING. * * Contact Information: * Intel Linux Wireless <linuxwifi@intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * + * BSD LICENSE + * + * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * *****************************************************************************/ #include <linux/etherdevice.h> #include <linux/ieee80211.h> @@ -2438,8 +2474,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, } /* building the A-MSDU might have changed this data, so memcpy it now */ - memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr, - IWL_FIRST_TB_SIZE); + memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE); tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr); /* Set up entry for this TFD in Tx byte-count array */ diff --git a/drivers/net/wireless/marvell/libertas/debugfs.c b/drivers/net/wireless/marvell/libertas/debugfs.c index c83f44f9ddf1..fe14814af300 100644 --- a/drivers/net/wireless/marvell/libertas/debugfs.c +++ b/drivers/net/wireless/marvell/libertas/debugfs.c @@ -708,8 +708,6 @@ void lbs_debugfs_init_one(struct lbs_private *priv, struct net_device *dev) goto exit; priv->debugfs_dir = debugfs_create_dir(dev->name, lbs_dir); - if (!priv->debugfs_dir) - goto exit; for (i=0; i<ARRAY_SIZE(debugfs_files); i++) { files = &debugfs_files[i]; @@ -721,8 +719,6 @@ void lbs_debugfs_init_one(struct lbs_private *priv, struct net_device *dev) } priv->events_dir = debugfs_create_dir("subscribed_events", priv->debugfs_dir); - if (!priv->events_dir) - goto exit; for (i=0; i<ARRAY_SIZE(debugfs_events_files); i++) { files = &debugfs_events_files[i]; @@ -734,8 +730,6 @@ void lbs_debugfs_init_one(struct lbs_private *priv, struct net_device *dev) } priv->regs_dir = debugfs_create_dir("registers", priv->debugfs_dir); - if (!priv->regs_dir) - goto exit; for (i=0; i<ARRAY_SIZE(debugfs_regs_files); i++) { files = &debugfs_regs_files[i]; diff --git a/drivers/net/wireless/marvell/libertas/mesh.c b/drivers/net/wireless/marvell/libertas/mesh.c index b0cb16ef8d1d..2315fdff56c2 100644 --- a/drivers/net/wireless/marvell/libertas/mesh.c +++ b/drivers/net/wireless/marvell/libertas/mesh.c @@ -797,7 +797,12 @@ static void lbs_persist_config_init(struct net_device *dev) { int ret; ret = sysfs_create_group(&(dev->dev.kobj), &boot_opts_group); + if (ret) + pr_err("failed to create boot_opts_group.\n"); + ret = sysfs_create_group(&(dev->dev.kobj), &mesh_ie_group); + if (ret) + pr_err("failed to create mesh_ie_group.\n"); } static void lbs_persist_config_remove(struct net_device *dev) diff --git a/drivers/net/wireless/marvell/libertas_tf/main.c b/drivers/net/wireless/marvell/libertas_tf/main.c index 1d45da187b9b..a7cb7d06e5e6 100644 --- a/drivers/net/wireless/marvell/libertas_tf/main.c +++ b/drivers/net/wireless/marvell/libertas_tf/main.c @@ -676,7 +676,7 @@ int lbtf_remove_card(struct lbtf_private *priv) ieee80211_unregister_hw(hw); ieee80211_free_hw(hw); - lbtf_deb_leave(LBTF_DEB_MAIN); + lbtf_deb_leave(LBTF_DEB_MAIN); return 0; } EXPORT_SYMBOL_GPL(lbtf_remove_card); diff --git a/drivers/net/wireless/marvell/mwifiex/Kconfig b/drivers/net/wireless/marvell/mwifiex/Kconfig index 279167ddd293..524fd565cb2a 100644 --- a/drivers/net/wireless/marvell/mwifiex/Kconfig +++ b/drivers/net/wireless/marvell/mwifiex/Kconfig @@ -9,7 +9,7 @@ config MWIFIEX mwifiex. 
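The libertas debugfs hunks above, like the mwifiex MWIFIEX_DFS_ADD_FILE change further below, apply the kernel rule that debugfs creation calls need no error checking: debugfs is a best-effort debugging aid, and a failed or disabled debugfs should not change driver behaviour. A hedged sketch of the resulting idiom, with a hypothetical demo_priv/demo_fops in place of the drivers' own types:

#include <linux/debugfs.h>

struct demo_priv {
	struct dentry *dir;	/* hypothetical driver private data */
};

static const struct file_operations demo_fops;	/* assumed defined elsewhere */

static void demo_debugfs_init(struct demo_priv *priv, const char *name)
{
	/*
	 * No return-value checks: the debugfs API is fire-and-forget,
	 * and later debugfs calls tolerate a failed parent dentry.
	 */
	priv->dir = debugfs_create_dir(name, NULL);
	debugfs_create_file("status", 0444, priv->dir, priv, &demo_fops);
}
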
config MWIFIEX_SDIO - tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897/SD8997" + tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897/SD8977/SD8997" depends on MWIFIEX && MMC select FW_LOADER select WANT_DEV_COREDUMP diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c index cbe4493b3266..8ab114cf3467 100644 --- a/drivers/net/wireless/marvell/mwifiex/debugfs.c +++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c @@ -922,9 +922,8 @@ mwifiex_reset_write(struct file *file, } #define MWIFIEX_DFS_ADD_FILE(name) do { \ - if (!debugfs_create_file(#name, 0644, priv->dfs_dev_dir, \ - priv, &mwifiex_dfs_##name##_fops)) \ - return; \ + debugfs_create_file(#name, 0644, priv->dfs_dev_dir, priv, \ + &mwifiex_dfs_##name##_fops); \ } while (0); #define MWIFIEX_DFS_FILE_OPS(name) \ diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index d49fbd58afa7..a85648342d15 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -489,6 +489,8 @@ static void mwifiex_sdio_coredump(struct device *dev) #define SDIO_DEVICE_ID_MARVELL_8887 (0x9135) /* Device ID for SD8801 */ #define SDIO_DEVICE_ID_MARVELL_8801 (0x9139) +/* Device ID for SD8977 */ +#define SDIO_DEVICE_ID_MARVELL_8977 (0x9145) /* Device ID for SD8997 */ #define SDIO_DEVICE_ID_MARVELL_8997 (0x9141) @@ -507,6 +509,8 @@ static const struct sdio_device_id mwifiex_ids[] = { .driver_data = (unsigned long)&mwifiex_sdio_sd8887}, {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8801), .driver_data = (unsigned long)&mwifiex_sdio_sd8801}, + {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8977), + .driver_data = (unsigned long)&mwifiex_sdio_sd8977}, {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8997), .driver_data = (unsigned long)&mwifiex_sdio_sd8997}, {}, @@ -2726,4 +2730,5 @@ MODULE_FIRMWARE(SD8787_DEFAULT_FW_NAME); MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME); MODULE_FIRMWARE(SD8897_DEFAULT_FW_NAME); MODULE_FIRMWARE(SD8887_DEFAULT_FW_NAME); +MODULE_FIRMWARE(SD8977_DEFAULT_FW_NAME); MODULE_FIRMWARE(SD8997_DEFAULT_FW_NAME); diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h index dccf7fd1aef3..912de2cde8d9 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.h +++ b/drivers/net/wireless/marvell/mwifiex/sdio.h @@ -36,6 +36,7 @@ #define SD8897_DEFAULT_FW_NAME "mrvl/sd8897_uapsta.bin" #define SD8887_DEFAULT_FW_NAME "mrvl/sd8887_uapsta.bin" #define SD8801_DEFAULT_FW_NAME "mrvl/sd8801_uapsta.bin" +#define SD8977_DEFAULT_FW_NAME "mrvl/sd8977_uapsta.bin" #define SD8997_DEFAULT_FW_NAME "mrvl/sd8997_uapsta.bin" #define BLOCK_MODE 1 @@ -371,6 +372,59 @@ static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8897 = { 0x59, 0x5c, 0x5d}, }; +static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8977 = { + .start_rd_port = 0, + .start_wr_port = 0, + .base_0_reg = 0xF8, + .base_1_reg = 0xF9, + .poll_reg = 0x5C, + .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK | + CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK, + .host_int_rsr_reg = 0x4, + .host_int_status_reg = 0x0C, + .host_int_mask_reg = 0x08, + .status_reg_0 = 0xE8, + .status_reg_1 = 0xE9, + .sdio_int_mask = 0xff, + .data_port_mask = 0xffffffff, + .io_port_0_reg = 0xE4, + .io_port_1_reg = 0xE5, + .io_port_2_reg = 0xE6, + .max_mp_regs = 196, + .rd_bitmap_l = 0x10, + .rd_bitmap_u = 0x11, + .rd_bitmap_1l = 0x12, + .rd_bitmap_1u = 
0x13, + .wr_bitmap_l = 0x14, + .wr_bitmap_u = 0x15, + .wr_bitmap_1l = 0x16, + .wr_bitmap_1u = 0x17, + .rd_len_p0_l = 0x18, + .rd_len_p0_u = 0x19, + .card_misc_cfg_reg = 0xd8, + .card_cfg_2_1_reg = 0xd9, + .cmd_rd_len_0 = 0xc0, + .cmd_rd_len_1 = 0xc1, + .cmd_rd_len_2 = 0xc2, + .cmd_rd_len_3 = 0xc3, + .cmd_cfg_0 = 0xc4, + .cmd_cfg_1 = 0xc5, + .cmd_cfg_2 = 0xc6, + .cmd_cfg_3 = 0xc7, + .fw_dump_host_ready = 0xcc, + .fw_dump_ctrl = 0xf0, + .fw_dump_start = 0xf1, + .fw_dump_end = 0xf8, + .func1_dump_reg_start = 0x10, + .func1_dump_reg_end = 0x17, + .func1_scratch_reg = 0xe8, + .func1_spec_reg_num = 13, + .func1_spec_reg_table = {0x08, 0x58, 0x5C, 0x5D, + 0x60, 0x61, 0x62, 0x64, + 0x65, 0x66, 0x68, 0x69, + 0x6a}, +}; + static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8997 = { .start_rd_port = 0, .start_wr_port = 0, @@ -532,6 +586,22 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = { .can_ext_scan = true, }; +static const struct mwifiex_sdio_device mwifiex_sdio_sd8977 = { + .firmware = SD8977_DEFAULT_FW_NAME, + .reg = &mwifiex_reg_sd8977, + .max_ports = 32, + .mp_agg_pkt_limit = 16, + .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K, + .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX, + .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX, + .supports_sdio_new_mode = true, + .has_control_mask = false, + .can_dump_fw = true, + .fw_dump_enh = true, + .can_auto_tdls = false, + .can_ext_scan = true, +}; + static const struct mwifiex_sdio_device mwifiex_sdio_sd8997 = { .firmware = SD8997_DEFAULT_FW_NAME, .reg = &mwifiex_reg_sd8997, diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index e2ba26378575..e769c8a555dd 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -300,7 +300,7 @@ int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, if (q->queued + (n + 1) / 2 >= q->ndesc - 1) goto unmap; - return dev->queue_ops->add_buf(dev, q, buf, n, tx_info, skb, t); + return mt76_dma_add_buf(dev, q, buf, n, tx_info, skb, t); unmap: ret = -ENOMEM; @@ -318,7 +318,7 @@ free: EXPORT_SYMBOL_GPL(mt76_dma_tx_queue_skb); static int -mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, bool napi) +mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q) { dma_addr_t addr; void *buf; @@ -392,7 +392,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid) mt76_dma_rx_cleanup(dev, q); mt76_dma_sync_idx(dev, q); - mt76_dma_rx_fill(dev, q, false); + mt76_dma_rx_fill(dev, q); } static void @@ -417,10 +417,9 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data, static int mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget) { + int len, data_len, done = 0; struct sk_buff *skb; unsigned char *data; - int len; - int done = 0; bool more; while (done < budget) { @@ -430,6 +429,19 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget) if (!data) break; + if (q->rx_head) + data_len = q->buf_size; + else + data_len = SKB_WITH_OVERHEAD(q->buf_size); + + if (data_len < len + q->buf_offset) { + dev_kfree_skb(q->rx_head); + q->rx_head = NULL; + + skb_free_frag(data); + continue; + } + if (q->rx_head) { mt76_add_fragment(dev, q, data, len, more); continue; @@ -440,12 +452,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget) skb_free_frag(data); continue; } - skb_reserve(skb, q->buf_offset); - if (skb->tail + len > skb->end) { - dev_kfree_skb(skb); - continue; - } if (q == 
&dev->q_rx[MT_RXQ_MCU]) { u32 *rxfce = (u32 *) skb->cb; @@ -463,7 +470,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget) dev->drv->rx_skb(dev, q - dev->q_rx, skb); } - mt76_dma_rx_fill(dev, q, true); + mt76_dma_rx_fill(dev, q); return done; } @@ -504,7 +511,7 @@ mt76_dma_init(struct mt76_dev *dev) for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) { netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll, 64); - mt76_dma_rx_fill(dev, &dev->q_rx[i], false); + mt76_dma_rx_fill(dev, &dev->q_rx[i]); skb_queue_head_init(&dev->rx_skb[i]); napi_enable(&dev->napi[i]); } diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index 7b926dfa6b97..ee3b65a14870 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -328,6 +328,7 @@ int mt76_register_device(struct mt76_dev *dev, bool vht, ieee80211_hw_set(hw, MFP_CAPABLE); ieee80211_hw_set(hw, AP_LINK_PS); ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); + ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR); wiphy->flags |= WIPHY_FLAG_IBSS_RSN; @@ -547,7 +548,7 @@ mt76_check_ccmp_pn(struct sk_buff *skb) } static void -mt76_check_ps(struct mt76_dev *dev, struct sk_buff *skb) +mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb) { struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; @@ -566,6 +567,11 @@ mt76_check_ps(struct mt76_dev *dev, struct sk_buff *skb) sta = container_of((void *) wcid, struct ieee80211_sta, drv_priv); + if (status->signal <= 0) + ewma_signal_add(&wcid->rssi, -status->signal); + + wcid->inactive_count = 0; + if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags)) return; @@ -625,7 +631,7 @@ void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q, __skb_queue_head_init(&frames); while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) { - mt76_check_ps(dev, skb); + mt76_check_sta(dev, skb); mt76_rx_aggr_reorder(skb, &frames); } @@ -659,6 +665,7 @@ mt76_sta_add(struct mt76_dev *dev, struct ieee80211_vif *vif, mt76_txq_init(dev, sta->txq[i]); } + ewma_signal_init(&wcid->rssi); rcu_assign_pointer(dev->wcid[wcid->idx], wcid); out: @@ -709,3 +716,60 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, return 0; } EXPORT_SYMBOL_GPL(mt76_sta_state); + +int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + int *dbm) +{ + struct mt76_dev *dev = hw->priv; + int n_chains = __sw_hweight8(dev->antenna_mask); + + *dbm = dev->txpower_cur / 2; + + /* convert from per-chain power to combined + * output on 2x2 devices + */ + if (n_chains > 1) + *dbm += 3; + + return 0; +} +EXPORT_SYMBOL_GPL(mt76_get_txpower); + +static void +__mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif) +{ + if (vif->csa_active && ieee80211_csa_is_complete(vif)) + ieee80211_csa_finish(vif); +} + +void mt76_csa_finish(struct mt76_dev *dev) +{ + if (!dev->csa_complete) + return; + + ieee80211_iterate_active_interfaces_atomic(dev->hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + __mt76_csa_finish, dev); + + dev->csa_complete = 0; +} +EXPORT_SYMBOL_GPL(mt76_csa_finish); + +static void +__mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif) +{ + struct mt76_dev *dev = priv; + + if (!vif->csa_active) + return; + + dev->csa_complete |= ieee80211_csa_is_complete(vif); +} + +void mt76_csa_check(struct mt76_dev *dev) +{ + ieee80211_iterate_active_interfaces_atomic(dev->hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + 
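/*
 * Usage sketch (assumed driver flow, mirroring the mt76x02 mmio beacon
 * path later in this diff): poll from the pre-TBTT tasklet, then
 * complete the channel switch at TBTT:
 *
 *	mt76_csa_check(&dev->mt76);
 *	if (dev->mt76.csa_complete)
 *		mt76_csa_finish(&dev->mt76);
 */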
__mt76_csa_check, dev); +} +EXPORT_SYMBOL_GPL(mt76_csa_check); diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 5cd508a68609..2bb9db4ed80a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -23,6 +23,7 @@ #include <linux/skbuff.h> #include <linux/leds.h> #include <linux/usb.h> +#include <linux/average.h> #include <net/mac80211.h> #include "util.h" @@ -174,6 +175,8 @@ enum mt76_wcid_flags { #define MT76_N_WCIDS 128 +DECLARE_EWMA(signal, 10, 8); + struct mt76_wcid { struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS]; @@ -181,6 +184,9 @@ struct mt76_wcid { unsigned long flags; + struct ewma_signal rssi; + int inactive_count; + u8 idx; u8 hw_key_idx; @@ -239,7 +245,9 @@ struct mt76_rx_tid { #define MT_TX_CB_TXS_FAILED BIT(2) #define MT_PACKET_ID_MASK GENMASK(7, 0) -#define MT_PACKET_ID_NO_ACK MT_PACKET_ID_MASK +#define MT_PACKET_ID_NO_ACK 0 +#define MT_PACKET_ID_NO_SKB 1 +#define MT_PACKET_ID_FIRST 2 #define MT_TX_STATUS_SKB_TIMEOUT HZ @@ -421,6 +429,7 @@ struct mt76_dev { struct mt76_queue q_tx[__MT_TXQ_MAX]; struct mt76_queue q_rx[__MT_RXQ_MAX]; const struct mt76_queue_ops *queue_ops; + int tx_dma_idx[4]; wait_queue_head_t tx_wait; struct sk_buff_head status_list; @@ -454,6 +463,8 @@ struct mt76_dev { bool led_al; u8 led_pin; + u8 csa_complete; + u32 rxfilter; union { @@ -488,7 +499,7 @@ struct mt76_rx_status { u8 rate_idx; u8 nss; u8 band; - u8 signal; + s8 signal; u8 chains; s8 chain_signal[IEEE80211_MAX_CHAINS]; }; @@ -677,6 +688,14 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb); +int mt76_get_min_avg_rssi(struct mt76_dev *dev); + +int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + int *dbm); + +void mt76_csa_check(struct mt76_dev *dev); +void mt76_csa_finish(struct mt76_dev *dev); + /* internal */ void mt76_tx_free(struct mt76_dev *dev); struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c index 497e762978cc..b2cabce1d74d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c @@ -212,24 +212,24 @@ void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev) mt76x02_add_rate_power_offset(t, delta); } -void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info) +void mt76x0_get_power_info(struct mt76x02_dev *dev, s8 *tp) { struct mt76x0_chan_map { u8 chan; u8 offset; } chan_map[] = { - { 2, 0 }, { 4, 1 }, { 6, 2 }, { 8, 3 }, - { 10, 4 }, { 12, 5 }, { 14, 6 }, { 38, 0 }, - { 44, 1 }, { 48, 2 }, { 54, 3 }, { 60, 4 }, - { 64, 5 }, { 102, 6 }, { 108, 7 }, { 112, 8 }, - { 118, 9 }, { 124, 10 }, { 128, 11 }, { 134, 12 }, - { 140, 13 }, { 151, 14 }, { 157, 15 }, { 161, 16 }, - { 167, 17 }, { 171, 18 }, { 173, 19 }, + { 2, 0 }, { 4, 2 }, { 6, 4 }, { 8, 6 }, + { 10, 8 }, { 12, 10 }, { 14, 12 }, { 38, 0 }, + { 44, 2 }, { 48, 4 }, { 54, 6 }, { 60, 8 }, + { 64, 10 }, { 102, 12 }, { 108, 14 }, { 112, 16 }, + { 118, 18 }, { 124, 20 }, { 128, 22 }, { 134, 24 }, + { 140, 26 }, { 151, 28 }, { 157, 30 }, { 161, 32 }, + { 167, 34 }, { 171, 36 }, { 175, 38 }, }; struct ieee80211_channel *chan = dev->mt76.chandef.chan; u8 offset, addr; + int i, idx = 0; u16 data; - int i; if (mt76x0_tssi_enabled(dev)) { s8 target_power; @@ -239,14 +239,14 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info) 
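/*
 * Note on this hunk: the chan_map offsets double because they now index
 * bytes inside 16-bit EEPROM words, and idx selects the low or high
 * byte (an exact channel match picks the high byte).
 */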
else data = mt76x02_eeprom_get(dev, MT_EE_2G_TARGET_POWER); target_power = (data & 0xff) - dev->mt76.rate_power.ofdm[7]; - info[0] = target_power + mt76x0_get_delta(dev); - info[1] = 0; + *tp = target_power + mt76x0_get_delta(dev); return; } for (i = 0; i < ARRAY_SIZE(chan_map); i++) { - if (chan_map[i].chan <= chan->hw_value) { + if (chan->hw_value <= chan_map[i].chan) { + idx = (chan->hw_value == chan_map[i].chan); offset = chan_map[i].offset; break; } @@ -258,13 +258,16 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info) addr = MT_EE_TX_POWER_DELTA_BW80 + offset; } else { switch (chan->hw_value) { + case 42: + offset = 2; + break; case 58: offset = 8; break; case 106: offset = 14; break; - case 112: + case 122: offset = 20; break; case 155: @@ -277,14 +280,9 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info) } data = mt76x02_eeprom_get(dev, addr); - - info[0] = data; - if (!info[0] || info[0] > 0x3f) - info[0] = 5; - - info[1] = data >> 8; - if (!info[1] || info[1] > 0x3f) - info[1] = 5; + *tp = data >> (8 * idx); + if (*tp < 0 || *tp > 0x3f) + *tp = 5; } static int mt76x0_check_eeprom(struct mt76x02_dev *dev) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h index ee9ade9f3c8b..42b259f90b6d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h @@ -26,7 +26,7 @@ struct mt76x02_dev; int mt76x0_eeprom_init(struct mt76x02_dev *dev); void mt76x0_read_rx_gain(struct mt76x02_dev *dev); void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev); -void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info); +void mt76x0_get_power_info(struct mt76x02_dev *dev, s8 *tp); static inline s8 s6_to_s8(u32 val) { diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h index a1657922758e..0290ba5869a5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h @@ -88,6 +88,7 @@ static const struct mt76_reg_pair mt76x0_mac_reg_table[] = { { MT_TX_PROT_CFG6, 0xe3f42004 }, { MT_TX_PROT_CFG7, 0xe3f42084 }, { MT_TX_PROT_CFG8, 0xe3f42104 }, + { MT_VHT_HT_FBK_CFG1, 0xedcba980 }, }; static const struct mt76_reg_pair mt76x0_bbp_init_tab[] = { diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c index d895b6f3dc44..1472c8699b29 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c @@ -99,7 +99,7 @@ static const struct ieee80211_ops mt76x0e_ops = { .sta_rate_tbl_update = mt76x02_sta_rate_tbl_update, .wake_tx_queue = mt76_wake_tx_queue, .get_survey = mt76_get_survey, - .get_txpower = mt76x02_get_txpower, + .get_txpower = mt76_get_txpower, .flush = mt76x0e_flush, .set_tim = mt76x0e_set_tim, .release_buffered_frames = mt76_release_buffered_frames, @@ -141,6 +141,15 @@ static int mt76x0e_register_device(struct mt76x02_dev *dev) mt76_clear(dev, 0x110, BIT(9)); mt76_set(dev, MT_MAX_LEN_CFG, BIT(13)); + mt76_wr(dev, MT_CH_TIME_CFG, + MT_CH_TIME_CFG_TIMER_EN | + MT_CH_TIME_CFG_TX_AS_BUSY | + MT_CH_TIME_CFG_RX_AS_BUSY | + MT_CH_TIME_CFG_NAV_AS_BUSY | + MT_CH_TIME_CFG_EIFS_AS_BUSY | + MT_CH_CCA_RC_EN | + FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1)); + err = mt76x0_register_device(dev); if (err < 0) return err; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c 
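A quick check of the unit handling in the mt76_get_txpower() helper
added above: txpower_cur is kept in half-dB steps, and a second chain
adds 3 dB of combined output. A standalone sketch with hypothetical
values (hweight open-coded so it runs outside the kernel):

	static int combined_dbm(int txpower_cur, unsigned int antenna_mask)
	{
		int n_chains = __builtin_popcount(antenna_mask);
		int dbm = txpower_cur / 2;	/* half-dB -> dBm */

		if (n_chains > 1)		/* 2x2: +3 dB combined */
			dbm += 3;
		return dbm;
	}

	/* combined_dbm(36, 0x3) == 21: 18 dBm per chain, 21 dBm total */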
index 1eb1a802ed20..1117cdc15b04 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c @@ -845,17 +845,17 @@ static void mt76x0_phy_tssi_calibrate(struct mt76x02_dev *dev) void mt76x0_phy_set_txpower(struct mt76x02_dev *dev) { struct mt76_rate_power *t = &dev->mt76.rate_power; - u8 info[2]; + s8 info; mt76x0_get_tx_power_per_rate(dev); - mt76x0_get_power_info(dev, info); + mt76x0_get_power_info(dev, &info); - mt76x02_add_rate_power_offset(t, info[0]); + mt76x02_add_rate_power_offset(t, info); mt76x02_limit_rate_power(t, dev->mt76.txpower_conf); dev->mt76.txpower_cur = mt76x02_get_max_rate_power(t); - mt76x02_add_rate_power_offset(t, -info[0]); + mt76x02_add_rate_power_offset(t, -info); - mt76x02_phy_set_txpower(dev, info[0], info[1]); + mt76x02_phy_set_txpower(dev, info, info); } void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on) @@ -1013,6 +1013,8 @@ int mt76x0_phy_set_channel(struct mt76x02_dev *dev, mt76x0_phy_calibrate(dev, false); mt76x0_phy_set_txpower(dev); + mt76x02_edcca_init(dev); + ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work, MT_CALIBRATE_INTERVAL); @@ -1075,7 +1077,9 @@ mt76x0_phy_update_channel_gain(struct mt76x02_dev *dev) u8 gain_delta; int low_gain; - dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev); + dev->cal.avg_rssi_all = mt76_get_min_avg_rssi(&dev->mt76); + if (!dev->cal.avg_rssi_all) + dev->cal.avg_rssi_all = -75; low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) + (dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev)); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c index 0e6b43bb4678..3987adaaf2bd 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c @@ -155,42 +155,52 @@ static const struct ieee80211_ops mt76x0u_ops = { .sta_rate_tbl_update = mt76x02_sta_rate_tbl_update, .set_rts_threshold = mt76x02_set_rts_threshold, .wake_tx_queue = mt76_wake_tx_queue, - .get_txpower = mt76x02_get_txpower, + .get_txpower = mt76_get_txpower, }; -static int mt76x0u_register_device(struct mt76x02_dev *dev) +static int mt76x0u_init_hardware(struct mt76x02_dev *dev) { - struct ieee80211_hw *hw = dev->mt76.hw; int err; - err = mt76u_alloc_queues(&dev->mt76); - if (err < 0) - goto out_err; - - err = mt76u_mcu_init_rx(&dev->mt76); - if (err < 0) - goto out_err; - mt76x0_chip_onoff(dev, true, true); - if (!mt76x02_wait_for_mac(&dev->mt76)) { - err = -ETIMEDOUT; - goto out_err; - } + + if (!mt76x02_wait_for_mac(&dev->mt76)) + return -ETIMEDOUT; err = mt76x0u_mcu_init(dev); if (err < 0) - goto out_err; + return err; mt76x0_init_usb_dma(dev); err = mt76x0_init_hardware(dev); if (err < 0) - goto out_err; + return err; mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e); mt76_wr(dev, MT_TXOP_CTRL_CFG, FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) | FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58)); + return 0; +} + +static int mt76x0u_register_device(struct mt76x02_dev *dev) +{ + struct ieee80211_hw *hw = dev->mt76.hw; + int err; + + err = mt76u_alloc_queues(&dev->mt76); + if (err < 0) + goto out_err; + + err = mt76u_mcu_init_rx(&dev->mt76); + if (err < 0) + goto out_err; + + err = mt76x0u_init_hardware(dev); + if (err < 0) + goto out_err; + err = mt76x0_register_device(dev); if (err < 0) goto out_err; @@ -301,6 +311,8 @@ static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf, mt76u_stop_queues(&dev->mt76); mt76x0u_mac_stop(dev); + 
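/*
 * Suspend now clears MT76_STATE_MCU_RUNNING and powers the chip fully
 * off, which is why resume below switches from mt76x0_init_hardware()
 * to mt76x0u_init_hardware() and reloads the MCU firmware as well.
 */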
clear_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state); + mt76x0_chip_onoff(dev, false, false); usb_kill_urb(usb->mcu.res.urb); return 0; @@ -328,7 +340,7 @@ static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf) tasklet_enable(&usb->rx_tasklet); tasklet_enable(&usb->tx_tasklet); - ret = mt76x0_init_hardware(dev); + ret = mt76x0u_init_hardware(dev); if (ret) goto err; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c index 9d7585029df9..f391d2d21fbc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c @@ -15,6 +15,7 @@ */ #include <linux/kernel.h> #include <linux/firmware.h> +#include <linux/module.h> #include "mt76x0.h" #include "mcu.h" diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h index 6782665049dd..6d96766a6ed3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h @@ -15,8 +15,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -#ifndef __MT76X02_UTIL_H -#define __MT76X02_UTIL_H +#ifndef __MT76x02_H +#define __MT76x02_H #include <linux/kfifo.h> @@ -28,6 +28,9 @@ #define MT_CALIBRATE_INTERVAL HZ +#define MT_WATCHDOG_TIME (HZ / 10) +#define MT_TX_HANG_TH 10 + #define MT_MAX_CHAINS 2 struct mt76x02_rx_freq_cal { s8 high_gain[MT_MAX_CHAINS]; @@ -79,6 +82,7 @@ struct mt76x02_dev { struct tasklet_struct pre_tbtt_tasklet; struct delayed_work cal_work; struct delayed_work mac_work; + struct delayed_work wdt_work; u32 aggr_stats[32]; @@ -89,6 +93,9 @@ struct mt76x02_dev { u8 tbtt_count; u16 beacon_int; + u32 tx_hang_reset; + u8 tx_hang_check; + struct mt76x02_calibration cal; s8 target_power; @@ -101,6 +108,12 @@ struct mt76x02_dev { u8 slottime; struct mt76x02_dfs_pattern_detector dfs_pd; + + /* edcca monitor */ + bool ed_tx_blocked; + bool ed_monitor; + u8 ed_trigger; + u8 ed_silent; }; extern struct ieee80211_rate mt76x02_rates[12]; @@ -136,6 +149,7 @@ s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev, const struct ieee80211_tx_rate *rate); s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr, s8 max_txpwr_adj); +void mt76x02_wdt_work(struct work_struct *work); void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr); void mt76x02_set_tx_ackto(struct mt76x02_dev *dev); void mt76x02_set_coverage_class(struct ieee80211_hw *hw, @@ -158,8 +172,6 @@ void mt76x02_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const u8 *mac); void mt76x02_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif); -int mt76x02_get_txpower(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, int *dbm); void mt76x02_sta_ps(struct mt76_dev *dev, struct ieee80211_sta *sta, bool ps); void mt76x02_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, @@ -224,4 +236,4 @@ mt76x02_rx_get_sta_wcid(struct mt76x02_sta *sta, bool unicast) return &sta->vif->group_wcid; } -#endif +#endif /* __MT76x02_H */ diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c index a9d52ba1e270..7580c5c986ff 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c @@ -133,5 +133,7 @@ void mt76x02_init_debugfs(struct mt76x02_dev *dev) read_txpower); debugfs_create_devm_seqfile(dev->mt76.dev, "agc", dir, read_agc); + + debugfs_create_u32("tx_hang_reset", 0400, dir, 
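/* 0400 = read-only; the counter is bumped by the tx-hang watchdog */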
&dev->tx_hang_reset); } EXPORT_SYMBOL_GPL(mt76x02_init_debugfs); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c index 054609c634a2..19fdcab746a0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c @@ -881,12 +881,18 @@ mt76x02_dfs_set_domain(struct mt76x02_dev *dev, { struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + mutex_lock(&dev->mt76.mutex); if (dfs_pd->region != region) { tasklet_disable(&dfs_pd->dfs_tasklet); + + dev->ed_monitor = region == NL80211_DFS_ETSI; + mt76x02_edcca_init(dev); + dfs_pd->region = region; mt76x02_dfs_init_params(dev); tasklet_enable(&dfs_pd->dfs_tasklet); } + mutex_unlock(&dev->mt76.mutex); } void mt76x02_regd_notifier(struct wiphy *wiphy, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c index c08bf371e527..63fa27d2c404 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c @@ -130,10 +130,8 @@ static __le16 mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev, const struct ieee80211_tx_rate *rate, u8 *nss_val) { + u8 phy, rate_idx, nss, bw = 0; u16 rateval; - u8 phy, rate_idx; - u8 nss = 1; - u8 bw = 0; if (rate->flags & IEEE80211_TX_RC_VHT_MCS) { rate_idx = rate->idx; @@ -164,7 +162,7 @@ mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev, phy = val >> 8; rate_idx = val & 0xff; - bw = 0; + nss = 1; } rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx); @@ -435,7 +433,7 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev, } if (wcid) { - if (stat->pktid) + if (stat->pktid >= MT_PACKET_ID_FIRST) status.skb = mt76_tx_status_skb_get(mdev, wcid, stat->pktid, &list); if (status.skb) @@ -478,7 +476,9 @@ out: } static int -mt76x02_mac_process_rate(struct mt76_rx_status *status, u16 rate) +mt76x02_mac_process_rate(struct mt76x02_dev *dev, + struct mt76_rx_status *status, + u16 rate) { u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate); @@ -510,11 +510,15 @@ mt76x02_mac_process_rate(struct mt76_rx_status *status, u16 rate) status->encoding = RX_ENC_HT; status->rate_idx = idx; break; - case MT_PHY_TYPE_VHT: + case MT_PHY_TYPE_VHT: { + u8 n_rxstream = dev->mt76.chainmask & 0xf; + status->encoding = RX_ENC_VHT; status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx); - status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1; + status->nss = min_t(u8, n_rxstream, + FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1); break; + } default: return -EINVAL; } @@ -644,7 +648,7 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb, status->chains = BIT(0); signal = mt76x02_mac_get_rssi(dev, rxwi->rssi[0], 0); - for (i = 1; i < nstreams; i++) { + for (i = 0; i < nstreams; i++) { status->chains |= BIT(i); status->chain_signal[i] = mt76x02_mac_get_rssi(dev, rxwi->rssi[i], @@ -658,12 +662,7 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb, status->tid = FIELD_GET(MT_RXWI_TID, tid_sn); status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn); - if (sta) { - ewma_signal_add(&sta->rssi, status->signal); - sta->inactive_count = 0; - } - - return mt76x02_mac_process_rate(status, rate); + return mt76x02_mac_process_rate(dev, status, rate); } void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq) @@ -715,7 +714,7 @@ void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q, } EXPORT_SYMBOL_GPL(mt76x02_tx_complete_skb); -void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, u32 
val) +void mt76x02_mac_set_rts_thresh(struct mt76x02_dev *dev, u32 val) { u32 data = 0; @@ -729,20 +728,89 @@ void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, u32 val) MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); mt76_rmw(dev, MT_OFDM_PROT_CFG, MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); - mt76_rmw(dev, MT_MM20_PROT_CFG, - MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); - mt76_rmw(dev, MT_MM40_PROT_CFG, - MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); - mt76_rmw(dev, MT_GF20_PROT_CFG, - MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); - mt76_rmw(dev, MT_GF40_PROT_CFG, - MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); - mt76_rmw(dev, MT_TX_PROT_CFG6, - MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); - mt76_rmw(dev, MT_TX_PROT_CFG7, - MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); - mt76_rmw(dev, MT_TX_PROT_CFG8, - MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); +} + +void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, bool legacy_prot, + int ht_mode) +{ + int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION; + bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT); + u32 prot[6]; + u32 vht_prot[3]; + int i; + u16 rts_thr; + + for (i = 0; i < ARRAY_SIZE(prot); i++) { + prot[i] = mt76_rr(dev, MT_CCK_PROT_CFG + i * 4); + prot[i] &= ~MT_PROT_CFG_CTRL; + if (i >= 2) + prot[i] &= ~MT_PROT_CFG_RATE; + } + + for (i = 0; i < ARRAY_SIZE(vht_prot); i++) { + vht_prot[i] = mt76_rr(dev, MT_TX_PROT_CFG6 + i * 4); + vht_prot[i] &= ~(MT_PROT_CFG_CTRL | MT_PROT_CFG_RATE); + } + + rts_thr = mt76_get_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH); + + if (rts_thr != 0xffff) + prot[0] |= MT_PROT_CTRL_RTS_CTS; + + if (legacy_prot) { + prot[1] |= MT_PROT_CTRL_CTS2SELF; + + prot[2] |= MT_PROT_RATE_CCK_11; + prot[3] |= MT_PROT_RATE_CCK_11; + prot[4] |= MT_PROT_RATE_CCK_11; + prot[5] |= MT_PROT_RATE_CCK_11; + + vht_prot[0] |= MT_PROT_RATE_CCK_11; + vht_prot[1] |= MT_PROT_RATE_CCK_11; + vht_prot[2] |= MT_PROT_RATE_CCK_11; + } else { + if (rts_thr != 0xffff) + prot[1] |= MT_PROT_CTRL_RTS_CTS; + + prot[2] |= MT_PROT_RATE_OFDM_24; + prot[3] |= MT_PROT_RATE_DUP_OFDM_24; + prot[4] |= MT_PROT_RATE_OFDM_24; + prot[5] |= MT_PROT_RATE_DUP_OFDM_24; + + vht_prot[0] |= MT_PROT_RATE_OFDM_24; + vht_prot[1] |= MT_PROT_RATE_DUP_OFDM_24; + vht_prot[2] |= MT_PROT_RATE_SGI_OFDM_24; + } + + switch (mode) { + case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER: + case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED: + prot[2] |= MT_PROT_CTRL_RTS_CTS; + prot[3] |= MT_PROT_CTRL_RTS_CTS; + prot[4] |= MT_PROT_CTRL_RTS_CTS; + prot[5] |= MT_PROT_CTRL_RTS_CTS; + vht_prot[0] |= MT_PROT_CTRL_RTS_CTS; + vht_prot[1] |= MT_PROT_CTRL_RTS_CTS; + vht_prot[2] |= MT_PROT_CTRL_RTS_CTS; + break; + case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ: + prot[3] |= MT_PROT_CTRL_RTS_CTS; + prot[5] |= MT_PROT_CTRL_RTS_CTS; + vht_prot[1] |= MT_PROT_CTRL_RTS_CTS; + vht_prot[2] |= MT_PROT_CTRL_RTS_CTS; + break; + } + + if (non_gf) { + prot[4] |= MT_PROT_CTRL_RTS_CTS; + prot[5] |= MT_PROT_CTRL_RTS_CTS; + } + + for (i = 0; i < ARRAY_SIZE(prot); i++) + mt76_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]); + + for (i = 0; i < ARRAY_SIZE(vht_prot); i++) + mt76_wr(dev, MT_TX_PROT_CFG6 + i * 4, vht_prot[i]); } void mt76x02_update_channel(struct mt76_dev *mdev) @@ -774,8 +842,90 @@ static void mt76x02_check_mac_err(struct mt76x02_dev *dev) mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR); udelay(10); - mt76_clear(dev, MT_MAC_SYS_CTRL, - MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX); + mt76_wr(dev, MT_MAC_SYS_CTRL, + 
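/* full write rather than clear: leave TX/RX enabled after the CSR reset */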
MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX); +} + +static void +mt76x02_edcca_tx_enable(struct mt76x02_dev *dev, bool enable) +{ + if (enable) { + u32 data; + + mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX); + mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_EN); + /* enable pa-lna */ + data = mt76_rr(dev, MT_TX_PIN_CFG); + data |= MT_TX_PIN_CFG_TXANT | + MT_TX_PIN_CFG_RXANT | + MT_TX_PIN_RFTR_EN | + MT_TX_PIN_TRSW_EN; + mt76_wr(dev, MT_TX_PIN_CFG, data); + } else { + mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX); + mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_EN); + /* disable pa-lna */ + mt76_clear(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_TXANT); + mt76_clear(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_RXANT); + } + dev->ed_tx_blocked = !enable; +} + +void mt76x02_edcca_init(struct mt76x02_dev *dev) +{ + dev->ed_trigger = 0; + dev->ed_silent = 0; + + if (dev->ed_monitor) { + struct ieee80211_channel *chan = dev->mt76.chandef.chan; + u8 ed_th = chan->band == NL80211_BAND_5GHZ ? 0x0e : 0x20; + + mt76_clear(dev, MT_TX_LINK_CFG, MT_TX_CFACK_EN); + mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN); + mt76_rmw(dev, MT_BBP(AGC, 2), GENMASK(15, 0), + ed_th << 8 | ed_th); + if (!is_mt76x2(dev)) + mt76_set(dev, MT_TXOP_HLDR_ET, + MT_TXOP_HLDR_TX40M_BLK_EN); + } else { + mt76_set(dev, MT_TX_LINK_CFG, MT_TX_CFACK_EN); + mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN); + if (is_mt76x2(dev)) { + mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070); + } else { + mt76_wr(dev, MT_BBP(AGC, 2), 0x003a6464); + mt76_clear(dev, MT_TXOP_HLDR_ET, + MT_TXOP_HLDR_TX40M_BLK_EN); + } + } + mt76x02_edcca_tx_enable(dev, true); +} +EXPORT_SYMBOL_GPL(mt76x02_edcca_init); + +#define MT_EDCCA_TH 90 +#define MT_EDCCA_BLOCK_TH 2 +static void mt76x02_edcca_check(struct mt76x02_dev *dev) +{ + u32 val, busy; + + val = mt76_rr(dev, MT_ED_CCA_TIMER); + busy = (val * 100) / jiffies_to_usecs(MT_CALIBRATE_INTERVAL); + busy = min_t(u32, busy, 100); + + if (busy > MT_EDCCA_TH) { + dev->ed_trigger++; + dev->ed_silent = 0; + } else { + dev->ed_silent++; + dev->ed_trigger = 0; + } + + if (dev->ed_trigger > MT_EDCCA_BLOCK_TH && + !dev->ed_tx_blocked) + mt76x02_edcca_tx_enable(dev, false); + else if (dev->ed_silent > MT_EDCCA_BLOCK_TH && + dev->ed_tx_blocked) + mt76x02_edcca_tx_enable(dev, true); } void mt76x02_mac_work(struct work_struct *work) @@ -784,6 +934,8 @@ void mt76x02_mac_work(struct work_struct *work) mac_work.work); int i, idx; + mutex_lock(&dev->mt76.mutex); + mt76x02_update_channel(&dev->mt76); for (i = 0, idx = 0; i < 16; i++) { u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i)); @@ -792,10 +944,14 @@ void mt76x02_mac_work(struct work_struct *work) dev->aggr_stats[idx++] += val >> 16; } - /* XXX: check beacon stuck for ap mode */ if (!dev->beacon_mask) mt76x02_check_mac_err(dev); + if (dev->ed_monitor) + mt76x02_edcca_check(dev); + + mutex_unlock(&dev->mt76.mutex); + mt76_tx_status_check(&dev->mt76, NULL, false); ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h index 4e597004c445..940c07f665cd 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h @@ -18,8 +18,6 @@ #ifndef __MT76X02_MAC_H #define __MT76X02_MAC_H -#include <linux/average.h> - struct mt76x02_dev; struct mt76x02_tx_status { @@ -41,8 +39,6 @@ struct mt76x02_vif { u8 idx; }; -DECLARE_EWMA(signal, 10, 8); - struct mt76x02_sta { struct mt76_wcid wcid; /* must be first */ @@ -50,8 
+46,6 @@ struct mt76x02_sta { struct mt76x02_tx_status status; int n_frames; - struct ewma_signal rssi; - int inactive_count; }; #define MT_RXINFO_BA BIT(0) @@ -194,7 +188,9 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev, struct mt76x02_tx_status *stat, u8 *update); int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb, void *rxi); -void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, u32 val); +void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, bool legacy_prot, + int ht_mode); +void mt76x02_mac_set_rts_thresh(struct mt76x02_dev *dev, u32 val); void mt76x02_mac_setaddr(struct mt76x02_dev *dev, u8 *addr); void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi, struct sk_buff *skb, struct mt76_wcid *wcid, @@ -210,4 +206,6 @@ int mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx, struct sk_buff *skb); void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev, u8 vif_idx, bool val); + +void mt76x02_edcca_init(struct mt76x02_dev *dev); #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c index 66315410aebe..374bc9d91f12 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c @@ -116,14 +116,20 @@ static void mt76x02_pre_tbtt_tasklet(unsigned long arg) IEEE80211_IFACE_ITER_RESUME_ALL, mt76x02_update_beacon_iter, dev); + mt76_csa_check(&dev->mt76); + + if (dev->mt76.csa_complete) + return; + do { nframes = skb_queue_len(&data.q); ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev), IEEE80211_IFACE_ITER_RESUME_ALL, mt76x02_add_buffered_bc, &data); - } while (nframes != skb_queue_len(&data.q)); + } while (nframes != skb_queue_len(&data.q) && + skb_queue_len(&data.q) < 8); - if (!nframes) + if (!skb_queue_len(&data.q)) return; for (i = 0; i < ARRAY_SIZE(data.tail); i++) { @@ -308,8 +314,12 @@ irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance) tasklet_schedule(&dev->pre_tbtt_tasklet); /* send buffered multicast frames now */ - if (intr & MT_INT_TBTT) - mt76_queue_kick(dev, &dev->mt76.q_tx[MT_TXQ_PSD]); + if (intr & MT_INT_TBTT) { + if (dev->mt76.csa_complete) + mt76_csa_finish(&dev->mt76); + else + mt76_queue_kick(dev, &dev->mt76.q_tx[MT_TXQ_PSD]); + } if (intr & MT_INT_TX_STAT) { mt76x02_mac_poll_tx_status(dev, true); @@ -384,3 +394,127 @@ void mt76x02_mac_start(struct mt76x02_dev *dev) MT_INT_TX_STAT); } EXPORT_SYMBOL_GPL(mt76x02_mac_start); + +static bool mt76x02_tx_hang(struct mt76x02_dev *dev) +{ + u32 dma_idx, prev_dma_idx; + struct mt76_queue *q; + int i; + + for (i = 0; i < 4; i++) { + q = &dev->mt76.q_tx[i]; + + if (!q->queued) + continue; + + prev_dma_idx = dev->mt76.tx_dma_idx[i]; + dma_idx = ioread32(&q->regs->dma_idx); + dev->mt76.tx_dma_idx[i] = dma_idx; + + if (prev_dma_idx == dma_idx) + break; + } + + return i < 4; +} + +static void mt76x02_watchdog_reset(struct mt76x02_dev *dev) +{ + u32 mask = dev->mt76.mmio.irqmask; + int i; + + ieee80211_stop_queues(dev->mt76.hw); + set_bit(MT76_RESET, &dev->mt76.state); + + tasklet_disable(&dev->pre_tbtt_tasklet); + tasklet_disable(&dev->tx_tasklet); + + for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++) + napi_disable(&dev->mt76.napi[i]); + + mutex_lock(&dev->mt76.mutex); + + if (dev->beacon_mask) + mt76_clear(dev, MT_BEACON_TIME_CFG, + MT_BEACON_TIME_CFG_BEACON_TX | + MT_BEACON_TIME_CFG_TBTT_EN); + + mt76x02_irq_disable(dev, mask); + + /* perform device reset */ + mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN); + 
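/*
 * Reset order in this hunk: quiesce MAC and WPDMA first, let the
 * firmware reset its own DMA state via register 0x734, then drain the
 * software queues and re-enable interrupts.
 */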
mt76_wr(dev, MT_MAC_SYS_CTRL, 0); + mt76_clear(dev, MT_WPDMA_GLO_CFG, + MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_RX_DMA_EN); + usleep_range(5000, 10000); + mt76_wr(dev, MT_INT_SOURCE_CSR, 0xffffffff); + + /* let fw reset DMA */ + mt76_set(dev, 0x734, 0x3); + + for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++) + mt76_queue_tx_cleanup(dev, i, true); + + for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++) + mt76_queue_rx_reset(dev, i); + + mt76_wr(dev, MT_MAC_SYS_CTRL, + MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX); + mt76_set(dev, MT_WPDMA_GLO_CFG, + MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_RX_DMA_EN); + if (dev->ed_monitor) + mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN); + + if (dev->beacon_mask) + mt76_set(dev, MT_BEACON_TIME_CFG, + MT_BEACON_TIME_CFG_BEACON_TX | + MT_BEACON_TIME_CFG_TBTT_EN); + + mt76x02_irq_enable(dev, mask); + + mutex_unlock(&dev->mt76.mutex); + + clear_bit(MT76_RESET, &dev->mt76.state); + + tasklet_enable(&dev->tx_tasklet); + tasklet_schedule(&dev->tx_tasklet); + + tasklet_enable(&dev->pre_tbtt_tasklet); + + for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++) { + napi_enable(&dev->mt76.napi[i]); + napi_schedule(&dev->mt76.napi[i]); + } + + ieee80211_wake_queues(dev->mt76.hw); + + mt76_txq_schedule_all(&dev->mt76); +} + +static void mt76x02_check_tx_hang(struct mt76x02_dev *dev) +{ + if (mt76x02_tx_hang(dev)) { + if (++dev->tx_hang_check < MT_TX_HANG_TH) + return; + + mt76x02_watchdog_reset(dev); + + dev->tx_hang_reset++; + dev->tx_hang_check = 0; + memset(dev->mt76.tx_dma_idx, 0xff, + sizeof(dev->mt76.tx_dma_idx)); + } else { + dev->tx_hang_check = 0; + } +} + +void mt76x02_wdt_work(struct work_struct *work) +{ + struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev, + wdt_work.work); + + mt76x02_check_tx_hang(dev); + + ieee80211_queue_delayed_work(mt76_hw(dev), &dev->wdt_work, + MT_WATCHDOG_TIME); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c index 977a8e7e26df..a020c757ba5c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c @@ -132,53 +132,6 @@ void mt76x02_phy_set_txpower(struct mt76x02_dev *dev, int txp_0, int txp_1) } EXPORT_SYMBOL_GPL(mt76x02_phy_set_txpower); -int mt76x02_phy_get_min_avg_rssi(struct mt76x02_dev *dev) -{ - struct mt76x02_sta *sta; - struct mt76_wcid *wcid; - int i, j, min_rssi = 0; - s8 cur_rssi; - - local_bh_disable(); - rcu_read_lock(); - - for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid_mask); i++) { - unsigned long mask = dev->mt76.wcid_mask[i]; - - if (!mask) - continue; - - for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) { - if (!(mask & 1)) - continue; - - wcid = rcu_dereference(dev->mt76.wcid[j]); - if (!wcid) - continue; - - sta = container_of(wcid, struct mt76x02_sta, wcid); - spin_lock(&dev->mt76.rx_lock); - if (sta->inactive_count++ < 5) - cur_rssi = ewma_signal_read(&sta->rssi); - else - cur_rssi = 0; - spin_unlock(&dev->mt76.rx_lock); - - if (cur_rssi < min_rssi) - min_rssi = cur_rssi; - } - } - - rcu_read_unlock(); - local_bh_enable(); - - if (!min_rssi) - return -75; - - return min_rssi; -} -EXPORT_SYMBOL_GPL(mt76x02_phy_get_min_avg_rssi); - void mt76x02_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl) { int core_val, agc_val; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h index 2b316cf7c70c..d2971db06f13 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h +++ 
b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h @@ -51,7 +51,6 @@ void mt76x02_limit_rate_power(struct mt76_rate_power *r, int limit); int mt76x02_get_max_rate_power(struct mt76_rate_power *r); void mt76x02_phy_set_rxpath(struct mt76x02_dev *dev); void mt76x02_phy_set_txdac(struct mt76x02_dev *dev); -int mt76x02_phy_get_min_avg_rssi(struct mt76x02_dev *dev); void mt76x02_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl); void mt76x02_phy_set_band(struct mt76x02_dev *dev, int band, bool primary_upper); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h index f7de77d09d28..7401cb94fb72 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h @@ -230,6 +230,29 @@ #define MT_COM_REG2 0x0738 #define MT_COM_REG3 0x073C +#define MT_LED_CTRL 0x0770 +#define MT_LED_CTRL_REPLAY(_n) BIT(0 + (8 * (_n))) +#define MT_LED_CTRL_POLARITY(_n) BIT(1 + (8 * (_n))) +#define MT_LED_CTRL_TX_BLINK_MODE(_n) BIT(2 + (8 * (_n))) +#define MT_LED_CTRL_KICK(_n) BIT(7 + (8 * (_n))) + +#define MT_LED_TX_BLINK_0 0x0774 +#define MT_LED_TX_BLINK_1 0x0778 + +#define MT_LED_S0_BASE 0x077C +#define MT_LED_S0(_n) (MT_LED_S0_BASE + 8 * (_n)) +#define MT_LED_S1_BASE 0x0780 +#define MT_LED_S1(_n) (MT_LED_S1_BASE + 8 * (_n)) +#define MT_LED_STATUS_OFF_MASK GENMASK(31, 24) +#define MT_LED_STATUS_OFF(_v) (((_v) << __ffs(MT_LED_STATUS_OFF_MASK)) & \ + MT_LED_STATUS_OFF_MASK) +#define MT_LED_STATUS_ON_MASK GENMASK(23, 16) +#define MT_LED_STATUS_ON(_v) (((_v) << __ffs(MT_LED_STATUS_ON_MASK)) & \ + MT_LED_STATUS_ON_MASK) +#define MT_LED_STATUS_DURATION_MASK GENMASK(15, 8) +#define MT_LED_STATUS_DURATION(_v) (((_v) << __ffs(MT_LED_STATUS_DURATION_MASK)) & \ + MT_LED_STATUS_DURATION_MASK) + #define MT_FCE_PSE_CTRL 0x0800 #define MT_FCE_PARAMETERS 0x0804 #define MT_FCE_CSO 0x0808 @@ -318,6 +341,7 @@ #define MT_CH_TIME_CFG_NAV_AS_BUSY BIT(3) #define MT_CH_TIME_CFG_EIFS_AS_BUSY BIT(4) #define MT_CH_TIME_CFG_MDRDY_CNT_EN BIT(5) +#define MT_CH_CCA_RC_EN BIT(6) #define MT_CH_TIME_CFG_CH_TIMER_CLR GENMASK(9, 8) #define MT_CH_TIME_CFG_MDRDY_CLR GENMASK(11, 10) @@ -378,6 +402,9 @@ #define MT_TX_PWR_CFG_4 0x1324 #define MT_TX_PIN_CFG 0x1328 #define MT_TX_PIN_CFG_TXANT GENMASK(3, 0) +#define MT_TX_PIN_CFG_RXANT GENMASK(11, 8) +#define MT_TX_PIN_RFTR_EN BIT(16) +#define MT_TX_PIN_TRSW_EN BIT(18) #define MT_TX_BAND_CFG 0x132c #define MT_TX_BAND_CFG_UPPER_40M BIT(0) @@ -398,6 +425,7 @@ #define MT_TXOP_CTRL_CFG 0x1340 #define MT_TXOP_TRUN_EN GENMASK(5, 0) #define MT_TXOP_EXT_CCA_DLY GENMASK(15, 8) +#define MT_TXOP_ED_CCA_EN BIT(20) #define MT_TX_RTS_CFG 0x1344 #define MT_TX_RTS_CFG_RETRY_LIMIT GENMASK(7, 0) @@ -409,6 +437,7 @@ #define MT_TX_RETRY_CFG 0x134c #define MT_TX_LINK_CFG 0x1350 +#define MT_TX_CFACK_EN BIT(12) #define MT_VHT_HT_FBK_CFG0 0x1354 #define MT_VHT_HT_FBK_CFG1 0x1358 #define MT_LG_FBK_CFG0 0x135c @@ -440,9 +469,10 @@ #define MT_PROT_TXOP_ALLOW_GF40 BIT(25) #define MT_PROT_RTS_THR_EN BIT(26) #define MT_PROT_RATE_CCK_11 0x0003 -#define MT_PROT_RATE_OFDM_6 0x4000 -#define MT_PROT_RATE_OFDM_24 0x4004 -#define MT_PROT_RATE_DUP_OFDM_24 0x4084 +#define MT_PROT_RATE_OFDM_6 0x2000 +#define MT_PROT_RATE_OFDM_24 0x2004 +#define MT_PROT_RATE_DUP_OFDM_24 0x2084 +#define MT_PROT_RATE_SGI_OFDM_24 0x2104 #define MT_PROT_TXOP_ALLOW_ALL GENMASK(25, 20) #define MT_PROT_TXOP_ALLOW_BW20 (MT_PROT_TXOP_ALLOW_ALL & \ ~MT_PROT_TXOP_ALLOW_MM40 & \ @@ -511,6 +541,7 @@ #define MT_RX_FILTR_CFG_CTRL_RSV BIT(16) #define 
MT_AUTO_RSP_CFG 0x1404 +#define MT_AUTO_RSP_EN BIT(0) #define MT_AUTO_RSP_PREAMB_SHORT BIT(4) #define MT_LEGACY_BASIC_RATE 0x1408 #define MT_HT_BASIC_RATE 0x140c @@ -532,6 +563,7 @@ #define MT_PN_PAD_MODE 0x150c #define MT_TXOP_HLDR_ET 0x1608 +#define MT_TXOP_HLDR_TX40M_BLK_EN BIT(1) #define MT_PROT_AUTO_TX_CFG 0x1648 #define MT_PROT_AUTO_TX_CFG_PROT_PADJ GENMASK(11, 8) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c index 4598cb2cc3ff..a5413a309a0a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c @@ -177,7 +177,7 @@ int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, if (ret < 0) return ret; - if (pid && pid != MT_PACKET_ID_NO_ACK) + if (pid >= MT_PACKET_ID_FIRST) qsel = MT_QSEL_MGMT; *tx_info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) | diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c index 81970cf777c0..098d05e109e7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c @@ -87,8 +87,7 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data, pid = mt76_tx_status_skb_add(mdev, wcid, skb); txwi->pktid = pid; - if ((pid && pid != MT_PACKET_ID_NO_ACK) || - q2ep(q->hw_idx) == MT_EP_OUT_HCCA) + if (pid >= MT_PACKET_ID_FIRST || q2ep(q->hw_idx) == MT_EP_OUT_HCCA) qsel = MT_QSEL_MGMT; else qsel = MT_QSEL_EDCA; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c index 38bd466cff16..062614ad0d51 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c @@ -75,6 +75,58 @@ static const struct ieee80211_iface_combination mt76x02_if_comb[] = { } }; +static void +mt76x02_led_set_config(struct mt76_dev *mdev, u8 delay_on, + u8 delay_off) +{ + struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, + mt76); + u32 val; + + val = MT_LED_STATUS_DURATION(0xff) | + MT_LED_STATUS_OFF(delay_off) | + MT_LED_STATUS_ON(delay_on); + + mt76_wr(dev, MT_LED_S0(mdev->led_pin), val); + mt76_wr(dev, MT_LED_S1(mdev->led_pin), val); + + val = MT_LED_CTRL_REPLAY(mdev->led_pin) | + MT_LED_CTRL_KICK(mdev->led_pin); + if (mdev->led_al) + val |= MT_LED_CTRL_POLARITY(mdev->led_pin); + mt76_wr(dev, MT_LED_CTRL, val); +} + +static int +mt76x02_led_set_blink(struct led_classdev *led_cdev, + unsigned long *delay_on, + unsigned long *delay_off) +{ + struct mt76_dev *mdev = container_of(led_cdev, struct mt76_dev, + led_cdev); + u8 delta_on, delta_off; + + delta_off = max_t(u8, *delay_off / 10, 1); + delta_on = max_t(u8, *delay_on / 10, 1); + + mt76x02_led_set_config(mdev, delta_on, delta_off); + + return 0; +} + +static void +mt76x02_led_set_brightness(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + struct mt76_dev *mdev = container_of(led_cdev, struct mt76_dev, + led_cdev); + + if (!brightness) + mt76x02_led_set_config(mdev, 0, 0xff); + else + mt76x02_led_set_config(mdev, 0xff, 0); +} + void mt76x02_init_device(struct mt76x02_dev *dev) { struct ieee80211_hw *hw = mt76_hw(dev); @@ -93,6 +145,8 @@ void mt76x02_init_device(struct mt76x02_dev *dev) MT_DMA_HDR_LEN; wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); } else { + INIT_DELAYED_WORK(&dev->wdt_work, mt76x02_wdt_work); + mt76x02_dfs_init_detector(dev); wiphy->reg_notifier = mt76x02_regd_notifier; @@ -106,7 +160,16 @@ void 
mt76x02_init_device(struct mt76x02_dev *dev) #endif BIT(NL80211_IFTYPE_ADHOC); + wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS); + + /* init led callbacks */ + if (IS_ENABLED(CONFIG_MT76_LEDS)) { + dev->mt76.led_cdev.brightness_set = + mt76x02_led_set_brightness; + dev->mt76.led_cdev.blink_set = mt76x02_led_set_blink; + } } hw->sta_data_size = sizeof(struct mt76x02_sta); @@ -189,8 +252,6 @@ int mt76x02_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, if (vif->type == NL80211_IFTYPE_AP) set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags); - ewma_signal_init(&msta->rssi); - return 0; } EXPORT_SYMBOL_GPL(mt76x02_sta_add); @@ -463,7 +524,7 @@ int mt76x02_set_rts_threshold(struct ieee80211_hw *hw, u32 val) return -EINVAL; mutex_lock(&dev->mt76.mutex); - mt76x02_mac_set_tx_protection(dev, val); + mt76x02_mac_set_rts_thresh(dev, val); mutex_unlock(&dev->mt76.mutex); return 0; @@ -546,24 +607,6 @@ void mt76x02_sw_scan_complete(struct ieee80211_hw *hw, } EXPORT_SYMBOL_GPL(mt76x02_sw_scan_complete); -int mt76x02_get_txpower(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, int *dbm) -{ - struct mt76x02_dev *dev = hw->priv; - u8 nstreams = dev->mt76.chainmask & 0xf; - - *dbm = dev->mt76.txpower_cur / 2; - - /* convert from per-chain power to combined - * output on 2x2 devices - */ - if (nstreams > 1) - *dbm += 3; - - return 0; -} -EXPORT_SYMBOL_GPL(mt76x02_get_txpower); - void mt76x02_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps) { @@ -661,6 +704,10 @@ void mt76x02_bss_info_changed(struct ieee80211_hw *hw, tasklet_enable(&dev->pre_tbtt_tasklet); } + if (changed & BSS_CHANGED_HT || changed & BSS_CHANGED_ERP_CTS_PROT) + mt76x02_mac_set_tx_protection(dev, info->use_cts_prot, + info->ht_operation_mode); + if (changed & BSS_CHANGED_BEACON_INT) { mt76_rmw_field(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_INTVAL, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c index 54a9b5fac787..f8534362e2c8 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c @@ -143,6 +143,7 @@ void mt76_write_mac_initvals(struct mt76x02_dev *dev) { MT_VHT_HT_FBK_CFG1, 0xedcba980 }, { MT_PROT_AUTO_TX_CFG, 0x00830083 }, { MT_HT_CTRL_CFG, 0x000001ff }, + { MT_TX_LINK_CFG, 0x00001020 }, }; struct mt76_reg_pair prot_vals[] = { { MT_CCK_PROT_CFG, DEFAULT_PROT_CFG_CCK }, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h index acfa2b570c7c..40ef43926c06 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h @@ -26,29 +26,6 @@ #define MT_MCU_PCIE_REMAP_BASE2 0x0744 #define MT_MCU_PCIE_REMAP_BASE3 0x0748 -#define MT_LED_CTRL 0x0770 -#define MT_LED_CTRL_REPLAY(_n) BIT(0 + (8 * (_n))) -#define MT_LED_CTRL_POLARITY(_n) BIT(1 + (8 * (_n))) -#define MT_LED_CTRL_TX_BLINK_MODE(_n) BIT(2 + (8 * (_n))) -#define MT_LED_CTRL_KICK(_n) BIT(7 + (8 * (_n))) - -#define MT_LED_TX_BLINK_0 0x0774 -#define MT_LED_TX_BLINK_1 0x0778 - -#define MT_LED_S0_BASE 0x077C -#define MT_LED_S0(_n) (MT_LED_S0_BASE + 8 * (_n)) -#define MT_LED_S1_BASE 0x0780 -#define MT_LED_S1(_n) (MT_LED_S1_BASE + 8 * (_n)) -#define MT_LED_STATUS_OFF_MASK GENMASK(31, 24) -#define MT_LED_STATUS_OFF(_v) (((_v) << __ffs(MT_LED_STATUS_OFF_MASK)) & \ - MT_LED_STATUS_OFF_MASK) -#define MT_LED_STATUS_ON_MASK GENMASK(23, 16) -#define MT_LED_STATUS_ON(_v) (((_v) << 
__ffs(MT_LED_STATUS_ON_MASK)) & \ - MT_LED_STATUS_ON_MASK) -#define MT_LED_STATUS_DURATION_MASK GENMASK(15, 8) -#define MT_LED_STATUS_DURATION(_v) (((_v) << __ffs(MT_LED_STATUS_DURATION_MASK)) & \ - MT_LED_STATUS_DURATION_MASK) - #define MT_MCU_ROM_PATCH_OFFSET 0x80000 #define MT_MCU_ROM_PATCH_ADDR 0x90000 diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h index b259e4b50f1e..28ec19acf9db 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h @@ -53,7 +53,6 @@ struct mt76x02_dev *mt76x2_alloc_device(struct device *pdev); int mt76x2_register_device(struct mt76x02_dev *dev); void mt76x2_phy_power_on(struct mt76x02_dev *dev); -int mt76x2_init_hardware(struct mt76x02_dev *dev); void mt76x2_stop_hardware(struct mt76x02_dev *dev); int mt76x2_eeprom_init(struct mt76x02_dev *dev); int mt76x2_apply_calibration_data(struct mt76x02_dev *dev, int channel); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c index 7f4ea2d00f42..4347d5e7a915 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c @@ -151,6 +151,7 @@ static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard) MT_CH_TIME_CFG_RX_AS_BUSY | MT_CH_TIME_CFG_NAV_AS_BUSY | MT_CH_TIME_CFG_EIFS_AS_BUSY | + MT_CH_CCA_RC_EN | FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1)); mt76x02_set_tx_ackto(dev); @@ -260,7 +261,7 @@ mt76x2_power_on(struct mt76x02_dev *dev) mt76x2_power_on_rf(dev, 1); } -int mt76x2_init_hardware(struct mt76x02_dev *dev) +static int mt76x2_init_hardware(struct mt76x02_dev *dev) { int ret; @@ -300,6 +301,7 @@ void mt76x2_stop_hardware(struct mt76x02_dev *dev) { cancel_delayed_work_sync(&dev->cal_work); cancel_delayed_work_sync(&dev->mac_work); + cancel_delayed_work_sync(&dev->wdt_work); mt76x02_mcu_set_radio_state(dev, false); mt76x2_mac_stop(dev, false); } @@ -340,54 +342,6 @@ struct mt76x02_dev *mt76x2_alloc_device(struct device *pdev) return dev; } -static void mt76x2_led_set_config(struct mt76_dev *mt76, u8 delay_on, - u8 delay_off) -{ - struct mt76x02_dev *dev = container_of(mt76, struct mt76x02_dev, - mt76); - u32 val; - - val = MT_LED_STATUS_DURATION(0xff) | - MT_LED_STATUS_OFF(delay_off) | - MT_LED_STATUS_ON(delay_on); - - mt76_wr(dev, MT_LED_S0(mt76->led_pin), val); - mt76_wr(dev, MT_LED_S1(mt76->led_pin), val); - - val = MT_LED_CTRL_REPLAY(mt76->led_pin) | - MT_LED_CTRL_KICK(mt76->led_pin); - if (mt76->led_al) - val |= MT_LED_CTRL_POLARITY(mt76->led_pin); - mt76_wr(dev, MT_LED_CTRL, val); -} - -static int mt76x2_led_set_blink(struct led_classdev *led_cdev, - unsigned long *delay_on, - unsigned long *delay_off) -{ - struct mt76_dev *mt76 = container_of(led_cdev, struct mt76_dev, - led_cdev); - u8 delta_on, delta_off; - - delta_off = max_t(u8, *delay_off / 10, 1); - delta_on = max_t(u8, *delay_on / 10, 1); - - mt76x2_led_set_config(mt76, delta_on, delta_off); - return 0; -} - -static void mt76x2_led_set_brightness(struct led_classdev *led_cdev, - enum led_brightness brightness) -{ - struct mt76_dev *mt76 = container_of(led_cdev, struct mt76_dev, - led_cdev); - - if (!brightness) - mt76x2_led_set_config(mt76, 0, 0xff); - else - mt76x2_led_set_config(mt76, 0xff, 0); -} - int mt76x2_register_device(struct mt76x02_dev *dev) { int ret; @@ -402,12 +356,6 @@ int mt76x2_register_device(struct mt76x02_dev *dev) mt76x02_config_mac_addr_list(dev); - /* init led 
callbacks */ - if (IS_ENABLED(CONFIG_MT76_LEDS)) { - dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness; - dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink; - } - ret = mt76_register_device(&dev->mt76, true, mt76x02_rates, ARRAY_SIZE(mt76x02_rates)); if (ret) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c index b54a32397486..06a26a152ba4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c @@ -34,6 +34,8 @@ mt76x2_start(struct ieee80211_hw *hw) ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work, MT_CALIBRATE_INTERVAL); + ieee80211_queue_delayed_work(mt76_hw(dev), &dev->wdt_work, + MT_WATCHDOG_TIME); set_bit(MT76_STATE_RUNNING, &dev->mt76.state); @@ -189,7 +191,7 @@ const struct ieee80211_ops mt76x2_ops = { .sw_scan_complete = mt76x02_sw_scan_complete, .flush = mt76x2_flush, .ampdu_action = mt76x02_ampdu_action, - .get_txpower = mt76x02_get_txpower, + .get_txpower = mt76_get_txpower, .wake_tx_queue = mt76_wake_tx_queue, .sta_rate_tbl_update = mt76x02_sta_rate_tbl_update, .release_buffered_frames = mt76_release_buffered_frames, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c index da7cd40f56ff..65ed62229a5b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c @@ -254,6 +254,8 @@ int mt76x2_phy_set_channel(struct mt76x02_dev *dev, 0x38); } + mt76x02_edcca_init(dev); + ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work, MT_CALIBRATE_INTERVAL); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c index c9634a774705..e2ee5e498da7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c @@ -284,7 +284,9 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev) int low_gain; u32 val; - dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev); + dev->cal.avg_rssi_all = mt76_get_min_avg_rssi(&dev->mt76); + if (!dev->cal.avg_rssi_all) + dev->cal.avg_rssi_all = -75; low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) + (dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev)); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c index 2b48cc51a30d..286c7f451090 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c @@ -138,5 +138,5 @@ const struct ieee80211_ops mt76x2u_ops = { .sw_scan_start = mt76x02_sw_scan, .sw_scan_complete = mt76x02_sw_scan_complete, .sta_rate_tbl_update = mt76x02_sta_rate_tbl_update, - .get_txpower = mt76x02_get_txpower, + .get_txpower = mt76_get_txpower, }; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c index 45a95ee3a415..152d41fe9ff5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c @@ -39,7 +39,7 @@ static void mt76x2u_mcu_load_ivb(struct mt76x02_dev *dev) static void mt76x2u_mcu_enable_patch(struct mt76x02_dev *dev) { struct mt76_usb *usb = &dev->mt76.usb; - const u8 data[] = { + static const u8 data[] = { 0x6f, 0xfc, 0x08, 0x01, 0x20, 0x04, 0x00, 0x00, 0x00, 0x09, 0x00, diff --git a/drivers/net/wireless/mediatek/mt76/tx.c 
b/drivers/net/wireless/mediatek/mt76/tx.c index 7b711058807d..ef38e8626da9 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -170,21 +170,22 @@ mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid, int pid; if (!wcid) - return 0; + return MT_PACKET_ID_NO_ACK; if (info->flags & IEEE80211_TX_CTL_NO_ACK) return MT_PACKET_ID_NO_ACK; if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS | IEEE80211_TX_CTL_RATE_CTRL_PROBE))) - return 0; + return MT_PACKET_ID_NO_SKB; spin_lock_bh(&dev->status_list.lock); memset(cb, 0, sizeof(*cb)); wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK; - if (!wcid->packet_id || wcid->packet_id == MT_PACKET_ID_NO_ACK) - wcid->packet_id = 1; + if (wcid->packet_id == MT_PACKET_ID_NO_ACK || + wcid->packet_id == MT_PACKET_ID_NO_SKB) + wcid->packet_id = MT_PACKET_ID_FIRST; pid = wcid->packet_id; cb->wcid = wcid->idx; @@ -330,7 +331,8 @@ mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta, info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE; if (last) - info->flags |= IEEE80211_TX_STATUS_EOSP; + info->flags |= IEEE80211_TX_STATUS_EOSP | + IEEE80211_TX_CTL_REQ_TX_STATUS; mt76_skb_set_moredata(skb, !last); dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, sta); @@ -394,6 +396,11 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq, bool probe; int idx; + if (test_bit(MT_WCID_FLAG_PS, &wcid->flags)) { + *empty = true; + return 0; + } + skb = mt76_txq_dequeue(dev, mtxq, false); if (!skb) { *empty = true; diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c index b061263453d4..6a2507524c6c 100644 --- a/drivers/net/wireless/mediatek/mt76/usb.c +++ b/drivers/net/wireless/mediatek/mt76/usb.c @@ -407,17 +407,15 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb) if (len < 0) return 0; + data_len = min_t(int, len, urb->sg[0].length - MT_DMA_HDR_LEN); + if (MT_DMA_HDR_LEN + data_len > SKB_WITH_OVERHEAD(q->buf_size)) + return 0; + skb = build_skb(data, q->buf_size); if (!skb) return 0; - data_len = min_t(int, len, urb->sg[0].length - MT_DMA_HDR_LEN); skb_reserve(skb, MT_DMA_HDR_LEN); - if (skb->tail + data_len > skb->end) { - dev_kfree_skb(skb); - return 1; - } - __skb_put(skb, data_len); len -= data_len; @@ -585,6 +583,7 @@ static void mt76u_stop_rx(struct mt76_dev *dev) static void mt76u_tx_tasklet(unsigned long data) { struct mt76_dev *dev = (struct mt76_dev *)data; + struct mt76_queue_entry entry; struct mt76u_buf *buf; struct mt76_queue *q; bool wake; @@ -599,17 +598,18 @@ static void mt76u_tx_tasklet(unsigned long data) if (!buf->done || !q->queued) break; - dev->drv->tx_complete_skb(dev, q, - &q->entry[q->head], - false); - if (q->entry[q->head].schedule) { q->entry[q->head].schedule = false; q->swq_queued--; } + entry = q->entry[q->head]; q->head = (q->head + 1) % q->ndesc; q->queued--; + + spin_unlock_bh(&q->lock); + dev->drv->tx_complete_skb(dev, q, &entry, false); + spin_lock_bh(&q->lock); } mt76_txq_schedule(dev, q); wake = i < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8; diff --git a/drivers/net/wireless/mediatek/mt76/util.c b/drivers/net/wireless/mediatek/mt76/util.c index 0c35b8db58cd..69270c1a9091 100644 --- a/drivers/net/wireless/mediatek/mt76/util.c +++ b/drivers/net/wireless/mediatek/mt76/util.c @@ -75,4 +75,46 @@ int mt76_wcid_alloc(unsigned long *mask, int size) } EXPORT_SYMBOL_GPL(mt76_wcid_alloc); +int mt76_get_min_avg_rssi(struct mt76_dev *dev) +{ + struct mt76_wcid *wcid; + int i, j, min_rssi = 0; + 
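/*
 * The per-wcid rssi average stores a positive magnitude
 * (mt76_check_sta() feeds it -status->signal), hence the negation when
 * the average is read back below.
 */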
s8 cur_rssi; + + local_bh_disable(); + rcu_read_lock(); + + for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) { + unsigned long mask = dev->wcid_mask[i]; + + if (!mask) + continue; + + for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) { + if (!(mask & 1)) + continue; + + wcid = rcu_dereference(dev->wcid[j]); + if (!wcid) + continue; + + spin_lock(&dev->rx_lock); + if (wcid->inactive_count++ < 5) + cur_rssi = -ewma_signal_read(&wcid->rssi); + else + cur_rssi = 0; + spin_unlock(&dev->rx_lock); + + if (cur_rssi < min_rssi) + min_rssi = cur_rssi; + } + } + + rcu_read_unlock(); + local_bh_enable(); + + return min_rssi; +} +EXPORT_SYMBOL_GPL(mt76_get_min_avg_rssi); + MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c index 7f3e3983b781..f7edeffb2b19 100644 --- a/drivers/net/wireless/mediatek/mt7601u/dma.c +++ b/drivers/net/wireless/mediatek/mt7601u/dma.c @@ -124,9 +124,9 @@ static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len) u16 dma_len = get_unaligned_le16(data); if (data_len < min_seg_len || - WARN_ON(!dma_len) || - WARN_ON(dma_len + MT_DMA_HDRS > data_len) || - WARN_ON(dma_len & 0x3)) + WARN_ON_ONCE(!dma_len) || + WARN_ON_ONCE(dma_len + MT_DMA_HDRS > data_len) || + WARN_ON_ONCE(dma_len & 0x3)) return 0; return MT_DMA_HDRS + dma_len; diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.h b/drivers/net/wireless/mediatek/mt7601u/eeprom.h index 662d12703b69..57b503ae63f1 100644 --- a/drivers/net/wireless/mediatek/mt7601u/eeprom.h +++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.h @@ -17,7 +17,7 @@ struct mt7601u_dev; -#define MT7601U_EE_MAX_VER 0x0c +#define MT7601U_EE_MAX_VER 0x0d #define MT7601U_EEPROM_SIZE 256 #define MT7601U_DEFAULT_TX_POWER 6 diff --git a/drivers/net/wireless/quantenna/Makefile b/drivers/net/wireless/quantenna/Makefile index baebfbde119e..cea83d178d2e 100644 --- a/drivers/net/wireless/quantenna/Makefile +++ b/drivers/net/wireless/quantenna/Makefile @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 # # Copyright (c) 2015-2016 Quantenna Communications, Inc. # All rights reserved. diff --git a/drivers/net/wireless/quantenna/qtnfmac/bus.h b/drivers/net/wireless/quantenna/qtnfmac/bus.h index 528ca7f5e070..14b569b6d1b5 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/bus.h +++ b/drivers/net/wireless/quantenna/qtnfmac/bus.h @@ -1,18 +1,5 @@ -/* - * Copyright (c) 2015 Quantenna Communications - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2015 Quantenna Communications. All rights reserved. 
*/ #ifndef QTNFMAC_BUS_H #define QTNFMAC_BUS_H @@ -135,7 +122,5 @@ static __always_inline void qtnf_bus_unlock(struct qtnf_bus *bus) int qtnf_core_attach(struct qtnf_bus *bus); void qtnf_core_detach(struct qtnf_bus *bus); -void qtnf_txflowblock(struct device *dev, bool state); -void qtnf_txcomplete(struct device *dev, struct sk_buff *txp, bool success); #endif /* QTNFMAC_BUS_H */ diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c index 51b33ec78fac..45f4cef7de9c 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c @@ -1,18 +1,5 @@ -/* - * Copyright (c) 2012-2012 Quantenna Communications, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */ #include <linux/kernel.h> #include <linux/etherdevice.h> @@ -122,7 +109,8 @@ qtnf_change_virtual_intf(struct wiphy *wiphy, struct vif_params *params) { struct qtnf_vif *vif = qtnf_netdev_get_priv(dev); - u8 *mac_addr; + u8 *mac_addr = NULL; + int use4addr = 0; int ret; ret = qtnf_validate_iface_combinations(wiphy, vif, type); @@ -132,14 +120,14 @@ qtnf_change_virtual_intf(struct wiphy *wiphy, return ret; } - if (params) + if (params) { mac_addr = params->macaddr; - else - mac_addr = NULL; + use4addr = params->use_4addr; + } qtnf_scan_done(vif->mac, true); - ret = qtnf_cmd_send_change_intf_type(vif, type, mac_addr); + ret = qtnf_cmd_send_change_intf_type(vif, type, use4addr, mac_addr); if (ret) { pr_err("VIF%u.%u: failed to change type to %d\n", vif->mac->macid, vif->vifid, type); @@ -190,6 +178,7 @@ static struct wireless_dev *qtnf_add_virtual_intf(struct wiphy *wiphy, struct qtnf_wmac *mac; struct qtnf_vif *vif; u8 *mac_addr = NULL; + int use4addr = 0; int ret; mac = wiphy_priv(wiphy); @@ -225,10 +214,12 @@ static struct wireless_dev *qtnf_add_virtual_intf(struct wiphy *wiphy, return ERR_PTR(-ENOTSUPP); } - if (params) + if (params) { mac_addr = params->macaddr; + use4addr = params->use_4addr; + } - ret = qtnf_cmd_send_add_intf(vif, type, mac_addr); + ret = qtnf_cmd_send_add_intf(vif, type, use4addr, mac_addr); if (ret) { pr_err("VIF%u.%u: failed to add VIF %pM\n", mac->macid, vif->vifid, mac_addr); @@ -359,11 +350,6 @@ static int qtnf_set_wiphy_params(struct wiphy *wiphy, u32 changed) return -EFAULT; } - if (changed & (WIPHY_PARAM_RETRY_LONG | WIPHY_PARAM_RETRY_SHORT)) { - pr_err("MAC%u: can't modify retry params\n", mac->macid); - return -EOPNOTSUPP; - } - ret = qtnf_cmd_send_update_phy_params(mac, changed); if (ret) pr_err("MAC%u: failed to update PHY params\n", mac->macid); @@ -1107,7 +1093,8 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME | WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD | WIPHY_FLAG_AP_UAPSD | - WIPHY_FLAG_HAS_CHANNEL_SWITCH; + WIPHY_FLAG_HAS_CHANNEL_SWITCH | + WIPHY_FLAG_4ADDR_STATION; wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; if 
(hw_info->hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD) diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h index b73425122a10..c374857283ac 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h @@ -1,18 +1,5 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */ #ifndef _QTN_FMAC_CFG80211_H_ #define _QTN_FMAC_CFG80211_H_ diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c index 659e7649fe22..0f48f541de41 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.c +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c @@ -1,17 +1,5 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. 
*/ #include <linux/types.h> #include <linux/skbuff.h> @@ -72,6 +60,8 @@ static int qtnf_cmd_resp_result_decode(enum qlink_cmd_result qcode) return -EADDRINUSE; case QLINK_CMD_RESULT_EADDRNOTAVAIL: return -EADDRNOTAVAIL; + case QLINK_CMD_RESULT_EBUSY: + return -EBUSY; default: return -EFAULT; } @@ -97,14 +87,12 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus, vif_id = cmd->vifid; cmd->mhdr.len = cpu_to_le16(cmd_skb->len); - pr_debug("VIF%u.%u cmd=0x%.4X\n", mac_id, vif_id, - le16_to_cpu(cmd->cmd_id)); + pr_debug("VIF%u.%u cmd=0x%.4X\n", mac_id, vif_id, cmd_id); if (bus->fw_state != QTNF_FW_STATE_ACTIVE && - le16_to_cpu(cmd->cmd_id) != QLINK_CMD_FW_INIT) { + cmd_id != QLINK_CMD_FW_INIT) { pr_warn("VIF%u.%u: drop cmd 0x%.4X in fw state %d\n", - mac_id, vif_id, le16_to_cpu(cmd->cmd_id), - bus->fw_state); + mac_id, vif_id, cmd_id, bus->fw_state); dev_kfree_skb(cmd_skb); return -ENODEV; } @@ -138,7 +126,7 @@ out: return qtnf_cmd_resp_result_decode(le16_to_cpu(resp->result)); pr_warn("VIF%u.%u: cmd 0x%.4X failed: %d\n", - mac_id, vif_id, le16_to_cpu(cmd->cmd_id), ret); + mac_id, vif_id, cmd_id, ret); return ret; } @@ -732,6 +720,7 @@ out: static int qtnf_cmd_send_add_change_intf(struct qtnf_vif *vif, enum nl80211_iftype iftype, + int use4addr, u8 *mac_addr, enum qlink_cmd_type cmd_type) { @@ -749,6 +738,7 @@ static int qtnf_cmd_send_add_change_intf(struct qtnf_vif *vif, qtnf_bus_lock(vif->mac->bus); cmd = (struct qlink_cmd_manage_intf *)cmd_skb->data; + cmd->intf_info.use4addr = use4addr; switch (iftype) { case NL80211_IFTYPE_AP: @@ -784,17 +774,19 @@ out: return ret; } -int qtnf_cmd_send_add_intf(struct qtnf_vif *vif, - enum nl80211_iftype iftype, u8 *mac_addr) +int qtnf_cmd_send_add_intf(struct qtnf_vif *vif, enum nl80211_iftype iftype, + int use4addr, u8 *mac_addr) { - return qtnf_cmd_send_add_change_intf(vif, iftype, mac_addr, + return qtnf_cmd_send_add_change_intf(vif, iftype, use4addr, mac_addr, QLINK_CMD_ADD_INTF); } int qtnf_cmd_send_change_intf_type(struct qtnf_vif *vif, - enum nl80211_iftype iftype, u8 *mac_addr) + enum nl80211_iftype iftype, + int use4addr, + u8 *mac_addr) { - return qtnf_cmd_send_add_change_intf(vif, iftype, mac_addr, + return qtnf_cmd_send_add_change_intf(vif, iftype, use4addr, mac_addr, QLINK_CMD_CHANGE_INTF); } @@ -914,9 +906,8 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus, if (WARN_ON(resp->n_reg_rules > NL80211_MAX_SUPP_REG_RULES)) return -E2BIG; - hwinfo->rd = kzalloc(sizeof(*hwinfo->rd) - + sizeof(struct ieee80211_reg_rule) - * resp->n_reg_rules, GFP_KERNEL); + hwinfo->rd = kzalloc(struct_size(hwinfo->rd, reg_rules, + resp->n_reg_rules), GFP_KERNEL); if (!hwinfo->rd) return -ENOMEM; @@ -1558,11 +1549,11 @@ static int qtnf_cmd_resp_proc_phy_params(struct qtnf_wmac *mac, switch (tlv_type) { case QTN_TLV_ID_FRAG_THRESH: phy_thr = (void *)tlv; - mac_info->frag_thr = (u32)le16_to_cpu(phy_thr->thr); + mac_info->frag_thr = le32_to_cpu(phy_thr->thr); break; case QTN_TLV_ID_RTS_THRESH: phy_thr = (void *)tlv; - mac_info->rts_thr = (u32)le16_to_cpu(phy_thr->thr); + mac_info->rts_thr = le32_to_cpu(phy_thr->thr); break; case QTN_TLV_ID_SRETRY_LIMIT: limit = (void *)tlv; @@ -1810,15 +1801,23 @@ int qtnf_cmd_send_update_phy_params(struct qtnf_wmac *mac, u32 changed) qtnf_bus_lock(mac->bus); if (changed & WIPHY_PARAM_FRAG_THRESHOLD) - qtnf_cmd_skb_put_tlv_u16(cmd_skb, QTN_TLV_ID_FRAG_THRESH, + qtnf_cmd_skb_put_tlv_u32(cmd_skb, QTN_TLV_ID_FRAG_THRESH, wiphy->frag_threshold); if (changed & WIPHY_PARAM_RTS_THRESHOLD) - qtnf_cmd_skb_put_tlv_u16(cmd_skb, 
QTN_TLV_ID_RTS_THRESH, + qtnf_cmd_skb_put_tlv_u32(cmd_skb, QTN_TLV_ID_RTS_THRESH, wiphy->rts_threshold); if (changed & WIPHY_PARAM_COVERAGE_CLASS) qtnf_cmd_skb_put_tlv_u8(cmd_skb, QTN_TLV_ID_COVERAGE_CLASS, wiphy->coverage_class); + if (changed & WIPHY_PARAM_RETRY_LONG) + qtnf_cmd_skb_put_tlv_u8(cmd_skb, QTN_TLV_ID_LRETRY_LIMIT, + wiphy->retry_long); + + if (changed & WIPHY_PARAM_RETRY_SHORT) + qtnf_cmd_skb_put_tlv_u8(cmd_skb, QTN_TLV_ID_SRETRY_LIMIT, + wiphy->retry_short); + ret = qtnf_cmd_send(mac->bus, cmd_skb); if (ret) goto out; diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.h b/drivers/net/wireless/quantenna/qtnfmac/commands.h index 1ac41156c192..96dff643bbc4 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.h +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.h @@ -1,17 +1,5 @@ -/* - * Copyright (c) 2016 Quantenna Communications, Inc. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016 Quantenna Communications. All rights reserved. */ #ifndef QLINK_COMMANDS_H_ #define QLINK_COMMANDS_H_ @@ -26,9 +14,11 @@ void qtnf_cmd_send_deinit_fw(struct qtnf_bus *bus); int qtnf_cmd_get_hw_info(struct qtnf_bus *bus); int qtnf_cmd_get_mac_info(struct qtnf_wmac *mac); int qtnf_cmd_send_add_intf(struct qtnf_vif *vif, enum nl80211_iftype iftype, - u8 *mac_addr); + int use4addr, u8 *mac_addr); int qtnf_cmd_send_change_intf_type(struct qtnf_vif *vif, - enum nl80211_iftype iftype, u8 *mac_addr); + enum nl80211_iftype iftype, + int use4addr, + u8 *mac_addr); int qtnf_cmd_send_del_intf(struct qtnf_vif *vif); int qtnf_cmd_band_info_get(struct qtnf_wmac *mac, struct ieee80211_supported_band *band); diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c index 5d18a4a917c9..ee1b75fda1dd 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.c +++ b/drivers/net/wireless/quantenna/qtnfmac/core.c @@ -1,18 +1,5 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. 
*/ #include <linux/kernel.h> #include <linux/module.h> @@ -195,6 +182,7 @@ static int qtnf_netdev_set_mac_address(struct net_device *ndev, void *addr) qtnf_scan_done(vif->mac, true); ret = qtnf_cmd_send_change_intf_type(vif, vif->wdev.iftype, + vif->wdev.use_4addr, sa->sa_data); if (ret) @@ -545,7 +533,8 @@ static int qtnf_core_mac_attach(struct qtnf_bus *bus, unsigned int macid) goto error; } - ret = qtnf_cmd_send_add_intf(vif, vif->wdev.iftype, vif->mac_addr); + ret = qtnf_cmd_send_add_intf(vif, vif->wdev.iftype, + vif->wdev.use_4addr, vif->mac_addr); if (ret) { pr_err("MAC%u: failed to add VIF\n", macid); goto error; diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h index 293055049caa..a31cff46e964 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.h +++ b/drivers/net/wireless/quantenna/qtnfmac/core.h @@ -1,18 +1,5 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */ #ifndef _QTN_FMAC_CORE_H_ #define _QTN_FMAC_CORE_H_ diff --git a/drivers/net/wireless/quantenna/qtnfmac/debug.c b/drivers/net/wireless/quantenna/qtnfmac/debug.c index 9f826b9ef5d9..598ece753a4b 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/debug.c +++ b/drivers/net/wireless/quantenna/qtnfmac/debug.c @@ -1,32 +1,11 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. 
*/ #include "debug.h" -#undef pr_fmt -#define pr_fmt(fmt) "qtnfmac dbg: %s: " fmt, __func__ - void qtnf_debugfs_init(struct qtnf_bus *bus, const char *name) { bus->dbg_dir = debugfs_create_dir(name, NULL); - - if (IS_ERR_OR_NULL(bus->dbg_dir)) { - pr_warn("failed to create debugfs root dir\n"); - bus->dbg_dir = NULL; - } } void qtnf_debugfs_remove(struct qtnf_bus *bus) @@ -38,9 +17,5 @@ void qtnf_debugfs_remove(struct qtnf_bus *bus) void qtnf_debugfs_add_entry(struct qtnf_bus *bus, const char *name, int (*fn)(struct seq_file *seq, void *data)) { - struct dentry *entry; - - entry = debugfs_create_devm_seqfile(bus->dev, name, bus->dbg_dir, fn); - if (IS_ERR_OR_NULL(entry)) - pr_warn("failed to add entry (%s)\n", name); + debugfs_create_devm_seqfile(bus->dev, name, bus->dbg_dir, fn); } diff --git a/drivers/net/wireless/quantenna/qtnfmac/debug.h b/drivers/net/wireless/quantenna/qtnfmac/debug.h index d6dd12b5d434..61b45536b83a 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/debug.h +++ b/drivers/net/wireless/quantenna/qtnfmac/debug.h @@ -1,18 +1,5 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */ #ifndef _QTN_FMAC_DEBUG_H_ #define _QTN_FMAC_DEBUG_H_ diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c index 8b542b431b75..3fd1a9217737 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/event.c +++ b/drivers/net/wireless/quantenna/qtnfmac/event.c @@ -1,18 +1,5 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. 
*/ #include <linux/kernel.h> #include <linux/module.h> @@ -158,6 +145,12 @@ qtnf_event_handle_bss_join(struct qtnf_vif *vif, const struct qlink_event_bss_join *join_info, u16 len) { + struct wiphy *wiphy = priv_to_wiphy(vif->mac); + enum ieee80211_statuscode status = le16_to_cpu(join_info->status); + struct cfg80211_chan_def chandef; + struct cfg80211_bss *bss = NULL; + u8 *ie = NULL; + if (unlikely(len < sizeof(*join_info))) { pr_err("VIF%u.%u: payload is too short (%u < %zu)\n", vif->mac->macid, vif->vifid, len, @@ -171,15 +164,80 @@ qtnf_event_handle_bss_join(struct qtnf_vif *vif, return -EPROTO; } - pr_debug("VIF%u.%u: BSSID:%pM\n", vif->mac->macid, vif->vifid, - join_info->bssid); + pr_debug("VIF%u.%u: BSSID:%pM status:%u\n", + vif->mac->macid, vif->vifid, join_info->bssid, status); + + if (status == WLAN_STATUS_SUCCESS) { + qlink_chandef_q2cfg(wiphy, &join_info->chan, &chandef); + if (!cfg80211_chandef_valid(&chandef)) { + pr_warn("MAC%u.%u: bad channel freq=%u cf1=%u cf2=%u bw=%u\n", + vif->mac->macid, vif->vifid, + chandef.chan->center_freq, + chandef.center_freq1, + chandef.center_freq2, + chandef.width); + status = WLAN_STATUS_UNSPECIFIED_FAILURE; + goto done; + } + bss = cfg80211_get_bss(wiphy, chandef.chan, join_info->bssid, + NULL, 0, IEEE80211_BSS_TYPE_ESS, + IEEE80211_PRIVACY_ANY); + if (!bss) { + pr_warn("VIF%u.%u: add missing BSS:%pM chan:%u\n", + vif->mac->macid, vif->vifid, + join_info->bssid, chandef.chan->hw_value); + + if (!vif->wdev.ssid_len) { + pr_warn("VIF%u.%u: SSID unknown for BSS:%pM\n", + vif->mac->macid, vif->vifid, + join_info->bssid); + status = WLAN_STATUS_UNSPECIFIED_FAILURE; + goto done; + } + + ie = kzalloc(2 + vif->wdev.ssid_len, GFP_KERNEL); + if (!ie) { + pr_warn("VIF%u.%u: IE alloc failed for BSS:%pM\n", + vif->mac->macid, vif->vifid, + join_info->bssid); + status = WLAN_STATUS_UNSPECIFIED_FAILURE; + goto done; + } + + ie[0] = WLAN_EID_SSID; + ie[1] = vif->wdev.ssid_len; + memcpy(ie + 2, vif->wdev.ssid, vif->wdev.ssid_len); + + bss = cfg80211_inform_bss(wiphy, chandef.chan, + CFG80211_BSS_FTYPE_UNKNOWN, + join_info->bssid, 0, + WLAN_CAPABILITY_ESS, 100, + ie, 2 + vif->wdev.ssid_len, + 0, GFP_KERNEL); + if (!bss) { + pr_warn("VIF%u.%u: can't connect to unknown BSS: %pM\n", + vif->mac->macid, vif->vifid, + join_info->bssid); + status = WLAN_STATUS_UNSPECIFIED_FAILURE; + goto done; + } + } + } + +done: cfg80211_connect_result(vif->netdev, join_info->bssid, NULL, 0, NULL, - 0, le16_to_cpu(join_info->status), GFP_KERNEL); + 0, status, GFP_KERNEL); + if (bss) { + if (!ether_addr_equal(vif->bssid, join_info->bssid)) + ether_addr_copy(vif->bssid, join_info->bssid); + cfg80211_put_bss(wiphy, bss); + } - if (le16_to_cpu(join_info->status) == WLAN_STATUS_SUCCESS) + if (status == WLAN_STATUS_SUCCESS) netif_carrier_on(vif->netdev); + kfree(ie); return 0; } diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.h b/drivers/net/wireless/quantenna/qtnfmac/event.h index ae759b602c2a..533ad99d045d 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/event.h +++ b/drivers/net/wireless/quantenna/qtnfmac/event.h @@ -1,18 +1,5 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */ #ifndef _QTN_FMAC_EVENT_H_ #define _QTN_FMAC_EVENT_H_ diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c index 598edb814421..cbcda57105f3 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c @@ -559,6 +559,9 @@ static irqreturn_t qtnf_pcie_topaz_interrupt(int irq, void *data) if (!priv->msi_enabled && !qtnf_topaz_intx_asserted(ts)) return IRQ_NONE; + if (!priv->msi_enabled) + qtnf_deassert_intx(ts); + priv->pcie_irq_count++; qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_in); @@ -571,9 +574,6 @@ static irqreturn_t qtnf_pcie_topaz_interrupt(int irq, void *data) tasklet_hi_schedule(&priv->reclaim_tq); - if (!priv->msi_enabled) - qtnf_deassert_intx(ts); - return IRQ_HANDLED; } diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h index 8d62addea895..27fdb5b01ee3 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h @@ -1,25 +1,12 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */ #ifndef _QTN_QLINK_H_ #define _QTN_QLINK_H_ #include <linux/ieee80211.h> -#define QLINK_PROTO_VER 11 +#define QLINK_PROTO_VER 13 #define QLINK_MACID_RSVD 0xFF #define QLINK_VIFID_RSVD 0xFF @@ -105,7 +92,8 @@ struct qlink_intf_info { __le16 if_type; __le16 vlanid; u8 mac_addr[ETH_ALEN]; - u8 rsvd[2]; + u8 use4addr; + u8 rsvd[1]; } __packed; enum qlink_sta_flags { @@ -733,6 +721,7 @@ enum qlink_cmd_result { QLINK_CMD_RESULT_EALREADY, QLINK_CMD_RESULT_EADDRINUSE, QLINK_CMD_RESULT_EADDRNOTAVAIL, + QLINK_CMD_RESULT_EBUSY, }; /** @@ -986,11 +975,13 @@ struct qlink_event_sta_deauth { /** * struct qlink_event_bss_join - data for QLINK_EVENT_BSS_JOIN event * + * @chan: new operating channel definition * @bssid: BSSID of a BSS which interface tried to joined. * @status: status of joining attempt, see &enum ieee80211_statuscode. 
*/ struct qlink_event_bss_join { struct qlink_event ehdr; + struct qlink_chandef chan; u8 bssid[ETH_ALEN]; __le16 status; } __packed; @@ -1182,7 +1173,7 @@ struct qlink_iface_limit_record { struct qlink_tlv_frag_rts_thr { struct qlink_tlv_hdr hdr; - __le16 thr; + __le32 thr; } __packed; struct qlink_tlv_rlimit { diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c index aeeda81b09ea..72bfd17cb687 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c @@ -1,17 +1,5 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */ #include <linux/nl80211.h> diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h index 960d5d97492f..781ea7fe79f2 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h @@ -1,18 +1,5 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */ #ifndef _QTN_FMAC_QLINK_UTIL_H_ #define _QTN_FMAC_QLINK_UTIL_H_ @@ -69,6 +56,17 @@ static inline void qtnf_cmd_skb_put_tlv_u16(struct sk_buff *skb, memcpy(hdr->val, &tmp, sizeof(tmp)); } +static inline void qtnf_cmd_skb_put_tlv_u32(struct sk_buff *skb, + u16 tlv_id, u32 value) +{ + struct qlink_tlv_hdr *hdr = skb_put(skb, sizeof(*hdr) + sizeof(value)); + __le32 tmp = cpu_to_le32(value); + + hdr->type = cpu_to_le16(tlv_id); + hdr->len = cpu_to_le16(sizeof(value)); + memcpy(hdr->val, &tmp, sizeof(tmp)); +} + u16 qlink_iface_type_to_nl_mask(u16 qlink_type); u8 qlink_chan_width_mask_to_nl(u16 qlink_mask); void qlink_chandef_q2cfg(struct wiphy *wiphy, diff --git a/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h b/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h index 40295a511224..82d879950b62 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h @@ -1,18 +1,5 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * All rights reserved. 
- * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */ #ifndef _QTN_HW_IDS_H_ #define _QTN_HW_IDS_H_ diff --git a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c index 2ec334199c2b..ff678951d3b2 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c +++ b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c @@ -1,18 +1,5 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */ #include <linux/types.h> #include <linux/io.h> diff --git a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h index c2a3702a9ee7..52cac5439b03 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h +++ b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h @@ -1,18 +1,5 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */ #ifndef _QTN_FMAC_SHM_IPC_H_ #define _QTN_FMAC_SHM_IPC_H_ diff --git a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc_defs.h b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc_defs.h index 95a5f89a8b1a..78be70df1218 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc_defs.h +++ b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc_defs.h @@ -1,18 +1,5 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */ #ifndef _QTN_FMAC_SHM_IPC_DEFS_H_ #define _QTN_FMAC_SHM_IPC_DEFS_H_ diff --git a/drivers/net/wireless/quantenna/qtnfmac/trans.c b/drivers/net/wireless/quantenna/qtnfmac/trans.c index 345f34ec9750..95356e280e23 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/trans.c +++ b/drivers/net/wireless/quantenna/qtnfmac/trans.c @@ -1,18 +1,5 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */ #include <linux/types.h> #include <linux/export.h> diff --git a/drivers/net/wireless/quantenna/qtnfmac/trans.h b/drivers/net/wireless/quantenna/qtnfmac/trans.h index 9a473e07af0f..c0b76f871b31 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/trans.h +++ b/drivers/net/wireless/quantenna/qtnfmac/trans.h @@ -1,18 +1,5 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */ #ifndef _QTN_FMAC_TRANS_H_ #define _QTN_FMAC_TRANS_H_ diff --git a/drivers/net/wireless/quantenna/qtnfmac/util.c b/drivers/net/wireless/quantenna/qtnfmac/util.c index 3bc96b264769..cda6f5f3f38a 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/util.c +++ b/drivers/net/wireless/quantenna/qtnfmac/util.c @@ -1,18 +1,5 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. 
*/ #include "util.h" #include "qtn_hw_ids.h" diff --git a/drivers/net/wireless/quantenna/qtnfmac/util.h b/drivers/net/wireless/quantenna/qtnfmac/util.h index b8744baac332..a14b7078a9c7 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/util.h +++ b/drivers/net/wireless/quantenna/qtnfmac/util.h @@ -1,18 +1,5 @@ -/* - * Copyright (c) 2015 Quantenna Communications - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2015 Quantenna Communications. All rights reserved. */ #ifndef QTNFMAC_UTIL_H #define QTNFMAC_UTIL_H diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index 0e95555aec62..7f813f6f8792 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -5477,7 +5477,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000); rt2800_register_write(rt2x00dev, MIMO_PS_CFG, 0x00000002); rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0x00150F0F); - rt2800_register_write(rt2x00dev, TX_ALC_VGA3, 0x06060606); + rt2800_register_write(rt2x00dev, TX_ALC_VGA3, 0x00000000); rt2800_register_write(rt2x00dev, TX0_BB_GAIN_ATTEN, 0x0); rt2800_register_write(rt2x00dev, TX1_BB_GAIN_ATTEN, 0x0); rt2800_register_write(rt2x00dev, TX0_RF_GAIN_ATTEN, 0x6C6C666C); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c index 61ba573e8bf1..05a2e8da412c 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c @@ -656,36 +656,24 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev) intf->driver_folder = debugfs_create_dir(intf->rt2x00dev->ops->name, rt2x00dev->hw->wiphy->debugfsdir); - if (IS_ERR(intf->driver_folder) || !intf->driver_folder) - goto exit; intf->driver_entry = rt2x00debug_create_file_driver("driver", intf, &intf->driver_blob); - if (IS_ERR(intf->driver_entry) || !intf->driver_entry) - goto exit; intf->chipset_entry = rt2x00debug_create_file_chipset("chipset", intf, &intf->chipset_blob); - if (IS_ERR(intf->chipset_entry) || !intf->chipset_entry) - goto exit; intf->dev_flags = debugfs_create_file("dev_flags", 0400, intf->driver_folder, intf, &rt2x00debug_fop_dev_flags); - if (IS_ERR(intf->dev_flags) || !intf->dev_flags) - goto exit; intf->cap_flags = debugfs_create_file("cap_flags", 0400, intf->driver_folder, intf, &rt2x00debug_fop_cap_flags); - if (IS_ERR(intf->cap_flags) || !intf->cap_flags) - goto exit; intf->register_folder = debugfs_create_dir("register", intf->driver_folder); - if (IS_ERR(intf->register_folder) || !intf->register_folder) - goto exit; #define RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(__intf, __name) \ ({ \ @@ -695,9 +683,6 @@ void 
rt2x00debug_register(struct rt2x00_dev *rt2x00dev) 0600, \ (__intf)->register_folder, \ &(__intf)->offset_##__name); \ - if (IS_ERR((__intf)->__name##_off_entry) || \ - !(__intf)->__name##_off_entry) \ - goto exit; \ \ (__intf)->__name##_val_entry = \ debugfs_create_file(__stringify(__name) "_value", \ @@ -705,9 +690,6 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev) (__intf)->register_folder, \ (__intf), \ &rt2x00debug_fop_##__name); \ - if (IS_ERR((__intf)->__name##_val_entry) || \ - !(__intf)->__name##_val_entry) \ - goto exit; \ } \ }) @@ -721,15 +703,10 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev) intf->queue_folder = debugfs_create_dir("queue", intf->driver_folder); - if (IS_ERR(intf->queue_folder) || !intf->queue_folder) - goto exit; intf->queue_frame_dump_entry = debugfs_create_file("dump", 0400, intf->queue_folder, intf, &rt2x00debug_fop_queue_dump); - if (IS_ERR(intf->queue_frame_dump_entry) - || !intf->queue_frame_dump_entry) - goto exit; skb_queue_head_init(&intf->frame_dump_skbqueue); init_waitqueue_head(&intf->frame_dump_waitqueue); @@ -747,10 +724,6 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev) #endif return; - -exit: - rt2x00debug_deregister(rt2x00dev); - rt2x00_err(rt2x00dev, "Failed to register debug handler\n"); } void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev) diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.c b/drivers/net/wireless/ralink/rt2x00/rt61pci.c index 4c5de8fc8f12..52b9fc480f8b 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt61pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c @@ -321,97 +321,12 @@ static int rt61pci_config_shared_key(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_crypto *crypto, struct ieee80211_key_conf *key) { - struct hw_key_entry key_entry; - struct rt2x00_field32 field; - u32 mask; - u32 reg; - - if (crypto->cmd == SET_KEY) { - /* - * rt2x00lib can't determine the correct free - * key_idx for shared keys. We have 1 register - * with key valid bits. The goal is simple, read - * the register, if that is full we have no slots - * left. - * Note that each BSS is allowed to have up to 4 - * shared keys, so put a mask over the allowed - * entries. - */ - mask = (0xf << crypto->bssidx); - - reg = rt2x00mmio_register_read(rt2x00dev, SEC_CSR0); - reg &= mask; - - if (reg && reg == mask) - return -ENOSPC; - - key->hw_key_idx += reg ? ffz(reg) : 0; - - /* - * Upload key to hardware - */ - memcpy(key_entry.key, crypto->key, - sizeof(key_entry.key)); - memcpy(key_entry.tx_mic, crypto->tx_mic, - sizeof(key_entry.tx_mic)); - memcpy(key_entry.rx_mic, crypto->rx_mic, - sizeof(key_entry.rx_mic)); - - reg = SHARED_KEY_ENTRY(key->hw_key_idx); - rt2x00mmio_register_multiwrite(rt2x00dev, reg, - &key_entry, sizeof(key_entry)); - - /* - * The cipher types are stored over 2 registers. - * bssidx 0 and 1 keys are stored in SEC_CSR1 and - * bssidx 1 and 2 keys are stored in SEC_CSR5. - * Using the correct defines correctly will cause overhead, - * so just calculate the correct offset. 
- */ - if (key->hw_key_idx < 8) { - field.bit_offset = (3 * key->hw_key_idx); - field.bit_mask = 0x7 << field.bit_offset; - - reg = rt2x00mmio_register_read(rt2x00dev, SEC_CSR1); - rt2x00_set_field32(®, field, crypto->cipher); - rt2x00mmio_register_write(rt2x00dev, SEC_CSR1, reg); - } else { - field.bit_offset = (3 * (key->hw_key_idx - 8)); - field.bit_mask = 0x7 << field.bit_offset; - - reg = rt2x00mmio_register_read(rt2x00dev, SEC_CSR5); - rt2x00_set_field32(®, field, crypto->cipher); - rt2x00mmio_register_write(rt2x00dev, SEC_CSR5, reg); - } - - /* - * The driver does not support the IV/EIV generation - * in hardware. However it doesn't support the IV/EIV - * inside the ieee80211 frame either, but requires it - * to be provided separately for the descriptor. - * rt2x00lib will cut the IV/EIV data out of all frames - * given to us by mac80211, but we must tell mac80211 - * to generate the IV/EIV data. - */ - key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; - } - /* - * SEC_CSR0 contains only single-bit fields to indicate - * a particular key is valid. Because using the FIELD32() - * defines directly will cause a lot of overhead, we use - * a calculation to determine the correct bit directly. + * Let the software handle the shared keys, + * since the hardware decryption does not work reliably, + * because the firmware does not know the key's keyidx. */ - mask = 1 << key->hw_key_idx; - - reg = rt2x00mmio_register_read(rt2x00dev, SEC_CSR0); - if (crypto->cmd == SET_KEY) - reg |= mask; - else if (crypto->cmd == DISABLE_KEY) - reg &= ~mask; - rt2x00mmio_register_write(rt2x00dev, SEC_CSR0, reg); - - return 0; + return -EOPNOTSUPP; } static int rt61pci_config_pairwise_key(struct rt2x00_dev *rt2x00dev, diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c index 33ad87528d9a..44a943d18b84 100644 --- a/drivers/net/wireless/ray_cs.c +++ b/drivers/net/wireless/ray_cs.c @@ -959,7 +959,7 @@ static int translate_frame(ray_dev_t *local, struct tx_msg __iomem *ptx, if (proto == htons(ETH_P_AARP) || proto == htons(ETH_P_IPX)) { /* This is the selective translation table, only 2 entries */ writeb(0xf8, - &((struct snaphdr_t __iomem *)ptx->var)->org[3]); + &((struct snaphdr_t __iomem *)ptx->var)->org[2]); } /* Copy body of ethernet packet without ethernet header */ memcpy_toio((void __iomem *)&ptx->var + @@ -2211,7 +2211,7 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs, untranslate(local, skb, total_len); } } else { /* sniffer mode, so just pass whole packet */ - }; + } /************************/ /* Now pick up the rest of the fragments if any */ diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile b/drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile index 2966681efaef..5d6b06d3c02c 100644 --- a/drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile +++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile @@ -2,4 +2,4 @@ rtl818x_pci-objs := dev.o rtl8225.o sa2400.o max2820.o grf5101.o rtl8225se.o obj-$(CONFIG_RTL8180) += rtl818x_pci.o -ccflags-y += -Idrivers/net/wireless/realtek/rtl818x +ccflags-y += -I $(srctree)/$(src)/.. 
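
The rt61pci_config_shared_key() change above drops the broken shared-key offload entirely and simply reports -EOPNOTSUPP, which makes mac80211 fall back to software encryption for those keys. A minimal sketch of that fallback convention for a hypothetical driver's .set_key callback (rt2x00 actually routes this through its own crypto hooks; foo_set_key and the surrounding driver are assumptions, the return-value semantics are mac80211's):

static int foo_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
		       struct ieee80211_vif *vif, struct ieee80211_sta *sta,
		       struct ieee80211_key_conf *key)
{
	/* No station pointer means a shared/group key. Declining the
	 * offload makes mac80211 encrypt/decrypt with this key in
	 * software, which is the effect the rt61pci patch relies on. */
	if (!sta)
		return -EOPNOTSUPP;

	/* ... program the pairwise key into hardware here ... */
	return 0;
}
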
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c index 225c1c8851cc..e2b1bfbcfbd4 100644 --- a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c @@ -803,7 +803,7 @@ static void rtl8180_config_cardbus(struct ieee80211_hw *dev) rtl818x_iowrite16(priv, FEMR_SE, 0xffff); } else { reg16 = rtl818x_ioread16(priv, &priv->map->FEMR); - reg16 |= (1 << 15) | (1 << 14) | (1 << 4); + reg16 |= (1 << 15) | (1 << 14) | (1 << 4); rtl818x_iowrite16(priv, &priv->map->FEMR, reg16); } diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile b/drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile index ff074912a095..95bac73ece7c 100644 --- a/drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile +++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile @@ -2,4 +2,4 @@ rtl8187-objs := dev.o rtl8225.o leds.o rfkill.o obj-$(CONFIG_RTL8187) += rtl8187.o -ccflags-y += -Idrivers/net/wireless/realtek/rtl818x +ccflags-y += -I $(srctree)/$(src)/.. diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index ef9b502ce576..7aa68fe5d791 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -2172,8 +2172,6 @@ label_lps_done: ; } - rtlpriv->link_info.num_rx_inperiod = 0; - rtlpriv->link_info.num_tx_inperiod = 0; for (tid = 0; tid <= 7; tid++) rtlpriv->link_info.tidtx_inperiod[tid] = 0; @@ -2236,6 +2234,8 @@ label_lps_done: rtlpriv->btcoexist.btc_info.in_4way = false; } + rtlpriv->link_info.num_rx_inperiod = 0; + rtlpriv->link_info.num_tx_inperiod = 0; rtlpriv->link_info.bcn_rx_inperiod = 0; /* <6> scan list */ diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c index 4bf7967590ca..ce23339bf9fb 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c @@ -1957,5 +1957,7 @@ void rtl_dm_diginit(struct ieee80211_hw *hw, u32 cur_igvalue) dm_digtable->bt30_cur_igi = 0x32; dm_digtable->pre_cck_pd_state = CCK_PD_STAGE_MAX; dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_LOWRSSI; + dm_digtable->pre_cck_fa_state = 0; + dm_digtable->cur_cck_fa_state = 0; } EXPORT_SYMBOL(rtl_dm_diginit); diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.c b/drivers/net/wireless/realtek/rtlwifi/debug.c index d70385be9976..8186650efc56 100644 --- a/drivers/net/wireless/realtek/rtlwifi/debug.c +++ b/drivers/net/wireless/realtek/rtlwifi/debug.c @@ -463,12 +463,9 @@ static const struct file_operations file_ops_common_write = { #define RTL_DEBUGFS_ADD_CORE(name, mode, fopname) \ do { \ rtl_debug_priv_ ##name.rtlpriv = rtlpriv; \ - if (!debugfs_create_file(#name, mode, \ - parent, &rtl_debug_priv_ ##name, \ - &file_ops_ ##fopname)) \ - pr_err("Unable to initialize debugfs:%s/%s\n", \ - rtlpriv->dbg.debugfs_name, \ - #name); \ + debugfs_create_file(#name, mode, parent, \ + &rtl_debug_priv_ ##name, \ + &file_ops_ ##fopname); \ } while (0) #define RTL_DEBUGFS_ADD(name) \ @@ -486,11 +483,6 @@ void rtl_debug_add_one(struct ieee80211_hw *hw) rtlpriv->dbg.debugfs_dir = debugfs_create_dir(rtlpriv->dbg.debugfs_name, debugfs_topdir); - if (!rtlpriv->dbg.debugfs_dir) { - pr_err("Unable to init debugfs:/%s/%s\n", rtlpriv->cfg->name, - rtlpriv->dbg.debugfs_name); - return; - } parent = rtlpriv->dbg.debugfs_dir; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c index 
42a6fba90ba9..acfd54c6f8dd 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c @@ -151,8 +151,14 @@ static u8 rtl8723e_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct dig_t *dm_digtable = &rtlpriv->dm_digtable; + struct rtl_mac *mac = rtl_mac(rtlpriv); long rssi_val_min = 0; + if (mac->link_state == MAC80211_LINKED && + mac->opmode == NL80211_IFTYPE_STATION && + rtlpriv->link_info.bcn_rx_inperiod == 0) + return 0; + if ((dm_digtable->curmultista_cstate == DIG_MULTISTA_CONNECT) && (dm_digtable->cursta_cstate == DIG_STA_CONNECT)) { if (rtlpriv->dm.entry_min_undec_sm_pwdb != 0) @@ -417,6 +423,8 @@ static void rtl8723e_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw) } else { rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd); rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x47); + dm_digtable->pre_cck_fa_state = 0; + dm_digtable->cur_cck_fa_state = 0; } dm_digtable->pre_cck_pd_state = dm_digtable->cur_cck_pd_state; @@ -665,7 +673,7 @@ void rtl8723e_dm_check_txpower_tracking(struct ieee80211_hw *hw) void rtl8723e_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rate_adaptive *p_ra = &(rtlpriv->ra); + struct rate_adaptive *p_ra = &rtlpriv->ra; p_ra->ratr_state = DM_RATR_STA_INIT; p_ra->pre_ratr_state = DM_RATR_STA_INIT; @@ -677,6 +685,89 @@ void rtl8723e_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw) } +void rtl8723e_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rate_adaptive *p_ra = &rtlpriv->ra; + u32 low_rssithresh_for_ra, high_rssithresh_for_ra; + struct ieee80211_sta *sta = NULL; + + if (is_hal_stop(rtlhal)) { + RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, + " driver is going to unload\n"); + return; + } + + if (!rtlpriv->dm.useramask) { + RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, + " driver does not control rate adaptive mask\n"); + return; + } + + if (mac->link_state == MAC80211_LINKED && + mac->opmode == NL80211_IFTYPE_STATION) { + switch (p_ra->pre_ratr_state) { + case DM_RATR_STA_HIGH: + high_rssithresh_for_ra = 50; + low_rssithresh_for_ra = 20; + break; + case DM_RATR_STA_MIDDLE: + high_rssithresh_for_ra = 55; + low_rssithresh_for_ra = 20; + break; + case DM_RATR_STA_LOW: + high_rssithresh_for_ra = 60; + low_rssithresh_for_ra = 25; + break; + default: + high_rssithresh_for_ra = 50; + low_rssithresh_for_ra = 20; + break; + } + + if (rtlpriv->link_info.bcn_rx_inperiod == 0) + switch (p_ra->pre_ratr_state) { + case DM_RATR_STA_HIGH: + default: + p_ra->ratr_state = DM_RATR_STA_MIDDLE; + break; + case DM_RATR_STA_MIDDLE: + case DM_RATR_STA_LOW: + p_ra->ratr_state = DM_RATR_STA_LOW; + break; + } + else if (rtlpriv->dm.undec_sm_pwdb > high_rssithresh_for_ra) + p_ra->ratr_state = DM_RATR_STA_HIGH; + else if (rtlpriv->dm.undec_sm_pwdb > low_rssithresh_for_ra) + p_ra->ratr_state = DM_RATR_STA_MIDDLE; + else + p_ra->ratr_state = DM_RATR_STA_LOW; + + if (p_ra->pre_ratr_state != p_ra->ratr_state) { + RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, + "RSSI = %ld\n", + rtlpriv->dm.undec_sm_pwdb); + RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, + "RSSI_LEVEL = %d\n", p_ra->ratr_state); + RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, + "PreState = %d, CurState = %d\n", + p_ra->pre_ratr_state, p_ra->ratr_state); + + rcu_read_lock(); + sta = rtl_find_sta(hw, mac->bssid); + if (sta) + 
rtlpriv->cfg->ops->update_rate_tbl(hw, sta, + p_ra->ratr_state, + true); + rcu_read_unlock(); + + p_ra->pre_ratr_state = p_ra->ratr_state; + } + } +} + void rtl8723e_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -826,7 +917,7 @@ void rtl8723e_dm_watchdog(struct ieee80211_hw *hw) rtl8723e_dm_dynamic_bb_powersaving(hw); rtl8723e_dm_dynamic_txpower(hw); rtl8723e_dm_check_txpower_tracking(hw); - /* rtl92c_dm_refresh_rate_adaptive_mask(hw); */ + rtl8723e_dm_refresh_rate_adaptive_mask(hw); rtl8723e_dm_bt_coexist(hw); rtl8723e_dm_check_edca_turbo(hw); } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c index 07b82700d1de..3103151dda2b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c @@ -266,8 +266,8 @@ static struct rtl_hal_ops rtl8723e_hal_ops = { static struct rtl_mod_params rtl8723e_mod_params = { .sw_crypto = false, .inactiveps = true, - .swctrl_lps = false, - .fwctrl_lps = true, + .swctrl_lps = true, + .fwctrl_lps = false, .aspm_support = 1, .debug_level = 0, .debug_mask = 0, @@ -395,8 +395,8 @@ module_param_named(disable_watchdog, rtl8723e_mod_params.disable_watchdog, bool, 0444); MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); -MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n"); -MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); +MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 1)\n"); +MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 0)\n"); MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n"); MODULE_PARM_DESC(aspm, "Set to 1 to enable ASPM (default 1)\n"); MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)"); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c index 1263b12db5dc..11f94b1a436f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c @@ -332,7 +332,7 @@ static void _rtl8723be_phy_set_txpower_by_rate_base(struct ieee80211_hw *hw, "Invalid RateSection %d in Band 2.4G, Rf Path %d, %dTx in PHY_SetTxPowerByRateBase()\n", rate_section, path, txnum); break; - }; + } } else { RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Invalid Band %d in PHY_SetTxPowerByRateBase()\n", @@ -374,7 +374,7 @@ static u8 _rtl8723be_phy_get_txpower_by_rate_base(struct ieee80211_hw *hw, "Invalid RateSection %d in Band 2.4G, Rf Path %d, %dTx in PHY_GetTxPowerByRateBase()\n", rate_section, path, txnum); break; - }; + } } else { RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Invalid Band %d in PHY_GetTxPowerByRateBase()\n", @@ -694,7 +694,7 @@ static u8 _rtl8723be_get_rate_section_index(u32 regaddr) else if (regaddr >= 0xE20 && regaddr <= 0xE4C) index = (u8)((regaddr - 0xE20) / 4); break; - }; + } return index; } diff --git a/drivers/net/wireless/rsi/rsi_91x_debugfs.c b/drivers/net/wireless/rsi/rsi_91x_debugfs.c index 8c6ca8e689e4..d0c35f3e2012 100644 --- a/drivers/net/wireless/rsi/rsi_91x_debugfs.c +++ b/drivers/net/wireless/rsi/rsi_91x_debugfs.c @@ -297,11 +297,6 @@ int rsi_init_dbgfs(struct rsi_hw *adapter) dev_dbgfs->subdir = debugfs_create_dir(devdir, NULL); - if (!dev_dbgfs->subdir) { - kfree(dev_dbgfs); - return -ENOMEM; - } - for (ii = 0; ii < 
adapter->num_debugfs_entries; ii++) { files = &dev_debugfs_files[ii]; dev_dbgfs->rsi_files[ii] = diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c index 182b06629371..1dbaab2a96b7 100644 --- a/drivers/net/wireless/rsi/rsi_91x_hal.c +++ b/drivers/net/wireless/rsi/rsi_91x_hal.c @@ -100,6 +100,9 @@ int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb) mgmt_desc->frame_type = TX_DOT11_MGMT; mgmt_desc->header_len = MIN_802_11_HDR_LEN; mgmt_desc->xtend_desc_size = header_size - FRAME_DESC_SZ; + + if (ieee80211_is_probe_req(wh->frame_control)) + mgmt_desc->frame_info = cpu_to_le16(RSI_INSERT_SEQ_IN_FW); mgmt_desc->frame_info |= cpu_to_le16(RATE_INFO_ENABLE); if (is_broadcast_ether_addr(wh->addr1)) mgmt_desc->frame_info |= cpu_to_le16(RSI_BROADCAST_PKT); diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index e56fc83faf0e..aded1ae4fad5 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -229,6 +229,68 @@ static void rsi_register_rates_channels(struct rsi_hw *adapter, int band) /* sbands->ht_cap.mcs.rx_highest = 0x82; */ } +static int rsi_mac80211_hw_scan_start(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_scan_request *hw_req) +{ + struct cfg80211_scan_request *scan_req = &hw_req->req; + struct rsi_hw *adapter = hw->priv; + struct rsi_common *common = adapter->priv; + struct ieee80211_bss_conf *bss = &vif->bss_conf; + + rsi_dbg(INFO_ZONE, "***** Hardware scan start *****\n"); + + if (common->fsm_state != FSM_MAC_INIT_DONE) + return -ENODEV; + + if ((common->wow_flags & RSI_WOW_ENABLED) || + scan_req->n_channels == 0) + return -EINVAL; + + /* Scan already in progress. So return */ + if (common->bgscan_en) + return -EBUSY; + + /* If STA is not connected, return with special value 1, in order + * to start sw_scan in mac80211 + */ + if (!bss->assoc) + return 1; + + mutex_lock(&common->mutex); + common->hwscan = scan_req; + if (!rsi_send_bgscan_params(common, RSI_START_BGSCAN)) { + if (!rsi_send_bgscan_probe_req(common, vif)) { + rsi_dbg(INFO_ZONE, "Background scan started...\n"); + common->bgscan_en = true; + } + } + mutex_unlock(&common->mutex); + + return 0; +} + +static void rsi_mac80211_cancel_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct rsi_hw *adapter = hw->priv; + struct rsi_common *common = adapter->priv; + struct cfg80211_scan_info info; + + rsi_dbg(INFO_ZONE, "***** Hardware scan stop *****\n"); + mutex_lock(&common->mutex); + + if (common->bgscan_en) { + if (!rsi_send_bgscan_params(common, RSI_STOP_BGSCAN)) + common->bgscan_en = false; + info.aborted = false; + ieee80211_scan_completed(adapter->hw, &info); + rsi_dbg(INFO_ZONE, "Back ground scan cancelled\b\n"); + } + common->hwscan = NULL; + mutex_unlock(&common->mutex); +} + /** * rsi_mac80211_detach() - This function is used to de-initialize the * Mac80211 stack. 
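
The two scan handlers added above rely on a pair of mac80211 conventions: .hw_scan may return 1 to make the stack run its software scan instead (used here when the STA is not yet associated), and every started hardware scan must eventually be closed out with ieee80211_scan_completed(), as the cancel path does. A condensed sketch of the completion side (foo_scan_done is a hypothetical helper; the API call and struct are real):

static void foo_scan_done(struct ieee80211_hw *hw, bool aborted)
{
	struct cfg80211_scan_info info = {
		/* true tells cfg80211 the result set may be incomplete */
		.aborted = aborted,
	};

	ieee80211_scan_completed(hw, &info);
}
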
@@ -1917,6 +1979,8 @@ static const struct ieee80211_ops mac80211_ops = { .suspend = rsi_mac80211_suspend, .resume = rsi_mac80211_resume, #endif + .hw_scan = rsi_mac80211_hw_scan_start, + .cancel_hw_scan = rsi_mac80211_cancel_hw_scan, }; /** @@ -1999,6 +2063,9 @@ int rsi_mac80211_attach(struct rsi_common *common) common->max_stations = wiphy->max_ap_assoc_sta; rsi_dbg(ERR_ZONE, "Max Stations Allowed = %d\n", common->max_stations); hw->sta_data_size = sizeof(struct rsi_sta); + + wiphy->max_scan_ssids = RSI_MAX_SCAN_SSIDS; + wiphy->max_scan_ie_len = RSI_MAX_SCAN_IE_LEN; wiphy->flags = WIPHY_FLAG_REPORTS_OBSS; wiphy->flags |= WIPHY_FLAG_AP_UAPSD; wiphy->features |= NL80211_FEATURE_INACTIVITY_TIMER; diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c index 01d99ed985ee..ca3a55ed72e4 100644 --- a/drivers/net/wireless/rsi/rsi_91x_main.c +++ b/drivers/net/wireless/rsi/rsi_91x_main.c @@ -328,6 +328,7 @@ struct rsi_hw *rsi_91x_init(u16 oper_mode) } rsi_default_ps_params(adapter); + init_bgscan_params(common); spin_lock_init(&adapter->ps_lock); timer_setup(&common->roc_timer, rsi_roc_timeout, 0); init_completion(&common->wlan_init_completion); diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index 1095df7d9573..404241424a62 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -15,6 +15,7 @@ */ #include <linux/etherdevice.h> +#include <linux/timer.h> #include "rsi_mgmt.h" #include "rsi_common.h" #include "rsi_ps.h" @@ -236,6 +237,18 @@ static void rsi_set_default_parameters(struct rsi_common *common) common->dtim_cnt = RSI_DTIM_COUNT; } +void init_bgscan_params(struct rsi_common *common) +{ + memset((u8 *)&common->bgscan, 0, sizeof(struct rsi_bgscan_params)); + common->bgscan.bgscan_threshold = RSI_DEF_BGSCAN_THRLD; + common->bgscan.roam_threshold = RSI_DEF_ROAM_THRLD; + common->bgscan.bgscan_periodicity = RSI_BGSCAN_PERIODICITY; + common->bgscan.num_bgscan_channels = 0; + common->bgscan.two_probe = 1; + common->bgscan.active_scan_duration = RSI_ACTIVE_SCAN_TIME; + common->bgscan.passive_scan_duration = RSI_PASSIVE_SCAN_TIME; +} + /** * rsi_set_contention_vals() - This function sets the contention values for the * backoff procedure. 
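For quick reference, the hex defaults programmed by init_bgscan_params() decode as follows (the macros appear in the rsi_main.h hunk later in this diff; the units are firmware-defined and not documented by the patch, so the annotations are inferred from the names):

/* Decoded bgscan defaults set by init_bgscan_params():
 *   bgscan_threshold      = RSI_DEF_BGSCAN_THRLD   = 0x00 ->  0
 *   roam_threshold        = RSI_DEF_ROAM_THRLD     = 0x0a -> 10
 *   bgscan_periodicity    = RSI_BGSCAN_PERIODICITY = 0x1e -> 30
 *   active_scan_duration  = RSI_ACTIVE_SCAN_TIME   = 0x14 -> 20
 *   passive_scan_duration = RSI_PASSIVE_SCAN_TIME  = 0x46 -> 70
 *   two_probe             = 1  (presumably two probe requests per channel)
 *   num_bgscan_channels   = 0  (filled in later from the cfg80211 request)
 */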
@@ -1628,6 +1641,107 @@ int rsi_send_wowlan_request(struct rsi_common *common, u16 flags, } #endif +int rsi_send_bgscan_params(struct rsi_common *common, int enable) +{ + struct rsi_bgscan_params *params = &common->bgscan; + struct cfg80211_scan_request *scan_req = common->hwscan; + struct rsi_bgscan_config *bgscan; + struct sk_buff *skb; + u16 frame_len = sizeof(*bgscan); + u8 i; + + rsi_dbg(MGMT_TX_ZONE, "%s: Sending bgscan params frame\n", __func__); + + skb = dev_alloc_skb(frame_len); + if (!skb) + return -ENOMEM; + memset(skb->data, 0, frame_len); + + bgscan = (struct rsi_bgscan_config *)skb->data; + rsi_set_len_qno(&bgscan->desc_dword0.len_qno, + (frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q); + bgscan->desc_dword0.frame_type = BG_SCAN_PARAMS; + bgscan->bgscan_threshold = cpu_to_le16(params->bgscan_threshold); + bgscan->roam_threshold = cpu_to_le16(params->roam_threshold); + if (enable) + bgscan->bgscan_periodicity = + cpu_to_le16(params->bgscan_periodicity); + bgscan->active_scan_duration = + cpu_to_le16(params->active_scan_duration); + bgscan->passive_scan_duration = + cpu_to_le16(params->passive_scan_duration); + bgscan->two_probe = params->two_probe; + + bgscan->num_bgscan_channels = scan_req->n_channels; + for (i = 0; i < bgscan->num_bgscan_channels; i++) + bgscan->channels2scan[i] = + cpu_to_le16(scan_req->channels[i]->hw_value); + + skb_put(skb, frame_len); + + return rsi_send_internal_mgmt_frame(common, skb); +} + +/* This function sends the probe request to be used by firmware in + * background scan + */ +int rsi_send_bgscan_probe_req(struct rsi_common *common, + struct ieee80211_vif *vif) +{ + struct cfg80211_scan_request *scan_req = common->hwscan; + struct rsi_bgscan_probe *bgscan; + struct sk_buff *skb; + struct sk_buff *probereq_skb; + u16 frame_len = sizeof(*bgscan); + size_t ssid_len = 0; + u8 *ssid = NULL; + + rsi_dbg(MGMT_TX_ZONE, + "%s: Sending bgscan probe req frame\n", __func__); + + if (common->priv->sc_nvifs <= 0) + return -ENODEV; + + if (scan_req->n_ssids) { + ssid = scan_req->ssids[0].ssid; + ssid_len = scan_req->ssids[0].ssid_len; + } + + skb = dev_alloc_skb(frame_len + MAX_BGSCAN_PROBE_REQ_LEN); + if (!skb) + return -ENOMEM; + memset(skb->data, 0, frame_len + MAX_BGSCAN_PROBE_REQ_LEN); + + bgscan = (struct rsi_bgscan_probe *)skb->data; + bgscan->desc_dword0.frame_type = BG_SCAN_PROBE_REQ; + bgscan->flags = cpu_to_le16(HOST_BG_SCAN_TRIG); + if (common->band == NL80211_BAND_5GHZ) { + bgscan->mgmt_rate = cpu_to_le16(RSI_RATE_6); + bgscan->def_chan = cpu_to_le16(40); + } else { + bgscan->mgmt_rate = cpu_to_le16(RSI_RATE_1); + bgscan->def_chan = cpu_to_le16(11); + } + bgscan->channel_scan_time = cpu_to_le16(RSI_CHANNEL_SCAN_TIME); + + probereq_skb = ieee80211_probereq_get(common->priv->hw, vif->addr, ssid, + ssid_len, scan_req->ie_len); + + memcpy(&skb->data[frame_len], probereq_skb->data, probereq_skb->len); + + bgscan->probe_req_length = cpu_to_le16(probereq_skb->len); + + rsi_set_len_qno(&bgscan->desc_dword0.len_qno, + (frame_len - FRAME_DESC_SZ + probereq_skb->len), + RSI_WIFI_MGMT_Q); + + skb_put(skb, frame_len + probereq_skb->len); + + dev_kfree_skb(probereq_skb); + + return rsi_send_internal_mgmt_frame(common, skb); +} + /** * rsi_handle_ta_confirm_type() - This function handles the confirm frames. * @common: Pointer to the driver private structure. 
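One sharp edge in rsi_send_bgscan_probe_req() as merged: ieee80211_probereq_get() returns NULL on allocation failure, and the result is dereferenced without a check, which also leaks the just-allocated skb on that path. A defensive variant of the call site would bail out first; this is a sketch, not the committed code:

        probereq_skb = ieee80211_probereq_get(common->priv->hw, vif->addr,
                                              ssid, ssid_len,
                                              scan_req->ie_len);
        if (!probereq_skb) {
                dev_kfree_skb(skb);     /* don't leak the config frame */
                return -ENOMEM;
        }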
@@ -1771,9 +1885,28 @@ static int rsi_handle_ta_confirm_type(struct rsi_common *common, return 0; } break; + + case SCAN_REQUEST: + rsi_dbg(INFO_ZONE, "Set channel confirm\n"); + break; + case WAKEUP_SLEEP_REQUEST: rsi_dbg(INFO_ZONE, "Wakeup/Sleep confirmation.\n"); return rsi_handle_ps_confirm(adapter, msg); + + case BG_SCAN_PROBE_REQ: + rsi_dbg(INFO_ZONE, "BG scan complete event\n"); + if (common->bgscan_en) { + struct cfg80211_scan_info info; + + if (!rsi_send_bgscan_params(common, RSI_STOP_BGSCAN)) + common->bgscan_en = 0; + info.aborted = false; + ieee80211_scan_completed(adapter->hw, &info); + } + rsi_dbg(INFO_ZONE, "Background scan completed\n"); + break; + default: rsi_dbg(INFO_ZONE, "%s: Invalid TA confirm pkt received\n", __func__); diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c index 5733e440ecaf..b412b65eb1f4 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c @@ -230,16 +230,19 @@ static void rsi_reset_card(struct sdio_func *pfunction) rsi_dbg(ERR_ZONE, "%s: CMD0 failed : %d\n", __func__, err); /* Issue CMD5, arg = 0 */ - err = rsi_issue_sdiocommand(pfunction, SD_IO_SEND_OP_COND, 0, - (MMC_RSP_R4 | MMC_CMD_BCR), &resp); - if (err) - rsi_dbg(ERR_ZONE, "%s: CMD5 failed : %d\n", __func__, err); - card->ocr = resp; + if (!host->ocr_avail) { + err = rsi_issue_sdiocommand(pfunction, SD_IO_SEND_OP_COND, 0, + (MMC_RSP_R4 | MMC_CMD_BCR), &resp); + if (err) + rsi_dbg(ERR_ZONE, "%s: CMD5 failed : %d\n", + __func__, err); + host->ocr_avail = resp; + } /* Issue CMD5, arg = ocr. Wait till card is ready */ for (i = 0; i < 100; i++) { err = rsi_issue_sdiocommand(pfunction, SD_IO_SEND_OP_COND, - card->ocr, + host->ocr_avail, (MMC_RSP_R4 | MMC_CMD_BCR), &resp); if (err) { rsi_dbg(ERR_ZONE, "%s: CMD5 failed : %d\n", diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h index a084f224bb03..4dc0c0123469 100644 --- a/drivers/net/wireless/rsi/rsi_main.h +++ b/drivers/net/wireless/rsi/rsi_main.h @@ -164,6 +164,24 @@ struct transmit_q_stats { u32 total_tx_pkt_freed[NUM_EDCA_QUEUES + 2]; }; +#define MAX_BGSCAN_CHANNELS_DUAL_BAND 38 +#define MAX_BGSCAN_PROBE_REQ_LEN 0x64 +#define RSI_DEF_BGSCAN_THRLD 0x0 +#define RSI_DEF_ROAM_THRLD 0xa +#define RSI_BGSCAN_PERIODICITY 0x1e +#define RSI_ACTIVE_SCAN_TIME 0x14 +#define RSI_PASSIVE_SCAN_TIME 0x46 +#define RSI_CHANNEL_SCAN_TIME 20 +struct rsi_bgscan_params { + u16 bgscan_threshold; + u16 roam_threshold; + u16 bgscan_periodicity; + u8 num_bgscan_channels; + u8 two_probe; + u16 active_scan_duration; + u16 passive_scan_duration; +}; + struct vif_priv { bool is_ht; bool sgi; @@ -289,6 +307,10 @@ struct rsi_common { bool eapol4_confirm; void *bt_adapter; + + struct cfg80211_scan_request *hwscan; + struct rsi_bgscan_params bgscan; + u8 bgscan_en; }; struct eepromrw_info { diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h index 359fbdf85739..ea83faa15c7e 100644 --- a/drivers/net/wireless/rsi/rsi_mgmt.h +++ b/drivers/net/wireless/rsi/rsi_mgmt.h @@ -228,6 +228,9 @@ #define RSI_MAX_TX_AGGR_FRMS 8 #define RSI_MAX_RX_AGGR_FRMS 8 +#define RSI_MAX_SCAN_SSIDS 16 +#define RSI_MAX_SCAN_IE_LEN 256 + enum opmode { RSI_OPMODE_UNSUPPORTED = -1, RSI_OPMODE_AP = 0, @@ -623,6 +626,34 @@ struct rsi_wowlan_req { u16 host_sleep_status; } __packed; +#define RSI_START_BGSCAN 1 +#define RSI_STOP_BGSCAN 0 +#define HOST_BG_SCAN_TRIG BIT(4) +struct rsi_bgscan_config { + struct rsi_cmd_desc_dword0 desc_dword0; + __le64 reserved; + 
__le32 reserved1; + __le16 bgscan_threshold; + __le16 roam_threshold; + __le16 bgscan_periodicity; + u8 num_bgscan_channels; + u8 two_probe; + __le16 active_scan_duration; + __le16 passive_scan_duration; + __le16 channels2scan[MAX_BGSCAN_CHANNELS_DUAL_BAND]; +} __packed; + +struct rsi_bgscan_probe { + struct rsi_cmd_desc_dword0 desc_dword0; + __le64 reserved; + __le32 reserved1; + __le16 mgmt_rate; + __le16 flags; + __le16 def_chan; + __le16 channel_scan_time; + __le16 probe_req_length; +} __packed; + static inline u32 rsi_get_queueno(u8 *addr, u16 offset) { return (le16_to_cpu(*(__le16 *)&addr[offset]) & 0x7000) >> 12; @@ -694,4 +725,8 @@ int rsi_send_wowlan_request(struct rsi_common *common, u16 flags, #endif int rsi_send_ps_request(struct rsi_hw *adapter, bool enable, struct ieee80211_vif *vif); +void init_bgscan_params(struct rsi_common *common); +int rsi_send_bgscan_params(struct rsi_common *common, int enable); +int rsi_send_bgscan_probe_req(struct rsi_common *common, + struct ieee80211_vif *vif); #endif diff --git a/drivers/net/wireless/st/cw1200/debug.c b/drivers/net/wireless/st/cw1200/debug.c index 2231ba08bc1f..d94266d9d0b8 100644 --- a/drivers/net/wireless/st/cw1200/debug.c +++ b/drivers/net/wireless/st/cw1200/debug.c @@ -371,28 +371,14 @@ int cw1200_debug_init(struct cw1200_common *priv) d->debugfs_phy = debugfs_create_dir("cw1200", priv->hw->wiphy->debugfsdir); - if (!d->debugfs_phy) - goto err; - - if (!debugfs_create_file("status", 0400, d->debugfs_phy, - priv, &cw1200_status_fops)) - goto err; - - if (!debugfs_create_file("counters", 0400, d->debugfs_phy, - priv, &cw1200_counters_fops)) - goto err; - - if (!debugfs_create_file("wsm_dumps", 0200, d->debugfs_phy, - priv, &fops_wsm_dumps)) - goto err; + debugfs_create_file("status", 0400, d->debugfs_phy, priv, + &cw1200_status_fops); + debugfs_create_file("counters", 0400, d->debugfs_phy, priv, + &cw1200_counters_fops); + debugfs_create_file("wsm_dumps", 0200, d->debugfs_phy, priv, + &fops_wsm_dumps); return 0; - -err: - priv->debug = NULL; - debugfs_remove_recursive(d->debugfs_phy); - kfree(d); - return ret; } void cw1200_debug_release(struct cw1200_common *priv) diff --git a/drivers/net/wireless/st/cw1200/fwio.c b/drivers/net/wireless/st/cw1200/fwio.c index 30e7646d04af..b7881232499c 100644 --- a/drivers/net/wireless/st/cw1200/fwio.c +++ b/drivers/net/wireless/st/cw1200/fwio.c @@ -465,8 +465,8 @@ int cw1200_load_firmware(struct cw1200_common *priv) if (!(val32 & ST90TDS_CONFIG_ACCESS_MODE_BIT)) { pr_err("Device is already in QUEUE mode!\n"); - ret = -EINVAL; - goto out; + ret = -EINVAL; + goto out; } switch (priv->hw_type) { diff --git a/drivers/net/wireless/st/cw1200/queue.c b/drivers/net/wireless/st/cw1200/queue.c index 7c31b63b8258..7895efefa95d 100644 --- a/drivers/net/wireless/st/cw1200/queue.c +++ b/drivers/net/wireless/st/cw1200/queue.c @@ -283,7 +283,6 @@ int cw1200_queue_put(struct cw1200_queue *queue, struct cw1200_txpriv *txpriv) { int ret = 0; - LIST_HEAD(gc_list); struct cw1200_queue_stats *stats = queue->stats; if (txpriv->link_id >= queue->stats->map_capacity) diff --git a/drivers/net/wireless/st/cw1200/scan.c b/drivers/net/wireless/st/cw1200/scan.c index 0a9eac93dd01..71e9b91cf15b 100644 --- a/drivers/net/wireless/st/cw1200/scan.c +++ b/drivers/net/wireless/st/cw1200/scan.c @@ -84,8 +84,11 @@ int cw1200_hw_scan(struct ieee80211_hw *hw, frame.skb = ieee80211_probereq_get(hw, priv->vif->addr, NULL, 0, req->ie_len); - if (!frame.skb) + if (!frame.skb) { + mutex_unlock(&priv->conf_mutex); + up(&priv->scan.lock); 
return -ENOMEM; + } if (req->ie_len) skb_put_data(frame.skb, req->ie, req->ie_len); diff --git a/drivers/net/wireless/ti/wl1251/debugfs.c b/drivers/net/wireless/ti/wl1251/debugfs.c index 448da1f8c22f..c99b23aaa70e 100644 --- a/drivers/net/wireless/ti/wl1251/debugfs.c +++ b/drivers/net/wireless/ti/wl1251/debugfs.c @@ -54,11 +54,6 @@ static const struct file_operations name## _ops = { \ #define DEBUGFS_ADD(name, parent) \ wl->debugfs.name = debugfs_create_file(#name, 0400, parent, \ wl, &name## _ops); \ - if (IS_ERR(wl->debugfs.name)) { \ - ret = PTR_ERR(wl->debugfs.name); \ - wl->debugfs.name = NULL; \ - goto out; \ - } #define DEBUGFS_DEL(name) \ do { \ @@ -354,10 +349,8 @@ static void wl1251_debugfs_delete_files(struct wl1251 *wl) DEBUGFS_DEL(excessive_retries); } -static int wl1251_debugfs_add_files(struct wl1251 *wl) +static void wl1251_debugfs_add_files(struct wl1251 *wl) { - int ret = 0; - DEBUGFS_FWSTATS_ADD(tx, internal_desc_overflow); DEBUGFS_FWSTATS_ADD(rx, out_of_mem); @@ -453,12 +446,6 @@ static int wl1251_debugfs_add_files(struct wl1251 *wl) DEBUGFS_ADD(tx_queue_status, wl->debugfs.rootdir); DEBUGFS_ADD(retry_count, wl->debugfs.rootdir); DEBUGFS_ADD(excessive_retries, wl->debugfs.rootdir); - -out: - if (ret < 0) - wl1251_debugfs_delete_files(wl); - - return ret; } void wl1251_debugfs_reset(struct wl1251 *wl) @@ -471,56 +458,20 @@ void wl1251_debugfs_reset(struct wl1251 *wl) int wl1251_debugfs_init(struct wl1251 *wl) { - int ret; + wl->stats.fw_stats = kzalloc(sizeof(*wl->stats.fw_stats), GFP_KERNEL); + if (!wl->stats.fw_stats) + return -ENOMEM; wl->debugfs.rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL); - if (IS_ERR(wl->debugfs.rootdir)) { - ret = PTR_ERR(wl->debugfs.rootdir); - wl->debugfs.rootdir = NULL; - goto err; - } - wl->debugfs.fw_statistics = debugfs_create_dir("fw-statistics", wl->debugfs.rootdir); - if (IS_ERR(wl->debugfs.fw_statistics)) { - ret = PTR_ERR(wl->debugfs.fw_statistics); - wl->debugfs.fw_statistics = NULL; - goto err_root; - } - - wl->stats.fw_stats = kzalloc(sizeof(*wl->stats.fw_stats), - GFP_KERNEL); - - if (!wl->stats.fw_stats) { - ret = -ENOMEM; - goto err_fw; - } - wl->stats.fw_stats_update = jiffies; - ret = wl1251_debugfs_add_files(wl); - - if (ret < 0) - goto err_file; + wl1251_debugfs_add_files(wl); return 0; - -err_file: - kfree(wl->stats.fw_stats); - wl->stats.fw_stats = NULL; - -err_fw: - debugfs_remove(wl->debugfs.fw_statistics); - wl->debugfs.fw_statistics = NULL; - -err_root: - debugfs_remove(wl->debugfs.rootdir); - wl->debugfs.rootdir = NULL; - -err: - return ret; } void wl1251_debugfs_exit(struct wl1251 *wl) diff --git a/drivers/net/wireless/ti/wl12xx/debugfs.c b/drivers/net/wireless/ti/wl12xx/debugfs.c index 0521cbf858cf..6c3c04eca8fd 100644 --- a/drivers/net/wireless/ti/wl12xx/debugfs.c +++ b/drivers/net/wireless/ti/wl12xx/debugfs.c @@ -125,20 +125,10 @@ WL12XX_DEBUGFS_FWSTATS_FILE(rxpipe, tx_xfr_host_int_trig_rx_data, "%u"); int wl12xx_debugfs_add_files(struct wl1271 *wl, struct dentry *rootdir) { - int ret = 0; - struct dentry *entry, *stats, *moddir; + struct dentry *stats, *moddir; moddir = debugfs_create_dir(KBUILD_MODNAME, rootdir); - if (!moddir || IS_ERR(moddir)) { - entry = moddir; - goto err; - } - stats = debugfs_create_dir("fw_stats", moddir); - if (!stats || IS_ERR(stats)) { - entry = stats; - goto err; - } DEBUGFS_FWSTATS_ADD(tx, internal_desc_overflow); @@ -232,12 +222,4 @@ int wl12xx_debugfs_add_files(struct wl1271 *wl, DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data); return 0; - -err: - if (IS_ERR(entry)) 
- ret = PTR_ERR(entry); - else - ret = -ENOMEM; - - return ret; } diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c index 597e934c4630..5f4ec997ca59 100644 --- a/drivers/net/wireless/ti/wl18xx/debugfs.c +++ b/drivers/net/wireless/ti/wl18xx/debugfs.c @@ -422,20 +422,10 @@ static const struct file_operations radar_debug_mode_ops = { int wl18xx_debugfs_add_files(struct wl1271 *wl, struct dentry *rootdir) { - int ret = 0; - struct dentry *entry, *stats, *moddir; + struct dentry *stats, *moddir; moddir = debugfs_create_dir(KBUILD_MODNAME, rootdir); - if (!moddir || IS_ERR(moddir)) { - entry = moddir; - goto err; - } - stats = debugfs_create_dir("fw_stats", moddir); - if (!stats || IS_ERR(stats)) { - entry = stats; - goto err; - } DEBUGFS_ADD(clear_fw_stats, stats); @@ -590,12 +580,4 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl, DEBUGFS_ADD(dynamic_fw_traces, moddir); return 0; - -err: - if (IS_ERR(entry)) - ret = PTR_ERR(entry); - else - ret = -ENOMEM; - - return ret; } diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c index 903968735a74..348be0aed97e 100644 --- a/drivers/net/wireless/ti/wlcore/cmd.c +++ b/drivers/net/wireless/ti/wlcore/cmd.c @@ -1427,7 +1427,7 @@ int wl1271_cmd_set_sta_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0); if (ret < 0) { wl1271_warning("could not set keys"); - goto out; + goto out; } out: diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c index aeb74e74698e..68acd901d384 100644 --- a/drivers/net/wireless/ti/wlcore/debugfs.c +++ b/drivers/net/wireless/ti/wlcore/debugfs.c @@ -1301,11 +1301,10 @@ static const struct file_operations fw_logger_ops = { .llseek = default_llseek, }; -static int wl1271_debugfs_add_files(struct wl1271 *wl, - struct dentry *rootdir) +static void wl1271_debugfs_add_files(struct wl1271 *wl, + struct dentry *rootdir) { - int ret = 0; - struct dentry *entry, *streaming; + struct dentry *streaming; DEBUGFS_ADD(tx_queue_len, rootdir); DEBUGFS_ADD(retry_count, rootdir); @@ -1330,23 +1329,11 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl, DEBUGFS_ADD(fw_logger, rootdir); streaming = debugfs_create_dir("rx_streaming", rootdir); - if (!streaming || IS_ERR(streaming)) - goto err; DEBUGFS_ADD_PREFIX(rx_streaming, interval, streaming); DEBUGFS_ADD_PREFIX(rx_streaming, always, streaming); DEBUGFS_ADD_PREFIX(dev, mem, rootdir); - - return 0; - -err: - if (IS_ERR(entry)) - ret = PTR_ERR(entry); - else - ret = -ENOMEM; - - return ret; } void wl1271_debugfs_reset(struct wl1271 *wl) @@ -1367,11 +1354,6 @@ int wl1271_debugfs_init(struct wl1271 *wl) rootdir = debugfs_create_dir(KBUILD_MODNAME, wl->hw->wiphy->debugfsdir); - if (IS_ERR(rootdir)) { - ret = PTR_ERR(rootdir); - goto out; - } - wl->stats.fw_stats = kzalloc(wl->stats.fw_stats_len, GFP_KERNEL); if (!wl->stats.fw_stats) { ret = -ENOMEM; @@ -1380,9 +1362,7 @@ int wl1271_debugfs_init(struct wl1271 *wl) wl->stats.fw_stats_update = jiffies; - ret = wl1271_debugfs_add_files(wl, rootdir); - if (ret < 0) - goto out_exit; + wl1271_debugfs_add_files(wl, rootdir); ret = wlcore_debugfs_init(wl, rootdir); if (ret < 0) diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h index bf14676e6515..a4952c4f587e 100644 --- a/drivers/net/wireless/ti/wlcore/debugfs.h +++ b/drivers/net/wireless/ti/wlcore/debugfs.h @@ -53,19 +53,15 @@ static const struct file_operations name## _ops 
= { \ #define DEBUGFS_ADD(name, parent) \ do { \ - entry = debugfs_create_file(#name, 0400, parent, \ - wl, &name## _ops); \ - if (!entry || IS_ERR(entry)) \ - goto err; \ + debugfs_create_file(#name, 0400, parent, \ + wl, &name## _ops); \ } while (0) #define DEBUGFS_ADD_PREFIX(prefix, name, parent) \ do { \ - entry = debugfs_create_file(#name, 0400, parent, \ + debugfs_create_file(#name, 0400, parent, \ wl, &prefix## _## name## _ops); \ - if (!entry || IS_ERR(entry)) \ - goto err; \ } while (0) #define DEBUGFS_FWSTATS_FILE(sub, name, fmt, struct_type) \ diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 26b187336875..2e12de813a5b 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -1085,8 +1085,11 @@ static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt) goto out; ret = wl12xx_fetch_firmware(wl, plt); - if (ret < 0) - goto out; + if (ret < 0) { + kfree(wl->fw_status); + kfree(wl->raw_fw_status); + kfree(wl->tx_res_if); + } out: return ret; diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index bd10165d7eec..4d4b07701149 100644 --- a/drivers/net/wireless/ti/wlcore/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c @@ -164,6 +164,12 @@ static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue) } sdio_claim_host(func); + /* + * To guarantee that the SDIO card is power cycled, as required to make + * the FW programming succeed, let's do a brute force HW reset. + */ + mmc_hw_reset(card->host); + sdio_enable_func(func); sdio_release_host(func); @@ -174,20 +180,13 @@ static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue) { struct sdio_func *func = dev_to_sdio_func(glue->dev); struct mmc_card *card = func->card; - int error; sdio_claim_host(func); sdio_disable_func(func); sdio_release_host(func); /* Let runtime PM know the card is powered off */ - error = pm_runtime_put(&card->dev); - if (error < 0 && error != -EBUSY) { - dev_err(&card->dev, "%s failed: %i\n", __func__, error); - - return error; - } - + pm_runtime_put(&card->dev); return 0; } diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index 2625740bdc4a..330ddb64930f 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -655,7 +655,7 @@ static void frontend_changed(struct xenbus_device *dev, set_backend_state(be, XenbusStateClosed); if (xenbus_dev_is_online(dev)) break; - /* fall through if not online */ + /* fall through - if not online */ case XenbusStateUnknown: set_backend_state(be, XenbusStateClosed); device_unregister(&dev->dev); diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 150e49723c15..6a9dd68c0f4f 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1253,6 +1253,7 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, * effects say only one namespace is affected.
*/ if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) { + mutex_lock(&ctrl->scan_lock); nvme_start_freeze(ctrl); nvme_wait_freeze(ctrl); } @@ -1281,8 +1282,10 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects) */ if (effects & NVME_CMD_EFFECTS_LBCC) nvme_update_formats(ctrl); - if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) + if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) { nvme_unfreeze(ctrl); + mutex_unlock(&ctrl->scan_lock); + } if (effects & NVME_CMD_EFFECTS_CCC) nvme_init_identify(ctrl); if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) @@ -3401,6 +3404,7 @@ static void nvme_scan_work(struct work_struct *work) if (nvme_identify_ctrl(ctrl, &id)) return; + mutex_lock(&ctrl->scan_lock); nn = le32_to_cpu(id->nn); if (ctrl->vs >= NVME_VS(1, 1, 0) && !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) { @@ -3409,6 +3413,7 @@ static void nvme_scan_work(struct work_struct *work) } nvme_scan_ns_sequential(ctrl, nn); out_free_id: + mutex_unlock(&ctrl->scan_lock); kfree(id); down_write(&ctrl->namespaces_rwsem); list_sort(NULL, &ctrl->namespaces, ns_cmp); @@ -3652,6 +3657,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, ctrl->state = NVME_CTRL_NEW; spin_lock_init(&ctrl->lock); + mutex_init(&ctrl->scan_lock); INIT_LIST_HEAD(&ctrl->namespaces); init_rwsem(&ctrl->namespaces_rwsem); ctrl->dev = dev; diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index ab961bdeea89..c4a1bb41abf0 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -154,6 +154,7 @@ struct nvme_ctrl { enum nvme_ctrl_state state; bool identified; spinlock_t lock; + struct mutex scan_lock; const struct nvme_ctrl_ops *ops; struct request_queue *admin_q; struct request_queue *connect_q; diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 9bc585415d9b..7fee665ec45e 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2557,27 +2557,18 @@ static void nvme_reset_work(struct work_struct *work) if (dev->ctrl.ctrl_config & NVME_CC_ENABLE) nvme_dev_disable(dev, false); - /* - * Introduce CONNECTING state from nvme-fc/rdma transports to mark the - * initializing procedure here. - */ - if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) { - dev_warn(dev->ctrl.device, - "failed to mark controller CONNECTING\n"); - goto out; - } - + mutex_lock(&dev->shutdown_lock); result = nvme_pci_enable(dev); if (result) - goto out; + goto out_unlock; result = nvme_pci_configure_admin_queue(dev); if (result) - goto out; + goto out_unlock; result = nvme_alloc_admin_tags(dev); if (result) - goto out; + goto out_unlock; /* * Limit the max command size to prevent iod->sg allocations going @@ -2585,6 +2576,17 @@ static void nvme_reset_work(struct work_struct *work) */ dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1; dev->ctrl.max_segments = NVME_MAX_SEGS; + mutex_unlock(&dev->shutdown_lock); + + /* + * Introduce CONNECTING state from nvme-fc/rdma transports to mark the + * initializing procedure here. 
+ */ + if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) { + dev_warn(dev->ctrl.device, + "failed to mark controller CONNECTING\n"); + goto out; + } result = nvme_init_identify(&dev->ctrl); if (result) @@ -2649,6 +2651,8 @@ static void nvme_reset_work(struct work_struct *work) nvme_start_ctrl(&dev->ctrl); return; + out_unlock: + mutex_unlock(&dev->shutdown_lock); out: nvme_remove_dead_ctrl(dev, result); } diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index 52e47dac028f..80f843030e36 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -310,6 +310,9 @@ static int imx6_pcie_attach_pd(struct device *dev) imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie"); if (IS_ERR(imx6_pcie->pd_pcie)) return PTR_ERR(imx6_pcie->pd_pcie); + /* Do nothing when power domain missing */ + if (!imx6_pcie->pd_pcie) + return 0; link = device_link_add(dev, imx6_pcie->pd_pcie, DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME | @@ -323,13 +326,13 @@ static int imx6_pcie_attach_pd(struct device *dev) if (IS_ERR(imx6_pcie->pd_pcie_phy)) return PTR_ERR(imx6_pcie->pd_pcie_phy); - device_link_add(dev, imx6_pcie->pd_pcie_phy, + link = device_link_add(dev, imx6_pcie->pd_pcie_phy, DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE); - if (IS_ERR(link)) { - dev_err(dev, "Failed to add device_link to pcie_phy pd: %ld\n", PTR_ERR(link)); - return PTR_ERR(link); + if (!link) { + dev_err(dev, "Failed to add device_link to pcie_phy pd.\n"); + return -EINVAL; } return 0; diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c index b171b6bc15c8..0c389a30ef5d 100644 --- a/drivers/pci/controller/dwc/pcie-armada8k.c +++ b/drivers/pci/controller/dwc/pcie-armada8k.c @@ -22,7 +22,6 @@ #include <linux/resource.h> #include <linux/of_pci.h> #include <linux/of_irq.h> -#include <linux/gpio/consumer.h> #include "pcie-designware.h" @@ -30,7 +29,6 @@ struct armada8k_pcie { struct dw_pcie *pci; struct clk *clk; struct clk *clk_reg; - struct gpio_desc *reset_gpio; }; #define PCIE_VENDOR_REGS_OFFSET 0x8000 @@ -139,12 +137,6 @@ static int armada8k_pcie_host_init(struct pcie_port *pp) struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct armada8k_pcie *pcie = to_armada8k_pcie(pci); - if (pcie->reset_gpio) { - /* assert and then deassert the reset signal */ - gpiod_set_value_cansleep(pcie->reset_gpio, 1); - msleep(100); - gpiod_set_value_cansleep(pcie->reset_gpio, 0); - } dw_pcie_setup_rc(pp); armada8k_pcie_establish_link(pcie); @@ -257,14 +249,6 @@ static int armada8k_pcie_probe(struct platform_device *pdev) goto fail_clkreg; } - /* Get reset gpio signal and hold asserted (logically high) */ - pcie->reset_gpio = devm_gpiod_get_optional(dev, "reset", - GPIOD_OUT_HIGH); - if (IS_ERR(pcie->reset_gpio)) { - ret = PTR_ERR(pcie->reset_gpio); - goto fail_clkreg; - } - platform_set_drvdata(pdev, pcie); ret = armada8k_add_pcie_port(pcie, pdev); diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index b0a413f3f7ca..e2a879e93d86 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -639,8 +639,9 @@ static void quirk_synopsys_haps(struct pci_dev *pdev) break; } } -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SYNOPSYS, PCI_ANY_ID, - quirk_synopsys_haps); +DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, PCI_ANY_ID, + PCI_CLASS_SERIAL_USB_XHCI, 0, + quirk_synopsys_haps); /* * Let's make the southbridge information explicit instead of having to diff --git a/drivers/phy/marvell/Kconfig 
b/drivers/phy/marvell/Kconfig index 6fb4b56e4c14..224ea4e6a46d 100644 --- a/drivers/phy/marvell/Kconfig +++ b/drivers/phy/marvell/Kconfig @@ -21,6 +21,16 @@ config PHY_BERLIN_USB help Enable this to support the USB PHY on Marvell Berlin SoCs. +config PHY_MVEBU_A38X_COMPHY + tristate "Marvell Armada 38x comphy driver" + depends on ARCH_MVEBU || COMPILE_TEST + depends on OF + select GENERIC_PHY + help + This driver allows controlling the comphy, a hardware block providing + shared serdes PHYs on Marvell Armada 38x. Its serdes lanes can be + used by various controllers (Ethernet, SATA, USB, PCIe...). + config PHY_MVEBU_CP110_COMPHY tristate "Marvell CP110 comphy driver" depends on ARCH_MVEBU || COMPILE_TEST diff --git a/drivers/phy/marvell/Makefile b/drivers/phy/marvell/Makefile index 3975b144f8ec..59b6c03ef756 100644 --- a/drivers/phy/marvell/Makefile +++ b/drivers/phy/marvell/Makefile @@ -2,6 +2,7 @@ obj-$(CONFIG_ARMADA375_USBCLUSTER_PHY) += phy-armada375-usb2.o obj-$(CONFIG_PHY_BERLIN_SATA) += phy-berlin-sata.o obj-$(CONFIG_PHY_BERLIN_USB) += phy-berlin-usb.o +obj-$(CONFIG_PHY_MVEBU_A38X_COMPHY) += phy-armada38x-comphy.o obj-$(CONFIG_PHY_MVEBU_CP110_COMPHY) += phy-mvebu-cp110-comphy.o obj-$(CONFIG_PHY_MVEBU_SATA) += phy-mvebu-sata.o obj-$(CONFIG_PHY_PXA_28NM_HSIC) += phy-pxa-28nm-hsic.o diff --git a/drivers/phy/marvell/phy-armada38x-comphy.c b/drivers/phy/marvell/phy-armada38x-comphy.c new file mode 100644 index 000000000000..3e00bc679d4e --- /dev/null +++ b/drivers/phy/marvell/phy-armada38x-comphy.c @@ -0,0 +1,237 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 Russell King, Deep Blue Solutions Ltd. + * + * Partly derived from CP110 comphy driver by Antoine Tenart + * <antoine.tenart@bootlin.com> + */ +#include <linux/delay.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/phy/phy.h> +#include <linux/phy.h> +#include <linux/platform_device.h> + +#define MAX_A38X_COMPHY 6 +#define MAX_A38X_PORTS 3 + +#define COMPHY_CFG1 0x00 +#define COMPHY_CFG1_GEN_TX(x) ((x) << 26) +#define COMPHY_CFG1_GEN_TX_MSK COMPHY_CFG1_GEN_TX(15) +#define COMPHY_CFG1_GEN_RX(x) ((x) << 22) +#define COMPHY_CFG1_GEN_RX_MSK COMPHY_CFG1_GEN_RX(15) +#define GEN_SGMII_1_25GBPS 6 +#define GEN_SGMII_3_125GBPS 8 + +#define COMPHY_STAT1 0x18 +#define COMPHY_STAT1_PLL_RDY_TX BIT(3) +#define COMPHY_STAT1_PLL_RDY_RX BIT(2) + +#define COMPHY_SELECTOR 0xfc + +struct a38x_comphy; + +struct a38x_comphy_lane { + void __iomem *base; + struct a38x_comphy *priv; + unsigned int n; + + int port; +}; + +struct a38x_comphy { + void __iomem *base; + struct device *dev; + struct a38x_comphy_lane lane[MAX_A38X_COMPHY]; +}; + +static const u8 gbe_mux[MAX_A38X_COMPHY][MAX_A38X_PORTS] = { + { 0, 0, 0 }, + { 4, 5, 0 }, + { 0, 4, 0 }, + { 0, 0, 4 }, + { 0, 3, 0 }, + { 0, 0, 3 }, +}; + +static void a38x_comphy_set_reg(struct a38x_comphy_lane *lane, + unsigned int offset, u32 mask, u32 value) +{ + u32 val; + + val = readl_relaxed(lane->base + offset) & ~mask; + writel(val | value, lane->base + offset); +} + +static void a38x_comphy_set_speed(struct a38x_comphy_lane *lane, + unsigned int gen_tx, unsigned int gen_rx) +{ + a38x_comphy_set_reg(lane, COMPHY_CFG1, + COMPHY_CFG1_GEN_TX_MSK | COMPHY_CFG1_GEN_RX_MSK, + COMPHY_CFG1_GEN_TX(gen_tx) | + COMPHY_CFG1_GEN_RX(gen_rx)); +} + +static int a38x_comphy_poll(struct a38x_comphy_lane *lane, + unsigned int offset, u32 mask, u32 value) +{ + u32 val; + int ret; + + ret = readl_relaxed_poll_timeout_atomic(lane->base + offset, val, + (val & mask) == value, + 1000,
150000); + + if (ret) + dev_err(lane->priv->dev, + "comphy%u: timed out waiting for status\n", lane->n); + + return ret; +} + +/* + * We only support changing the speed for comphys configured for GBE. + * Since that is all we do, we only poll for PLL ready status. + */ +static int a38x_comphy_set_mode(struct phy *phy, enum phy_mode mode, int sub) +{ + struct a38x_comphy_lane *lane = phy_get_drvdata(phy); + unsigned int gen; + + if (mode != PHY_MODE_ETHERNET) + return -EINVAL; + + switch (sub) { + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: + gen = GEN_SGMII_1_25GBPS; + break; + + case PHY_INTERFACE_MODE_2500BASEX: + gen = GEN_SGMII_3_125GBPS; + break; + + default: + return -EINVAL; + } + + a38x_comphy_set_speed(lane, gen, gen); + + return a38x_comphy_poll(lane, COMPHY_STAT1, + COMPHY_STAT1_PLL_RDY_TX | + COMPHY_STAT1_PLL_RDY_RX, + COMPHY_STAT1_PLL_RDY_TX | + COMPHY_STAT1_PLL_RDY_RX); +} + +static const struct phy_ops a38x_comphy_ops = { + .set_mode = a38x_comphy_set_mode, + .owner = THIS_MODULE, +}; + +static struct phy *a38x_comphy_xlate(struct device *dev, + struct of_phandle_args *args) +{ + struct a38x_comphy_lane *lane; + struct phy *phy; + u32 val; + + if (WARN_ON(args->args[0] >= MAX_A38X_PORTS)) + return ERR_PTR(-EINVAL); + + phy = of_phy_simple_xlate(dev, args); + if (IS_ERR(phy)) + return phy; + + lane = phy_get_drvdata(phy); + if (lane->port >= 0) + return ERR_PTR(-EBUSY); + + lane->port = args->args[0]; + + val = readl_relaxed(lane->priv->base + COMPHY_SELECTOR); + val = (val >> (4 * lane->n)) & 0xf; + + if (!gbe_mux[lane->n][lane->port] || + val != gbe_mux[lane->n][lane->port]) { + dev_warn(lane->priv->dev, + "comphy%u: not configured for GBE\n", lane->n); + phy = ERR_PTR(-EINVAL); + } + + return phy; +} + +static int a38x_comphy_probe(struct platform_device *pdev) +{ + struct phy_provider *provider; + struct device_node *child; + struct a38x_comphy *priv; + struct resource *res; + void __iomem *base; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + priv->dev = &pdev->dev; + priv->base = base; + + for_each_available_child_of_node(pdev->dev.of_node, child) { + struct phy *phy; + int ret; + u32 val; + + ret = of_property_read_u32(child, "reg", &val); + if (ret < 0) { + dev_err(&pdev->dev, "missing 'reg' property (%d)\n", + ret); + continue; + } + + if (val >= MAX_A38X_COMPHY || priv->lane[val].base) { + dev_err(&pdev->dev, "invalid 'reg' property\n"); + continue; + } + + phy = devm_phy_create(&pdev->dev, child, &a38x_comphy_ops); + if (IS_ERR(phy)) + return PTR_ERR(phy); + + priv->lane[val].base = base + 0x28 * val; + priv->lane[val].priv = priv; + priv->lane[val].n = val; + priv->lane[val].port = -1; + phy_set_drvdata(phy, &priv->lane[val]); + } + + dev_set_drvdata(&pdev->dev, priv); + + provider = devm_of_phy_provider_register(&pdev->dev, a38x_comphy_xlate); + + return PTR_ERR_OR_ZERO(provider); +} + +static const struct of_device_id a38x_comphy_of_match_table[] = { + { .compatible = "marvell,armada-380-comphy" }, + { }, +}; +MODULE_DEVICE_TABLE(of, a38x_comphy_of_match_table); + +static struct platform_driver a38x_comphy_driver = { + .probe = a38x_comphy_probe, + .driver = { + .name = "armada-38x-comphy", + .of_match_table = a38x_comphy_of_match_table, + }, +}; +module_platform_driver(a38x_comphy_driver); + +MODULE_AUTHOR("Russell King 
<rmk+kernel@armlinux.org.uk>"); +MODULE_DESCRIPTION("Common PHY driver for Armada 38x SoCs"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 05044e323ea5..03ec7a5d9d0b 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c @@ -1513,7 +1513,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"), - DMI_MATCH(DMI_BOARD_VERSION, "1.0"), + DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), }, }, { @@ -1521,7 +1521,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "HP"), DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"), - DMI_MATCH(DMI_BOARD_VERSION, "1.0"), + DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), }, }, { @@ -1529,7 +1529,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"), - DMI_MATCH(DMI_BOARD_VERSION, "1.0"), + DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), }, }, { @@ -1537,7 +1537,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), DMI_MATCH(DMI_PRODUCT_NAME, "Celes"), - DMI_MATCH(DMI_BOARD_VERSION, "1.0"), + DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), }, }, {} diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig index 1817786ab6aa..a005cbccb4f7 100644 --- a/drivers/pinctrl/mediatek/Kconfig +++ b/drivers/pinctrl/mediatek/Kconfig @@ -45,12 +45,14 @@ config PINCTRL_MT2701 config PINCTRL_MT7623 bool "Mediatek MT7623 pin control with generic binding" depends on MACH_MT7623 || COMPILE_TEST + depends on OF default MACH_MT7623 select PINCTRL_MTK_MOORE config PINCTRL_MT7629 bool "Mediatek MT7629 pin control" depends on MACH_MT7629 || COMPILE_TEST + depends on OF default MACH_MT7629 select PINCTRL_MTK_MOORE @@ -92,6 +94,7 @@ config PINCTRL_MT6797 config PINCTRL_MT7622 bool "MediaTek MT7622 pin control" + depends on OF depends on ARM64 || COMPILE_TEST default ARM64 && ARCH_MEDIATEK select PINCTRL_MTK_MOORE diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c index b03481ef99a1..98905d4a79ca 100644 --- a/drivers/pinctrl/pinctrl-mcp23s08.c +++ b/drivers/pinctrl/pinctrl-mcp23s08.c @@ -832,8 +832,13 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev, break; case MCP_TYPE_S18: + one_regmap_config = + devm_kmemdup(dev, &mcp23x17_regmap, + sizeof(struct regmap_config), GFP_KERNEL); + if (!one_regmap_config) + return -ENOMEM; mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp, - &mcp23x17_regmap); + one_regmap_config); mcp->reg_shift = 1; mcp->chip.ngpio = 16; mcp->chip.label = "mcp23s18"; diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c index aa8b58125568..ef4268cc6227 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c @@ -588,7 +588,7 @@ static const unsigned int h6_irq_bank_map[] = { 1, 5, 6, 7 }; static const struct sunxi_pinctrl_desc h6_pinctrl_data = { .pins = h6_pins, .npins = ARRAY_SIZE(h6_pins), - .irq_banks = 3, + .irq_banks = 4, .irq_bank_map = h6_irq_bank_map, .irq_read_needs_mux = true, }; diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c index 5d9184d18c16..0e7fa69e93df 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c +++ 
b/drivers/pinctrl/sunxi/pinctrl-sunxi.c @@ -698,26 +698,24 @@ static int sunxi_pmx_request(struct pinctrl_dev *pctldev, unsigned offset) { struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); unsigned short bank = offset / PINS_PER_BANK; - struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank]; - struct regulator *reg; + unsigned short bank_offset = bank - pctl->desc->pin_base / + PINS_PER_BANK; + struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank_offset]; + struct regulator *reg = s_reg->regulator; + char supply[16]; int ret; - reg = s_reg->regulator; - if (!reg) { - char supply[16]; - - snprintf(supply, sizeof(supply), "vcc-p%c", 'a' + bank); - reg = regulator_get(pctl->dev, supply); - if (IS_ERR(reg)) { - dev_err(pctl->dev, "Couldn't get bank P%c regulator\n", - 'A' + bank); - return PTR_ERR(reg); - } - - s_reg->regulator = reg; - refcount_set(&s_reg->refcount, 1); - } else { + if (reg) { refcount_inc(&s_reg->refcount); + return 0; + } + + snprintf(supply, sizeof(supply), "vcc-p%c", 'a' + bank); + reg = regulator_get(pctl->dev, supply); + if (IS_ERR(reg)) { + dev_err(pctl->dev, "Couldn't get bank P%c regulator\n", + 'A' + bank); + return PTR_ERR(reg); } ret = regulator_enable(reg); @@ -727,13 +725,13 @@ static int sunxi_pmx_request(struct pinctrl_dev *pctldev, unsigned offset) goto out; } + s_reg->regulator = reg; + refcount_set(&s_reg->refcount, 1); + return 0; out: - if (refcount_dec_and_test(&s_reg->refcount)) { - regulator_put(s_reg->regulator); - s_reg->regulator = NULL; - } + regulator_put(s_reg->regulator); return ret; } @@ -742,7 +740,9 @@ static int sunxi_pmx_free(struct pinctrl_dev *pctldev, unsigned offset) { struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); unsigned short bank = offset / PINS_PER_BANK; - struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank]; + unsigned short bank_offset = bank - pctl->desc->pin_base / + PINS_PER_BANK; + struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank_offset]; if (!refcount_dec_and_test(&s_reg->refcount)) return 0; diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h index e340d2a24b44..034c0317c8d6 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h @@ -136,7 +136,7 @@ struct sunxi_pinctrl { struct gpio_chip *chip; const struct sunxi_pinctrl_desc *desc; struct device *dev; - struct sunxi_pinctrl_regulator regulators[12]; + struct sunxi_pinctrl_regulator regulators[9]; struct irq_domain *domain; struct sunxi_pinctrl_function *functions; unsigned nfunctions; diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 5e2109c54c7c..b5e9db85e881 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -905,6 +905,7 @@ config TOSHIBA_WMI config ACPI_CMPC tristate "CMPC Laptop Extras" depends on ACPI && INPUT + depends on BACKLIGHT_LCD_SUPPORT depends on RFKILL || RFKILL=n select BACKLIGHT_CLASS_DEVICE help @@ -1128,6 +1129,7 @@ config INTEL_OAKTRAIL config SAMSUNG_Q10 tristate "Samsung Q10 Extras" depends on ACPI + depends on BACKLIGHT_LCD_SUPPORT select BACKLIGHT_CLASS_DEVICE ---help--- This driver provides support for backlight control on Samsung Q10 diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig index aeb4a8b2e0af..7fe18636915a 100644 --- a/drivers/ptp/Kconfig +++ b/drivers/ptp/Kconfig @@ -43,7 +43,7 @@ config PTP_1588_CLOCK_DTE config PTP_1588_CLOCK_QORIQ tristate "Freescale QorIQ 1588 timer as PTP clock" - depends on GIANFAR || FSL_DPAA_ETH + 
depends on GIANFAR || FSL_DPAA_ETH || FSL_ENETC || FSL_ENETC_VF depends on PTP_1588_CLOCK default y help diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c index 43416b2e8a13..53775362aac6 100644 --- a/drivers/ptp/ptp_qoriq.c +++ b/drivers/ptp/ptp_qoriq.c @@ -22,7 +22,6 @@ #include <linux/device.h> #include <linux/hrtimer.h> -#include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> @@ -37,61 +36,61 @@ * Register access functions */ -/* Caller must hold qoriq_ptp->lock. */ -static u64 tmr_cnt_read(struct qoriq_ptp *qoriq_ptp) +/* Caller must hold ptp_qoriq->lock. */ +static u64 tmr_cnt_read(struct ptp_qoriq *ptp_qoriq) { - struct qoriq_ptp_registers *regs = &qoriq_ptp->regs; + struct ptp_qoriq_registers *regs = &ptp_qoriq->regs; u64 ns; u32 lo, hi; - lo = qoriq_read(®s->ctrl_regs->tmr_cnt_l); - hi = qoriq_read(®s->ctrl_regs->tmr_cnt_h); + lo = ptp_qoriq->read(®s->ctrl_regs->tmr_cnt_l); + hi = ptp_qoriq->read(®s->ctrl_regs->tmr_cnt_h); ns = ((u64) hi) << 32; ns |= lo; return ns; } -/* Caller must hold qoriq_ptp->lock. */ -static void tmr_cnt_write(struct qoriq_ptp *qoriq_ptp, u64 ns) +/* Caller must hold ptp_qoriq->lock. */ +static void tmr_cnt_write(struct ptp_qoriq *ptp_qoriq, u64 ns) { - struct qoriq_ptp_registers *regs = &qoriq_ptp->regs; + struct ptp_qoriq_registers *regs = &ptp_qoriq->regs; u32 hi = ns >> 32; u32 lo = ns & 0xffffffff; - qoriq_write(®s->ctrl_regs->tmr_cnt_l, lo); - qoriq_write(®s->ctrl_regs->tmr_cnt_h, hi); + ptp_qoriq->write(®s->ctrl_regs->tmr_cnt_l, lo); + ptp_qoriq->write(®s->ctrl_regs->tmr_cnt_h, hi); } -/* Caller must hold qoriq_ptp->lock. */ -static void set_alarm(struct qoriq_ptp *qoriq_ptp) +/* Caller must hold ptp_qoriq->lock. */ +static void set_alarm(struct ptp_qoriq *ptp_qoriq) { - struct qoriq_ptp_registers *regs = &qoriq_ptp->regs; + struct ptp_qoriq_registers *regs = &ptp_qoriq->regs; u64 ns; u32 lo, hi; - ns = tmr_cnt_read(qoriq_ptp) + 1500000000ULL; + ns = tmr_cnt_read(ptp_qoriq) + 1500000000ULL; ns = div_u64(ns, 1000000000UL) * 1000000000ULL; - ns -= qoriq_ptp->tclk_period; + ns -= ptp_qoriq->tclk_period; hi = ns >> 32; lo = ns & 0xffffffff; - qoriq_write(®s->alarm_regs->tmr_alarm1_l, lo); - qoriq_write(®s->alarm_regs->tmr_alarm1_h, hi); + ptp_qoriq->write(®s->alarm_regs->tmr_alarm1_l, lo); + ptp_qoriq->write(®s->alarm_regs->tmr_alarm1_h, hi); } -/* Caller must hold qoriq_ptp->lock. */ -static void set_fipers(struct qoriq_ptp *qoriq_ptp) +/* Caller must hold ptp_qoriq->lock. 
*/ +static void set_fipers(struct ptp_qoriq *ptp_qoriq) { - struct qoriq_ptp_registers *regs = &qoriq_ptp->regs; + struct ptp_qoriq_registers *regs = &ptp_qoriq->regs; - set_alarm(qoriq_ptp); - qoriq_write(®s->fiper_regs->tmr_fiper1, qoriq_ptp->tmr_fiper1); - qoriq_write(®s->fiper_regs->tmr_fiper2, qoriq_ptp->tmr_fiper2); + set_alarm(ptp_qoriq); + ptp_qoriq->write(®s->fiper_regs->tmr_fiper1, ptp_qoriq->tmr_fiper1); + ptp_qoriq->write(®s->fiper_regs->tmr_fiper2, ptp_qoriq->tmr_fiper2); } -static int extts_clean_up(struct qoriq_ptp *qoriq_ptp, int index, +static int extts_clean_up(struct ptp_qoriq *ptp_qoriq, int index, bool update_event) { - struct qoriq_ptp_registers *regs = &qoriq_ptp->regs; + struct ptp_qoriq_registers *regs = &ptp_qoriq->regs; struct ptp_clock_event event; void __iomem *reg_etts_l; void __iomem *reg_etts_h; @@ -116,17 +115,17 @@ static int extts_clean_up(struct qoriq_ptp *qoriq_ptp, int index, event.index = index; do { - lo = qoriq_read(reg_etts_l); - hi = qoriq_read(reg_etts_h); + lo = ptp_qoriq->read(reg_etts_l); + hi = ptp_qoriq->read(reg_etts_h); if (update_event) { event.timestamp = ((u64) hi) << 32; event.timestamp |= lo; - ptp_clock_event(qoriq_ptp->clock, &event); + ptp_clock_event(ptp_qoriq->clock, &event); } - stat = qoriq_read(®s->ctrl_regs->tmr_stat); - } while (qoriq_ptp->extts_fifo_support && (stat & valid)); + stat = ptp_qoriq->read(®s->ctrl_regs->tmr_stat); + } while (ptp_qoriq->extts_fifo_support && (stat & valid)); return 0; } @@ -135,89 +134,90 @@ static int extts_clean_up(struct qoriq_ptp *qoriq_ptp, int index, * Interrupt service routine */ -static irqreturn_t isr(int irq, void *priv) +irqreturn_t ptp_qoriq_isr(int irq, void *priv) { - struct qoriq_ptp *qoriq_ptp = priv; - struct qoriq_ptp_registers *regs = &qoriq_ptp->regs; + struct ptp_qoriq *ptp_qoriq = priv; + struct ptp_qoriq_registers *regs = &ptp_qoriq->regs; struct ptp_clock_event event; u64 ns; u32 ack = 0, lo, hi, mask, val, irqs; - spin_lock(&qoriq_ptp->lock); + spin_lock(&ptp_qoriq->lock); - val = qoriq_read(®s->ctrl_regs->tmr_tevent); - mask = qoriq_read(®s->ctrl_regs->tmr_temask); + val = ptp_qoriq->read(®s->ctrl_regs->tmr_tevent); + mask = ptp_qoriq->read(®s->ctrl_regs->tmr_temask); - spin_unlock(&qoriq_ptp->lock); + spin_unlock(&ptp_qoriq->lock); irqs = val & mask; if (irqs & ETS1) { ack |= ETS1; - extts_clean_up(qoriq_ptp, 0, true); + extts_clean_up(ptp_qoriq, 0, true); } if (irqs & ETS2) { ack |= ETS2; - extts_clean_up(qoriq_ptp, 1, true); + extts_clean_up(ptp_qoriq, 1, true); } if (irqs & ALM2) { ack |= ALM2; - if (qoriq_ptp->alarm_value) { + if (ptp_qoriq->alarm_value) { event.type = PTP_CLOCK_ALARM; event.index = 0; - event.timestamp = qoriq_ptp->alarm_value; - ptp_clock_event(qoriq_ptp->clock, &event); + event.timestamp = ptp_qoriq->alarm_value; + ptp_clock_event(ptp_qoriq->clock, &event); } - if (qoriq_ptp->alarm_interval) { - ns = qoriq_ptp->alarm_value + qoriq_ptp->alarm_interval; + if (ptp_qoriq->alarm_interval) { + ns = ptp_qoriq->alarm_value + ptp_qoriq->alarm_interval; hi = ns >> 32; lo = ns & 0xffffffff; - qoriq_write(®s->alarm_regs->tmr_alarm2_l, lo); - qoriq_write(®s->alarm_regs->tmr_alarm2_h, hi); - qoriq_ptp->alarm_value = ns; + ptp_qoriq->write(®s->alarm_regs->tmr_alarm2_l, lo); + ptp_qoriq->write(®s->alarm_regs->tmr_alarm2_h, hi); + ptp_qoriq->alarm_value = ns; } else { - spin_lock(&qoriq_ptp->lock); - mask = qoriq_read(®s->ctrl_regs->tmr_temask); + spin_lock(&ptp_qoriq->lock); + mask = ptp_qoriq->read(®s->ctrl_regs->tmr_temask); mask &= ~ALM2EN; - 
qoriq_write(®s->ctrl_regs->tmr_temask, mask); - spin_unlock(&qoriq_ptp->lock); - qoriq_ptp->alarm_value = 0; - qoriq_ptp->alarm_interval = 0; + ptp_qoriq->write(®s->ctrl_regs->tmr_temask, mask); + spin_unlock(&ptp_qoriq->lock); + ptp_qoriq->alarm_value = 0; + ptp_qoriq->alarm_interval = 0; } } if (irqs & PP1) { ack |= PP1; event.type = PTP_CLOCK_PPS; - ptp_clock_event(qoriq_ptp->clock, &event); + ptp_clock_event(ptp_qoriq->clock, &event); } if (ack) { - qoriq_write(®s->ctrl_regs->tmr_tevent, ack); + ptp_qoriq->write(®s->ctrl_regs->tmr_tevent, ack); return IRQ_HANDLED; } else return IRQ_NONE; } +EXPORT_SYMBOL_GPL(ptp_qoriq_isr); /* * PTP clock operations */ -static int ptp_qoriq_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) +int ptp_qoriq_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) { u64 adj, diff; u32 tmr_add; int neg_adj = 0; - struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps); - struct qoriq_ptp_registers *regs = &qoriq_ptp->regs; + struct ptp_qoriq *ptp_qoriq = container_of(ptp, struct ptp_qoriq, caps); + struct ptp_qoriq_registers *regs = &ptp_qoriq->regs; if (scaled_ppm < 0) { neg_adj = 1; scaled_ppm = -scaled_ppm; } - tmr_add = qoriq_ptp->tmr_add; + tmr_add = ptp_qoriq->tmr_add; adj = tmr_add; /* calculate diff as adj*(scaled_ppm/65536)/1000000 @@ -229,71 +229,74 @@ static int ptp_qoriq_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff; - qoriq_write(®s->ctrl_regs->tmr_add, tmr_add); + ptp_qoriq->write(®s->ctrl_regs->tmr_add, tmr_add); return 0; } +EXPORT_SYMBOL_GPL(ptp_qoriq_adjfine); -static int ptp_qoriq_adjtime(struct ptp_clock_info *ptp, s64 delta) +int ptp_qoriq_adjtime(struct ptp_clock_info *ptp, s64 delta) { s64 now; unsigned long flags; - struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps); + struct ptp_qoriq *ptp_qoriq = container_of(ptp, struct ptp_qoriq, caps); - spin_lock_irqsave(&qoriq_ptp->lock, flags); + spin_lock_irqsave(&ptp_qoriq->lock, flags); - now = tmr_cnt_read(qoriq_ptp); + now = tmr_cnt_read(ptp_qoriq); now += delta; - tmr_cnt_write(qoriq_ptp, now); - set_fipers(qoriq_ptp); + tmr_cnt_write(ptp_qoriq, now); + set_fipers(ptp_qoriq); - spin_unlock_irqrestore(&qoriq_ptp->lock, flags); + spin_unlock_irqrestore(&ptp_qoriq->lock, flags); return 0; } +EXPORT_SYMBOL_GPL(ptp_qoriq_adjtime); -static int ptp_qoriq_gettime(struct ptp_clock_info *ptp, - struct timespec64 *ts) +int ptp_qoriq_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) { u64 ns; unsigned long flags; - struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps); + struct ptp_qoriq *ptp_qoriq = container_of(ptp, struct ptp_qoriq, caps); - spin_lock_irqsave(&qoriq_ptp->lock, flags); + spin_lock_irqsave(&ptp_qoriq->lock, flags); - ns = tmr_cnt_read(qoriq_ptp); + ns = tmr_cnt_read(ptp_qoriq); - spin_unlock_irqrestore(&qoriq_ptp->lock, flags); + spin_unlock_irqrestore(&ptp_qoriq->lock, flags); *ts = ns_to_timespec64(ns); return 0; } +EXPORT_SYMBOL_GPL(ptp_qoriq_gettime); -static int ptp_qoriq_settime(struct ptp_clock_info *ptp, - const struct timespec64 *ts) +int ptp_qoriq_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) { u64 ns; unsigned long flags; - struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps); + struct ptp_qoriq *ptp_qoriq = container_of(ptp, struct ptp_qoriq, caps); ns = timespec64_to_ns(ts); - spin_lock_irqsave(&qoriq_ptp->lock, flags); + spin_lock_irqsave(&ptp_qoriq->lock, flags); - tmr_cnt_write(qoriq_ptp, ns); - 
set_fipers(qoriq_ptp); + tmr_cnt_write(ptp_qoriq, ns); + set_fipers(ptp_qoriq); - spin_unlock_irqrestore(&qoriq_ptp->lock, flags); + spin_unlock_irqrestore(&ptp_qoriq->lock, flags); return 0; } +EXPORT_SYMBOL_GPL(ptp_qoriq_settime); -static int ptp_qoriq_enable(struct ptp_clock_info *ptp, - struct ptp_clock_request *rq, int on) +int ptp_qoriq_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) { - struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps); - struct qoriq_ptp_registers *regs = &qoriq_ptp->regs; + struct ptp_qoriq *ptp_qoriq = container_of(ptp, struct ptp_qoriq, caps); + struct ptp_qoriq_registers *regs = &ptp_qoriq->regs; unsigned long flags; u32 bit, mask = 0; @@ -311,7 +314,7 @@ static int ptp_qoriq_enable(struct ptp_clock_info *ptp, } if (on) - extts_clean_up(qoriq_ptp, rq->extts.index, false); + extts_clean_up(ptp_qoriq, rq->extts.index, false); break; case PTP_CLK_REQ_PPS: @@ -321,21 +324,22 @@ static int ptp_qoriq_enable(struct ptp_clock_info *ptp, return -EOPNOTSUPP; } - spin_lock_irqsave(&qoriq_ptp->lock, flags); + spin_lock_irqsave(&ptp_qoriq->lock, flags); - mask = qoriq_read(®s->ctrl_regs->tmr_temask); + mask = ptp_qoriq->read(®s->ctrl_regs->tmr_temask); if (on) { mask |= bit; - qoriq_write(®s->ctrl_regs->tmr_tevent, bit); + ptp_qoriq->write(®s->ctrl_regs->tmr_tevent, bit); } else { mask &= ~bit; } - qoriq_write(®s->ctrl_regs->tmr_temask, mask); + ptp_qoriq->write(®s->ctrl_regs->tmr_temask, mask); - spin_unlock_irqrestore(&qoriq_ptp->lock, flags); + spin_unlock_irqrestore(&ptp_qoriq->lock, flags); return 0; } +EXPORT_SYMBOL_GPL(ptp_qoriq_enable); static const struct ptp_clock_info ptp_qoriq_caps = { .owner = THIS_MODULE, @@ -354,7 +358,7 @@ static const struct ptp_clock_info ptp_qoriq_caps = { }; /** - * qoriq_ptp_nominal_freq - calculate nominal frequency according to + * ptp_qoriq_nominal_freq - calculate nominal frequency according to * reference clock frequency * * @clk_src: reference clock frequency @@ -365,7 +369,7 @@ static const struct ptp_clock_info ptp_qoriq_caps = { * * Return the nominal frequency */ -static u32 qoriq_ptp_nominal_freq(u32 clk_src) +static u32 ptp_qoriq_nominal_freq(u32 clk_src) { u32 remainder = 0; @@ -385,9 +389,9 @@ static u32 qoriq_ptp_nominal_freq(u32 clk_src) } /** - * qoriq_ptp_auto_config - calculate a set of default configurations + * ptp_qoriq_auto_config - calculate a set of default configurations * - * @qoriq_ptp: pointer to qoriq_ptp + * @ptp_qoriq: pointer to ptp_qoriq * @node: pointer to device_node * * If below dts properties are not provided, this function will be @@ -401,7 +405,7 @@ static u32 qoriq_ptp_nominal_freq(u32 clk_src) * * Return 0 if success */ -static int qoriq_ptp_auto_config(struct qoriq_ptp *qoriq_ptp, +static int ptp_qoriq_auto_config(struct ptp_qoriq *ptp_qoriq, struct device_node *node) { struct clk *clk; @@ -411,7 +415,7 @@ static int qoriq_ptp_auto_config(struct qoriq_ptp *qoriq_ptp, u32 remainder = 0; u32 clk_src = 0; - qoriq_ptp->cksel = DEFAULT_CKSEL; + ptp_qoriq->cksel = DEFAULT_CKSEL; clk = of_clk_get(node, 0); if (!IS_ERR(clk)) { @@ -424,12 +428,12 @@ static int qoriq_ptp_auto_config(struct qoriq_ptp *qoriq_ptp, return -EINVAL; } - nominal_freq = qoriq_ptp_nominal_freq(clk_src); + nominal_freq = ptp_qoriq_nominal_freq(clk_src); if (!nominal_freq) return -EINVAL; - qoriq_ptp->tclk_period = 1000000000UL / nominal_freq; - qoriq_ptp->tmr_prsc = DEFAULT_TMR_PRSC; + ptp_qoriq->tclk_period = 1000000000UL / nominal_freq; + ptp_qoriq->tmr_prsc = DEFAULT_TMR_PRSC; /* 
Calculate initial frequency compensation value for TMR_ADD register. * freq_comp = ceil(2^32 / freq_ratio) @@ -440,172 +444,193 @@ static int qoriq_ptp_auto_config(struct qoriq_ptp *qoriq_ptp, if (remainder) freq_comp++; - qoriq_ptp->tmr_add = freq_comp; - qoriq_ptp->tmr_fiper1 = DEFAULT_FIPER1_PERIOD - qoriq_ptp->tclk_period; - qoriq_ptp->tmr_fiper2 = DEFAULT_FIPER2_PERIOD - qoriq_ptp->tclk_period; + ptp_qoriq->tmr_add = freq_comp; + ptp_qoriq->tmr_fiper1 = DEFAULT_FIPER1_PERIOD - ptp_qoriq->tclk_period; + ptp_qoriq->tmr_fiper2 = DEFAULT_FIPER2_PERIOD - ptp_qoriq->tclk_period; /* max_adj = 1000000000 * (freq_ratio - 1.0) - 1 * freq_ratio = reference_clock_freq / nominal_freq */ max_adj = 1000000000ULL * (clk_src - nominal_freq); max_adj = div_u64(max_adj, nominal_freq) - 1; - qoriq_ptp->caps.max_adj = max_adj; + ptp_qoriq->caps.max_adj = max_adj; return 0; } -static int qoriq_ptp_probe(struct platform_device *dev) +int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base, + const struct ptp_clock_info *caps) { - struct device_node *node = dev->dev.of_node; - struct qoriq_ptp *qoriq_ptp; - struct qoriq_ptp_registers *regs; + struct device_node *node = ptp_qoriq->dev->of_node; + struct ptp_qoriq_registers *regs; struct timespec64 now; - int err = -ENOMEM; - u32 tmr_ctrl; unsigned long flags; - void __iomem *base; - - qoriq_ptp = kzalloc(sizeof(*qoriq_ptp), GFP_KERNEL); - if (!qoriq_ptp) - goto no_memory; - - err = -EINVAL; + u32 tmr_ctrl; - qoriq_ptp->dev = &dev->dev; - qoriq_ptp->caps = ptp_qoriq_caps; + ptp_qoriq->base = base; + ptp_qoriq->caps = *caps; - if (of_property_read_u32(node, "fsl,cksel", &qoriq_ptp->cksel)) - qoriq_ptp->cksel = DEFAULT_CKSEL; + if (of_property_read_u32(node, "fsl,cksel", &ptp_qoriq->cksel)) + ptp_qoriq->cksel = DEFAULT_CKSEL; if (of_property_read_bool(node, "fsl,extts-fifo")) - qoriq_ptp->extts_fifo_support = true; + ptp_qoriq->extts_fifo_support = true; else - qoriq_ptp->extts_fifo_support = false; + ptp_qoriq->extts_fifo_support = false; if (of_property_read_u32(node, - "fsl,tclk-period", &qoriq_ptp->tclk_period) || + "fsl,tclk-period", &ptp_qoriq->tclk_period) || of_property_read_u32(node, - "fsl,tmr-prsc", &qoriq_ptp->tmr_prsc) || + "fsl,tmr-prsc", &ptp_qoriq->tmr_prsc) || of_property_read_u32(node, - "fsl,tmr-add", &qoriq_ptp->tmr_add) || + "fsl,tmr-add", &ptp_qoriq->tmr_add) || of_property_read_u32(node, - "fsl,tmr-fiper1", &qoriq_ptp->tmr_fiper1) || + "fsl,tmr-fiper1", &ptp_qoriq->tmr_fiper1) || of_property_read_u32(node, - "fsl,tmr-fiper2", &qoriq_ptp->tmr_fiper2) || + "fsl,tmr-fiper2", &ptp_qoriq->tmr_fiper2) || of_property_read_u32(node, - "fsl,max-adj", &qoriq_ptp->caps.max_adj)) { + "fsl,max-adj", &ptp_qoriq->caps.max_adj)) { pr_warn("device tree node missing required elements, try automatic configuration\n"); - if (qoriq_ptp_auto_config(qoriq_ptp, node)) - goto no_config; + if (ptp_qoriq_auto_config(ptp_qoriq, node)) + return -ENODEV; } - err = -ENODEV; + if (of_property_read_bool(node, "little-endian")) { + ptp_qoriq->read = qoriq_read_le; + ptp_qoriq->write = qoriq_write_le; + } else { + ptp_qoriq->read = qoriq_read_be; + ptp_qoriq->write = qoriq_write_be; + } + + /* The eTSEC uses a different memory map from DPAA/ENETC */ + if (of_device_is_compatible(node, "fsl,etsec-ptp")) { + ptp_qoriq->regs.ctrl_regs = base + ETSEC_CTRL_REGS_OFFSET; + ptp_qoriq->regs.alarm_regs = base + ETSEC_ALARM_REGS_OFFSET; + ptp_qoriq->regs.fiper_regs = base + ETSEC_FIPER_REGS_OFFSET; + ptp_qoriq->regs.etts_regs = base + ETSEC_ETTS_REGS_OFFSET; + } else {
+ ptp_qoriq->regs.ctrl_regs = base + CTRL_REGS_OFFSET; + ptp_qoriq->regs.alarm_regs = base + ALARM_REGS_OFFSET; + ptp_qoriq->regs.fiper_regs = base + FIPER_REGS_OFFSET; + ptp_qoriq->regs.etts_regs = base + ETTS_REGS_OFFSET; + } + + ktime_get_real_ts64(&now); + ptp_qoriq_settime(&ptp_qoriq->caps, &now); + + tmr_ctrl = + (ptp_qoriq->tclk_period & TCLK_PERIOD_MASK) << TCLK_PERIOD_SHIFT | + (ptp_qoriq->cksel & CKSEL_MASK) << CKSEL_SHIFT; + + spin_lock_init(&ptp_qoriq->lock); + spin_lock_irqsave(&ptp_qoriq->lock, flags); + + regs = &ptp_qoriq->regs; + ptp_qoriq->write(&regs->ctrl_regs->tmr_ctrl, tmr_ctrl); + ptp_qoriq->write(&regs->ctrl_regs->tmr_add, ptp_qoriq->tmr_add); + ptp_qoriq->write(&regs->ctrl_regs->tmr_prsc, ptp_qoriq->tmr_prsc); + ptp_qoriq->write(&regs->fiper_regs->tmr_fiper1, ptp_qoriq->tmr_fiper1); + ptp_qoriq->write(&regs->fiper_regs->tmr_fiper2, ptp_qoriq->tmr_fiper2); + set_alarm(ptp_qoriq); + ptp_qoriq->write(&regs->ctrl_regs->tmr_ctrl, + tmr_ctrl|FIPERST|RTPE|TE|FRD); + + spin_unlock_irqrestore(&ptp_qoriq->lock, flags); + + ptp_qoriq->clock = ptp_clock_register(&ptp_qoriq->caps, ptp_qoriq->dev); + if (IS_ERR(ptp_qoriq->clock)) + return PTR_ERR(ptp_qoriq->clock); + + ptp_qoriq->phc_index = ptp_clock_index(ptp_qoriq->clock); + ptp_qoriq_create_debugfs(ptp_qoriq); + return 0; +} +EXPORT_SYMBOL_GPL(ptp_qoriq_init); + +void ptp_qoriq_free(struct ptp_qoriq *ptp_qoriq) +{ + struct ptp_qoriq_registers *regs = &ptp_qoriq->regs; + + ptp_qoriq->write(&regs->ctrl_regs->tmr_temask, 0); + ptp_qoriq->write(&regs->ctrl_regs->tmr_ctrl, 0); + + ptp_qoriq_remove_debugfs(ptp_qoriq); + ptp_clock_unregister(ptp_qoriq->clock); + iounmap(ptp_qoriq->base); + free_irq(ptp_qoriq->irq, ptp_qoriq); +} +EXPORT_SYMBOL_GPL(ptp_qoriq_free); - qoriq_ptp->irq = platform_get_irq(dev, 0); +static int ptp_qoriq_probe(struct platform_device *dev) +{ + struct ptp_qoriq *ptp_qoriq; + int err = -ENOMEM; + void __iomem *base; + + ptp_qoriq = kzalloc(sizeof(*ptp_qoriq), GFP_KERNEL); + if (!ptp_qoriq) + goto no_memory; + + ptp_qoriq->dev = &dev->dev; - if (qoriq_ptp->irq < 0) { + err = -ENODEV; + + ptp_qoriq->irq = platform_get_irq(dev, 0); + if (ptp_qoriq->irq < 0) { pr_err("irq not in device tree\n"); goto no_node; } - if (request_irq(qoriq_ptp->irq, isr, IRQF_SHARED, DRIVER, qoriq_ptp)) { + if (request_irq(ptp_qoriq->irq, ptp_qoriq_isr, IRQF_SHARED, + DRIVER, ptp_qoriq)) { pr_err("request_irq failed\n"); goto no_node; } - qoriq_ptp->rsrc = platform_get_resource(dev, IORESOURCE_MEM, 0); - if (!qoriq_ptp->rsrc) { + ptp_qoriq->rsrc = platform_get_resource(dev, IORESOURCE_MEM, 0); + if (!ptp_qoriq->rsrc) { pr_err("no resource\n"); goto no_resource; } - if (request_resource(&iomem_resource, qoriq_ptp->rsrc)) { + if (request_resource(&iomem_resource, ptp_qoriq->rsrc)) { pr_err("resource busy\n"); goto no_resource; } - spin_lock_init(&qoriq_ptp->lock); - - base = ioremap(qoriq_ptp->rsrc->start, - resource_size(qoriq_ptp->rsrc)); + base = ioremap(ptp_qoriq->rsrc->start, + resource_size(ptp_qoriq->rsrc)); if (!base) { pr_err("ioremap ptp registers failed\n"); goto no_ioremap; } - qoriq_ptp->base = base; - - if (of_device_is_compatible(node, "fsl,fman-ptp-timer")) { - qoriq_ptp->regs.ctrl_regs = base + FMAN_CTRL_REGS_OFFSET; - qoriq_ptp->regs.alarm_regs = base + FMAN_ALARM_REGS_OFFSET; - qoriq_ptp->regs.fiper_regs = base + FMAN_FIPER_REGS_OFFSET; - qoriq_ptp->regs.etts_regs = base + FMAN_ETTS_REGS_OFFSET; - } else { - qoriq_ptp->regs.ctrl_regs = base + CTRL_REGS_OFFSET; - qoriq_ptp->regs.alarm_regs = base + ALARM_REGS_OFFSET;
- qoriq_ptp->regs.fiper_regs = base + FIPER_REGS_OFFSET; - qoriq_ptp->regs.etts_regs = base + ETTS_REGS_OFFSET; - } - - ktime_get_real_ts64(&now); - ptp_qoriq_settime(&qoriq_ptp->caps, &now); - - tmr_ctrl = - (qoriq_ptp->tclk_period & TCLK_PERIOD_MASK) << TCLK_PERIOD_SHIFT | - (qoriq_ptp->cksel & CKSEL_MASK) << CKSEL_SHIFT; - - spin_lock_irqsave(&qoriq_ptp->lock, flags); - - regs = &qoriq_ptp->regs; - qoriq_write(&regs->ctrl_regs->tmr_ctrl, tmr_ctrl); - qoriq_write(&regs->ctrl_regs->tmr_add, qoriq_ptp->tmr_add); - qoriq_write(&regs->ctrl_regs->tmr_prsc, qoriq_ptp->tmr_prsc); - qoriq_write(&regs->fiper_regs->tmr_fiper1, qoriq_ptp->tmr_fiper1); - qoriq_write(&regs->fiper_regs->tmr_fiper2, qoriq_ptp->tmr_fiper2); - set_alarm(qoriq_ptp); - qoriq_write(&regs->ctrl_regs->tmr_ctrl, tmr_ctrl|FIPERST|RTPE|TE|FRD); - - spin_unlock_irqrestore(&qoriq_ptp->lock, flags); - - qoriq_ptp->clock = ptp_clock_register(&qoriq_ptp->caps, &dev->dev); - if (IS_ERR(qoriq_ptp->clock)) { - err = PTR_ERR(qoriq_ptp->clock); + err = ptp_qoriq_init(ptp_qoriq, base, &ptp_qoriq_caps); + if (err) goto no_clock; - } - qoriq_ptp->phc_index = ptp_clock_index(qoriq_ptp->clock); - - ptp_qoriq_create_debugfs(qoriq_ptp); - platform_set_drvdata(dev, qoriq_ptp); + platform_set_drvdata(dev, ptp_qoriq); return 0; no_clock: - iounmap(qoriq_ptp->base); + iounmap(ptp_qoriq->base); no_ioremap: - release_resource(qoriq_ptp->rsrc); + release_resource(ptp_qoriq->rsrc); no_resource: - free_irq(qoriq_ptp->irq, qoriq_ptp); -no_config: no_node: - kfree(qoriq_ptp); + kfree(ptp_qoriq); no_memory: return err; } -static int qoriq_ptp_remove(struct platform_device *dev) +static int ptp_qoriq_remove(struct platform_device *dev) { - struct qoriq_ptp *qoriq_ptp = platform_get_drvdata(dev); - struct qoriq_ptp_registers *regs = &qoriq_ptp->regs; - - qoriq_write(&regs->ctrl_regs->tmr_temask, 0); - qoriq_write(&regs->ctrl_regs->tmr_ctrl, 0); - - ptp_qoriq_remove_debugfs(qoriq_ptp); - ptp_clock_unregister(qoriq_ptp->clock); - iounmap(qoriq_ptp->base); - release_resource(qoriq_ptp->rsrc); - free_irq(qoriq_ptp->irq, qoriq_ptp); - kfree(qoriq_ptp); + struct ptp_qoriq *ptp_qoriq = platform_get_drvdata(dev); + ptp_qoriq_free(ptp_qoriq); + release_resource(ptp_qoriq->rsrc); + kfree(ptp_qoriq); return 0; } @@ -616,16 +641,16 @@ static const struct of_device_id match_table[] = { }; MODULE_DEVICE_TABLE(of, match_table); -static struct platform_driver qoriq_ptp_driver = { +static struct platform_driver ptp_qoriq_driver = { .driver = { .name = "ptp_qoriq", .of_match_table = match_table, }, - .probe = qoriq_ptp_probe, - .remove = qoriq_ptp_remove, + .probe = ptp_qoriq_probe, + .remove = ptp_qoriq_remove, }; -module_platform_driver(qoriq_ptp_driver); +module_platform_driver(ptp_qoriq_driver); MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>"); MODULE_DESCRIPTION("PTP clock for Freescale QorIQ 1588 timer"); diff --git a/drivers/ptp/ptp_qoriq_debugfs.c b/drivers/ptp/ptp_qoriq_debugfs.c index 970595021088..e8dddcedf288 100644 --- a/drivers/ptp/ptp_qoriq_debugfs.c +++ b/drivers/ptp/ptp_qoriq_debugfs.c @@ -7,11 +7,11 @@ static int ptp_qoriq_fiper1_lpbk_get(void *data, u64 *val) { - struct qoriq_ptp *qoriq_ptp = data; - struct qoriq_ptp_registers *regs = &qoriq_ptp->regs; + struct ptp_qoriq *ptp_qoriq = data; + struct ptp_qoriq_registers *regs = &ptp_qoriq->regs; u32 ctrl; - ctrl = qoriq_read(&regs->ctrl_regs->tmr_ctrl); + ctrl = ptp_qoriq->read(&regs->ctrl_regs->tmr_ctrl); *val = ctrl & PP1L ?
1 : 0; return 0; @@ -19,17 +19,17 @@ static int ptp_qoriq_fiper1_lpbk_get(void *data, u64 *val) static int ptp_qoriq_fiper1_lpbk_set(void *data, u64 val) { - struct qoriq_ptp *qoriq_ptp = data; - struct qoriq_ptp_registers *regs = &qoriq_ptp->regs; + struct ptp_qoriq *ptp_qoriq = data; + struct ptp_qoriq_registers *regs = &ptp_qoriq->regs; u32 ctrl; - ctrl = qoriq_read(&regs->ctrl_regs->tmr_ctrl); + ctrl = ptp_qoriq->read(&regs->ctrl_regs->tmr_ctrl); if (val == 0) ctrl &= ~PP1L; else ctrl |= PP1L; - qoriq_write(&regs->ctrl_regs->tmr_ctrl, ctrl); + ptp_qoriq->write(&regs->ctrl_regs->tmr_ctrl, ctrl); return 0; } @@ -38,11 +38,11 @@ DEFINE_DEBUGFS_ATTRIBUTE(ptp_qoriq_fiper1_fops, ptp_qoriq_fiper1_lpbk_get, static int ptp_qoriq_fiper2_lpbk_get(void *data, u64 *val) { - struct qoriq_ptp *qoriq_ptp = data; - struct qoriq_ptp_registers *regs = &qoriq_ptp->regs; + struct ptp_qoriq *ptp_qoriq = data; + struct ptp_qoriq_registers *regs = &ptp_qoriq->regs; u32 ctrl; - ctrl = qoriq_read(&regs->ctrl_regs->tmr_ctrl); + ctrl = ptp_qoriq->read(&regs->ctrl_regs->tmr_ctrl); *val = ctrl & PP2L ? 1 : 0; return 0; @@ -50,52 +50,52 @@ static int ptp_qoriq_fiper2_lpbk_get(void *data, u64 *val) static int ptp_qoriq_fiper2_lpbk_set(void *data, u64 val) { - struct qoriq_ptp *qoriq_ptp = data; - struct qoriq_ptp_registers *regs = &qoriq_ptp->regs; + struct ptp_qoriq *ptp_qoriq = data; + struct ptp_qoriq_registers *regs = &ptp_qoriq->regs; u32 ctrl; - ctrl = qoriq_read(&regs->ctrl_regs->tmr_ctrl); + ctrl = ptp_qoriq->read(&regs->ctrl_regs->tmr_ctrl); if (val == 0) ctrl &= ~PP2L; else ctrl |= PP2L; - qoriq_write(&regs->ctrl_regs->tmr_ctrl, ctrl); + ptp_qoriq->write(&regs->ctrl_regs->tmr_ctrl, ctrl); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(ptp_qoriq_fiper2_fops, ptp_qoriq_fiper2_lpbk_get, ptp_qoriq_fiper2_lpbk_set, "%llu\n"); -void ptp_qoriq_create_debugfs(struct qoriq_ptp *qoriq_ptp) +void ptp_qoriq_create_debugfs(struct ptp_qoriq *ptp_qoriq) { struct dentry *root; - root = debugfs_create_dir(dev_name(qoriq_ptp->dev), NULL); + root = debugfs_create_dir(dev_name(ptp_qoriq->dev), NULL); if (IS_ERR(root)) return; if (!root) goto err_root; - qoriq_ptp->debugfs_root = root; + ptp_qoriq->debugfs_root = root; if (!debugfs_create_file_unsafe("fiper1-loopback", 0600, root, - qoriq_ptp, &ptp_qoriq_fiper1_fops)) + ptp_qoriq, &ptp_qoriq_fiper1_fops)) goto err_node; if (!debugfs_create_file_unsafe("fiper2-loopback", 0600, root, - qoriq_ptp, &ptp_qoriq_fiper2_fops)) + ptp_qoriq, &ptp_qoriq_fiper2_fops)) goto err_node; return; err_node: debugfs_remove_recursive(root); - qoriq_ptp->debugfs_root = NULL; + ptp_qoriq->debugfs_root = NULL; err_root: - dev_err(qoriq_ptp->dev, "failed to initialize debugfs\n"); + dev_err(ptp_qoriq->dev, "failed to initialize debugfs\n"); } -void ptp_qoriq_remove_debugfs(struct qoriq_ptp *qoriq_ptp) +void ptp_qoriq_remove_debugfs(struct ptp_qoriq *ptp_qoriq) { - debugfs_remove_recursive(qoriq_ptp->debugfs_root); - qoriq_ptp->debugfs_root = NULL; + debugfs_remove_recursive(ptp_qoriq->debugfs_root); + ptp_qoriq->debugfs_root = NULL; } diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 4e7b55a14b1a..6e294b4d3635 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -4469,6 +4469,14 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp) usrparm.psf_data &= 0x7fffffffULL; usrparm.rssd_result &= 0x7fffffffULL; } + /* at least 2 bytes are accessed and should be allocated */ + if (usrparm.psf_data_len < 2) { + DBF_DEV_EVENT(DBF_WARNING, device, + "Symmetrix ioctl
invalid data length %d", + usrparm.psf_data_len); + rc = -EINVAL; + goto out; + } /* alloc I/O data area */ psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA); rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA); diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 48ea0004a56d..5a699746c357 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -248,7 +248,8 @@ static inline int ap_test_config(unsigned int *field, unsigned int nr) static inline int ap_test_config_card_id(unsigned int id) { if (!ap_configuration) /* QCI not supported */ - return 1; + /* only ids 0...3F may be probed */ + return id < 0x40 ? 1 : 0; return ap_test_config(ap_configuration->apm, id); } diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile index f2d6bbe57a6f..bc55ec316adb 100644 --- a/drivers/s390/net/Makefile +++ b/drivers/s390/net/Makefile @@ -9,7 +9,7 @@ obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o obj-$(CONFIG_SMSGIUCV) += smsgiucv.o obj-$(CONFIG_SMSGIUCV_EVENT) += smsgiucv_app.o obj-$(CONFIG_LCS) += lcs.o -qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o +qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o qeth_ethtool.o obj-$(CONFIG_QETH) += qeth.o qeth_l2-y += qeth_l2_main.o qeth_l2_sys.o obj-$(CONFIG_QETH_L2) += qeth_l2.o diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index d65650ef6b41..c0c46be0b251 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -18,10 +18,10 @@ #include <linux/in6.h> #include <linux/bitops.h> #include <linux/seq_file.h> -#include <linux/ethtool.h> #include <linux/hashtable.h> #include <linux/ip.h> #include <linux/refcount.h> +#include <linux/workqueue.h> #include <net/ipv6.h> #include <net/if_inet6.h> @@ -34,6 +34,8 @@ #include <asm/ccwgroup.h> #include <asm/sysinfo.h> +#include <uapi/linux/if_link.h> + #include "qeth_core_mpc.h" /** @@ -112,59 +114,6 @@ static inline u32 qeth_get_device_id(struct ccw_device *cdev) #define CCW_DEVID(cdev) (qeth_get_device_id(cdev)) #define CARD_DEVID(card) (CCW_DEVID(CARD_RDEV(card))) -/** - * card stuff - */ -struct qeth_perf_stats { - unsigned int bufs_rec; - unsigned int bufs_sent; - unsigned int buf_elements_sent; - - unsigned int skbs_sent_pack; - unsigned int bufs_sent_pack; - - unsigned int sc_dp_p; - unsigned int sc_p_dp; - /* qdio_cq_handler: number of times called, time spent in */ - __u64 cq_start_time; - unsigned int cq_cnt; - unsigned int cq_time; - /* qdio_input_handler: number of times called, time spent in */ - __u64 inbound_start_time; - unsigned int inbound_cnt; - unsigned int inbound_time; - /* qeth_send_packet: number of times called, time spent in */ - __u64 outbound_start_time; - unsigned int outbound_cnt; - unsigned int outbound_time; - /* qdio_output_handler: number of times called, time spent in */ - __u64 outbound_handler_start_time; - unsigned int outbound_handler_cnt; - unsigned int outbound_handler_time; - /* number of calls to and time spent in do_QDIO for inbound queue */ - __u64 inbound_do_qdio_start_time; - unsigned int inbound_do_qdio_cnt; - unsigned int inbound_do_qdio_time; - /* number of calls to and time spent in do_QDIO for outbound queues */ - __u64 outbound_do_qdio_start_time; - unsigned int outbound_do_qdio_cnt; - unsigned int outbound_do_qdio_time; - unsigned int large_send_bytes; - unsigned int large_send_cnt; - unsigned int sg_skbs_sent; - /* initial values when measuring starts */ - unsigned long initial_rx_packets; - unsigned long 
initial_tx_packets; - /* inbound scatter gather data */ - unsigned int sg_skbs_rx; - unsigned int sg_frags_rx; - unsigned int sg_alloc_page_rx; - unsigned int tx_csum; - unsigned int tx_lin; - unsigned int tx_linfail; - unsigned int rx_csum; -}; - /* Routing stuff */ struct qeth_routing_info { enum qeth_routing_types type; @@ -494,10 +443,54 @@ enum qeth_out_q_states { QETH_OUT_Q_LOCKED_FLUSH, }; +#define QETH_CARD_STAT_ADD(_c, _stat, _val) ((_c)->stats._stat += (_val)) +#define QETH_CARD_STAT_INC(_c, _stat) QETH_CARD_STAT_ADD(_c, _stat, 1) + +#define QETH_TXQ_STAT_ADD(_q, _stat, _val) ((_q)->stats._stat += (_val)) +#define QETH_TXQ_STAT_INC(_q, _stat) QETH_TXQ_STAT_ADD(_q, _stat, 1) + +struct qeth_card_stats { + u64 rx_bufs; + u64 rx_skb_csum; + u64 rx_sg_skbs; + u64 rx_sg_frags; + u64 rx_sg_alloc_page; + + /* rtnl_link_stats64 */ + u64 rx_packets; + u64 rx_bytes; + u64 rx_errors; + u64 rx_dropped; + u64 rx_multicast; + u64 tx_errors; +}; + +struct qeth_out_q_stats { + u64 bufs; + u64 bufs_pack; + u64 buf_elements; + u64 skbs_pack; + u64 skbs_sg; + u64 skbs_csum; + u64 skbs_tso; + u64 skbs_linearized; + u64 skbs_linearized_fail; + u64 tso_bytes; + u64 packing_mode_switch; + + /* rtnl_link_stats64 */ + u64 tx_packets; + u64 tx_bytes; + u64 tx_errors; + u64 tx_dropped; + u64 tx_carrier_errors; +}; + struct qeth_qdio_out_q { struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q]; struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q]; struct qdio_outbuf_state *bufstates; /* convenience pointer */ + struct qeth_out_q_stats stats; int queue_no; struct qeth_card *card; atomic_t state; @@ -527,7 +520,7 @@ struct qeth_qdio_info { /* output */ int no_out_queues; - struct qeth_qdio_out_q **out_qs; + struct qeth_qdio_out_q *out_qs[QETH_MAX_QUEUES]; struct qdio_outbuf_state *out_bufstates; /* priority queueing */ @@ -594,8 +587,8 @@ struct qeth_channel; struct qeth_cmd_buffer { enum qeth_cmd_buffer_state state; struct qeth_channel *channel; + struct qeth_reply *reply; unsigned char *data; - int rc; void (*callback)(struct qeth_card *card, struct qeth_channel *channel, struct qeth_cmd_buffer *iob); }; @@ -701,7 +694,6 @@ struct qeth_card_options { struct qeth_vnicc_info vnicc; /* VNICC options */ int fake_broadcast; enum qeth_discipline_id layer; - int performance_stats; int rx_sg_cb; enum qeth_ipa_isolation_modes isolation; enum qeth_ipa_isolation_modes prev_isolation; @@ -777,13 +769,13 @@ struct qeth_card { struct qeth_channel data; struct net_device *dev; - struct net_device_stats stats; - + struct qeth_card_stats stats; struct qeth_card_info info; struct qeth_token token; struct qeth_seqno seqno; struct qeth_card_options options; + struct workqueue_struct *event_wq; wait_queue_head_t wait_q; spinlock_t mclock; unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; @@ -800,7 +792,6 @@ struct qeth_card { struct list_head cmd_waiter_list; /* QDIO buffer handling */ struct qeth_qdio_info qdio; - struct qeth_perf_stats perf_stats; int read_or_write_problem; struct qeth_osn_info osn_info; struct qeth_discipline *discipline; @@ -857,11 +848,6 @@ static inline int qeth_get_elements_for_range(addr_t start, addr_t end) return PFN_UP(end) - PFN_DOWN(start); } -static inline int qeth_get_micros(void) -{ - return (int) (get_tod_clock() >> 12); -} - static inline int qeth_get_ip_version(struct sk_buff *skb) { struct vlan_ethhdr *veth = vlan_eth_hdr(skb); @@ -886,8 +872,7 @@ static inline void qeth_rx_csum(struct qeth_card *card, struct sk_buff *skb, if ((card->dev->features & NETIF_F_RXCSUM) && (flags & 
QETH_HDR_EXT_CSUM_TRANSP_REQ)) { skb->ip_summed = CHECKSUM_UNNECESSARY; - if (card->options.performance_stats) - card->perf_stats.rx_csum++; + QETH_CARD_STAT_INC(card, rx_skb_csum); } else { skb->ip_summed = CHECKSUM_NONE; } @@ -949,12 +934,13 @@ static inline struct qeth_qdio_out_q *qeth_get_tx_queue(struct qeth_card *card, extern struct qeth_discipline qeth_l2_discipline; extern struct qeth_discipline qeth_l3_discipline; +extern const struct ethtool_ops qeth_ethtool_ops; +extern const struct ethtool_ops qeth_osn_ethtool_ops; extern const struct attribute_group *qeth_generic_attr_groups[]; extern const struct attribute_group *qeth_osn_attr_groups[]; extern const struct attribute_group qeth_device_attr_group; extern const struct attribute_group qeth_device_blkt_group; extern const struct device_type qeth_generic_devtype; -extern struct workqueue_struct *qeth_wq; int qeth_card_hw_is_reachable(struct qeth_card *); const char *qeth_get_cardname_short(struct qeth_card *); @@ -993,33 +979,25 @@ void qeth_clear_working_pool_list(struct qeth_card *); void qeth_clear_cmd_buffers(struct qeth_channel *); void qeth_clear_qdio_buffers(struct qeth_card *); void qeth_setadp_promisc_mode(struct qeth_card *); -struct net_device_stats *qeth_get_stats(struct net_device *); int qeth_setadpparms_change_macaddr(struct qeth_card *); void qeth_tx_timeout(struct net_device *); void qeth_prepare_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *); void qeth_release_buffer(struct qeth_channel *, struct qeth_cmd_buffer *); -void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob); +void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, + u16 cmd_length); struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *); int qeth_query_switch_attributes(struct qeth_card *card, struct qeth_switch_info *sw_info); -int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *, - int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long), - void *reply_param); +int qeth_query_card_info(struct qeth_card *card, + struct carrier_info *carrier_info); unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset); int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, struct sk_buff *skb, struct qeth_hdr *hdr, unsigned int offset, unsigned int hd_len, int elements_needed); int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); -int qeth_core_get_sset_count(struct net_device *, int); -void qeth_core_get_ethtool_stats(struct net_device *, - struct ethtool_stats *, u64 *); -void qeth_core_get_strings(struct net_device *, u32, u8 *); -void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *); void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...); -int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev, - struct ethtool_link_ksettings *cmd); int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback); int qeth_configure_cq(struct qeth_card *, enum qeth_cq); int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action); @@ -1036,14 +1014,16 @@ netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t); netdev_features_t qeth_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features); +void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats); int qeth_open(struct net_device *dev); int qeth_stop(struct net_device *dev); int qeth_vm_request_mac(struct qeth_card *card); 
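/*
 * A minimal sketch (not part of this patch) of how the new per-card and
 * per-queue counters can back the qeth_get_stats64() prototype above,
 * assuming every entry of the fixed out_qs[] array below no_out_queues
 * has been allocated:
 *
 *	void qeth_get_stats64(struct net_device *dev,
 *			      struct rtnl_link_stats64 *stats)
 *	{
 *		struct qeth_card *card = dev->ml_priv;
 *		unsigned int i;
 *
 *		stats->rx_packets = card->stats.rx_packets;
 *		stats->rx_bytes   = card->stats.rx_bytes;
 *		stats->rx_errors  = card->stats.rx_errors;
 *		stats->rx_dropped = card->stats.rx_dropped;
 *		stats->multicast  = card->stats.rx_multicast;
 *
 *		for (i = 0; i < card->qdio.no_out_queues; i++) {
 *			struct qeth_out_q_stats *txq = &card->qdio.out_qs[i]->stats;
 *
 *			stats->tx_packets += txq->tx_packets;
 *			stats->tx_bytes   += txq->tx_bytes;
 *			stats->tx_errors  += txq->tx_errors;
 *			stats->tx_dropped += txq->tx_dropped;
 *		}
 *	}
 */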
int qeth_xmit(struct qeth_card *card, struct sk_buff *skb, struct qeth_qdio_out_q *queue, int ipv, int cast_type, - void (*fill_header)(struct qeth_card *card, struct qeth_hdr *hdr, - struct sk_buff *skb, int ipv, int cast_type, + void (*fill_header)(struct qeth_qdio_out_q *queue, + struct qeth_hdr *hdr, struct sk_buff *skb, + int ipv, int cast_type, unsigned int data_len)); /* exports for OSN */ diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index dcc06e48b70b..4708df39f129 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -74,8 +74,7 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *queue, static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf); static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int); -struct workqueue_struct *qeth_wq; -EXPORT_SYMBOL_GPL(qeth_wq); +static struct workqueue_struct *qeth_wq; int qeth_card_hw_is_reachable(struct qeth_card *card) { @@ -540,6 +539,7 @@ static int __qeth_issue_next_read(struct qeth_card *card) QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n", rc, CARD_DEVID(card)); atomic_set(&channel->irq_pending, 0); + qeth_release_buffer(channel, iob); card->read_or_write_problem = 1; qeth_schedule_recovery(card); wake_up(&card->wait_q); @@ -566,6 +566,7 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card) if (reply) { refcount_set(&reply->refcnt, 1); atomic_set(&reply->received, 0); + init_waitqueue_head(&reply->wait_q); } return reply; } @@ -581,6 +582,26 @@ static void qeth_put_reply(struct qeth_reply *reply) kfree(reply); } +static void qeth_enqueue_reply(struct qeth_card *card, struct qeth_reply *reply) +{ + spin_lock_irq(&card->lock); + list_add_tail(&reply->list, &card->cmd_waiter_list); + spin_unlock_irq(&card->lock); +} + +static void qeth_dequeue_reply(struct qeth_card *card, struct qeth_reply *reply) +{ + spin_lock_irq(&card->lock); + list_del(&reply->list); + spin_unlock_irq(&card->lock); +} + +static void qeth_notify_reply(struct qeth_reply *reply) +{ + atomic_inc(&reply->received); + wake_up(&reply->wait_q); +} + static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc, struct qeth_card *card) { @@ -657,19 +678,15 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, void qeth_clear_ipacmd_list(struct qeth_card *card) { - struct qeth_reply *reply, *r; + struct qeth_reply *reply; unsigned long flags; QETH_CARD_TEXT(card, 4, "clipalst"); spin_lock_irqsave(&card->lock, flags); - list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) { - qeth_get_reply(reply); + list_for_each_entry(reply, &card->cmd_waiter_list, list) { reply->rc = -EIO; - atomic_inc(&reply->received); - list_del_init(&reply->list); - wake_up(&reply->wait_q); - qeth_put_reply(reply); + qeth_notify_reply(reply); } spin_unlock_irqrestore(&card->lock, flags); } @@ -726,7 +743,10 @@ void qeth_release_buffer(struct qeth_channel *channel, spin_lock_irqsave(&channel->iob_lock, flags); iob->state = BUF_STATE_FREE; iob->callback = qeth_send_control_data_cb; - iob->rc = 0; + if (iob->reply) { + qeth_put_reply(iob->reply); + iob->reply = NULL; + } spin_unlock_irqrestore(&channel->iob_lock, flags); wake_up(&channel->wait_q); } @@ -739,6 +759,17 @@ static void qeth_release_buffer_cb(struct qeth_card *card, qeth_release_buffer(channel, iob); } +static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc) +{ + struct qeth_reply *reply = iob->reply; + + if (reply) { + reply->rc = rc; + qeth_notify_reply(reply); + } + 
qeth_release_buffer(iob->channel, iob); +} + static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *channel) { struct qeth_cmd_buffer *buffer = NULL; @@ -774,9 +805,9 @@ static void qeth_send_control_data_cb(struct qeth_card *card, struct qeth_cmd_buffer *iob) { struct qeth_ipa_cmd *cmd = NULL; - struct qeth_reply *reply, *r; + struct qeth_reply *reply = NULL; + struct qeth_reply *r; unsigned long flags; - int keep_reply; int rc = 0; QETH_CARD_TEXT(card, 4, "sndctlcb"); @@ -808,44 +839,40 @@ static void qeth_send_control_data_cb(struct qeth_card *card, goto out; } + /* match against pending cmd requests */ spin_lock_irqsave(&card->lock, flags); - list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) { - if ((reply->seqno == QETH_IDX_COMMAND_SEQNO) || - ((cmd) && (reply->seqno == cmd->hdr.seqno))) { + list_for_each_entry(r, &card->cmd_waiter_list, list) { + if ((r->seqno == QETH_IDX_COMMAND_SEQNO) || + (cmd && (r->seqno == cmd->hdr.seqno))) { + reply = r; + /* take the object outside the lock */ qeth_get_reply(reply); - list_del_init(&reply->list); - spin_unlock_irqrestore(&card->lock, flags); - keep_reply = 0; - if (reply->callback != NULL) { - if (cmd) { - reply->offset = (__u16)((char *)cmd - - (char *)iob->data); - keep_reply = reply->callback(card, - reply, - (unsigned long)cmd); - } else - keep_reply = reply->callback(card, - reply, - (unsigned long)iob); - } - if (cmd) - reply->rc = (u16) cmd->hdr.return_code; - else if (iob->rc) - reply->rc = iob->rc; - if (keep_reply) { - spin_lock_irqsave(&card->lock, flags); - list_add_tail(&reply->list, - &card->cmd_waiter_list); - spin_unlock_irqrestore(&card->lock, flags); - } else { - atomic_inc(&reply->received); - wake_up(&reply->wait_q); - } - qeth_put_reply(reply); - goto out; + break; } } spin_unlock_irqrestore(&card->lock, flags); + + if (!reply) + goto out; + + if (!reply->callback) { + rc = 0; + } else { + if (cmd) { + reply->offset = (u16)((char *)cmd - (char *)iob->data); + rc = reply->callback(card, reply, (unsigned long)cmd); + } else { + rc = reply->callback(card, reply, (unsigned long)iob); + } + } + + if (rc <= 0) { + reply->rc = rc; + qeth_notify_reply(reply); + } + + qeth_put_reply(reply); + out: memcpy(&card->seqno.pdu_hdr_ack, QETH_PDU_HEADER_SEQ_NO(iob->data), @@ -976,9 +1003,8 @@ static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev, return 0; } -static long qeth_check_irb_error(struct qeth_card *card, - struct ccw_device *cdev, unsigned long intparm, - struct irb *irb) +static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev, + unsigned long intparm, struct irb *irb) { if (!IS_ERR(irb)) return 0; @@ -989,7 +1015,7 @@ static long qeth_check_irb_error(struct qeth_card *card, CCW_DEVID(cdev)); QETH_CARD_TEXT(card, 2, "ckirberr"); QETH_CARD_TEXT_(card, 2, " rc%d", -EIO); - break; + return -EIO; case -ETIMEDOUT: dev_warn(&cdev->dev, "A hardware operation timed out" " on the device\n"); @@ -1001,14 +1027,14 @@ static long qeth_check_irb_error(struct qeth_card *card, wake_up(&card->wait_q); } } - break; + return -ETIMEDOUT; default: QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n", PTR_ERR(irb), CCW_DEVID(cdev)); QETH_CARD_TEXT(card, 2, "ckirberr"); QETH_CARD_TEXT(card, 2, " rc???"); + return PTR_ERR(irb); } - return PTR_ERR(irb); } static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, @@ -1043,10 +1069,11 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, if (qeth_intparm_is_iob(intparm)) iob = (struct 
qeth_cmd_buffer *) __va((addr_t)intparm); - if (qeth_check_irb_error(card, cdev, intparm, irb)) { + rc = qeth_check_irb_error(card, cdev, intparm, irb); + if (rc) { /* IO was terminated, free its resources. */ if (iob) - qeth_release_buffer(iob->channel, iob); + qeth_cancel_cmd(iob, rc); atomic_set(&channel->irq_pending, 0); wake_up(&card->wait_q); return; @@ -1101,6 +1128,8 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, rc = qeth_get_problem(card, cdev, irb); if (rc) { card->read_or_write_problem = 1; + if (iob) + qeth_cancel_cmd(iob, rc); qeth_clear_ipacmd_list(card); qeth_schedule_recovery(card); goto out; @@ -1262,7 +1291,6 @@ static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers) channel->iob[cnt].state = BUF_STATE_FREE; channel->iob[cnt].channel = channel; channel->iob[cnt].callback = qeth_send_control_data_cb; - channel->iob[cnt].rc = 0; } if (cnt < QETH_CMD_BUFFER_NO) { qeth_clean_channel(channel); @@ -1439,6 +1467,10 @@ static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev) CARD_RDEV(card) = gdev->cdev[0]; CARD_WDEV(card) = gdev->cdev[1]; CARD_DDEV(card) = gdev->cdev[2]; + + card->event_wq = alloc_ordered_workqueue("%s", 0, dev_name(&gdev->dev)); + if (!card->event_wq) + goto out_wq; if (qeth_setup_channel(&card->read, true)) goto out_ip; if (qeth_setup_channel(&card->write, true)) @@ -1454,6 +1486,8 @@ out_data: out_channel: qeth_clean_channel(&card->read); out_ip: + destroy_workqueue(card->event_wq); +out_wq: dev_set_drvdata(&gdev->dev, NULL); kfree(card); out: @@ -1782,6 +1816,7 @@ static int qeth_idx_activate_get_answer(struct qeth_card *card, QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc); QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); atomic_set(&channel->irq_pending, 0); + qeth_release_buffer(channel, iob); wake_up(&card->wait_q); return rc; } @@ -1851,6 +1886,7 @@ static int qeth_idx_activate_channel(struct qeth_card *card, rc); QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); atomic_set(&channel->irq_pending, 0); + qeth_release_buffer(channel, iob); wake_up(&card->wait_q); return rc; } @@ -1981,7 +2017,7 @@ void qeth_prepare_control_data(struct qeth_card *card, int len, card->seqno.pdu_hdr++; memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data), &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH); - QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN); + QETH_DBF_HEX(CTRL, 2, iob->data, min(len, QETH_DBF_CTRL_LEN)); } EXPORT_SYMBOL_GPL(qeth_prepare_control_data); @@ -1998,24 +2034,22 @@ EXPORT_SYMBOL_GPL(qeth_prepare_control_data); * for the IPA commands. * @reply_param: private pointer passed to the callback * - * Returns the value of the `return_code' field of the response - * block returned from the hardware, or other error indication. - * Value of zero indicates successful execution of the command. - * * Callback function gets called one or more times, with cb_cmd * pointing to the response returned by the hardware. Callback - * function must return non-zero if more reply blocks are expected, - * and zero if the last or only reply block is received. Callback - * function can get the value of the reply_param pointer from the + * function must return + * > 0 if more reply blocks are expected, + * 0 if the last or only reply block is received, and + * < 0 on error. + * Callback function can get the value of the reply_param pointer from the * field 'param' of the structure qeth_reply. 
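* For example, a callback that only needs to check the IPA return code can do what qeth_send_ipa_cmd_cb() later in this patch does and return cmd->hdr.return_code ? -EIO : 0.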
*/ -int qeth_send_control_data(struct qeth_card *card, int len, - struct qeth_cmd_buffer *iob, - int (*reply_cb)(struct qeth_card *cb_card, - struct qeth_reply *cb_reply, - unsigned long cb_cmd), - void *reply_param) +static int qeth_send_control_data(struct qeth_card *card, int len, + struct qeth_cmd_buffer *iob, + int (*reply_cb)(struct qeth_card *cb_card, + struct qeth_reply *cb_reply, + unsigned long cb_cmd), + void *reply_param) { struct qeth_channel *channel = iob->channel; int rc; @@ -2031,12 +2065,15 @@ int qeth_send_control_data(struct qeth_card *card, int len, } reply = qeth_alloc_reply(card); if (!reply) { + qeth_release_buffer(channel, iob); return -ENOMEM; } reply->callback = reply_cb; reply->param = reply_param; - init_waitqueue_head(&reply->wait_q); + /* pairs with qeth_release_buffer(): */ + qeth_get_reply(reply); + iob->reply = reply; while (atomic_cmpxchg(&channel->irq_pending, 0, 1)) ; @@ -2051,9 +2088,7 @@ int qeth_send_control_data(struct qeth_card *card, int len, } qeth_prepare_control_data(card, len, iob); - spin_lock_irq(&card->lock); - list_add_tail(&reply->list, &card->cmd_waiter_list); - spin_unlock_irq(&card->lock); + qeth_enqueue_reply(card, reply); timeout = jiffies + event_timeout; @@ -2066,10 +2101,8 @@ int qeth_send_control_data(struct qeth_card *card, int len, QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n", CARD_DEVID(card), rc); QETH_CARD_TEXT_(card, 2, " err%d", rc); - spin_lock_irq(&card->lock); - list_del_init(&reply->list); + qeth_dequeue_reply(card, reply); qeth_put_reply(reply); - spin_unlock_irq(&card->lock); qeth_release_buffer(channel, iob); atomic_set(&channel->irq_pending, 0); wake_up(&card->wait_q); @@ -2091,21 +2124,16 @@ int qeth_send_control_data(struct qeth_card *card, int len, } } + qeth_dequeue_reply(card, reply); rc = reply->rc; qeth_put_reply(reply); return rc; time_err: - reply->rc = -ETIME; - spin_lock_irq(&card->lock); - list_del_init(&reply->list); - spin_unlock_irq(&card->lock); - atomic_inc(&reply->received); - rc = reply->rc; + qeth_dequeue_reply(card, reply); qeth_put_reply(reply); - return rc; + return -ETIME; } -EXPORT_SYMBOL_GPL(qeth_send_control_data); static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) @@ -2310,9 +2338,8 @@ static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply, QETH_DBF_TEXT(SETUP, 2, "olmlimit"); dev_err(&card->gdev->dev, "A connection could not be " "established because of an OLM limit\n"); - iob->rc = -EMLINK; + return -EMLINK; } - QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); return 0; } @@ -2362,11 +2389,12 @@ static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx) return 0; } -static void qeth_free_qdio_out_buf(struct qeth_qdio_out_q *q) +static void qeth_free_output_queue(struct qeth_qdio_out_q *q) { if (!q) return; + qeth_clear_outq_buffers(q, 1); qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); kfree(q); } @@ -2405,12 +2433,6 @@ static int qeth_alloc_qdio_buffers(struct qeth_card *card) goto out_freeinq; /* outbound */ - card->qdio.out_qs = - kcalloc(card->qdio.no_out_queues, - sizeof(struct qeth_qdio_out_q *), - GFP_KERNEL); - if (!card->qdio.out_qs) - goto out_freepool; for (i = 0; i < card->qdio.no_out_queues; ++i) { card->qdio.out_qs[i] = qeth_alloc_qdio_out_buf(); if (!card->qdio.out_qs[i]) @@ -2441,12 +2463,9 @@ out_freeoutqbufs: } out_freeoutq: while (i > 0) { - qeth_free_qdio_out_buf(card->qdio.out_qs[--i]); - qeth_clear_outq_buffers(card->qdio.out_qs[i], 1); + 
qeth_free_output_queue(card->qdio.out_qs[--i]); + card->qdio.out_qs[i] = NULL; } - kfree(card->qdio.out_qs); - card->qdio.out_qs = NULL; -out_freepool: qeth_free_buffer_pool(card); out_freeinq: qeth_free_qdio_queue(card->qdio.in_q); @@ -2475,13 +2494,9 @@ static void qeth_free_qdio_buffers(struct qeth_card *card) /* inbound buffer pool */ qeth_free_buffer_pool(card); /* free outbound qdio_qs */ - if (card->qdio.out_qs) { - for (i = 0; i < card->qdio.no_out_queues; ++i) { - qeth_clear_outq_buffers(card->qdio.out_qs[i], 1); - qeth_free_qdio_out_buf(card->qdio.out_qs[i]); - } - kfree(card->qdio.out_qs); - card->qdio.out_qs = NULL; + for (i = 0; i < card->qdio.no_out_queues; i++) { + qeth_free_output_queue(card->qdio.out_qs[i]); + card->qdio.out_qs[i] = NULL; } } @@ -2688,8 +2703,7 @@ static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry( } else { free_page((unsigned long)entry->elements[i]); entry->elements[i] = page_address(page); - if (card->options.performance_stats) - card->perf_stats.sg_alloc_page_rx++; + QETH_CARD_STAT_INC(card, rx_sg_alloc_page); } } } @@ -2808,14 +2822,20 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card, cmd->hdr.prot_version = prot; } -void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob) +void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, + u16 cmd_length) { + u16 total_length = IPA_PDU_HEADER_SIZE + cmd_length; u8 prot_type = qeth_mpc_select_prot_type(card); memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); + memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2); memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1); + memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2); + memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2); memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); + memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2); } EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd); @@ -2826,7 +2846,7 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card, iob = qeth_get_buffer(&card->write); if (iob) { - qeth_prepare_ipa_cmd(card, iob); + qeth_prepare_ipa_cmd(card, iob, sizeof(struct qeth_ipa_cmd)); qeth_fill_ipacmd_header(card, __ipa_cmd(iob), ipacmd, prot); } else { dev_warn(&card->gdev->dev, @@ -2839,6 +2859,14 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card, } EXPORT_SYMBOL_GPL(qeth_get_ipacmd_buffer); +static int qeth_send_ipa_cmd_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + + return (cmd->hdr.return_code) ? 
-EIO : 0; +} + /** * qeth_send_ipa_cmd() - send an IPA command * @@ -2850,11 +2878,15 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, unsigned long), void *reply_param) { + u16 length; int rc; QETH_CARD_TEXT(card, 4, "sendipa"); - rc = qeth_send_control_data(card, IPA_CMD_LENGTH, - iob, reply_cb, reply_param); + + if (reply_cb == NULL) + reply_cb = qeth_send_ipa_cmd_cb; + memcpy(&length, QETH_IPA_PDU_LEN_TOTAL(iob->data), 2); + rc = qeth_send_control_data(card, length, iob, reply_cb, reply_param); if (rc == -ETIME) { qeth_clear_ipacmd_list(card); qeth_schedule_recovery(card); @@ -2863,9 +2895,19 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, } EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd); +static int qeth_send_startlan_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + + if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE) + return -ENETDOWN; + + return (cmd->hdr.return_code) ? -EIO : 0; +} + static int qeth_send_startlan(struct qeth_card *card) { - int rc; struct qeth_cmd_buffer *iob; QETH_DBF_TEXT(SETUP, 2, "strtlan"); @@ -2873,8 +2915,7 @@ static int qeth_send_startlan(struct qeth_card *card) iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0); if (!iob) return -ENOMEM; - rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); - return rc; + return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL); } static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd) @@ -2892,7 +2933,7 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card, QETH_CARD_TEXT(card, 3, "quyadpcb"); if (qeth_setadpparms_inspect_rc(cmd)) - return 0; + return -EIO; if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) { card->info.link_type = @@ -2947,19 +2988,18 @@ static int qeth_query_ipassists_cb(struct qeth_card *card, cmd = (struct qeth_ipa_cmd *) data; switch (cmd->hdr.return_code) { + case IPA_RC_SUCCESS: + break; case IPA_RC_NOTSUPP: case IPA_RC_L2_UNSUPPORTED_CMD: QETH_DBF_TEXT(SETUP, 2, "ipaunsup"); card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS; card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS; - return 0; + return -EOPNOTSUPP; default: - if (cmd->hdr.return_code) { - QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n", - CARD_DEVID(card), - cmd->hdr.return_code); - return 0; - } + QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n", + CARD_DEVID(card), cmd->hdr.return_code); + return -EIO; } if (cmd->hdr.prot_version == QETH_PROT_IPV4) { @@ -2997,7 +3037,7 @@ static int qeth_query_switch_attributes_cb(struct qeth_card *card, QETH_CARD_TEXT(card, 2, "qswiatcb"); if (qeth_setadpparms_inspect_rc(cmd)) - return 0; + return -EIO; sw_info = (struct qeth_switch_info *)reply->param; attrs = &cmd->data.setadapterparms.data.query_switch_attributes; @@ -3029,15 +3069,15 @@ int qeth_query_switch_attributes(struct qeth_card *card, static int qeth_query_setdiagass_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; - __u16 rc; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + u16 rc = cmd->hdr.return_code; - cmd = (struct qeth_ipa_cmd *)data; - rc = cmd->hdr.return_code; - if (rc) + if (rc) { QETH_CARD_TEXT_(card, 2, "diagq:%x", rc); - else - card->info.diagass_support = cmd->data.diagass.ext; + return -EIO; + } + + card->info.diagass_support = cmd->data.diagass.ext; return 0; } @@ -3084,13 +3124,13 @@ static void 
qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid) static int qeth_hw_trap_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; - __u16 rc; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + u16 rc = cmd->hdr.return_code; - cmd = (struct qeth_ipa_cmd *)data; - rc = cmd->hdr.return_code; - if (rc) + if (rc) { QETH_CARD_TEXT_(card, 2, "trapc:%x", rc); + return -EIO; + } return 0; } @@ -3139,7 +3179,7 @@ static int qeth_check_qdio_errors(struct qeth_card *card, buf->element[14].sflags); QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error); if ((buf->element[15].sflags) == 0x12) { - card->stats.rx_dropped++; + QETH_CARD_STAT_INC(card, rx_dropped); return 0; } else return 1; @@ -3203,17 +3243,8 @@ static void qeth_queue_input_buffer(struct qeth_card *card, int index) * 'index') un-requeued -> this buffer is the first buffer that * will be requeued the next time */ - if (card->options.performance_stats) { - card->perf_stats.inbound_do_qdio_cnt++; - card->perf_stats.inbound_do_qdio_start_time = - qeth_get_micros(); - } rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, queue->next_buf_to_init, count); - if (card->options.performance_stats) - card->perf_stats.inbound_do_qdio_time += - qeth_get_micros() - - card->perf_stats.inbound_do_qdio_start_time; if (rc) { QETH_CARD_TEXT(card, 2, "qinberr"); } @@ -3290,8 +3321,7 @@ static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue) >= QETH_HIGH_WATERMARK_PACK){ /* switch non-PACKING -> PACKING */ QETH_CARD_TEXT(queue->card, 6, "np->pack"); - if (queue->card->options.performance_stats) - queue->card->perf_stats.sc_dp_p++; + QETH_TXQ_STAT_INC(queue, packing_mode_switch); queue->do_pack = 1; } } @@ -3310,8 +3340,7 @@ static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) <= QETH_LOW_WATERMARK_PACK) { /* switch PACKING -> non-PACKING */ QETH_CARD_TEXT(queue->card, 6, "pack->np"); - if (queue->card->options.performance_stats) - queue->card->perf_stats.sc_p_dp++; + QETH_TXQ_STAT_INC(queue, packing_mode_switch); queue->do_pack = 0; return qeth_prep_flush_pack_buffer(queue); } @@ -3365,25 +3394,16 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, } } + QETH_TXQ_STAT_ADD(queue, bufs, count); netif_trans_update(queue->card->dev); - if (queue->card->options.performance_stats) { - queue->card->perf_stats.outbound_do_qdio_cnt++; - queue->card->perf_stats.outbound_do_qdio_start_time = - qeth_get_micros(); - } qdio_flags = QDIO_FLAG_SYNC_OUTPUT; if (atomic_read(&queue->set_pci_flags_count)) qdio_flags |= QDIO_FLAG_PCI_OUT; atomic_add(count, &queue->used_buffers); - rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags, queue->queue_no, index, count); - if (queue->card->options.performance_stats) - queue->card->perf_stats.outbound_do_qdio_time += - qeth_get_micros() - - queue->card->perf_stats.outbound_do_qdio_start_time; if (rc) { - queue->card->stats.tx_errors += count; + QETH_TXQ_STAT_ADD(queue, tx_errors, count); /* ignore temporary SIGA errors without busy condition */ if (rc == -ENOBUFS) return; @@ -3398,8 +3418,6 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, qeth_schedule_recovery(queue->card); return; } - if (queue->card->options.performance_stats) - queue->card->perf_stats.bufs_sent += count; } static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) @@ -3430,10 +3448,8 @@ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) if (!flush_cnt && 
!atomic_read(&queue->set_pci_flags_count)) flush_cnt += qeth_prep_flush_pack_buffer(queue); - if (queue->card->options.performance_stats && - q_was_packing) - queue->card->perf_stats.bufs_sent_pack += - flush_cnt; + if (q_was_packing) + QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt); if (flush_cnt) qeth_flush_buffers(queue, index, flush_cnt); atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); @@ -3488,7 +3504,7 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err, int rc; if (!qeth_is_cq(card, queue)) - goto out; + return; QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element); QETH_CARD_TEXT_(card, 5, "qcqhc%d", count); @@ -3497,12 +3513,7 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err, if (qdio_err) { netif_stop_queue(card->dev); qeth_schedule_recovery(card); - goto out; - } - - if (card->options.performance_stats) { - card->perf_stats.cq_cnt++; - card->perf_stats.cq_start_time = qeth_get_micros(); + return; } for (i = first_element; i < first_element + count; ++i) { @@ -3530,14 +3541,6 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err, } card->qdio.c_q->next_buf_to_init = (card->qdio.c_q->next_buf_to_init + count) % QDIO_MAX_BUFFERS_PER_Q; - - if (card->options.performance_stats) { - int delta_t = qeth_get_micros(); - delta_t -= card->perf_stats.cq_start_time; - card->perf_stats.cq_time += delta_t; - } -out: - return; } static void qeth_qdio_input_handler(struct ccw_device *ccwdev, @@ -3573,11 +3576,7 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev, qeth_schedule_recovery(card); return; } - if (card->options.performance_stats) { - card->perf_stats.outbound_handler_cnt++; - card->perf_stats.outbound_handler_start_time = - qeth_get_micros(); - } + for (i = first_element; i < (first_element + count); ++i) { int bidx = i % QDIO_MAX_BUFFERS_PER_Q; buffer = queue->bufs[bidx]; @@ -3623,9 +3622,6 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev, qeth_check_outbound_queue(queue); netif_wake_queue(queue->card->dev); - if (card->options.performance_stats) - card->perf_stats.outbound_handler_time += qeth_get_micros() - - card->perf_stats.outbound_handler_start_time; } /* We cannot use outbound queue 3 for unicast packets on HiperSockets */ @@ -3746,11 +3742,12 @@ EXPORT_SYMBOL_GPL(qeth_count_elements); * The number of needed buffer elements is returned in @elements. * Error to create the hdr is indicated by returning with < 0. */ -static int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb, - struct qeth_hdr **hdr, unsigned int hdr_len, - unsigned int proto_len, unsigned int *elements) +static int qeth_add_hw_header(struct qeth_qdio_out_q *queue, + struct sk_buff *skb, struct qeth_hdr **hdr, + unsigned int hdr_len, unsigned int proto_len, + unsigned int *elements) { - const unsigned int max_elements = QETH_MAX_BUFFER_ELEMENTS(card); + const unsigned int max_elements = QETH_MAX_BUFFER_ELEMENTS(queue->card); const unsigned int contiguous = proto_len ? 
proto_len : 1; unsigned int __elements; addr_t start, end; @@ -3789,15 +3786,12 @@ check_layout: } rc = skb_linearize(skb); - if (card->options.performance_stats) { - if (rc) - card->perf_stats.tx_linfail++; - else - card->perf_stats.tx_lin++; - } - if (rc) + if (rc) { + QETH_TXQ_STAT_INC(queue, skbs_linearized_fail); return rc; + } + QETH_TXQ_STAT_INC(queue, skbs_linearized); /* Linearization changed the layout, re-evaluate: */ goto check_layout; } @@ -3921,9 +3915,8 @@ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue, QETH_CARD_TEXT(queue->card, 6, "fillbfnp"); } else { QETH_CARD_TEXT(queue->card, 6, "fillbfpa"); - if (queue->card->options.performance_stats) - queue->card->perf_stats.skbs_sent_pack++; + QETH_TXQ_STAT_INC(queue, skbs_pack); /* If the buffer still has free elements, keep using it. */ if (buf->next_element_to_fill < QETH_MAX_BUFFER_ELEMENTS(queue->card)) @@ -4037,8 +4030,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, } out: /* at this point the queue is UNLOCKED again */ - if (queue->card->options.performance_stats && do_pack) - queue->card->perf_stats.bufs_sent_pack += flush_count; + if (do_pack) + QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count); return rc; } @@ -4062,8 +4055,9 @@ static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr, int qeth_xmit(struct qeth_card *card, struct sk_buff *skb, struct qeth_qdio_out_q *queue, int ipv, int cast_type, - void (*fill_header)(struct qeth_card *card, struct qeth_hdr *hdr, - struct sk_buff *skb, int ipv, int cast_type, + void (*fill_header)(struct qeth_qdio_out_q *queue, + struct qeth_hdr *hdr, struct sk_buff *skb, + int ipv, int cast_type, unsigned int data_len)) { unsigned int proto_len, hw_hdr_len; @@ -4088,7 +4082,7 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb, if (rc) return rc; - push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, proto_len, + push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len, &elements); if (push_len < 0) return push_len; @@ -4098,7 +4092,7 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb, data_offset = push_len + proto_len; } memset(hdr, 0, hw_hdr_len); - fill_header(card, hdr, skb, ipv, cast_type, frame_len); + fill_header(queue, hdr, skb, ipv, cast_type, frame_len); if (is_tso) qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr, frame_len - proto_len, skb, proto_len); @@ -4115,14 +4109,12 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb, } if (!rc) { - if (card->options.performance_stats) { - card->perf_stats.buf_elements_sent += elements; - if (is_sg) - card->perf_stats.sg_skbs_sent++; - if (is_tso) { - card->perf_stats.large_send_bytes += frame_len; - card->perf_stats.large_send_cnt++; - } + QETH_TXQ_STAT_ADD(queue, buf_elements, elements); + if (is_sg) + QETH_TXQ_STAT_INC(queue, skbs_sg); + if (is_tso) { + QETH_TXQ_STAT_INC(queue, skbs_tso); + QETH_TXQ_STAT_ADD(queue, tso_bytes, frame_len); } } else { if (!push_len) @@ -4149,7 +4141,7 @@ static int qeth_setadp_promisc_mode_cb(struct qeth_card *card, setparms->data.mode = SET_PROMISC_MODE_OFF; } card->info.promisc_mode = setparms->data.mode; - return 0; + return (cmd->hdr.return_code) ? 
-EIO : 0; } void qeth_setadp_promisc_mode(struct qeth_card *card) @@ -4181,18 +4173,6 @@ void qeth_setadp_promisc_mode(struct qeth_card *card) } EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode); -struct net_device_stats *qeth_get_stats(struct net_device *dev) -{ - struct qeth_card *card; - - card = dev->ml_priv; - - QETH_CARD_TEXT(card, 5, "getstat"); - - return &card->stats; -} -EXPORT_SYMBOL_GPL(qeth_get_stats); - static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { @@ -4201,12 +4181,15 @@ static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card, QETH_CARD_TEXT(card, 4, "chgmaccb"); if (qeth_setadpparms_inspect_rc(cmd)) - return 0; + return -EIO; adp_cmd = &cmd->data.setadapterparms; + if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr)) + return -EADDRNOTAVAIL; + if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) && !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC)) - return 0; + return -EADDRNOTAVAIL; ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr); return 0; @@ -4245,7 +4228,7 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, QETH_CARD_TEXT(card, 4, "setaccb"); if (cmd->hdr.return_code) - return 0; + return -EIO; qeth_setadpparms_inspect_rc(cmd); access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; @@ -4319,7 +4302,7 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, card->options.isolation = card->options.prev_isolation; break; } - return 0; + return (cmd->hdr.return_code) ? -EIO : 0; } static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card, @@ -4383,7 +4366,7 @@ void qeth_tx_timeout(struct net_device *dev) card = dev->ml_priv; QETH_CARD_TEXT(card, 4, "txtimeo"); - card->stats.tx_errors++; + QETH_CARD_STAT_INC(card, tx_errors); qeth_schedule_recovery(card); } EXPORT_SYMBOL_GPL(qeth_tx_timeout); @@ -4453,27 +4436,6 @@ static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum) return rc; } -static int qeth_send_ipa_snmp_cmd(struct qeth_card *card, - struct qeth_cmd_buffer *iob, int len, - int (*reply_cb)(struct qeth_card *, struct qeth_reply *, - unsigned long), - void *reply_param) -{ - u16 s1, s2; - - QETH_CARD_TEXT(card, 4, "sendsnmp"); - - /* adjust PDU length fields in IPA_PDU_HEADER */ - s1 = (u32) IPA_PDU_HEADER_SIZE + len; - s2 = (u32) len; - memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2); - memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2); - memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2); - memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2); - return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob, - reply_cb, reply_param); -} - static int qeth_snmp_command_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long sdata) { @@ -4491,13 +4453,13 @@ static int qeth_snmp_command_cb(struct qeth_card *card, if (cmd->hdr.return_code) { QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code); - return 0; + return -EIO; } if (cmd->data.setadapterparms.hdr.return_code) { cmd->hdr.return_code = cmd->data.setadapterparms.hdr.return_code; QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code); - return 0; + return -EIO; } data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data)); if (cmd->data.setadapterparms.hdr.seq_no == 1) { @@ -4512,9 +4474,8 @@ static int qeth_snmp_command_cb(struct qeth_card *card, /* check if there is enough room in userspace */ if ((qinfo->udata_len - qinfo->udata_offset) < data_len) { - QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOMEM); - 
cmd->hdr.return_code = IPA_RC_ENOMEM; - return 0; + QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC); + return -ENOSPC; } QETH_CARD_TEXT_(card, 4, "snore%i", cmd->data.setadapterparms.hdr.used_total); @@ -4579,10 +4540,13 @@ static int qeth_snmp_command(struct qeth_card *card, char __user *udata) rc = -ENOMEM; goto out; } + + /* for large requests, fix-up the length fields: */ + qeth_prepare_ipa_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len); + cmd = __ipa_cmd(iob); memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len); - rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len, - qeth_snmp_command_cb, (void *)&qinfo); + rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo); if (rc) QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n", CARD_DEVID(card), rc); @@ -4606,16 +4570,14 @@ static int qeth_setadpparms_query_oat_cb(struct qeth_card *card, QETH_CARD_TEXT(card, 3, "qoatcb"); if (qeth_setadpparms_inspect_rc(cmd)) - return 0; + return -EIO; priv = (struct qeth_qoat_priv *)reply->param; resdatalen = cmd->data.setadapterparms.hdr.cmdlength; resdata = (char *)data + 28; - if (resdatalen > (priv->buffer_len - priv->response_len)) { - cmd->hdr.return_code = IPA_RC_FFFF; - return 0; - } + if (resdatalen > (priv->buffer_len - priv->response_len)) + return -ENOSPC; memcpy((priv->buffer + priv->response_len), resdata, resdatalen); @@ -4688,9 +4650,7 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata) if (copy_to_user(udata, &oat_data, sizeof(struct qeth_query_oat_data))) rc = -EFAULT; - } else - if (rc == IPA_RC_FFFF) - rc = -EFAULT; + } out_free: vfree(priv.buffer); @@ -4707,7 +4667,7 @@ static int qeth_query_card_info_cb(struct qeth_card *card, QETH_CARD_TEXT(card, 2, "qcrdincb"); if (qeth_setadpparms_inspect_rc(cmd)) - return 0; + return -EIO; card_info = &cmd->data.setadapterparms.data.card_info; carrier_info->card_type = card_info->card_type; @@ -4716,8 +4676,8 @@ static int qeth_query_card_info_cb(struct qeth_card *card, return 0; } -static int qeth_query_card_info(struct qeth_card *card, - struct carrier_info *carrier_info) +int qeth_query_card_info(struct qeth_card *card, + struct carrier_info *carrier_info) { struct qeth_cmd_buffer *iob; @@ -4994,6 +4954,7 @@ static void qeth_core_free_card(struct qeth_card *card) qeth_clean_channel(&card->read); qeth_clean_channel(&card->write); qeth_clean_channel(&card->data); + destroy_workqueue(card->event_wq); qeth_free_qdio_buffers(card); unregister_service_level(&card->qeth_service_level); dev_set_drvdata(&card->gdev->dev, NULL); @@ -5108,12 +5069,10 @@ retriable: rc = qeth_send_startlan(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); - if (rc == IPA_RC_LAN_OFFLINE) { - dev_warn(&card->gdev->dev, - "The LAN is offline\n"); + if (rc == -ENETDOWN) { + dev_warn(&card->gdev->dev, "The LAN is offline\n"); *carrier_ok = false; } else { - rc = -ENODEV; goto out; } } else { @@ -5265,7 +5224,7 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card, QETH_CARD_TEXT(card, 4, "unexeob"); QETH_CARD_HEX(card, 2, buffer, sizeof(void *)); dev_kfree_skb_any(skb); - card->stats.rx_errors++; + QETH_CARD_STAT_INC(card, rx_errors); return NULL; } element++; @@ -5277,16 +5236,17 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card, } *__element = element; *__offset = offset; - if (use_rx_sg && card->options.performance_stats) { - card->perf_stats.sg_skbs_rx++; - card->perf_stats.sg_frags_rx += skb_shinfo(skb)->nr_frags; + if (use_rx_sg) { + QETH_CARD_STAT_INC(card, 
rx_sg_skbs); + QETH_CARD_STAT_ADD(card, rx_sg_frags, + skb_shinfo(skb)->nr_frags); } return skb; no_mem: if (net_ratelimit()) { QETH_CARD_TEXT(card, 2, "noskbmem"); } - card->stats.rx_dropped++; + QETH_CARD_STAT_INC(card, rx_dropped); return NULL; } EXPORT_SYMBOL_GPL(qeth_core_get_next_skb); @@ -5299,11 +5259,6 @@ int qeth_poll(struct napi_struct *napi, int budget) int done; int new_budget = budget; - if (card->options.performance_stats) { - card->perf_stats.inbound_cnt++; - card->perf_stats.inbound_start_time = qeth_get_micros(); - } - while (1) { if (!card->rx.b_count) { card->rx.qdio_err = 0; @@ -5332,8 +5287,7 @@ int qeth_poll(struct napi_struct *napi, int budget) done = 1; if (done) { - if (card->options.performance_stats) - card->perf_stats.bufs_rec++; + QETH_CARD_STAT_INC(card, rx_bufs); qeth_put_buffer_pool_entry(card, buffer->pool_entry); qeth_queue_input_buffer(card, card->rx.b_index); @@ -5361,9 +5315,6 @@ int qeth_poll(struct napi_struct *napi, int budget) if (qdio_start_irq(card->data.ccwdev, 0)) napi_schedule(&card->napi); out: - if (card->options.performance_stats) - card->perf_stats.inbound_time += qeth_get_micros() - - card->perf_stats.inbound_start_time; return work_done; } EXPORT_SYMBOL_GPL(qeth_poll); @@ -5383,7 +5334,7 @@ static int qeth_setassparms_get_caps_cb(struct qeth_card *card, struct qeth_ipa_caps *caps = reply->param; if (qeth_setassparms_inspect_rc(cmd)) - return 0; + return -EIO; caps->supported = cmd->data.setassparms.data.caps.supported; caps->enabled = cmd->data.setassparms.data.caps.enabled; @@ -5393,18 +5344,18 @@ static int qeth_setassparms_get_caps_cb(struct qeth_card *card, int qeth_setassparms_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; QETH_CARD_TEXT(card, 4, "defadpcb"); - cmd = (struct qeth_ipa_cmd *) data; - if (cmd->hdr.return_code == 0) { - cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; - if (cmd->hdr.prot_version == QETH_PROT_IPV4) - card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled; - if (cmd->hdr.prot_version == QETH_PROT_IPV6) - card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled; - } + if (cmd->hdr.return_code) + return -EIO; + + cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; + if (cmd->hdr.prot_version == QETH_PROT_IPV4) + card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled; + if (cmd->hdr.prot_version == QETH_PROT_IPV6) + card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled; return 0; } EXPORT_SYMBOL_GPL(qeth_setassparms_cb); @@ -5650,7 +5601,10 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card) SET_NETDEV_DEV(dev, &card->gdev->dev); netif_carrier_off(dev); - if (!IS_OSN(card)) { + if (IS_OSN(card)) { + dev->ethtool_ops = &qeth_osn_ethtool_ops; + } else { + dev->ethtool_ops = &qeth_ethtool_ops; dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->hw_features |= NETIF_F_SG; dev->vlan_features |= NETIF_F_SG; @@ -5896,9 +5850,6 @@ int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) if (!card) return -ENODEV; - if (card->info.type == QETH_CARD_TYPE_OSN) - return -EPERM; - switch (cmd) { case SIOC_QETH_ADP_SET_SNMP_CONTROL: rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data); @@ -5938,452 +5889,91 @@ int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) } EXPORT_SYMBOL_GPL(qeth_do_ioctl); -static struct { - const char str[ETH_GSTRING_LEN]; -} qeth_ethtool_stats_keys[] = { -/* 0 */{"rx skbs"}, - {"rx buffers"}, - {"tx skbs"}, - {"tx 
buffers"}, - {"tx skbs no packing"}, - {"tx buffers no packing"}, - {"tx skbs packing"}, - {"tx buffers packing"}, - {"tx sg skbs"}, - {"tx buffer elements"}, -/* 10 */{"rx sg skbs"}, - {"rx sg frags"}, - {"rx sg page allocs"}, - {"tx large kbytes"}, - {"tx large count"}, - {"tx pk state ch n->p"}, - {"tx pk state ch p->n"}, - {"tx pk watermark low"}, - {"tx pk watermark high"}, - {"queue 0 buffer usage"}, -/* 20 */{"queue 1 buffer usage"}, - {"queue 2 buffer usage"}, - {"queue 3 buffer usage"}, - {"rx poll time"}, - {"rx poll count"}, - {"rx do_QDIO time"}, - {"rx do_QDIO count"}, - {"tx handler time"}, - {"tx handler count"}, - {"tx time"}, -/* 30 */{"tx count"}, - {"tx do_QDIO time"}, - {"tx do_QDIO count"}, - {"tx csum"}, - {"tx lin"}, - {"tx linfail"}, - {"cq handler count"}, - {"cq handler time"}, - {"rx csum"} -}; - -int qeth_core_get_sset_count(struct net_device *dev, int stringset) -{ - switch (stringset) { - case ETH_SS_STATS: - return (sizeof(qeth_ethtool_stats_keys) / ETH_GSTRING_LEN); - default: - return -EINVAL; - } -} -EXPORT_SYMBOL_GPL(qeth_core_get_sset_count); - -void qeth_core_get_ethtool_stats(struct net_device *dev, - struct ethtool_stats *stats, u64 *data) -{ - struct qeth_card *card = dev->ml_priv; - data[0] = card->stats.rx_packets - - card->perf_stats.initial_rx_packets; - data[1] = card->perf_stats.bufs_rec; - data[2] = card->stats.tx_packets - - card->perf_stats.initial_tx_packets; - data[3] = card->perf_stats.bufs_sent; - data[4] = card->stats.tx_packets - card->perf_stats.initial_tx_packets - - card->perf_stats.skbs_sent_pack; - data[5] = card->perf_stats.bufs_sent - card->perf_stats.bufs_sent_pack; - data[6] = card->perf_stats.skbs_sent_pack; - data[7] = card->perf_stats.bufs_sent_pack; - data[8] = card->perf_stats.sg_skbs_sent; - data[9] = card->perf_stats.buf_elements_sent; - data[10] = card->perf_stats.sg_skbs_rx; - data[11] = card->perf_stats.sg_frags_rx; - data[12] = card->perf_stats.sg_alloc_page_rx; - data[13] = (card->perf_stats.large_send_bytes >> 10); - data[14] = card->perf_stats.large_send_cnt; - data[15] = card->perf_stats.sc_dp_p; - data[16] = card->perf_stats.sc_p_dp; - data[17] = QETH_LOW_WATERMARK_PACK; - data[18] = QETH_HIGH_WATERMARK_PACK; - data[19] = atomic_read(&card->qdio.out_qs[0]->used_buffers); - data[20] = (card->qdio.no_out_queues > 1) ? - atomic_read(&card->qdio.out_qs[1]->used_buffers) : 0; - data[21] = (card->qdio.no_out_queues > 2) ? - atomic_read(&card->qdio.out_qs[2]->used_buffers) : 0; - data[22] = (card->qdio.no_out_queues > 3) ? 
- atomic_read(&card->qdio.out_qs[3]->used_buffers) : 0; - data[23] = card->perf_stats.inbound_time; - data[24] = card->perf_stats.inbound_cnt; - data[25] = card->perf_stats.inbound_do_qdio_time; - data[26] = card->perf_stats.inbound_do_qdio_cnt; - data[27] = card->perf_stats.outbound_handler_time; - data[28] = card->perf_stats.outbound_handler_cnt; - data[29] = card->perf_stats.outbound_time; - data[30] = card->perf_stats.outbound_cnt; - data[31] = card->perf_stats.outbound_do_qdio_time; - data[32] = card->perf_stats.outbound_do_qdio_cnt; - data[33] = card->perf_stats.tx_csum; - data[34] = card->perf_stats.tx_lin; - data[35] = card->perf_stats.tx_linfail; - data[36] = card->perf_stats.cq_cnt; - data[37] = card->perf_stats.cq_time; - data[38] = card->perf_stats.rx_csum; -} -EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats); - -void qeth_core_get_strings(struct net_device *dev, u32 stringset, u8 *data) -{ - switch (stringset) { - case ETH_SS_STATS: - memcpy(data, &qeth_ethtool_stats_keys, - sizeof(qeth_ethtool_stats_keys)); - break; - default: - WARN_ON(1); - break; - } -} -EXPORT_SYMBOL_GPL(qeth_core_get_strings); - -void qeth_core_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) -{ - struct qeth_card *card = dev->ml_priv; - - strlcpy(info->driver, IS_LAYER2(card) ? "qeth_l2" : "qeth_l3", - sizeof(info->driver)); - strlcpy(info->version, "1.0", sizeof(info->version)); - strlcpy(info->fw_version, card->info.mcl_level, - sizeof(info->fw_version)); - snprintf(info->bus_info, sizeof(info->bus_info), "%s/%s/%s", - CARD_RDEV_ID(card), CARD_WDEV_ID(card), CARD_DDEV_ID(card)); -} -EXPORT_SYMBOL_GPL(qeth_core_get_drvinfo); - -/* Helper function to fill 'advertising' and 'supported' which are the same. */ -/* Autoneg and full-duplex are supported and advertised unconditionally. */ -/* Always advertise and support all speeds up to specified, and only one */ -/* specified port type. 
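/* [Editor's note: annotation, not part of the patch.]
 * The block being removed here is the classic hard-coded ethtool stats
 * scheme: a fixed string-key array plus data[0]..data[38] assignments that
 * must be kept in lock-step by hand. The replacement, in the new
 * drivers/s390/net/qeth_ethtool.c further down, is an offsetof()-based
 * table, so adding a counter means adding one table entry. The dump side
 * then reduces to a table walk; a sketch with a hypothetical helper name
 * (the real one below is qeth_add_stat_data()):
 */
static void example_dump_stats(u64 *data, const void *src,
			       const struct qeth_stats stats[],
			       unsigned int size)
{
	unsigned int i;

	/* one table walk replaces 39 hand-numbered data[i] assignments */
	for (i = 0; i < size; i++)
		*data++ = *(const u64 *)((const char *)src + stats[i].offset);
}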
*/ -static void qeth_set_cmd_adv_sup(struct ethtool_link_ksettings *cmd, - int maxspeed, int porttype) -{ - ethtool_link_ksettings_zero_link_mode(cmd, supported); - ethtool_link_ksettings_zero_link_mode(cmd, advertising); - ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising); - - ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); - ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); - - switch (porttype) { - case PORT_TP: - ethtool_link_ksettings_add_link_mode(cmd, supported, TP); - ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); - break; - case PORT_FIBRE: - ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); - ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); - break; - default: - ethtool_link_ksettings_add_link_mode(cmd, supported, TP); - ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); - WARN_ON_ONCE(1); - } - - /* partially does fall through, to also select lower speeds */ - switch (maxspeed) { - case SPEED_25000: - ethtool_link_ksettings_add_link_mode(cmd, supported, - 25000baseSR_Full); - ethtool_link_ksettings_add_link_mode(cmd, advertising, - 25000baseSR_Full); - break; - case SPEED_10000: - ethtool_link_ksettings_add_link_mode(cmd, supported, - 10000baseT_Full); - ethtool_link_ksettings_add_link_mode(cmd, advertising, - 10000baseT_Full); - case SPEED_1000: - ethtool_link_ksettings_add_link_mode(cmd, supported, - 1000baseT_Full); - ethtool_link_ksettings_add_link_mode(cmd, advertising, - 1000baseT_Full); - ethtool_link_ksettings_add_link_mode(cmd, supported, - 1000baseT_Half); - ethtool_link_ksettings_add_link_mode(cmd, advertising, - 1000baseT_Half); - case SPEED_100: - ethtool_link_ksettings_add_link_mode(cmd, supported, - 100baseT_Full); - ethtool_link_ksettings_add_link_mode(cmd, advertising, - 100baseT_Full); - ethtool_link_ksettings_add_link_mode(cmd, supported, - 100baseT_Half); - ethtool_link_ksettings_add_link_mode(cmd, advertising, - 100baseT_Half); - case SPEED_10: - ethtool_link_ksettings_add_link_mode(cmd, supported, - 10baseT_Full); - ethtool_link_ksettings_add_link_mode(cmd, advertising, - 10baseT_Full); - ethtool_link_ksettings_add_link_mode(cmd, supported, - 10baseT_Half); - ethtool_link_ksettings_add_link_mode(cmd, advertising, - 10baseT_Half); - /* end fallthrough */ - break; - default: - ethtool_link_ksettings_add_link_mode(cmd, supported, - 10baseT_Full); - ethtool_link_ksettings_add_link_mode(cmd, advertising, - 10baseT_Full); - ethtool_link_ksettings_add_link_mode(cmd, supported, - 10baseT_Half); - ethtool_link_ksettings_add_link_mode(cmd, advertising, - 10baseT_Half); - WARN_ON_ONCE(1); - } -} - -int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev, - struct ethtool_link_ksettings *cmd) -{ - struct qeth_card *card = netdev->ml_priv; - enum qeth_link_types link_type; - struct carrier_info carrier_info; - int rc; - - if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan)) - link_type = QETH_LINK_TYPE_10GBIT_ETH; - else - link_type = card->info.link_type; - - cmd->base.duplex = DUPLEX_FULL; - cmd->base.autoneg = AUTONEG_ENABLE; - cmd->base.phy_address = 0; - cmd->base.mdio_support = 0; - cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; - cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID; - - switch (link_type) { - case QETH_LINK_TYPE_FAST_ETH: - case QETH_LINK_TYPE_LANE_ETH100: - cmd->base.speed = SPEED_100; - cmd->base.port = PORT_TP; - break; - case QETH_LINK_TYPE_GBIT_ETH: - case QETH_LINK_TYPE_LANE_ETH1000: - cmd->base.speed = SPEED_1000; - cmd->base.port = 
PORT_FIBRE; - break; - case QETH_LINK_TYPE_10GBIT_ETH: - cmd->base.speed = SPEED_10000; - cmd->base.port = PORT_FIBRE; - break; - case QETH_LINK_TYPE_25GBIT_ETH: - cmd->base.speed = SPEED_25000; - cmd->base.port = PORT_FIBRE; - break; - default: - cmd->base.speed = SPEED_10; - cmd->base.port = PORT_TP; - } - qeth_set_cmd_adv_sup(cmd, cmd->base.speed, cmd->base.port); - - /* Check if we can obtain more accurate information. */ - /* If QUERY_CARD_INFO command is not supported or fails, */ - /* just return the heuristics that was filled above. */ - rc = qeth_query_card_info(card, &carrier_info); - if (rc == -EOPNOTSUPP) /* for old hardware, return heuristic */ - return 0; - if (rc) /* report error from the hardware operation */ - return rc; - /* on success, fill in the information got from the hardware */ - - netdev_dbg(netdev, - "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n", - carrier_info.card_type, - carrier_info.port_mode, - carrier_info.port_speed); - - /* Update attributes for which we've obtained more authoritative */ - /* information, leave the rest the way they where filled above. */ - switch (carrier_info.card_type) { - case CARD_INFO_TYPE_1G_COPPER_A: - case CARD_INFO_TYPE_1G_COPPER_B: - cmd->base.port = PORT_TP; - qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port); - break; - case CARD_INFO_TYPE_1G_FIBRE_A: - case CARD_INFO_TYPE_1G_FIBRE_B: - cmd->base.port = PORT_FIBRE; - qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port); - break; - case CARD_INFO_TYPE_10G_FIBRE_A: - case CARD_INFO_TYPE_10G_FIBRE_B: - cmd->base.port = PORT_FIBRE; - qeth_set_cmd_adv_sup(cmd, SPEED_10000, cmd->base.port); - break; - } - - switch (carrier_info.port_mode) { - case CARD_INFO_PORTM_FULLDUPLEX: - cmd->base.duplex = DUPLEX_FULL; - break; - case CARD_INFO_PORTM_HALFDUPLEX: - cmd->base.duplex = DUPLEX_HALF; - break; - } - - switch (carrier_info.port_speed) { - case CARD_INFO_PORTS_10M: - cmd->base.speed = SPEED_10; - break; - case CARD_INFO_PORTS_100M: - cmd->base.speed = SPEED_100; - break; - case CARD_INFO_PORTS_1G: - cmd->base.speed = SPEED_1000; - break; - case CARD_INFO_PORTS_10G: - cmd->base.speed = SPEED_10000; - break; - case CARD_INFO_PORTS_25G: - cmd->base.speed = SPEED_25000; - break; - } - - return 0; -} -EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_link_ksettings); - -/* Callback to handle checksum offload command reply from OSA card. - * Verify that required features have been enabled on the card. - * Return error in hdr->return_code as this value is checked by caller. - * - * Always returns zero to indicate no further messages from the OSA card. 
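/* [Editor's note: annotation, not part of the patch.]
 * get_link_ksettings works in two stages: fill a heuristic based on
 * card->info.link_type first, then try QUERY CARD INFO and only overwrite
 * the fields the hardware actually reports. Condensed control flow
 * (sketch of the code above):
 *
 *	qeth_set_cmd_adv_sup(cmd, cmd->base.speed, cmd->base.port);
 *
 *	rc = qeth_query_card_info(card, &carrier_info);
 *	if (rc == -EOPNOTSUPP)
 *		return 0;       ...old hardware: the heuristic stands
 *	if (rc)
 *		return rc;      ...real failure: report it
 *	...refine port, duplex and speed from carrier_info...
 *
 * This whole function moves verbatim into qeth_ethtool.c later in this
 * diff, which is why it appears twice in this section.
 */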
- */ -static int qeth_ipa_checksum_run_cmd_cb(struct qeth_card *card, - struct qeth_reply *reply, - unsigned long data) +static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply, + unsigned long data) { struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; - struct qeth_checksum_cmd *chksum_cb = - (struct qeth_checksum_cmd *)reply->param; + u32 *features = reply->param; - QETH_CARD_TEXT(card, 4, "chkdoccb"); if (qeth_setassparms_inspect_rc(cmd)) - return 0; + return -EIO; - memset(chksum_cb, 0, sizeof(*chksum_cb)); - if (cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) { - chksum_cb->supported = - cmd->data.setassparms.data.chksum.supported; - QETH_CARD_TEXT_(card, 3, "strt:%x", chksum_cb->supported); - } - if (cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_ENABLE) { - chksum_cb->supported = - cmd->data.setassparms.data.chksum.supported; - chksum_cb->enabled = - cmd->data.setassparms.data.chksum.enabled; - QETH_CARD_TEXT_(card, 3, "supp:%x", chksum_cb->supported); - QETH_CARD_TEXT_(card, 3, "enab:%x", chksum_cb->enabled); - } + *features = cmd->data.setassparms.data.flags_32bit; return 0; } -/* Send command to OSA card and check results. */ -static int qeth_ipa_checksum_run_cmd(struct qeth_card *card, - enum qeth_ipa_funcs ipa_func, - __u16 cmd_code, long data, - struct qeth_checksum_cmd *chksum_cb, - enum qeth_prot_versions prot) +static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype, + enum qeth_prot_versions prot) { - struct qeth_cmd_buffer *iob; - - QETH_CARD_TEXT(card, 4, "chkdocmd"); - iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, - sizeof(__u32), prot); - if (!iob) - return -ENOMEM; - - __ipa_cmd(iob)->data.setassparms.data.flags_32bit = (__u32) data; - return qeth_send_ipa_cmd(card, iob, qeth_ipa_checksum_run_cmd_cb, - chksum_cb); + return qeth_send_simple_setassparms_prot(card, cstype, + IPA_CMD_ASS_STOP, 0, prot); } -static int qeth_send_checksum_on(struct qeth_card *card, int cstype, - enum qeth_prot_versions prot) +static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype, + enum qeth_prot_versions prot) { u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP; - struct qeth_checksum_cmd chksum_cb; + struct qeth_cmd_buffer *iob; + struct qeth_ipa_caps caps; + u32 features; int rc; - if (prot == QETH_PROT_IPV4) + /* some L3 HW requires combined L3+L4 csum offload: */ + if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 && + cstype == IPA_OUTBOUND_CHECKSUM) required_features |= QETH_IPA_CHECKSUM_IP_HDR; - rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_START, 0, - &chksum_cb, prot); - if (!rc) { - if ((required_features & chksum_cb.supported) != - required_features) - rc = -EIO; - else if (!(QETH_IPA_CHECKSUM_LP2LP & chksum_cb.supported) && - cstype == IPA_INBOUND_CHECKSUM) - dev_warn(&card->gdev->dev, - "Hardware checksumming is performed only if %s and its peer use different OSA Express 3 ports\n", - QETH_CARD_IFNAME(card)); - } - if (rc) { - qeth_send_simple_setassparms_prot(card, cstype, - IPA_CMD_ASS_STOP, 0, prot); - dev_warn(&card->gdev->dev, - "Starting HW IPv%d checksumming for %s failed, using SW checksumming\n", - prot, QETH_CARD_IFNAME(card)); + + iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0, + prot); + if (!iob) + return -ENOMEM; + + rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features); + if (rc) return rc; + + if ((required_features & features) != required_features) { + qeth_set_csum_off(card, cstype, prot); + 
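/* [Editor's note: annotation, not part of the patch.]
 * The rewritten checksum setup is a two-step negotiation: send
 * IPA_CMD_ASS_START to learn which checksum types the card supports, verify
 * that the required TCP/UDP (and, for L3 IPv4 TX, also IP header) bits are
 * present, then send IPA_CMD_ASS_ENABLE with exactly those bits and
 * double-check the returned caps. Any failure along the way rolls back via
 * qeth_set_csum_off(). Condensed (sketch):
 *
 *	START  -> features (supported mask)
 *	if ((required & features) != required) -> roll back, -EOPNOTSUPP
 *	ENABLE(required) -> caps
 *	if (!supported(caps) || !enabled(caps)) -> roll back, -EOPNOTSUPP
 */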
return -EOPNOTSUPP; } - rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_ENABLE, - chksum_cb.supported, &chksum_cb, + + iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE, 4, prot); - if (!rc) { - if ((required_features & chksum_cb.enabled) != - required_features) - rc = -EIO; + if (!iob) { + qeth_set_csum_off(card, cstype, prot); + return -ENOMEM; } + + if (features & QETH_IPA_CHECKSUM_LP2LP) + required_features |= QETH_IPA_CHECKSUM_LP2LP; + __ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features; + rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps); if (rc) { - qeth_send_simple_setassparms_prot(card, cstype, - IPA_CMD_ASS_STOP, 0, prot); - dev_warn(&card->gdev->dev, - "Enabling HW IPv%d checksumming for %s failed, using SW checksumming\n", - prot, QETH_CARD_IFNAME(card)); + qeth_set_csum_off(card, cstype, prot); return rc; } + if (!qeth_ipa_caps_supported(&caps, required_features) || + !qeth_ipa_caps_enabled(&caps, required_features)) { + qeth_set_csum_off(card, cstype, prot); + return -EOPNOTSUPP; + } + dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n", cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot); + if (!qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP) && + cstype == IPA_OUTBOUND_CHECKSUM) + dev_warn(&card->gdev->dev, + "Hardware checksumming is performed only if %s and its peer use different OSA Express 3 ports\n", + QETH_CARD_IFNAME(card)); return 0; } static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype, enum qeth_prot_versions prot) { - int rc = (on) ? qeth_send_checksum_on(card, cstype, prot) - : qeth_send_simple_setassparms_prot(card, cstype, - IPA_CMD_ASS_STOP, 0, - prot); - return rc ? -EIO : 0; + return on ? qeth_set_csum_on(card, cstype, prot) : + qeth_set_csum_off(card, cstype, prot); } static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply, @@ -6393,7 +5983,7 @@ static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply, struct qeth_tso_start_data *tso_data = reply->param; if (qeth_setassparms_inspect_rc(cmd)) - return 0; + return -EIO; tso_data->mss = cmd->data.setassparms.data.tso.mss; tso_data->supported = cmd->data.setassparms.data.tso.supported; @@ -6459,10 +6049,7 @@ static int qeth_set_tso_on(struct qeth_card *card, static int qeth_set_ipa_tso(struct qeth_card *card, bool on, enum qeth_prot_versions prot) { - int rc = on ? qeth_set_tso_on(card, prot) : - qeth_set_tso_off(card, prot); - - return rc ? -EIO : 0; + return on ? 
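/* [Editor's note: annotation, not part of the patch.]
 * With the callbacks returning errnos, the on/off wrappers no longer need
 * to squash every failure into -EIO; they simply forward the result:
 *
 *	static int qeth_set_ipa_csum(struct qeth_card *card, bool on,
 *				     int cstype, enum qeth_prot_versions prot)
 *	{
 *		return on ? qeth_set_csum_on(card, cstype, prot) :
 *			    qeth_set_csum_off(card, cstype, prot);
 *	}
 *
 * so callers can distinguish e.g. -EOPNOTSUPP from -ENOMEM. The TSO wrapper
 * in the next hunk gets the identical treatment.
 */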
qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot); } static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on) @@ -6611,6 +6198,33 @@ netdev_features_t qeth_features_check(struct sk_buff *skb, } EXPORT_SYMBOL_GPL(qeth_features_check); +void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + struct qeth_card *card = dev->ml_priv; + struct qeth_qdio_out_q *queue; + unsigned int i; + + QETH_CARD_TEXT(card, 5, "getstat"); + + stats->rx_packets = card->stats.rx_packets; + stats->rx_bytes = card->stats.rx_bytes; + stats->rx_errors = card->stats.rx_errors; + stats->rx_dropped = card->stats.rx_dropped; + stats->multicast = card->stats.rx_multicast; + stats->tx_errors = card->stats.tx_errors; + + for (i = 0; i < card->qdio.no_out_queues; i++) { + queue = card->qdio.out_qs[i]; + + stats->tx_packets += queue->stats.tx_packets; + stats->tx_bytes += queue->stats.tx_bytes; + stats->tx_errors += queue->stats.tx_errors; + stats->tx_dropped += queue->stats.tx_dropped; + stats->tx_carrier_errors += queue->stats.tx_carrier_errors; + } +} +EXPORT_SYMBOL_GPL(qeth_get_stats64); + int qeth_open(struct net_device *dev) { struct qeth_card *card = dev->ml_priv; diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c index 16fc51ad0514..e3f4866c158e 100644 --- a/drivers/s390/net/qeth_core_mpc.c +++ b/drivers/s390/net/qeth_core_mpc.c @@ -125,24 +125,13 @@ unsigned char DM_ACT[] = { unsigned char IPA_PDU_HEADER[] = { 0x00, 0xe0, 0x00, 0x00, 0x77, 0x77, 0x77, 0x77, - 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, - (IPA_PDU_HEADER_SIZE+sizeof(struct qeth_ipa_cmd)) / 256, - (IPA_PDU_HEADER_SIZE+sizeof(struct qeth_ipa_cmd)) % 256, + 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xc1, 0x03, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, - sizeof(struct qeth_ipa_cmd) / 256, - sizeof(struct qeth_ipa_cmd) % 256, - 0x00, - sizeof(struct qeth_ipa_cmd) / 256, - sizeof(struct qeth_ipa_cmd) % 256, - 0x05, - 0x77, 0x77, 0x77, 0x77, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x05, 0x77, 0x77, 0x77, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, - sizeof(struct qeth_ipa_cmd) / 256, - sizeof(struct qeth_ipa_cmd) % 256, - 0x00, 0x00, 0x00, 0x40, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, }; struct ipa_rc_msg { @@ -212,12 +201,10 @@ static const struct ipa_rc_msg qeth_ipa_rc_msg[] = { {IPA_RC_LAN_OFFLINE, "STRTLAN_LAN_DISABLED - LAN offline"}, {IPA_RC_VEPA_TO_VEB_TRANSITION, "Adj. 
switch disabled port mode RR"}, {IPA_RC_INVALID_IP_VERSION2, "Invalid IP version"}, - {IPA_RC_ENOMEM, "Memory problem"}, + /* default for qeth_get_ipa_msg(): */ {IPA_RC_FFFF, "Unknown Error"} }; - - const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc) { int x; diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h index c7fb14a61eed..f8c5d4a9be13 100644 --- a/drivers/s390/net/qeth_core_mpc.h +++ b/drivers/s390/net/qeth_core_mpc.h @@ -21,8 +21,6 @@ extern unsigned char IPA_PDU_HEADER[]; #define QETH_IPA_CMD_DEST_ADDR(buffer) (buffer + 0x2c) -#define IPA_CMD_LENGTH (IPA_PDU_HEADER_SIZE + sizeof(struct qeth_ipa_cmd)) - #define QETH_SEQ_NO_LENGTH 4 #define QETH_MPC_TOKEN_LENGTH 4 #define QETH_MCL_LENGTH 4 @@ -83,6 +81,7 @@ enum qeth_card_types { #define IS_OSD(card) ((card)->info.type == QETH_CARD_TYPE_OSD) #define IS_OSM(card) ((card)->info.type == QETH_CARD_TYPE_OSM) #define IS_OSN(card) ((card)->info.type == QETH_CARD_TYPE_OSN) +#define IS_OSX(card) ((card)->info.type == QETH_CARD_TYPE_OSX) #define IS_VM_NIC(card) ((card)->info.guestlan) #define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18 @@ -231,7 +230,6 @@ enum qeth_ipa_return_codes { IPA_RC_LAN_OFFLINE = 0xe080, IPA_RC_VEPA_TO_VEB_TRANSITION = 0xe090, IPA_RC_INVALID_IP_VERSION2 = 0xf001, - IPA_RC_ENOMEM = 0xfffe, IPA_RC_FFFF = 0xffff }; /* for VNIC Characteristics */ @@ -419,12 +417,6 @@ enum qeth_ipa_checksum_bits { QETH_IPA_CHECKSUM_LP2LP = 0x0020 }; -/* IPA Assist checksum offload reply layout. */ -struct qeth_checksum_cmd { - __u32 supported; - __u32 enabled; -} __packed; - enum qeth_ipa_large_send_caps { QETH_IPA_LARGE_SEND_TCP = 0x00000001, }; @@ -440,7 +432,6 @@ struct qeth_ipacmd_setassparms { union { __u32 flags_32bit; struct qeth_ipa_caps caps; - struct qeth_checksum_cmd chksum; struct qeth_arp_cache_entry arp_entry; struct qeth_arp_query_data query_arp; struct qeth_tso_start_data tso; @@ -834,15 +825,10 @@ enum qeth_ipa_arp_return_codes { extern const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc); extern const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd); -#define QETH_SETASS_BASE_LEN (IPA_PDU_HEADER_SIZE + \ - sizeof(struct qeth_ipacmd_hdr) + \ - sizeof(struct qeth_ipacmd_setassparms_hdr)) #define QETH_SETADP_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \ sizeof(struct qeth_ipacmd_setadpparms_hdr)) #define QETH_SNMP_SETADP_CMDLENGTH 16 -#define QETH_ARP_DATA_SIZE 3968 -#define QETH_ARP_CMD_LEN (QETH_ARP_DATA_SIZE + 8) /* Helper functions */ #define IS_IPA_REPLY(cmd) ((cmd->hdr.initiator == IPA_CMD_INITIATOR_HOST) || \ (cmd->hdr.initiator == IPA_CMD_INITIATOR_OSA_REPLY)) diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c index 30f61608fa22..8b223cc2c19b 100644 --- a/drivers/s390/net/qeth_core_sys.c +++ b/drivers/s390/net/qeth_core_sys.c @@ -316,7 +316,7 @@ static ssize_t qeth_dev_recover_store(struct device *dev, if (!card) return -EINVAL; - if (card->state != CARD_STATE_UP) + if (!qeth_card_hw_is_reachable(card)) return -EPERM; i = simple_strtoul(buf, &tmp, 16); @@ -336,35 +336,36 @@ static ssize_t qeth_dev_performance_stats_show(struct device *dev, if (!card) return -EINVAL; - return sprintf(buf, "%i\n", card->options.performance_stats ? 
1:0); + return sprintf(buf, "1\n"); } static ssize_t qeth_dev_performance_stats_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); - char *tmp; - int i, rc = 0; + struct qeth_qdio_out_q *queue; + unsigned int i; + bool reset; + int rc; if (!card) return -EINVAL; - mutex_lock(&card->conf_mutex); - i = simple_strtoul(buf, &tmp, 16); - if ((i == 0) || (i == 1)) { - if (i == card->options.performance_stats) - goto out; - card->options.performance_stats = i; - if (i == 0) - memset(&card->perf_stats, 0, - sizeof(struct qeth_perf_stats)); - card->perf_stats.initial_rx_packets = card->stats.rx_packets; - card->perf_stats.initial_tx_packets = card->stats.tx_packets; - } else - rc = -EINVAL; -out: - mutex_unlock(&card->conf_mutex); - return rc ? rc : count; + rc = kstrtobool(buf, &reset); + if (rc) + return rc; + + if (reset) { + memset(&card->stats, 0, sizeof(card->stats)); + for (i = 0; i < card->qdio.no_out_queues; i++) { + queue = card->qdio.out_qs[i]; + if (!queue) + break; + memset(&queue->stats, 0, sizeof(queue->stats)); + } + } + + return count; } static DEVICE_ATTR(performance_stats, 0644, qeth_dev_performance_stats_show, diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c new file mode 100644 index 000000000000..93a53fed4cf8 --- /dev/null +++ b/drivers/s390/net/qeth_ethtool.c @@ -0,0 +1,370 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright IBM Corp. 2018 + */ + +#define KMSG_COMPONENT "qeth" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/ethtool.h> +#include "qeth_core.h" + + +#define QETH_TXQ_STAT(_name, _stat) { \ + .name = _name, \ + .offset = offsetof(struct qeth_out_q_stats, _stat) \ +} + +#define QETH_CARD_STAT(_name, _stat) { \ + .name = _name, \ + .offset = offsetof(struct qeth_card_stats, _stat) \ +} + +struct qeth_stats { + char name[ETH_GSTRING_LEN]; + unsigned int offset; +}; + +static const struct qeth_stats txq_stats[] = { + QETH_TXQ_STAT("IO buffers", bufs), + QETH_TXQ_STAT("IO buffer elements", buf_elements), + QETH_TXQ_STAT("packed IO buffers", bufs_pack), + QETH_TXQ_STAT("skbs", tx_packets), + QETH_TXQ_STAT("packed skbs", skbs_pack), + QETH_TXQ_STAT("SG skbs", skbs_sg), + QETH_TXQ_STAT("HW csum skbs", skbs_csum), + QETH_TXQ_STAT("TSO skbs", skbs_tso), + QETH_TXQ_STAT("linearized skbs", skbs_linearized), + QETH_TXQ_STAT("linearized+error skbs", skbs_linearized_fail), + QETH_TXQ_STAT("TSO bytes", tso_bytes), + QETH_TXQ_STAT("Packing mode switches", packing_mode_switch), +}; + +static const struct qeth_stats card_stats[] = { + QETH_CARD_STAT("rx0 IO buffers", rx_bufs), + QETH_CARD_STAT("rx0 HW csum skbs", rx_skb_csum), + QETH_CARD_STAT("rx0 SG skbs", rx_sg_skbs), + QETH_CARD_STAT("rx0 SG page frags", rx_sg_frags), + QETH_CARD_STAT("rx0 SG page allocs", rx_sg_alloc_page), +}; + +#define TXQ_STATS_LEN ARRAY_SIZE(txq_stats) +#define CARD_STATS_LEN ARRAY_SIZE(card_stats) + +static void qeth_add_stat_data(u64 **dst, void *src, + const struct qeth_stats stats[], + unsigned int size) +{ + unsigned int i; + char *stat; + + for (i = 0; i < size; i++) { + stat = (char *)src + stats[i].offset; + **dst = *(u64 *)stat; + (*dst)++; + } +} + +static void qeth_add_stat_strings(u8 **data, const char *prefix, + const struct qeth_stats stats[], + unsigned int size) +{ + unsigned int i; + + for (i = 0; i < size; i++) { + snprintf(*data, ETH_GSTRING_LEN, "%s%s", prefix, stats[i].name); + *data += ETH_GSTRING_LEN; + } +} + +static int qeth_get_sset_count(struct 
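/* [Editor's note: annotation, not part of the patch.]
 * Since the counters are now unconditionally maintained, the
 * performance_stats sysfs attribute changes meaning: reading it always
 * yields "1", and writing a true value (parsed with kstrtobool()) resets
 * the card-level and per-TX-queue counters via memset(). Usage sketch from
 * the shell, assuming the usual qeth sysfs device layout (path is
 * illustrative):
 *
 *	echo 1 > /sys/devices/qeth/<busid>/performance_stats
 */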
net_device *dev, int stringset) +{ + struct qeth_card *card = dev->ml_priv; + + switch (stringset) { + case ETH_SS_STATS: + return CARD_STATS_LEN + + card->qdio.no_out_queues * TXQ_STATS_LEN; + default: + return -EINVAL; + } +} + +static void qeth_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct qeth_card *card = dev->ml_priv; + unsigned int i; + + qeth_add_stat_data(&data, &card->stats, card_stats, CARD_STATS_LEN); + for (i = 0; i < card->qdio.no_out_queues; i++) + qeth_add_stat_data(&data, &card->qdio.out_qs[i]->stats, + txq_stats, TXQ_STATS_LEN); +} + +static void qeth_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *param) +{ + struct qeth_card *card = dev->ml_priv; + + param->rx_max_pending = QDIO_MAX_BUFFERS_PER_Q; + param->rx_mini_max_pending = 0; + param->rx_jumbo_max_pending = 0; + param->tx_max_pending = QDIO_MAX_BUFFERS_PER_Q; + + param->rx_pending = card->qdio.in_buf_pool.buf_count; + param->rx_mini_pending = 0; + param->rx_jumbo_pending = 0; + param->tx_pending = QDIO_MAX_BUFFERS_PER_Q; +} + +static void qeth_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + struct qeth_card *card = dev->ml_priv; + char prefix[ETH_GSTRING_LEN] = ""; + unsigned int i; + + switch (stringset) { + case ETH_SS_STATS: + qeth_add_stat_strings(&data, prefix, card_stats, + CARD_STATS_LEN); + for (i = 0; i < card->qdio.no_out_queues; i++) { + snprintf(prefix, ETH_GSTRING_LEN, "tx%u ", i); + qeth_add_stat_strings(&data, prefix, txq_stats, + TXQ_STATS_LEN); + } + break; + default: + WARN_ON(1); + break; + } +} + +static void qeth_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct qeth_card *card = dev->ml_priv; + + strlcpy(info->driver, IS_LAYER2(card) ? "qeth_l2" : "qeth_l3", + sizeof(info->driver)); + strlcpy(info->version, "1.0", sizeof(info->version)); + strlcpy(info->fw_version, card->info.mcl_level, + sizeof(info->fw_version)); + snprintf(info->bus_info, sizeof(info->bus_info), "%s/%s/%s", + CARD_RDEV_ID(card), CARD_WDEV_ID(card), CARD_DDEV_ID(card)); +} + +/* Helper function to fill 'advertising' and 'supported' which are the same. */ +/* Autoneg and full-duplex are supported and advertised unconditionally. */ +/* Always advertise and support all speeds up to specified, and only one */ +/* specified port type. 
*/ +static void qeth_set_cmd_adv_sup(struct ethtool_link_ksettings *cmd, + int maxspeed, int porttype) +{ + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising); + + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); + + switch (porttype) { + case PORT_TP: + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); + break; + case PORT_FIBRE: + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + break; + default: + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); + WARN_ON_ONCE(1); + } + + /* partially does fall through, to also select lower speeds */ + switch (maxspeed) { + case SPEED_25000: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 25000baseSR_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 25000baseSR_Full); + break; + case SPEED_10000: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseT_Full); + /* fall through */ + case SPEED_1000: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, + 1000baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseT_Half); + /* fall through */ + case SPEED_100: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 100baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 100baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, + 100baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 100baseT_Half); + /* fall through */ + case SPEED_10: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10baseT_Half); + break; + default: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10baseT_Half); + WARN_ON_ONCE(1); + } +} + + +static int qeth_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct qeth_card *card = netdev->ml_priv; + enum qeth_link_types link_type; + struct carrier_info carrier_info; + int rc; + + if (IS_IQD(card) || IS_VM_NIC(card)) + link_type = QETH_LINK_TYPE_10GBIT_ETH; + else + link_type = card->info.link_type; + + cmd->base.duplex = DUPLEX_FULL; + cmd->base.autoneg = AUTONEG_ENABLE; + cmd->base.phy_address = 0; + cmd->base.mdio_support = 0; + cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; + cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID; + + switch (link_type) { + case QETH_LINK_TYPE_FAST_ETH: + case QETH_LINK_TYPE_LANE_ETH100: + cmd->base.speed = SPEED_100; + cmd->base.port = PORT_TP; + break; + case QETH_LINK_TYPE_GBIT_ETH: + case QETH_LINK_TYPE_LANE_ETH1000: + cmd->base.speed = SPEED_1000; + cmd->base.port = 
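/* [Editor's note: annotation, not part of the patch.]
 * The speed-mask builder is carried over, but the new copy annotates each
 * cascading case with an explicit fall-through comment where the old copy
 * relied on implicit fall-through (which compilers now warn about). The
 * shape, reduced to one link mode per speed (helper name hypothetical,
 * the ethtool macro is the one used above):
 */
static void example_speed_mask(struct ethtool_link_ksettings *cmd,
			       int maxspeed)
{
	switch (maxspeed) {
	case SPEED_10000:
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     10000baseT_Full);
		/* fall through */
	case SPEED_1000:
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     1000baseT_Full);
		/* fall through */
	case SPEED_100:
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     100baseT_Full);
	}
}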
PORT_FIBRE; + break; + case QETH_LINK_TYPE_10GBIT_ETH: + cmd->base.speed = SPEED_10000; + cmd->base.port = PORT_FIBRE; + break; + case QETH_LINK_TYPE_25GBIT_ETH: + cmd->base.speed = SPEED_25000; + cmd->base.port = PORT_FIBRE; + break; + default: + cmd->base.speed = SPEED_10; + cmd->base.port = PORT_TP; + } + qeth_set_cmd_adv_sup(cmd, cmd->base.speed, cmd->base.port); + + /* Check if we can obtain more accurate information. */ + /* If QUERY_CARD_INFO command is not supported or fails, */ + /* just return the heuristics that was filled above. */ + rc = qeth_query_card_info(card, &carrier_info); + if (rc == -EOPNOTSUPP) /* for old hardware, return heuristic */ + return 0; + if (rc) /* report error from the hardware operation */ + return rc; + /* on success, fill in the information got from the hardware */ + + netdev_dbg(netdev, + "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n", + carrier_info.card_type, + carrier_info.port_mode, + carrier_info.port_speed); + + /* Update attributes for which we've obtained more authoritative */ + /* information, leave the rest the way they where filled above. */ + switch (carrier_info.card_type) { + case CARD_INFO_TYPE_1G_COPPER_A: + case CARD_INFO_TYPE_1G_COPPER_B: + cmd->base.port = PORT_TP; + qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port); + break; + case CARD_INFO_TYPE_1G_FIBRE_A: + case CARD_INFO_TYPE_1G_FIBRE_B: + cmd->base.port = PORT_FIBRE; + qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port); + break; + case CARD_INFO_TYPE_10G_FIBRE_A: + case CARD_INFO_TYPE_10G_FIBRE_B: + cmd->base.port = PORT_FIBRE; + qeth_set_cmd_adv_sup(cmd, SPEED_10000, cmd->base.port); + break; + } + + switch (carrier_info.port_mode) { + case CARD_INFO_PORTM_FULLDUPLEX: + cmd->base.duplex = DUPLEX_FULL; + break; + case CARD_INFO_PORTM_HALFDUPLEX: + cmd->base.duplex = DUPLEX_HALF; + break; + } + + switch (carrier_info.port_speed) { + case CARD_INFO_PORTS_10M: + cmd->base.speed = SPEED_10; + break; + case CARD_INFO_PORTS_100M: + cmd->base.speed = SPEED_100; + break; + case CARD_INFO_PORTS_1G: + cmd->base.speed = SPEED_1000; + break; + case CARD_INFO_PORTS_10G: + cmd->base.speed = SPEED_10000; + break; + case CARD_INFO_PORTS_25G: + cmd->base.speed = SPEED_25000; + break; + } + + return 0; +} + +const struct ethtool_ops qeth_ethtool_ops = { + .get_link = ethtool_op_get_link, + .get_ringparam = qeth_get_ringparam, + .get_strings = qeth_get_strings, + .get_ethtool_stats = qeth_get_ethtool_stats, + .get_sset_count = qeth_get_sset_count, + .get_drvinfo = qeth_get_drvinfo, + .get_link_ksettings = qeth_get_link_ksettings, +}; + +const struct ethtool_ops qeth_osn_ethtool_ops = { + .get_strings = qeth_get_strings, + .get_ethtool_stats = qeth_get_ethtool_stats, + .get_sset_count = qeth_get_sset_count, + .get_drvinfo = qeth_get_drvinfo, +}; diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 82f50cc30b0a..2c9714215775 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -35,7 +35,7 @@ static void qeth_l2_vnicc_init(struct qeth_card *card); static bool qeth_l2_vnicc_recover_timeout(struct qeth_card *card, u32 vnicc, u32 *timeout); -static int qeth_setdelmac_makerc(struct qeth_card *card, int retcode) +static int qeth_l2_setdelmac_makerc(struct qeth_card *card, u16 retcode) { int rc; @@ -62,9 +62,6 @@ static int qeth_setdelmac_makerc(struct qeth_card *card, int retcode) case IPA_RC_L2_MAC_NOT_FOUND: rc = -ENOENT; break; - case -ENOMEM: - rc = -ENOMEM; - break; default: rc = -EIO; break; @@ -72,6 
+69,15 @@ static int qeth_setdelmac_makerc(struct qeth_card *card, int retcode) return rc; } +static int qeth_l2_send_setdelmac_cb(struct qeth_card *card, + struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + + return qeth_l2_setdelmac_makerc(card, cmd->hdr.return_code); +} + static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac, enum qeth_ipa_cmds ipacmd) { @@ -85,8 +91,7 @@ static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac, cmd = __ipa_cmd(iob); cmd->data.setdelmac.mac_length = ETH_ALEN; ether_addr_copy(cmd->data.setdelmac.mac, mac); - return qeth_setdelmac_makerc(card, qeth_send_ipa_cmd(card, iob, - NULL, NULL)); + return qeth_send_ipa_cmd(card, iob, qeth_l2_send_setdelmac_cb, NULL); } static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) @@ -169,9 +174,9 @@ static int qeth_l2_get_cast_type(struct qeth_card *card, struct sk_buff *skb) return RTN_UNICAST; } -static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, - struct sk_buff *skb, int ipv, int cast_type, - unsigned int data_len) +static void qeth_l2_fill_header(struct qeth_qdio_out_q *queue, + struct qeth_hdr *hdr, struct sk_buff *skb, + int ipv, int cast_type, unsigned int data_len) { struct vlan_ethhdr *veth = vlan_eth_hdr(skb); @@ -183,8 +188,7 @@ static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2; if (skb->ip_summed == CHECKSUM_PARTIAL) { qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv); - if (card->options.performance_stats) - card->perf_stats.tx_csum++; + QETH_TXQ_STAT_INC(queue, skbs_csum); } } @@ -205,7 +209,7 @@ static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, } } -static int qeth_setdelvlan_makerc(struct qeth_card *card, int retcode) +static int qeth_l2_setdelvlan_makerc(struct qeth_card *card, u16 retcode) { if (retcode) QETH_CARD_TEXT_(card, 2, "err%04x", retcode); @@ -221,8 +225,6 @@ static int qeth_setdelvlan_makerc(struct qeth_card *card, int retcode) return -ENOENT; case IPA_RC_L2_VLAN_ID_NOT_ALLOWED: return -EPERM; - case -ENOMEM: - return -ENOMEM; default: return -EIO; } @@ -240,9 +242,8 @@ static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card, cmd->data.setdelvlan.vlan_id, CARD_DEVID(card), cmd->hdr.return_code); QETH_CARD_TEXT_(card, 2, "L2VL%4x", cmd->hdr.command); - QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code); } - return 0; + return qeth_l2_setdelvlan_makerc(card, cmd->hdr.return_code); } static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i, @@ -257,8 +258,7 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i, return -ENOMEM; cmd = __ipa_cmd(iob); cmd->data.setdelvlan.vlan_id = i; - return qeth_setdelvlan_makerc(card, qeth_send_ipa_cmd(card, iob, - qeth_l2_send_setdelvlan_cb, NULL)); + return qeth_send_ipa_cmd(card, iob, qeth_l2_send_setdelvlan_cb, NULL); } static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, @@ -319,6 +319,8 @@ static void qeth_l2_stop_card(struct qeth_card *card, int recovery_mode) qeth_clear_cmd_buffers(&card->read); qeth_clear_cmd_buffers(&card->write); } + + flush_workqueue(card->event_wq); } static int qeth_l2_process_inbound_buffer(struct qeth_card *card, @@ -366,8 +368,8 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card, } work_done++; budget--; - card->stats.rx_packets++; - card->stats.rx_bytes += len; + QETH_CARD_STAT_INC(card, rx_packets); + QETH_CARD_STAT_ADD(card, rx_bytes, len); } return 
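/* [Editor's note: annotation, not part of the patch.]
 * The L2 code follows the same errno pattern: qeth_l2_setdelmac_makerc()
 * now takes the raw u16 IPA code (the -ENOMEM passthrough case is gone
 * because send failures no longer arrive here), and it runs from a reply
 * callback instead of being wrapped around qeth_send_ipa_cmd()'s return
 * value:
 *
 *	static int qeth_l2_send_setdelmac_cb(struct qeth_card *card,
 *					     struct qeth_reply *reply,
 *					     unsigned long data)
 *	{
 *		struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
 *
 *		return qeth_l2_setdelmac_makerc(card, cmd->hdr.return_code);
 *	}
 *
 * The setdelvlan path two hunks down is converted identically.
 */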
work_done; } @@ -391,7 +393,7 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card) if (!IS_OSN(card)) { rc = qeth_setadpparms_change_macaddr(card); - if (!rc && is_valid_ether_addr(card->dev->dev_addr)) + if (!rc) goto out; QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %x: %#x\n", CARD_DEVID(card), rc); @@ -423,7 +425,7 @@ static int qeth_l2_validate_addr(struct net_device *dev) { struct qeth_card *card = dev->ml_priv; - if (IS_OSN(card) || (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) + if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED) return eth_validate_addr(dev); QETH_CARD_TEXT(card, 4, "nomacadr"); @@ -439,9 +441,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p) QETH_CARD_TEXT(card, 3, "setmac"); - if (card->info.type == QETH_CARD_TYPE_OSN || - card->info.type == QETH_CARD_TYPE_OSM || - card->info.type == QETH_CARD_TYPE_OSX) { + if (IS_OSM(card) || IS_OSX(card)) { QETH_CARD_TEXT(card, 3, "setmcTYP"); return -EOPNOTSUPP; } @@ -535,9 +535,6 @@ static void qeth_l2_set_rx_mode(struct net_device *dev) int i; int rc; - if (card->info.type == QETH_CARD_TYPE_OSN) - return; - QETH_CARD_TEXT(card, 3, "setmulti"); spin_lock_bh(&card->mclock); @@ -623,17 +620,13 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, int tx_bytes = skb->len; int rc; + queue = qeth_get_tx_queue(card, skb, ipv, cast_type); + if (card->state != CARD_STATE_UP) { - card->stats.tx_carrier_errors++; + QETH_TXQ_STAT_INC(queue, tx_carrier_errors); goto tx_drop; } - queue = qeth_get_tx_queue(card, skb, ipv, cast_type); - - if (card->options.performance_stats) { - card->perf_stats.outbound_cnt++; - card->perf_stats.outbound_start_time = qeth_get_micros(); - } netif_stop_queue(dev); if (IS_OSN(card)) @@ -643,11 +636,8 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, qeth_l2_fill_header); if (!rc) { - card->stats.tx_packets++; - card->stats.tx_bytes += tx_bytes; - if (card->options.performance_stats) - card->perf_stats.outbound_time += qeth_get_micros() - - card->perf_stats.outbound_start_time; + QETH_TXQ_STAT_INC(queue, tx_packets); + QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes); netif_wake_queue(dev); return NETDEV_TX_OK; } else if (rc == -EBUSY) { @@ -655,8 +645,8 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, } /* else fall through */ tx_drop: - card->stats.tx_dropped++; - card->stats.tx_errors++; + QETH_TXQ_STAT_INC(queue, tx_dropped); + QETH_TXQ_STAT_INC(queue, tx_errors); dev_kfree_skb_any(skb); netif_wake_queue(dev); return NETDEV_TX_OK; @@ -695,30 +685,16 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev) if (cgdev->state == CCWGROUP_ONLINE) qeth_l2_set_offline(cgdev); + + cancel_work_sync(&card->close_dev_work); if (qeth_netdev_is_registered(card->dev)) unregister_netdev(card->dev); } -static const struct ethtool_ops qeth_l2_ethtool_ops = { - .get_link = ethtool_op_get_link, - .get_strings = qeth_core_get_strings, - .get_ethtool_stats = qeth_core_get_ethtool_stats, - .get_sset_count = qeth_core_get_sset_count, - .get_drvinfo = qeth_core_get_drvinfo, - .get_link_ksettings = qeth_core_ethtool_get_link_ksettings, -}; - -static const struct ethtool_ops qeth_l2_osn_ops = { - .get_strings = qeth_core_get_strings, - .get_ethtool_stats = qeth_core_get_ethtool_stats, - .get_sset_count = qeth_core_get_sset_count, - .get_drvinfo = qeth_core_get_drvinfo, -}; - static const struct net_device_ops qeth_l2_netdev_ops = { .ndo_open = qeth_open, .ndo_stop = qeth_stop, - .ndo_get_stats = qeth_get_stats, + 
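/* [Editor's note: annotation, not part of the patch.]
 * TX counters move from the single card->stats into per-queue queue->stats
 * (QETH_TXQ_STAT_INC/_ADD), which is why qeth_get_stats64() earlier in this
 * diff sums over card->qdio.out_qs[] when filling rtnl_link_stats64. In the
 * xmit hot path that is just (sketch of the code above):
 *
 *	QETH_TXQ_STAT_INC(queue, tx_packets);
 *	QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes);
 *
 * Note the queue lookup now happens before the CARD_STATE_UP check, so the
 * tx_carrier_errors bump has a queue to land on.
 */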
.ndo_get_stats64 = qeth_get_stats64, .ndo_start_xmit = qeth_l2_hard_start_xmit, .ndo_features_check = qeth_features_check, .ndo_validate_addr = qeth_l2_validate_addr, @@ -732,20 +708,29 @@ static const struct net_device_ops qeth_l2_netdev_ops = { .ndo_set_features = qeth_set_features }; +static const struct net_device_ops qeth_osn_netdev_ops = { + .ndo_open = qeth_open, + .ndo_stop = qeth_stop, + .ndo_get_stats64 = qeth_get_stats64, + .ndo_start_xmit = qeth_l2_hard_start_xmit, + .ndo_validate_addr = eth_validate_addr, + .ndo_tx_timeout = qeth_tx_timeout, +}; + static int qeth_l2_setup_netdev(struct qeth_card *card, bool carrier_ok) { int rc; - card->dev->priv_flags |= IFF_UNICAST_FLT; - card->dev->netdev_ops = &qeth_l2_netdev_ops; - if (card->info.type == QETH_CARD_TYPE_OSN) { - card->dev->ethtool_ops = &qeth_l2_osn_ops; + if (IS_OSN(card)) { + card->dev->netdev_ops = &qeth_osn_netdev_ops; card->dev->flags |= IFF_NOARP; - } else { - card->dev->ethtool_ops = &qeth_l2_ethtool_ops; - card->dev->needed_headroom = sizeof(struct qeth_hdr); + goto add_napi; } + card->dev->needed_headroom = sizeof(struct qeth_hdr); + card->dev->netdev_ops = &qeth_l2_netdev_ops; + card->dev->priv_flags |= IFF_UNICAST_FLT; + if (IS_OSM(card)) { card->dev->features |= NETIF_F_VLAN_CHALLENGED; } else { @@ -786,6 +771,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card, bool carrier_ok) PAGE_SIZE * (QDIO_MAX_ELEMENTS_PER_BUFFER - 1)); } +add_napi: netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT); rc = register_netdev(card->dev); if (!rc && carrier_ok) @@ -1120,20 +1106,14 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len, } static int qeth_osn_send_ipa_cmd(struct qeth_card *card, - struct qeth_cmd_buffer *iob, int data_len) + struct qeth_cmd_buffer *iob) { - u16 s1, s2; + u16 length; QETH_CARD_TEXT(card, 4, "osndipa"); - qeth_prepare_ipa_cmd(card, iob); - s1 = (u16)(IPA_PDU_HEADER_SIZE + data_len); - s2 = (u16)data_len; - memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2); - memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2); - memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2); - memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2); - return qeth_osn_send_control_data(card, s1, iob); + memcpy(&length, QETH_IPA_PDU_LEN_TOTAL(iob->data), 2); + return qeth_osn_send_control_data(card, length, iob); } int qeth_osn_assist(struct net_device *dev, void *data, int data_len) @@ -1150,8 +1130,9 @@ int qeth_osn_assist(struct net_device *dev, void *data, int data_len) if (!qeth_card_hw_is_reachable(card)) return -ENODEV; iob = qeth_wait_for_buffer(&card->write); + qeth_prepare_ipa_cmd(card, iob, (u16) data_len); memcpy(__ipa_cmd(iob), data, data_len); - return qeth_osn_send_ipa_cmd(card, iob, data_len); + return qeth_osn_send_ipa_cmd(card, iob); } EXPORT_SYMBOL(qeth_osn_assist); @@ -1330,7 +1311,7 @@ static void qeth_bridge_state_change(struct qeth_card *card, data->card = card; memcpy(&data->qports, qports, sizeof(struct qeth_sbp_state_change) + extrasize); - queue_work(qeth_wq, &data->worker); + queue_work(card->event_wq, &data->worker); } struct qeth_bridge_host_data { @@ -1402,14 +1383,12 @@ static void qeth_bridge_host_event(struct qeth_card *card, data->card = card; memcpy(&data->hostevs, hostevs, sizeof(struct qeth_ipacmd_addr_change) + extrasize); - queue_work(qeth_wq, &data->worker); + queue_work(card->event_wq, &data->worker); } /* SETBRIDGEPORT support; sending commands */ struct _qeth_sbp_cbctl { - u16 ipa_rc; - u16 cmd_rc; union { u32 supported; struct { @@ -1419,23 +1398,21 @@ 
struct _qeth_sbp_cbctl { } data; }; -/** - * qeth_bridgeport_makerc() - derive "traditional" error from hardware codes. - * @card: qeth_card structure pointer, for debug messages. - * @cbctl: state structure with hardware return codes. - * @setcmd: IPA command code - * - * Returns negative errno-compatible error indication or 0 on success. - */ static int qeth_bridgeport_makerc(struct qeth_card *card, - struct _qeth_sbp_cbctl *cbctl, enum qeth_ipa_sbp_cmd setcmd) + struct qeth_ipa_cmd *cmd) { + struct qeth_ipacmd_setbridgeport *sbp = &cmd->data.sbp; + enum qeth_ipa_sbp_cmd setcmd = sbp->hdr.command_code; + u16 ipa_rc = cmd->hdr.return_code; + u16 sbp_rc = sbp->hdr.return_code; int rc; - int is_iqd = (card->info.type == QETH_CARD_TYPE_IQD); - if ((is_iqd && (cbctl->ipa_rc == IPA_RC_SUCCESS)) || - (!is_iqd && (cbctl->ipa_rc == cbctl->cmd_rc))) - switch (cbctl->cmd_rc) { + if (ipa_rc == IPA_RC_SUCCESS && sbp_rc == IPA_RC_SUCCESS) + return 0; + + if ((IS_IQD(card) && ipa_rc == IPA_RC_SUCCESS) || + (!IS_IQD(card) && ipa_rc == sbp_rc)) { + switch (sbp_rc) { case IPA_RC_SUCCESS: rc = 0; break; @@ -1499,8 +1476,8 @@ static int qeth_bridgeport_makerc(struct qeth_card *card, default: rc = -EIO; } - else - switch (cbctl->ipa_rc) { + } else { + switch (ipa_rc) { case IPA_RC_NOTSUPP: rc = -EOPNOTSUPP; break; @@ -1510,10 +1487,11 @@ static int qeth_bridgeport_makerc(struct qeth_card *card, default: rc = -EIO; } + } if (rc) { - QETH_CARD_TEXT_(card, 2, "SBPi%04x", cbctl->ipa_rc); - QETH_CARD_TEXT_(card, 2, "SBPc%04x", cbctl->cmd_rc); + QETH_CARD_TEXT_(card, 2, "SBPi%04x", ipa_rc); + QETH_CARD_TEXT_(card, 2, "SBPc%04x", sbp_rc); } return rc; } @@ -1545,15 +1523,15 @@ static int qeth_bridgeport_query_support_cb(struct qeth_card *card, { struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param; + int rc; + QETH_CARD_TEXT(card, 2, "brqsupcb"); - cbctl->ipa_rc = cmd->hdr.return_code; - cbctl->cmd_rc = cmd->data.sbp.hdr.return_code; - if ((cbctl->ipa_rc == 0) && (cbctl->cmd_rc == 0)) { - cbctl->data.supported = - cmd->data.sbp.data.query_cmds_supp.supported_cmds; - } else { - cbctl->data.supported = 0; - } + rc = qeth_bridgeport_makerc(card, cmd); + if (rc) + return rc; + + cbctl->data.supported = + cmd->data.sbp.data.query_cmds_supp.supported_cmds; return 0; } @@ -1574,12 +1552,11 @@ static void qeth_bridgeport_query_support(struct qeth_card *card) sizeof(struct qeth_sbp_query_cmds_supp)); if (!iob) return; + if (qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_support_cb, - (void *)&cbctl) || - qeth_bridgeport_makerc(card, &cbctl, - IPA_SBP_QUERY_COMMANDS_SUPPORTED)) { - /* non-zero makerc signifies failure, and produce messages */ + &cbctl)) { card->options.sbp.role = QETH_SBP_ROLE_NONE; + card->options.sbp.supported_funcs = 0; return; } card->options.sbp.supported_funcs = cbctl.data.supported; @@ -1591,16 +1568,16 @@ static int qeth_bridgeport_query_ports_cb(struct qeth_card *card, struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; struct qeth_sbp_query_ports *qports = &cmd->data.sbp.data.query_ports; struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param; + int rc; QETH_CARD_TEXT(card, 2, "brqprtcb"); - cbctl->ipa_rc = cmd->hdr.return_code; - cbctl->cmd_rc = cmd->data.sbp.hdr.return_code; - if ((cbctl->ipa_rc != 0) || (cbctl->cmd_rc != 0)) - return 0; + rc = qeth_bridgeport_makerc(card, cmd); + if (rc) + return rc; + if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) { - cbctl->cmd_rc = 0xffff; 
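/* [Editor's note: annotation, not part of the patch.]
 * The bridgeport helpers previously copied both return codes into
 * _qeth_sbp_cbctl and decoded them after the fact; qeth_bridgeport_makerc()
 * now reads them straight out of the command and runs inside the callback,
 * so the ipa_rc/cmd_rc fields can be dropped. Decode order (sketch):
 *
 *	ipa_rc = cmd->hdr.return_code;
 *	sbp_rc = cmd->data.sbp.hdr.return_code;
 *	both IPA_RC_SUCCESS             ->  0
 *	sbp_rc authoritative            ->  map sbp_rc to an errno
 *	otherwise                       ->  map ipa_rc to an errno
 *
 * ("authoritative" meaning: IQD with ipa_rc == IPA_RC_SUCCESS, or non-IQD
 * with ipa_rc == sbp_rc, per the code below.)
 */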
QETH_CARD_TEXT_(card, 2, "SBPs%04x", qports->entry_length); - return 0; + return -EINVAL; } /* first entry contains the state of the local port */ if (qports->num_entries > 0) { @@ -1625,7 +1602,6 @@ static int qeth_bridgeport_query_ports_cb(struct qeth_card *card, int qeth_bridgeport_query_ports(struct qeth_card *card, enum qeth_sbp_roles *role, enum qeth_sbp_states *state) { - int rc = 0; struct qeth_cmd_buffer *iob; struct _qeth_sbp_cbctl cbctl = { .data = { @@ -1642,22 +1618,18 @@ int qeth_bridgeport_query_ports(struct qeth_card *card, iob = qeth_sbp_build_cmd(card, IPA_SBP_QUERY_BRIDGE_PORTS, 0); if (!iob) return -ENOMEM; - rc = qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_ports_cb, - (void *)&cbctl); - if (rc < 0) - return rc; - return qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS); + + return qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_ports_cb, + &cbctl); } static int qeth_bridgeport_set_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; - struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param; + QETH_CARD_TEXT(card, 2, "brsetrcb"); - cbctl->ipa_rc = cmd->hdr.return_code; - cbctl->cmd_rc = cmd->data.sbp.hdr.return_code; - return 0; + return qeth_bridgeport_makerc(card, cmd); } /** @@ -1669,10 +1641,8 @@ static int qeth_bridgeport_set_cb(struct qeth_card *card, */ int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role) { - int rc = 0; int cmdlength; struct qeth_cmd_buffer *iob; - struct _qeth_sbp_cbctl cbctl; enum qeth_ipa_sbp_cmd setcmd; QETH_CARD_TEXT(card, 2, "brsetrol"); @@ -1697,11 +1667,8 @@ int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role) iob = qeth_sbp_build_cmd(card, setcmd, cmdlength); if (!iob) return -ENOMEM; - rc = qeth_send_ipa_cmd(card, iob, qeth_bridgeport_set_cb, - (void *)&cbctl); - if (rc < 0) - return rc; - return qeth_bridgeport_makerc(card, &cbctl, setcmd); + + return qeth_send_ipa_cmd(card, iob, qeth_bridgeport_set_cb, NULL); } /** @@ -1805,7 +1772,7 @@ static bool qeth_bridgeport_is_in_use(struct qeth_card *card) /* VNIC Characteristics support */ /* handle VNICC IPA command return codes; convert to error codes */ -static int qeth_l2_vnicc_makerc(struct qeth_card *card, int ipa_rc) +static int qeth_l2_vnicc_makerc(struct qeth_card *card, u16 ipa_rc) { int rc; @@ -1863,7 +1830,7 @@ static int qeth_l2_vnicc_request_cb(struct qeth_card *card, QETH_CARD_TEXT(card, 2, "vniccrcb"); if (cmd->hdr.return_code) - return 0; + return qeth_l2_vnicc_makerc(card, cmd->hdr.return_code); /* return results to caller */ card->options.vnicc.sup_chars = rep->hdr.sup; card->options.vnicc.cur_chars = rep->hdr.cur; @@ -1884,7 +1851,6 @@ static int qeth_l2_vnicc_request(struct qeth_card *card, struct qeth_ipacmd_vnicc *req; struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; - int rc; QETH_CARD_TEXT(card, 2, "vniccreq"); @@ -1927,10 +1893,7 @@ static int qeth_l2_vnicc_request(struct qeth_card *card, } /* send request */ - rc = qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, - (void *) cbctl); - - return qeth_l2_vnicc_makerc(card, rc); + return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, cbctl); } /* VNICC query VNIC characteristics request */ diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 59535ecb1487..07c3149e228c 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -253,8 +253,7 @@ static int qeth_l3_add_ip(struct 
qeth_card *card, struct qeth_ipaddr *tmp_addr) } else rc = qeth_l3_register_addr_entry(card, addr); - if (!rc || (rc == IPA_RC_DUPLICATE_IP_ADDRESS) || - (rc == IPA_RC_LAN_OFFLINE)) { + if (!rc || rc == -EADDRINUSE || rc == -ENETDOWN) { addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING; if (addr->ref_counter < 1) { qeth_l3_deregister_addr_entry(card, addr); @@ -338,10 +337,28 @@ static void qeth_l3_recover_ip(struct qeth_card *card) } +static int qeth_l3_setdelip_cb(struct qeth_card *card, struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + + switch (cmd->hdr.return_code) { + case IPA_RC_SUCCESS: + return 0; + case IPA_RC_DUPLICATE_IP_ADDRESS: + return -EADDRINUSE; + case IPA_RC_MC_ADDR_NOT_FOUND: + return -ENOENT; + case IPA_RC_LAN_OFFLINE: + return -ENETDOWN; + default: + return -EIO; + } +} + static int qeth_l3_send_setdelmc(struct qeth_card *card, struct qeth_ipaddr *addr, int ipacmd) { - int rc; struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; @@ -358,9 +375,7 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card, else memcpy(&cmd->data.setdelipm.ip4, &addr->u.a4.addr, 4); - rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); - - return rc; + return qeth_send_ipa_cmd(card, iob, qeth_l3_setdelip_cb, NULL); } static void qeth_l3_fill_netmask(u8 *netmask, unsigned int len) @@ -422,7 +437,7 @@ static int qeth_l3_send_setdelip(struct qeth_card *card, cmd->data.setdelip4.flags = flags; } - return qeth_send_ipa_cmd(card, iob, NULL, NULL); + return qeth_send_ipa_cmd(card, iob, qeth_l3_setdelip_cb, NULL); } static int qeth_l3_send_setrouting(struct qeth_card *card, @@ -942,12 +957,13 @@ static int qeth_l3_start_ipassists(struct qeth_card *card) static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; - cmd = (struct qeth_ipa_cmd *) data; - if (cmd->hdr.return_code == 0) - ether_addr_copy(card->dev->dev_addr, - cmd->data.create_destroy_addr.unique_id); + if (cmd->hdr.return_code) + return -EIO; + + ether_addr_copy(card->dev->dev_addr, + cmd->data.create_destroy_addr.unique_id); return 0; } @@ -975,19 +991,18 @@ static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card) static int qeth_l3_get_unique_id_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; - cmd = (struct qeth_ipa_cmd *) data; - if (cmd->hdr.return_code == 0) + if (cmd->hdr.return_code == 0) { card->info.unique_id = *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]); - else { - card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED | - UNIQUE_ID_NOT_BY_CARD; - dev_warn(&card->gdev->dev, "The network adapter failed to " - "generate a unique ID\n"); + return 0; } - return 0; + + card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED | + UNIQUE_ID_NOT_BY_CARD; + dev_warn(&card->gdev->dev, "The network adapter failed to generate a unique ID\n"); + return -EIO; } static int qeth_l3_get_unique_id(struct qeth_card *card) @@ -1070,7 +1085,7 @@ qeth_diags_trace_cb(struct qeth_card *card, struct qeth_reply *reply, cmd->data.diagass.action, CARD_DEVID(card)); } - return 0; + return rc ? 
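/* [Editor's note: annotation, not part of the patch.]
 * qeth_l3_setdelip_cb() is the L3 address-handling counterpart: it maps the
 * IPA codes the callers used to compare against into errnos, so
 * qeth_l3_add_ip() above and the rx-mode code below can test for errnos
 * instead of raw hardware codes. Mapping per the new callback:
 *
 *	IPA_RC_SUCCESS              ->  0
 *	IPA_RC_DUPLICATE_IP_ADDRESS -> -EADDRINUSE
 *	IPA_RC_MC_ADDR_NOT_FOUND    -> -ENOENT
 *	IPA_RC_LAN_OFFLINE          -> -ENETDOWN
 *	anything else               -> -EIO
 */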
-EIO : 0; } static int @@ -1300,12 +1315,11 @@ static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr); else ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr); - - card->stats.multicast++; + QETH_CARD_STAT_INC(card, rx_multicast); break; case QETH_CAST_BROADCAST: ether_addr_copy(tg_addr, card->dev->broadcast); - card->stats.multicast++; + QETH_CARD_STAT_INC(card, rx_multicast); break; default: if (card->options.sniffer) @@ -1386,8 +1400,8 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card, } work_done++; budget--; - card->stats.rx_packets++; - card->stats.rx_bytes += len; + QETH_CARD_STAT_INC(card, rx_packets); + QETH_CARD_STAT_ADD(card, rx_bytes, len); } return work_done; } @@ -1428,6 +1442,8 @@ static void qeth_l3_stop_card(struct qeth_card *card, int recovery_mode) qeth_clear_cmd_buffers(&card->read); qeth_clear_cmd_buffers(&card->write); } + + flush_workqueue(card->event_wq); } /* @@ -1479,14 +1495,14 @@ static void qeth_l3_set_rx_mode(struct net_device *dev) switch (addr->disp_flag) { case QETH_DISP_ADDR_DELETE: rc = qeth_l3_deregister_addr_entry(card, addr); - if (!rc || rc == IPA_RC_MC_ADDR_NOT_FOUND) { + if (!rc || rc == -ENOENT) { hash_del(&addr->hnode); kfree(addr); } break; case QETH_DISP_ADDR_ADD: rc = qeth_l3_register_addr_entry(card, addr); - if (rc && rc != IPA_RC_LAN_OFFLINE) { + if (rc && rc != -ENETDOWN) { hash_del(&addr->hnode); kfree(addr); break; @@ -1507,7 +1523,7 @@ static void qeth_l3_set_rx_mode(struct net_device *dev) qeth_l3_handle_promisc_mode(card); } -static int qeth_l3_arp_makerc(int rc) +static int qeth_l3_arp_makerc(u16 rc) { switch (rc) { case IPA_RC_SUCCESS: @@ -1524,8 +1540,18 @@ static int qeth_l3_arp_makerc(int rc) } } +static int qeth_l3_arp_cmd_cb(struct qeth_card *card, struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + + qeth_setassparms_cb(card, reply, data); + return qeth_l3_arp_makerc(cmd->hdr.return_code); +} + static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries) { + struct qeth_cmd_buffer *iob; int rc; QETH_CARD_TEXT(card, 3, "arpstnoe"); @@ -1540,13 +1566,19 @@ static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries) if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { return -EOPNOTSUPP; } - rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING, - IPA_CMD_ASS_ARP_SET_NO_ENTRIES, - no_entries); + + iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, + IPA_CMD_ASS_ARP_SET_NO_ENTRIES, 4, + QETH_PROT_IPV4); + if (!iob) + return -ENOMEM; + + __ipa_cmd(iob)->data.setassparms.data.flags_32bit = (u32) no_entries; + rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_cmd_cb, NULL); if (rc) QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on device %x: %#x\n", CARD_DEVID(card), rc); - return qeth_l3_arp_makerc(rc); + return rc; } static __u32 get_arp_entry_size(struct qeth_card *card, @@ -1597,7 +1629,6 @@ static int qeth_l3_arp_query_cb(struct qeth_card *card, struct qeth_ipa_cmd *cmd; struct qeth_arp_query_data *qdata; struct qeth_arp_query_info *qinfo; - int i; int e; int entrybytes_done; int stripped_bytes; @@ -1611,13 +1642,13 @@ static int qeth_l3_arp_query_cb(struct qeth_card *card, if (cmd->hdr.return_code) { QETH_CARD_TEXT(card, 4, "arpcberr"); QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code); - return 0; + return qeth_l3_arp_makerc(cmd->hdr.return_code); } if (cmd->data.setassparms.hdr.return_code) { cmd->hdr.return_code = 
cmd->data.setassparms.hdr.return_code; QETH_CARD_TEXT(card, 4, "setaperr"); QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code); - return 0; + return qeth_l3_arp_makerc(cmd->hdr.return_code); } qdata = &cmd->data.setassparms.data.query_arp; QETH_CARD_TEXT_(card, 4, "anoen%i", qdata->no_entries); @@ -1644,9 +1675,9 @@ static int qeth_l3_arp_query_cb(struct qeth_card *card, break; if ((qinfo->udata_len - qinfo->udata_offset) < esize) { - QETH_CARD_TEXT_(card, 4, "qaer3%i", -ENOMEM); - cmd->hdr.return_code = IPA_RC_ENOMEM; - goto out_error; + QETH_CARD_TEXT_(card, 4, "qaer3%i", -ENOSPC); + memset(qinfo->udata, 0, 4); + return -ENOSPC; } memcpy(qinfo->udata + qinfo->udata_offset, @@ -1669,10 +1700,6 @@ static int qeth_l3_arp_query_cb(struct qeth_card *card, memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET, &qdata->reply_bits, 2); QETH_CARD_TEXT_(card, 4, "rc%i", 0); return 0; -out_error: - i = 0; - memcpy(qinfo->udata, &i, 4); - return 0; } static int qeth_l3_query_arp_cache_info(struct qeth_card *card, @@ -1694,13 +1721,11 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card, return -ENOMEM; cmd = __ipa_cmd(iob); cmd->data.setassparms.data.query_arp.request_bits = 0x000F; - rc = qeth_send_control_data(card, - QETH_SETASS_BASE_LEN + QETH_ARP_CMD_LEN, - iob, qeth_l3_arp_query_cb, qinfo); + rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_query_cb, qinfo); if (rc) QETH_DBF_MESSAGE(2, "Error while querying ARP cache on device %x: %#x\n", CARD_DEVID(card), rc); - return qeth_l3_arp_makerc(rc); + return rc; } static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata) @@ -1782,16 +1807,16 @@ static int qeth_l3_arp_modify_entry(struct qeth_card *card, cmd_entry = &__ipa_cmd(iob)->data.setassparms.data.arp_entry; ether_addr_copy(cmd_entry->macaddr, entry->macaddr); memcpy(cmd_entry->ipaddr, entry->ipaddr, 4); - rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL); + rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_cmd_cb, NULL); if (rc) QETH_DBF_MESSAGE(2, "Could not modify (cmd: %#x) ARP entry on device %x: %#x\n", arp_cmd, CARD_DEVID(card), rc); - - return qeth_l3_arp_makerc(rc); + return rc; } static int qeth_l3_arp_flush_cache(struct qeth_card *card) { + struct qeth_cmd_buffer *iob; int rc; QETH_CARD_TEXT(card, 3, "arpflush"); @@ -1806,12 +1831,18 @@ static int qeth_l3_arp_flush_cache(struct qeth_card *card) if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { return -EOPNOTSUPP; } - rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING, - IPA_CMD_ASS_ARP_FLUSH_CACHE, 0); + + iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, + IPA_CMD_ASS_ARP_FLUSH_CACHE, 0, + QETH_PROT_IPV4); + if (!iob) + return -ENOMEM; + + rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_cmd_cb, NULL); if (rc) QETH_DBF_MESSAGE(2, "Could not flush ARP cache on device %x: %#x\n", CARD_DEVID(card), rc); - return qeth_l3_arp_makerc(rc); + return rc; } static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) @@ -1913,12 +1944,13 @@ static u8 qeth_l3_cast_type_to_flag(int cast_type) return QETH_CAST_UNICAST; } -static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, - struct sk_buff *skb, int ipv, int cast_type, - unsigned int data_len) +static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue, + struct qeth_hdr *hdr, struct sk_buff *skb, + int ipv, int cast_type, unsigned int data_len) { struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3; struct vlan_ethhdr *veth = vlan_eth_hdr(skb); + struct qeth_card *card = queue->card; hdr->hdr.l3.length = data_len; @@ 
-1940,8 +1972,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, /* some HW requires combined L3+L4 csum offload: */ if (ipv == 4) hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_HDR_REQ; - if (card->options.performance_stats) - card->perf_stats.tx_csum++; + QETH_TXQ_STAT_INC(queue, skbs_csum); } } @@ -2042,6 +2073,8 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, int tx_bytes = skb->len; int rc; + queue = qeth_get_tx_queue(card, skb, ipv, cast_type); + if (IS_IQD(card)) { if (card->options.sniffer) goto tx_drop; @@ -2052,19 +2085,13 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, } if (card->state != CARD_STATE_UP) { - card->stats.tx_carrier_errors++; + QETH_TXQ_STAT_INC(queue, tx_carrier_errors); goto tx_drop; } if (cast_type == RTN_BROADCAST && !card->info.broadcast_capable) goto tx_drop; - queue = qeth_get_tx_queue(card, skb, ipv, cast_type); - - if (card->options.performance_stats) { - card->perf_stats.outbound_cnt++; - card->perf_stats.outbound_start_time = qeth_get_micros(); - } netif_stop_queue(dev); if (ipv == 4 || IS_IQD(card)) @@ -2074,11 +2101,8 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, qeth_l3_fill_header); if (!rc) { - card->stats.tx_packets++; - card->stats.tx_bytes += tx_bytes; - if (card->options.performance_stats) - card->perf_stats.outbound_time += qeth_get_micros() - - card->perf_stats.outbound_start_time; + QETH_TXQ_STAT_INC(queue, tx_packets); + QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes); netif_wake_queue(dev); return NETDEV_TX_OK; } else if (rc == -EBUSY) { @@ -2086,22 +2110,13 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, } /* else fall through */ tx_drop: - card->stats.tx_dropped++; - card->stats.tx_errors++; + QETH_TXQ_STAT_INC(queue, tx_dropped); + QETH_TXQ_STAT_INC(queue, tx_errors); dev_kfree_skb_any(skb); netif_wake_queue(dev); return NETDEV_TX_OK; } -static const struct ethtool_ops qeth_l3_ethtool_ops = { - .get_link = ethtool_op_get_link, - .get_strings = qeth_core_get_strings, - .get_ethtool_stats = qeth_core_get_ethtool_stats, - .get_sset_count = qeth_core_get_sset_count, - .get_drvinfo = qeth_core_get_drvinfo, - .get_link_ksettings = qeth_core_ethtool_get_link_ksettings, -}; - /* * we need NOARP for IPv4 but we want neighbor solicitation for IPv6. 
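The qeth hunks above (together with the .ndo_get_stats64 switch just below) replace the driver-global card->stats block with per-TX-queue counters: the hot path bumps QETH_TXQ_STAT_INC()/QETH_TXQ_STAT_ADD() on the queue it owns, and totals are only summed when the stack asks for them. A minimal userspace model of that pattern follows; every type and name in it is invented for illustration and none are the real qeth symbols:

#include <stdint.h>
#include <stdio.h>

#define NUM_TX_QUEUES 4

struct txq_stats {
	uint64_t tx_packets;
	uint64_t tx_bytes;
	uint64_t tx_dropped;
};

static struct txq_stats txq[NUM_TX_QUEUES];

/* hot path: touch only the counters owned by this queue */
static void txq_account(int q, uint64_t len)
{
	txq[q].tx_packets++;
	txq[q].tx_bytes += len;
}

/* slow path: aggregate on demand, like an ndo_get_stats64 handler */
static void get_stats64(struct txq_stats *tot)
{
	*tot = (struct txq_stats){ 0 };
	for (int q = 0; q < NUM_TX_QUEUES; q++) {
		tot->tx_packets += txq[q].tx_packets;
		tot->tx_bytes   += txq[q].tx_bytes;
		tot->tx_dropped += txq[q].tx_dropped;
	}
}

int main(void)
{
	struct txq_stats tot;

	txq_account(0, 1500);
	txq_account(2, 60);
	get_stats64(&tot);
	printf("packets=%llu bytes=%llu\n",
	       (unsigned long long)tot.tx_packets,
	       (unsigned long long)tot.tx_bytes);
	return 0;
}

The payoff of this layout is that concurrent transmit queues never contend on a shared counter; the rare statistics read pays for the summation loop instead.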
Setting * NOARP on the netdevice is no option because it also turns off neighbor @@ -2138,7 +2153,7 @@ static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb, static const struct net_device_ops qeth_l3_netdev_ops = { .ndo_open = qeth_open, .ndo_stop = qeth_stop, - .ndo_get_stats = qeth_get_stats, + .ndo_get_stats64 = qeth_get_stats64, .ndo_start_xmit = qeth_l3_hard_start_xmit, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = qeth_l3_set_rx_mode, @@ -2153,7 +2168,7 @@ static const struct net_device_ops qeth_l3_netdev_ops = { static const struct net_device_ops qeth_l3_osa_netdev_ops = { .ndo_open = qeth_open, .ndo_stop = qeth_stop, - .ndo_get_stats = qeth_get_stats, + .ndo_get_stats64 = qeth_get_stats64, .ndo_start_xmit = qeth_l3_hard_start_xmit, .ndo_features_check = qeth_l3_osa_features_check, .ndo_validate_addr = eth_validate_addr, @@ -2223,7 +2238,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok) return -ENODEV; card->dev->needed_headroom = headroom; - card->dev->ethtool_ops = &qeth_l3_ethtool_ops; card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; @@ -2278,6 +2292,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev) if (cgdev->state == CCWGROUP_ONLINE) qeth_l3_set_offline(cgdev); + cancel_work_sync(&card->close_dev_work); if (qeth_netdev_is_registered(card->dev)) unregister_netdev(card->dev); qeth_l3_clear_ip_htable(card, 0); diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 9cf30d124b9e..e390f8c6d5f3 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -403,7 +403,6 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device) goto failed; /* report size limit per scatter-gather segment */ - adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN; adapter->ccw_device->dev.dma_parms = &adapter->dma_parms; adapter->stat_read_buf_num = FSF_STATUS_READS_RECOM; diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 00acc7144bbc..f4f6a07c5222 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -428,6 +428,8 @@ static struct scsi_host_template zfcp_scsi_host_template = { .max_sectors = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1) * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8, /* GCD, adjusted later */ + /* report size limit per scatter-gather segment */ + .max_segment_size = ZFCP_QDIO_SBALE_LEN, .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1, .shost_attrs = zfcp_sysfs_shost_attrs, .sdev_attrs = zfcp_sysfs_sdev_attrs, diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c index 128d658d472a..16957d7ac414 100644 --- a/drivers/scsi/53c700.c +++ b/drivers/scsi/53c700.c @@ -295,7 +295,7 @@ NCR_700_detect(struct scsi_host_template *tpnt, if(tpnt->sdev_attrs == NULL) tpnt->sdev_attrs = NCR_700_dev_attrs; - memory = dma_alloc_attrs(hostdata->dev, TOTAL_MEM_SIZE, &pScript, + memory = dma_alloc_attrs(dev, TOTAL_MEM_SIZE, &pScript, GFP_KERNEL, DMA_ATTR_NON_CONSISTENT); if(memory == NULL) { printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n"); diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c index f83f79b07b50..07efcb9b5b94 100644 --- a/drivers/scsi/aic94xx/aic94xx_init.c +++ b/drivers/scsi/aic94xx/aic94xx_init.c @@ -280,7 +280,7 @@ static ssize_t asd_show_dev_rev(struct device *dev, return snprintf(buf, PAGE_SIZE, "%s\n", asd_dev_rev[asd_ha->revision_id]); } -static DEVICE_ATTR(revision, S_IRUGO, 
asd_show_dev_rev, NULL); +static DEVICE_ATTR(aic_revision, S_IRUGO, asd_show_dev_rev, NULL); static ssize_t asd_show_dev_bios_build(struct device *dev, struct device_attribute *attr,char *buf) @@ -477,7 +477,7 @@ static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha) { int err; - err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_revision); + err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision); if (err) return err; @@ -499,13 +499,13 @@ err_update_bios: err_biosb: device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build); err_rev: - device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision); + device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision); return err; } static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha) { - device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision); + device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision); device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build); device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn); device_remove_file(&asd_ha->pcidev->dev, &dev_attr_update_bios); diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index 350257c13a5b..bc9f2a2365f4 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c @@ -240,6 +240,7 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba) return NULL; } + cmgr->hba = hba; cmgr->free_list = kcalloc(arr_sz, sizeof(*cmgr->free_list), GFP_KERNEL); if (!cmgr->free_list) { @@ -256,7 +257,6 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba) goto mem_err; } - cmgr->hba = hba; cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1); for (i = 0; i < arr_sz; i++) { @@ -295,7 +295,7 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba) /* Allocate pool of io_bdts - one for each bnx2fc_cmd */ mem_size = num_ios * sizeof(struct io_bdt *); - cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL); + cmgr->io_bdt_pool = kzalloc(mem_size, GFP_KERNEL); if (!cmgr->io_bdt_pool) { printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n"); goto mem_err; diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index bfa13e3b191c..c8bad2c093b8 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -3687,6 +3687,7 @@ static int cxlflash_probe(struct pci_dev *pdev, host->max_cmd_len = CXLFLASH_MAX_CDB_LEN; cfg = shost_priv(host); + cfg->state = STATE_PROBING; cfg->host = host; rc = alloc_mem(cfg); if (rc) { @@ -3775,6 +3776,7 @@ out: return rc; out_remove: + cfg->state = STATE_PROBED; cxlflash_remove(pdev); goto out; } diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index be83590ed955..ff943f477d6f 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -1726,14 +1726,14 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, fc_frame_payload_op(fp) != ELS_LS_ACC) { FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n"); fc_lport_error(lport, fp); - goto err; + goto out; } flp = fc_frame_payload_get(fp, sizeof(*flp)); if (!flp) { FC_LPORT_DBG(lport, "FLOGI bad response\n"); fc_lport_error(lport, fp); - goto err; + goto out; } mfs = ntohs(flp->fl_csp.sp_bb_data) & @@ -1743,7 +1743,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, " "lport->mfs:%hu\n", mfs, lport->mfs); fc_lport_error(lport, fp); - goto err; + goto out; } if (mfs <= lport->mfs) { diff --git a/drivers/scsi/libfc/fc_rport.c 
b/drivers/scsi/libfc/fc_rport.c index 9192a1d9dec6..dfba4921b265 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -184,7 +184,6 @@ void fc_rport_destroy(struct kref *kref) struct fc_rport_priv *rdata; rdata = container_of(kref, struct fc_rport_priv, kref); - WARN_ON(!list_empty(&rdata->peers)); kfree_rcu(rdata, rcu); } EXPORT_SYMBOL(fc_rport_destroy); diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index aeeb0144bd55..8d1acc802a67 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -1785,13 +1785,13 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, /* Issue Marker IOCB */ qla2x00_marker(vha, vha->hw->req_q_map[0], - vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun, + vha->hw->rsp_q_map[0], fcport->loop_id, lun, flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID); } done_free_sp: sp->free(sp); - sp->fcport->flags &= ~FCF_ASYNC_SENT; + fcport->flags &= ~FCF_ASYNC_SENT; done: return rval; } diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 661512bec3ac..e27f4df24021 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -62,7 +62,7 @@ /* make sure inq_product_rev string corresponds to this version */ #define SDEBUG_VERSION "0188" /* format to fit INQUIRY revision field */ -static const char *sdebug_version_date = "20180128"; +static const char *sdebug_version_date = "20190125"; #define MY_NAME "scsi_debug" @@ -735,7 +735,7 @@ static inline bool scsi_debug_lbp(void) (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10); } -static void *fake_store(unsigned long long lba) +static void *lba2fake_store(unsigned long long lba) { lba = do_div(lba, sdebug_store_sectors); @@ -2514,8 +2514,8 @@ static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba, return ret; } -/* If fake_store(lba,num) compares equal to arr(num), then copy top half of - * arr into fake_store(lba,num) and return true. If comparison fails then +/* If lba2fake_store(lba,num) compares equal to arr(num), then copy top half of + * arr into lba2fake_store(lba,num) and return true. If comparison fails then * return false. 
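The qla2xxx hunk above is a use-after-free fix: the completion path called sp->free(sp) and then dereferenced sp->fcport. The patch reaches through the still-valid fcport function argument instead of through the freed sp. A self-contained userspace model of the same shape — copy what you still need before the object dies — with all names invented for the sketch:

#include <stdio.h>
#include <stdlib.h>

struct port {
	unsigned int flags;
};

struct srb {
	struct port *fcport;
	void (*free)(struct srb *sp);
};

#define FLAG_ASYNC_SENT 0x1u

static void srb_free(struct srb *sp)
{
	free(sp);
}

static void done_free_sp(struct srb *sp)
{
	struct port *fcport = sp->fcport;	/* copy before the free */

	sp->free(sp);				/* sp is dead from here on */
	fcport->flags &= ~FLAG_ASYNC_SENT;	/* safe: uses the copy */
}

int main(void)
{
	static struct port port = { .flags = FLAG_ASYNC_SENT };
	struct srb *sp = malloc(sizeof(*sp));

	if (!sp)
		return 1;
	sp->fcport = &port;
	sp->free = srb_free;
	done_free_sp(sp);
	printf("flags=%#x\n", port.flags);
	return 0;
}

The same rule applies to any callback that releases its own context argument: take local copies of everything still needed, free, and from then on use only the copies.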
*/ static bool comp_write_worker(u64 lba, u32 num, const u8 *arr) { @@ -2643,7 +2643,7 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec, if (sdt->app_tag == cpu_to_be16(0xffff)) continue; - ret = dif_verify(sdt, fake_store(sector), sector, ei_lba); + ret = dif_verify(sdt, lba2fake_store(sector), sector, ei_lba); if (ret) { dif_errors++; return ret; @@ -3261,10 +3261,12 @@ err_out: static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, u32 ei_lba, bool unmap, bool ndob) { + int ret; unsigned long iflags; unsigned long long i; - int ret; - u64 lba_off; + u32 lb_size = sdebug_sector_size; + u64 block, lbaa; + u8 *fs1p; ret = check_device_access_params(scp, lba, num); if (ret) @@ -3276,31 +3278,30 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, unmap_region(lba, num); goto out; } - - lba_off = lba * sdebug_sector_size; + lbaa = lba; + block = do_div(lbaa, sdebug_store_sectors); /* if ndob then zero 1 logical block, else fetch 1 logical block */ + fs1p = fake_storep + (block * lb_size); if (ndob) { - memset(fake_storep + lba_off, 0, sdebug_sector_size); + memset(fs1p, 0, lb_size); ret = 0; } else - ret = fetch_to_dev_buffer(scp, fake_storep + lba_off, - sdebug_sector_size); + ret = fetch_to_dev_buffer(scp, fs1p, lb_size); if (-1 == ret) { write_unlock_irqrestore(&atomic_rw, iflags); return DID_ERROR << 16; - } else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size)) + } else if (sdebug_verbose && !ndob && (ret < lb_size)) sdev_printk(KERN_INFO, scp->device, "%s: %s: lb size=%u, IO sent=%d bytes\n", - my_name, "write same", - sdebug_sector_size, ret); + my_name, "write same", lb_size, ret); /* Copy first sector to remaining blocks */ - for (i = 1 ; i < num ; i++) - memcpy(fake_storep + ((lba + i) * sdebug_sector_size), - fake_storep + lba_off, - sdebug_sector_size); - + for (i = 1 ; i < num ; i++) { + lbaa = lba + i; + block = do_div(lbaa, sdebug_store_sectors); + memmove(fake_storep + (block * lb_size), fs1p, lb_size); + } if (scsi_debug_lbp()) map_region(lba, num); out: diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index b2da8a00ec33..5464d467e23e 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2951,9 +2951,6 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp) if (rot == 1) { blk_queue_flag_set(QUEUE_FLAG_NONROT, q); blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); - } else { - blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); - blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q); } if (sdkp->device->type == TYPE_ZBC) { @@ -3090,6 +3087,15 @@ static int sd_revalidate_disk(struct gendisk *disk) if (sdkp->media_present) { sd_read_capacity(sdkp, buffer); + /* + * set the default to rotational. All non-rotational devices + * support the block characteristics VPD page, which will + * cause this to be updated correctly and any device which + * doesn't support it should be treated as rotational. 
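The resp_write_same() rewrite above stops indexing fake_storep by the raw LBA and instead wraps every LBA into the bounded backing store via do_div(), the kernel helper that divides a u64 in place and returns the remainder; because two wrapped blocks may now alias, the per-block copy also became memmove() instead of memcpy(). A plain userspace equivalent of the wrap-around arithmetic, with an invented store size and function name:

#include <stdint.h>
#include <stdio.h>

static const uint64_t store_sectors = 0x8000;	/* backing store size */

/* kernel: block = do_div(lbaa, sdebug_store_sectors); quotient in lbaa */
static uint64_t lba_to_store_block(uint64_t lba)
{
	return lba % store_sectors;	/* remainder = position in store */
}

int main(void)
{
	uint64_t lba = 0x12345;

	/* two LBAs one store-size apart land on the same backing block */
	printf("%llu\n", (unsigned long long)lba_to_store_block(lba));
	printf("%llu\n",
	       (unsigned long long)lba_to_store_block(lba + store_sectors));
	return 0;
}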
+ */ + blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); + blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q); + if (scsi_device_supports_vpd(sdp)) { sd_read_block_provisioning(sdkp); sd_read_block_limits(sdkp); diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index 83365b29a4d8..fff86940388b 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -462,12 +462,16 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf) sdkp->device->use_10_for_rw = 0; /* - * If something changed, revalidate the disk zone bitmaps once we have - * the capacity, that is on the second revalidate execution during disk - * scan and always during normal revalidate. + * Revalidate the disk zone bitmaps once the block device capacity is + * set on the second revalidate execution during disk scan and if + * something changed when executing a normal revalidate. */ - if (sdkp->first_scan) + if (sdkp->first_scan) { + sdkp->zone_blocks = zone_blocks; + sdkp->nr_zones = nr_zones; return 0; + } + if (sdkp->zone_blocks != zone_blocks || sdkp->nr_zones != nr_zones || disk->queue->nr_zones != nr_zones) { diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c index 52c153cd795a..636f83f781f5 100644 --- a/drivers/soc/fsl/qbman/qman.c +++ b/drivers/soc/fsl/qbman/qman.c @@ -1143,18 +1143,19 @@ static void qm_mr_process_task(struct work_struct *work); static irqreturn_t portal_isr(int irq, void *ptr) { struct qman_portal *p = ptr; - - u32 clear = QM_DQAVAIL_MASK | p->irq_sources; u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources; + u32 clear = 0; if (unlikely(!is)) return IRQ_NONE; /* DQRR-handling if it's interrupt-driven */ - if (is & QM_PIRQ_DQRI) + if (is & QM_PIRQ_DQRI) { __poll_portal_fast(p, QMAN_POLL_LIMIT); + clear = QM_DQAVAIL_MASK | QM_PIRQ_DQRI; + } /* Handling of anything else that's interrupt-driven */ - clear |= __poll_portal_slow(p, is); + clear |= __poll_portal_slow(p, is) & QM_PIRQ_SLOW; qm_out(&p->p, QM_REG_ISR, clear); return IRQ_HANDLED; } diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c index daabaceeea52..018399ee8731 100644 --- a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c +++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c @@ -505,6 +505,17 @@ static netdev_tx_t port_dropframe(struct sk_buff *skb, return NETDEV_TX_OK; } +static int swdev_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) +{ + struct ethsw_port_priv *port_priv = netdev_priv(dev); + + ppid->id_len = 1; + ppid->id[0] = port_priv->ethsw_data->dev_id; + + return 0; +} + static const struct net_device_ops ethsw_port_ops = { .ndo_open = port_open, .ndo_stop = port_stop, @@ -515,6 +526,7 @@ static const struct net_device_ops ethsw_port_ops = { .ndo_get_offload_stats = port_get_offload_stats, .ndo_start_xmit = port_dropframe, + .ndo_get_port_parent_id = swdev_get_port_parent_id, }; static void ethsw_links_state_update(struct ethsw_core *ethsw) @@ -628,31 +640,6 @@ static void ethsw_teardown_irqs(struct fsl_mc_device *sw_dev) fsl_mc_free_irqs(sw_dev); } -static int swdev_port_attr_get(struct net_device *netdev, - struct switchdev_attr *attr) -{ - struct ethsw_port_priv *port_priv = netdev_priv(netdev); - - switch (attr->id) { - case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: - attr->u.ppid.id_len = 1; - attr->u.ppid.id[0] = port_priv->ethsw_data->dev_id; - break; - case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: - attr->u.brport_flags = - (port_priv->ethsw_data->learning ? BR_LEARNING : 0) | - (port_priv->flood ? 
BR_FLOOD : 0); - break; - case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT: - attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD; - break; - default: - return -EOPNOTSUPP; - } - - return 0; -} - static int port_attr_stp_state_set(struct net_device *netdev, struct switchdev_trans *trans, u8 state) @@ -665,6 +652,16 @@ static int port_attr_stp_state_set(struct net_device *netdev, return ethsw_port_set_stp_state(port_priv, state); } +static int port_attr_br_flags_pre_set(struct net_device *netdev, + struct switchdev_trans *trans, + unsigned long flags) +{ + if (flags & ~(BR_LEARNING | BR_FLOOD)) + return -EINVAL; + + return 0; +} + static int port_attr_br_flags_set(struct net_device *netdev, struct switchdev_trans *trans, unsigned long flags) @@ -697,6 +694,10 @@ static int swdev_port_attr_set(struct net_device *netdev, err = port_attr_stp_state_set(netdev, trans, attr->u.stp_state); break; + case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS: + err = port_attr_br_flags_pre_set(netdev, trans, + attr->u.brport_flags); + break; case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: err = port_attr_br_flags_set(netdev, trans, attr->u.brport_flags); @@ -925,7 +926,6 @@ static int swdev_port_obj_del(struct net_device *netdev, } static const struct switchdev_ops ethsw_port_switchdev_ops = { - .switchdev_port_attr_get = swdev_port_attr_get, .switchdev_port_attr_set = swdev_port_attr_set, }; diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c index 2848fa71a33d..d6248eecf123 100644 --- a/drivers/staging/octeon/ethernet-mdio.c +++ b/drivers/staging/octeon/ethernet-mdio.c @@ -170,7 +170,7 @@ int cvm_oct_phy_setup_device(struct net_device *dev) return -ENODEV; priv->last_link = 0; - phy_start_aneg(phydev); + phy_start(phydev); return 0; no_phy: diff --git a/drivers/staging/speakup/spk_ttyio.c b/drivers/staging/speakup/spk_ttyio.c index c92bbd05516e..005de0024dd4 100644 --- a/drivers/staging/speakup/spk_ttyio.c +++ b/drivers/staging/speakup/spk_ttyio.c @@ -265,7 +265,8 @@ static void spk_ttyio_send_xchar(char ch) return; } - speakup_tty->ops->send_xchar(speakup_tty, ch); + if (speakup_tty->ops->send_xchar) + speakup_tty->ops->send_xchar(speakup_tty, ch); mutex_unlock(&speakup_tty_mutex); } @@ -277,7 +278,8 @@ static void spk_ttyio_tiocmset(unsigned int set, unsigned int clear) return; } - speakup_tty->ops->tiocmset(speakup_tty, set, clear); + if (speakup_tty->ops->tiocmset) + speakup_tty->ops->tiocmset(speakup_tty, set, clear); mutex_unlock(&speakup_tty_mutex); } diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 72016d0dfca5..8e7fffbb8802 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -852,6 +852,12 @@ static ssize_t pi_prot_type_store(struct config_item *item, return count; } +/* always zero, but attr needs to remain RW to avoid userspace breakage */ +static ssize_t pi_prot_format_show(struct config_item *item, char *page) +{ + return snprintf(page, PAGE_SIZE, "0\n"); +} + static ssize_t pi_prot_format_store(struct config_item *item, const char *page, size_t count) { @@ -1132,7 +1138,7 @@ CONFIGFS_ATTR(, emulate_3pc); CONFIGFS_ATTR(, emulate_pr); CONFIGFS_ATTR(, pi_prot_type); CONFIGFS_ATTR_RO(, hw_pi_prot_type); -CONFIGFS_ATTR_WO(, pi_prot_format); +CONFIGFS_ATTR(, pi_prot_format); CONFIGFS_ATTR(, pi_prot_verify); CONFIGFS_ATTR(, enforce_pr_isids); CONFIGFS_ATTR(, is_nonrot); diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index dfd23245f778..6fff16113628 
100644 --- a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c @@ -774,7 +774,7 @@ of_cpufreq_cooling_register(struct cpufreq_policy *policy) cdev = __cpufreq_cooling_register(np, policy, capacitance); if (IS_ERR(cdev)) { - pr_err("cpu_cooling: cpu%d is not running as cooling device: %ld\n", + pr_err("cpu_cooling: cpu%d failed to register as cooling device: %ld\n", policy->cpu, PTR_ERR(cdev)); cdev = NULL; } diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c index 4bfdb4a1e47d..2df059cc07e2 100644 --- a/drivers/thermal/of-thermal.c +++ b/drivers/thermal/of-thermal.c @@ -867,14 +867,14 @@ __init *thermal_of_build_thermal_zone(struct device_node *np) ret = of_property_read_u32(np, "polling-delay-passive", &prop); if (ret < 0) { - pr_err("missing polling-delay-passive property\n"); + pr_err("%pOFn: missing polling-delay-passive property\n", np); goto free_tz; } tz->passive_delay = prop; ret = of_property_read_u32(np, "polling-delay", &prop); if (ret < 0) { - pr_err("missing polling-delay property\n"); + pr_err("%pOFn: missing polling-delay property\n", np); goto free_tz; } tz->polling_delay = prop; diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c index e2c407656fa6..c1fdbc0b6840 100644 --- a/drivers/tty/serial/8250/8250_mtk.c +++ b/drivers/tty/serial/8250/8250_mtk.c @@ -357,6 +357,9 @@ static int mtk8250_probe_of(struct platform_device *pdev, struct uart_port *p, if (dmacnt == 2) { data->dma = devm_kzalloc(&pdev->dev, sizeof(*data->dma), GFP_KERNEL); + if (!data->dma) + return -ENOMEM; + data->dma->fn = mtk8250_dma_filter; data->dma->rx_size = MTK_UART_RX_SIZE; data->dma->rxconf.src_maxburst = MTK_UART_RX_TRIGGER; diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index f80a300b5d68..48bd694a5fa1 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -3420,6 +3420,11 @@ static int serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board) { int num_iomem, num_port, first_port = -1, i; + int rc; + + rc = serial_pci_is_class_communication(dev); + if (rc) + return rc; /* * Should we try to make guesses for multiport serial devices later? 
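The speakup hunks a little further up wrap the send_xchar and tiocmset calls in NULL checks, because members of a tty ops table are optional and a given driver may leave them unset. The defensive shape, modeled in self-contained userspace C with invented types and names:

#include <stdio.h>

struct tty;	/* opaque here; only pointers are passed around */

struct tty_ops {
	void (*send_xchar)(struct tty *tty, char ch);	/* may be NULL */
};

static void send_xchar_guarded(struct tty *tty, const struct tty_ops *ops,
			       char ch)
{
	if (ops->send_xchar)		/* optional hook: check first */
		ops->send_xchar(tty, ch);
	/* else: quietly skip; the driver just lacks the capability */
}

int main(void)
{
	struct tty_ops ops = { .send_xchar = NULL };

	send_xchar_guarded(NULL, &ops, 'x');	/* no crash: call is guarded */
	printf("NULL hook skipped safely\n");
	return 0;
}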
@@ -3647,10 +3652,6 @@ pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent) board = &pci_boards[ent->driver_data]; - rc = serial_pci_is_class_communication(dev); - if (rc) - return rc; - rc = serial_pci_is_blacklisted(dev); if (rc) return rc; diff --git a/drivers/tty/serial/earlycon-riscv-sbi.c b/drivers/tty/serial/earlycon-riscv-sbi.c index e1a551aae336..ce81523c3113 100644 --- a/drivers/tty/serial/earlycon-riscv-sbi.c +++ b/drivers/tty/serial/earlycon-riscv-sbi.c @@ -10,13 +10,16 @@ #include <linux/serial_core.h> #include <asm/sbi.h> -static void sbi_console_write(struct console *con, - const char *s, unsigned int n) +static void sbi_putc(struct uart_port *port, int c) { - int i; + sbi_console_putchar(c); +} - for (i = 0; i < n; ++i) - sbi_console_putchar(s[i]); +static void sbi_console_write(struct console *con, + const char *s, unsigned n) +{ + struct earlycon_device *dev = con->data; + uart_console_write(&dev->port, s, n, sbi_putc); } static int __init early_sbi_setup(struct earlycon_device *device, diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 5c01bb6d1c24..556f50aa1b58 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -130,6 +130,9 @@ static void uart_start(struct tty_struct *tty) struct uart_port *port; unsigned long flags; + if (!state) + return; + port = uart_port_lock(state, flags); __uart_start(tty); uart_port_unlock(port, flags); @@ -727,6 +730,9 @@ static void uart_unthrottle(struct tty_struct *tty) upstat_t mask = UPSTAT_SYNC_FIFO; struct uart_port *port; + if (!state) + return; + port = uart_port_ref(state); if (!port) return; diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 8df0fd824520..64bbeb7d7e0c 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -1921,7 +1921,7 @@ out_nomem: static void sci_free_irq(struct sci_port *port) { - int i; + int i, j; /* * Intentionally in reverse order so we iterate over the muxed @@ -1937,6 +1937,13 @@ static void sci_free_irq(struct sci_port *port) if (unlikely(irq < 0)) continue; + /* Check if already freed (irq was muxed) */ + for (j = 0; j < i; j++) + if (port->irqs[j] == irq) + j = i + 1; + if (j > i) + continue; + free_irq(port->irqs[i], port); kfree(port->irqstr[i]); diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c index cb7fcd7c0ad8..c1e9ea621f41 100644 --- a/drivers/usb/dwc3/dwc3-exynos.c +++ b/drivers/usb/dwc3/dwc3-exynos.c @@ -78,7 +78,7 @@ static int dwc3_exynos_probe(struct platform_device *pdev) for (i = 0; i < exynos->num_clks; i++) { ret = clk_prepare_enable(exynos->clks[i]); if (ret) { - while (--i > 0) + while (i-- > 0) clk_disable_unprepare(exynos->clks[i]); return ret; } @@ -223,7 +223,7 @@ static int dwc3_exynos_resume(struct device *dev) for (i = 0; i < exynos->num_clks; i++) { ret = clk_prepare_enable(exynos->clks[i]); if (ret) { - while (--i > 0) + while (i-- > 0) clk_disable_unprepare(exynos->clks[i]); return ret; } diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index bed2ff42780b..6c9b76bcc2e1 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -1119,7 +1119,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep, unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); unsigned int rem = length % maxp; - if (rem && usb_endpoint_dir_out(dep->endpoint.desc)) { + if ((!length || rem) && usb_endpoint_dir_out(dep->endpoint.desc)) { struct dwc3 *dwc = dep->dwc; struct dwc3_trb *trb; diff 
--git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c index 660878a19505..b77f3126580e 100644 --- a/drivers/usb/gadget/udc/net2272.c +++ b/drivers/usb/gadget/udc/net2272.c @@ -2083,7 +2083,7 @@ static irqreturn_t net2272_irq(int irq, void *_dev) #if defined(PLX_PCI_RDK2) /* see if PCI int for us by checking irqstat */ intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT); - if (!intcsr & (1 << NET2272_PCI_IRQ)) { + if (!(intcsr & (1 << NET2272_PCI_IRQ))) { spin_unlock(&dev->lock); return IRQ_NONE; } diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index eae8b1b1b45b..ffe462a657b1 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c @@ -452,13 +452,10 @@ void musb_g_tx(struct musb *musb, u8 epnum) } if (request) { - u8 is_dma = 0; - bool short_packet = false; trace_musb_req_tx(req); if (dma && (csr & MUSB_TXCSR_DMAENAB)) { - is_dma = 1; csr |= MUSB_TXCSR_P_WZC_BITS; csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET); @@ -476,16 +473,8 @@ void musb_g_tx(struct musb *musb, u8 epnum) */ if ((request->zero && request->length) && (request->length % musb_ep->packet_sz == 0) - && (request->actual == request->length)) - short_packet = true; + && (request->actual == request->length)) { - if ((musb_dma_inventra(musb) || musb_dma_ux500(musb)) && - (is_dma && (!dma->desired_mode || - (request->actual & - (musb_ep->packet_sz - 1))))) - short_packet = true; - - if (short_packet) { /* * On DMA completion, FIFO may not be * available yet... diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c index a688f7f87829..5fc6825745f2 100644 --- a/drivers/usb/musb/musbhsdma.c +++ b/drivers/usb/musb/musbhsdma.c @@ -346,12 +346,10 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data) channel->status = MUSB_DMA_STATUS_FREE; /* completed */ - if ((devctl & MUSB_DEVCTL_HM) - && (musb_channel->transmit) - && ((channel->desired_mode == 0) - || (channel->actual_len & - (musb_channel->max_packet_sz - 1))) - ) { + if (musb_channel->transmit && + (!channel->desired_mode || + (channel->actual_len % + musb_channel->max_packet_sz))) { u8 epnum = musb_channel->epnum; int offset = musb->io.ep_offset(epnum, MUSB_TXCSR); @@ -363,11 +361,14 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data) */ musb_ep_select(mbase, epnum); txcsr = musb_readw(mbase, offset); - txcsr &= ~(MUSB_TXCSR_DMAENAB + if (channel->desired_mode == 1) { + txcsr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_AUTOSET); - musb_writew(mbase, offset, txcsr); - /* Send out the packet */ - txcsr &= ~MUSB_TXCSR_DMAMODE; + musb_writew(mbase, offset, txcsr); + /* Send out the packet */ + txcsr &= ~MUSB_TXCSR_DMAMODE; + txcsr |= MUSB_TXCSR_DMAENAB; + } txcsr |= MUSB_TXCSR_TXPKTRDY; musb_writew(mbase, offset, txcsr); } diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig index d7312eed6088..91ea3083e7ad 100644 --- a/drivers/usb/phy/Kconfig +++ b/drivers/usb/phy/Kconfig @@ -21,7 +21,7 @@ config AB8500_USB config FSL_USB2_OTG bool "Freescale USB OTG Transceiver Driver" - depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM && PM + depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM=y && PM depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y' select USB_PHY help diff --git a/drivers/usb/phy/phy-am335x.c b/drivers/usb/phy/phy-am335x.c index 27bdb7222527..f5f0568d8533 100644 --- a/drivers/usb/phy/phy-am335x.c +++ b/drivers/usb/phy/phy-am335x.c @@ -61,9 +61,6 @@ static int 
am335x_phy_probe(struct platform_device *pdev) if (ret) return ret; - ret = usb_add_phy_dev(&am_phy->usb_phy_gen.phy); - if (ret) - return ret; am_phy->usb_phy_gen.phy.init = am335x_init; am_phy->usb_phy_gen.phy.shutdown = am335x_shutdown; @@ -82,7 +79,7 @@ static int am335x_phy_probe(struct platform_device *pdev) device_set_wakeup_enable(dev, false); phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, am_phy->dr_mode, false); - return 0; + return usb_add_phy_dev(&am_phy->usb_phy_gen.phy); } static int am335x_phy_remove(struct platform_device *pdev) diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index 4bc29b586698..f1c39a3c7534 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -2297,7 +2297,8 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port) pdo_pps_apdo_max_voltage(snk)); port->pps_data.max_curr = min_pps_apdo_current(src, snk); port->pps_data.out_volt = min(port->pps_data.max_volt, - port->pps_data.out_volt); + max(port->pps_data.min_volt, + port->pps_data.out_volt)); port->pps_data.op_curr = min(port->pps_data.max_curr, port->pps_data.op_curr); } diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 24a129fcdd61..a2e5dc7716e2 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -1788,7 +1788,7 @@ static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len) ret = translate_desc(vq, (uintptr_t)vq->used + used_offset, len, iov, 64, VHOST_ACCESS_WO); - if (ret) + if (ret < 0) return ret; for (i = 0; i < ret; i++) { diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index cd7e755484e3..a0b07c331255 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -152,7 +152,12 @@ struct vring_virtqueue { /* Available for packed ring */ struct { /* Actual memory layout for this queue. */ - struct vring_packed vring; + struct { + unsigned int num; + struct vring_packed_desc *desc; + struct vring_packed_desc_event *driver; + struct vring_packed_desc_event *device; + } vring; /* Driver ring wrap counter. */ bool avail_wrap_counter; @@ -1609,6 +1614,9 @@ static struct virtqueue *vring_create_virtqueue_packed( !context; vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); + if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM)) + vq->weak_barriers = false; + vq->packed.ring_dma_addr = ring_dma_addr; vq->packed.driver_event_dma_addr = driver_event_dma_addr; vq->packed.device_event_dma_addr = device_event_dma_addr; @@ -2079,6 +2087,9 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index, !context; vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); + if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM)) + vq->weak_barriers = false; + vq->split.queue_dma_addr = 0; vq->split.queue_size_in_bytes = 0; @@ -2213,6 +2224,8 @@ void vring_transport_features(struct virtio_device *vdev) break; case VIRTIO_F_RING_PACKED: break; + case VIRTIO_F_ORDER_PLATFORM: + break; default: /* We don't understand this bit. */ __virtio_clear_bit(vdev, i);
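The virtio_ring hunks above close the section with the transport-feature whitelist pattern: vring_transport_features() keeps only the bits it explicitly recognizes and clears the rest, and the newly accepted VIRTIO_F_ORDER_PLATFORM bit drops the ring from compiler-only to full platform memory barriers (weak_barriers = false). A toy userspace model of both steps; the constants below are invented stand-ins, not the real virtio feature-bit numbers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum {
	F_INDIRECT_DESC	 = 1u << 0,
	F_EVENT_IDX	 = 1u << 1,
	F_ORDER_PLATFORM = 1u << 2,	/* device needs real barriers */
};

struct vring_cfg {
	uint32_t features;	/* bits offered by the device */
	bool weak_barriers;
};

static void transport_features(struct vring_cfg *cfg)
{
	const uint32_t known = F_INDIRECT_DESC | F_EVENT_IDX |
			       F_ORDER_PLATFORM;

	cfg->features &= known;		/* we don't understand other bits */
	/* strong barriers once the platform says ordering matters */
	cfg->weak_barriers = !(cfg->features & F_ORDER_PLATFORM);
}

int main(void)
{
	struct vring_cfg cfg = {
		.features = F_EVENT_IDX | F_ORDER_PLATFORM | (1u << 7),
	};

	transport_features(&cfg);
	printf("features=%#x weak_barriers=%d\n",
	       cfg.features, cfg.weak_barriers);
	return 0;
}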