Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/iwl-trans.h')
-rw-r--r-- | drivers/net/wireless/intel/iwlwifi/iwl-trans.h | 201
1 file changed, 145 insertions, 56 deletions
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 3d089ae171b7..81b7cb71e001 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -26,7 +26,7 @@
  * in the file called COPYING.
  *
  * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
+ *  Intel Linux Wireless <linuxwifi@intel.com>
  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  *
  * BSD LICENSE
@@ -68,6 +68,7 @@
 #include <linux/ieee80211.h>
 #include <linux/mm.h> /* for page_address */
 #include <linux/lockdep.h>
+#include <linux/kernel.h>
 
 #include "iwl-debug.h"
 #include "iwl-config.h"
@@ -248,6 +249,8 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
  * @CMD_MAKE_TRANS_IDLE: The command response should mark the trans as idle.
  * @CMD_WAKE_UP_TRANS: The command response should wake up the trans
  *	(i.e. mark it as non-idle).
+ * @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be
+ *	called after this command completes. Valid only with CMD_ASYNC.
  * @CMD_TB_BITMAP_POS: Position of the first bit for the TB bitmap. We need to
  *	check that we leave enough room for the TBs bitmap which needs 20 bits.
  */
@@ -259,6 +262,7 @@ enum CMD_MODE {
 	CMD_SEND_IN_IDLE	= BIT(4),
 	CMD_MAKE_TRANS_IDLE	= BIT(5),
 	CMD_WAKE_UP_TRANS	= BIT(6),
+	CMD_WANT_ASYNC_CALLBACK	= BIT(7),
 
 	CMD_TB_BITMAP_POS	= 11,
 };
@@ -377,6 +381,11 @@ static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
 
 #define MAX_NO_RECLAIM_CMDS	6
 
+/*
+ * The first entry in driver_data array in ieee80211_tx_info
+ * that can be used by the transport.
+ */
+#define IWL_TRANS_FIRST_DRIVER_DATA 2
 #define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
 
 /*
@@ -439,6 +448,22 @@ iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
 	}
 }
 
+struct iwl_hcmd_names {
+	u8 cmd_id;
+	const char *const cmd_name;
+};
+
+#define HCMD_NAME(x)	\
+	{ .cmd_id = x, .cmd_name = #x }
+
+struct iwl_hcmd_arr {
+	const struct iwl_hcmd_names *arr;
+	int size;
+};
+
+#define HCMD_ARR(x)	\
+	{ .arr = x, .size = ARRAY_SIZE(x) }
+
 /**
  * struct iwl_trans_config - transport configuration
  *
@@ -458,8 +483,10 @@ iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
  *	in DWORD (as opposed to bytes)
  * @scd_set_active: should the transport configure the SCD for HCMD queue
  * @wide_cmd_header: firmware supports wide host command header
- * @command_names: array of command names, must be 256 entries
- *	(one for each command); for debugging only
+ * @sw_csum_tx: transport should compute the TCP checksum
+ * @command_groups: array of command groups, each member is an array of the
+ *	commands in the group; for debugging only
+ * @command_groups_size: number of command groups, to avoid illegal access
  * @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until
  *	we get the ALIVE from the uCode
  */
@@ -476,8 +503,10 @@ struct iwl_trans_config {
 	bool bc_table_dword;
 	bool scd_set_active;
 	bool wide_cmd_header;
-	const char *const *command_names;
-
+	bool sw_csum_tx;
+	const struct iwl_hcmd_arr *command_groups;
+	int command_groups_size;
+
 	u32 sdio_adma_addr;
 };
 
@@ -528,7 +557,11 @@ struct iwl_trans_txq_scd_cfg {
  *	If RFkill is asserted in the middle of a SYNC host command, it must
  *	return -ERFKILL straight away.
  *	May sleep only if CMD_ASYNC is not set
- * @tx: send an skb
+ * @tx: send an skb. The transport relies on the op_mode to zero the
+ *	the ieee80211_tx_info->driver_data. If the MPDU is an A-MSDU, all
+ *	the CSUM will be taken care of (TCP CSUM and IP header in case of
+ *	IPv4). If the MPDU is a single MSDU, the op_mode must compute the IP
+ *	header if it is IPv4.
  *	Must be atomic
  * @reclaim: free packet until ssn. Returns a list of freed packets.
  *	Must be atomic
@@ -542,6 +575,11 @@ struct iwl_trans_txq_scd_cfg {
  * @wait_tx_queue_empty: wait until tx queues are empty. May sleep.
  * @freeze_txq_timer: prevents the timer of the queue from firing until the
  *	queue is set to awake. Must be atomic.
+ * @block_txq_ptrs: stop updating the write pointers of the Tx queues. Note
+ *	that the transport needs to refcount the calls since this function
+ *	will be called several times with block = true, and then the queues
+ *	need to be unblocked only after the same number of calls with
+ *	block = false.
  * @write8: write a u8 to a register at offset ofs from the BAR
  * @write32: write a u32 to a register at offset ofs from the BAR
  * @read32: read a u32 register at offset ofs from the BAR
@@ -600,6 +638,7 @@ struct iwl_trans_ops {
 	int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm);
 	void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
 				 bool freeze);
+	void (*block_txq_ptrs)(struct iwl_trans *trans, bool block);
 
 	void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
 	void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
@@ -613,8 +652,7 @@ struct iwl_trans_ops {
 	void (*configure)(struct iwl_trans *trans,
 			  const struct iwl_trans_config *trans_cfg);
 	void (*set_pmi)(struct iwl_trans *trans, bool state);
-	bool (*grab_nic_access)(struct iwl_trans *trans, bool silent,
-				unsigned long *flags);
+	bool (*grab_nic_access)(struct iwl_trans *trans, unsigned long *flags);
 	void (*release_nic_access)(struct iwl_trans *trans,
 				   unsigned long *flags);
 	void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
@@ -641,18 +679,61 @@ enum iwl_trans_state {
 };
 
 /**
- * enum iwl_d0i3_mode - d0i3 mode
+ * DOC: Platform power management
+ *
+ * There are two types of platform power management: system-wide
+ * (WoWLAN) and runtime.
+ *
+ * In system-wide power management the entire platform goes into a low
+ * power state (e.g. idle or suspend to RAM) at the same time and the
+ * device is configured as a wakeup source for the entire platform.
+ * This is usually triggered by userspace activity (e.g. the user
+ * presses the suspend button or a power management daemon decides to
+ * put the platform in low power mode). The device's behavior in this
+ * mode is dictated by the wake-on-WLAN configuration.
+ *
+ * In runtime power management, only the devices which are themselves
+ * idle enter a low power state. This is done at runtime, which means
+ * that the entire system is still running normally. This mode is
+ * usually triggered automatically by the device driver and requires
+ * the ability to enter and exit the low power modes in a very short
+ * time, so there is not much impact in usability.
+ *
+ * The terms used for the device's behavior are as follows:
+ *
+ *	- D0: the device is fully powered and the host is awake;
+ *	- D3: the device is in low power mode and only reacts to
+ *		specific events (e.g. magic-packet received or scan
+ *		results found);
+ *	- D0I3: the device is in low power mode and reacts to any
+ *		activity (e.g. RX);
+ *
+ * These terms reflect the power modes in the firmware and are not to
+ * be confused with the physical device power state. The NIC can be
+ * in D0I3 mode even if, for instance, the PCI device is in D3 state.
+ */

+
+/**
+ * enum iwl_plat_pm_mode - platform power management mode
  *
- * @IWL_D0I3_MODE_OFF - d0i3 is disabled
- * @IWL_D0I3_MODE_ON_IDLE - enter d0i3 when device is idle
- *	(e.g. no active references)
- * @IWL_D0I3_MODE_ON_SUSPEND - enter d0i3 only on suspend
- *	(in case of 'any' trigger)
+ * This enumeration describes the device's platform power management
+ * behavior when in idle mode (i.e. runtime power management) or when
+ * in system-wide suspend (i.e WoWLAN).
+ *
+ * @IWL_PLAT_PM_MODE_DISABLED: power management is disabled for this
+ *	device. At runtime, this means that nothing happens and the
+ *	device always remains in active. In system-wide suspend mode,
+ *	it means that the all connections will be closed automatically
+ *	by mac80211 before the platform is suspended.
+ * @IWL_PLAT_PM_MODE_D3: the device goes into D3 mode (i.e. WoWLAN).
+ *	For runtime power management, this mode is not officially
+ *	supported.
+ * @IWL_PLAT_PM_MODE_D0I3: the device goes into D0I3 mode.
  */
-enum iwl_d0i3_mode {
-	IWL_D0I3_MODE_OFF = 0,
-	IWL_D0I3_MODE_ON_IDLE,
-	IWL_D0I3_MODE_ON_SUSPEND,
+enum iwl_plat_pm_mode {
+	IWL_PLAT_PM_MODE_DISABLED,
+	IWL_PLAT_PM_MODE_D3,
+	IWL_PLAT_PM_MODE_D0I3,
 };
 
 /**
@@ -692,6 +773,12 @@ enum iwl_d0i3_mode {
  *	the opmode.
  * @paging_download_buf: Buffer used for copying all of the pages before
  *	downloading them to the FW. The buffer is allocated in the opmode
+ * @system_pm_mode: the system-wide power management mode in use.
+ *	This mode is set dynamically, depending on the WoWLAN values
+ *	configured from the userspace at runtime.
+ * @runtime_pm_mode: the runtime power management mode in use. This
+ *	mode is set during the initialization phase and is not
+ *	supposed to change during runtime.
  */
 struct iwl_trans {
 	const struct iwl_trans_ops *ops;
@@ -711,6 +798,9 @@ struct iwl_trans {
 	bool pm_support;
 	bool ltr_enabled;
 
+	const struct iwl_hcmd_arr *command_groups;
+	int command_groups_size;
+
 	u8 num_rx_queues;
 
 	/* The following fields are internal only */
@@ -739,21 +829,24 @@ struct iwl_trans {
 	struct iwl_fw_paging *paging_db;
 	void *paging_download_buf;
 
-	enum iwl_d0i3_mode d0i3_mode;
-
-	bool wowlan_d0i3;
+	enum iwl_plat_pm_mode system_pm_mode;
+	enum iwl_plat_pm_mode runtime_pm_mode;
 
 	/* pointer to trans specific struct */
 	/*Ensure that this pointer will always be aligned to sizeof pointer */
 	char trans_specific[0] __aligned(sizeof(void *));
 };
 
+const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);
+int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans);
+
 static inline void iwl_trans_configure(struct iwl_trans *trans,
 				       const struct iwl_trans_config *trans_cfg)
 {
 	trans->op_mode = trans_cfg->op_mode;
 
 	trans->ops->configure(trans, trans_cfg);
+	WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
 }
 
 static inline int _iwl_trans_start_hw(struct iwl_trans *trans, bool low_power)
@@ -880,34 +973,6 @@ iwl_trans_dump_data(struct iwl_trans *trans,
 	return trans->ops->dump_data(trans, trigger);
 }
 
-static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
-				     struct iwl_host_cmd *cmd)
-{
-	int ret;
-
-	if (unlikely(!(cmd->flags & CMD_SEND_IN_RFKILL) &&
-		     test_bit(STATUS_RFKILL, &trans->status)))
-		return -ERFKILL;
-
-	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
-		return -EIO;
-
-	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) {
-		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
-		return -EIO;
-	}
-
-	if (!(cmd->flags & CMD_ASYNC))
-		lock_map_acquire_read(&trans->sync_cmd_lockdep_map);
-
-	ret = trans->ops->send_cmd(trans, cmd);
-
-	if (!(cmd->flags & CMD_ASYNC))
-		lock_map_release(&trans->sync_cmd_lockdep_map);
-
-	return ret;
-}
-
 static inline struct iwl_device_cmd *
 iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
 {
@@ -920,6 +985,8 @@ iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
 		(dev_cmd_ptr + trans->dev_cmd_headroom);
 }
 
+int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
+
 static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
 					 struct iwl_device_cmd *dev_cmd)
 {
@@ -934,8 +1001,10 @@ static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
 		return -EIO;
 
-	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
+	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
 		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
+		return -EIO;
+	}
 
 	return trans->ops->tx(trans, skb, dev_cmd, queue);
 }
@@ -943,8 +1012,10 @@ static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
 static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
 				     int ssn, struct sk_buff_head *skbs)
 {
-	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
+	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
 		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
+		return;
+	}
 
 	trans->ops->reclaim(trans, queue, ssn, skbs);
 }
@@ -962,8 +1033,10 @@ iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
 {
 	might_sleep();
 
-	if (unlikely((trans->state != IWL_TRANS_FW_ALIVE)))
+	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
 		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
+		return;
+	}
 
 	trans->ops->txq_enable(trans, queue, ssn, cfg, queue_wdg_timeout);
 }
@@ -1003,18 +1076,34 @@ static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
 					      unsigned long txqs,
 					      bool freeze)
 {
-	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
+	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
 		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
+		return;
+	}
 
 	if (trans->ops->freeze_txq_timer)
 		trans->ops->freeze_txq_timer(trans, txqs, freeze);
 }
 
+static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans,
+					    bool block)
+{
+	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
+		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
+		return;
+	}
+
+	if (trans->ops->block_txq_ptrs)
+		trans->ops->block_txq_ptrs(trans, block);
+}
+
 static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans,
 						u32 txqs)
 {
-	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
+	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
 		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
+		return -EIO;
+	}
 
 	return trans->ops->wait_tx_queue_empty(trans, txqs);
 }
@@ -1092,9 +1181,9 @@ iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
 	trans->ops->set_bits_mask(trans, reg, mask, value);
 }
 
-#define iwl_trans_grab_nic_access(trans, silent, flags)	\
+#define iwl_trans_grab_nic_access(trans, flags)	\
 	__cond_lock(nic_access,			\
-		    likely((trans)->ops->grab_nic_access(trans, silent, flags)))
+		    likely((trans)->ops->grab_nic_access(trans, flags)))
 
 static inline void __releases(nic_access)
 iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags)
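
The command_groups plumbing above replaces the old flat command_names array. A minimal sketch of how an op_mode might build the new tables with HCMD_NAME()/HCMD_ARR() and hand them to the transport through iwl_trans_config; the enum values, table names and example_configure_trans() helper are invented for illustration, only the iwl_hcmd_* types, the macros and iwl_trans_configure() come from this header:

/*
 * Illustrative only: the real op_mode (iwl-mvm) defines its own groups
 * and firmware command IDs; these names and values are made up.
 */
enum {
	EXAMPLE_ECHO_CMD	= 0x0,
	EXAMPLE_ALIVE		= 0x1,
	EXAMPLE_SCAN_REQ	= 0x4,
};

/*
 * One name table per command group, sorted by cmd_id:
 * iwl_trans_configure() now WARNs if iwl_cmd_groups_verify_sorted()
 * finds an unsorted table.
 */
static const struct iwl_hcmd_names example_group0_names[] = {
	HCMD_NAME(EXAMPLE_ECHO_CMD),	/* { .cmd_id = 0x0, .cmd_name = "EXAMPLE_ECHO_CMD" } */
	HCMD_NAME(EXAMPLE_ALIVE),
	HCMD_NAME(EXAMPLE_SCAN_REQ),
};

static const struct iwl_hcmd_arr example_cmd_groups[] = {
	[0x0] = HCMD_ARR(example_group0_names),
};

static void example_configure_trans(struct iwl_trans *trans,
				    struct iwl_op_mode *op_mode)
{
	/* other iwl_trans_config fields omitted for brevity */
	struct iwl_trans_config trans_cfg = {
		.op_mode = op_mode,
		.command_groups = example_cmd_groups,
		.command_groups_size = ARRAY_SIZE(example_cmd_groups),
	};

	/* after this, iwl_get_cmd_string() can resolve IDs to names */
	iwl_trans_configure(trans, &trans_cfg);
}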
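iwl_trans_send_cmd() also moves from a static inline to a regular function declared in this header and implemented out of line (the implementation is not part of this diff). A hedged sketch of an async command using the new CMD_WANT_ASYNC_CALLBACK flag; the wrapper, command ID and payload handling are hypothetical, only struct iwl_host_cmd, the CMD_* flags and iwl_trans_send_cmd() come from iwl-trans.h:

static int example_send_async_cmd(struct iwl_trans *trans, u32 cmd_id,
				  const void *payload, u16 len)
{
	struct iwl_host_cmd cmd = {
		.id = cmd_id,
		.data = { payload, },
		.len = { len, },
		/*
		 * CMD_WANT_ASYNC_CALLBACK is only valid together with
		 * CMD_ASYNC: the op_mode's async callback runs once the
		 * command completes.
		 */
		.flags = CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
	};

	return iwl_trans_send_cmd(trans, &cmd);
}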
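The new @block_txq_ptrs op requires the transport to refcount nested block/unblock calls. A sketch of what that bookkeeping could look like on the transport side; the private struct, lock and counter are invented for the example and are not the real iwl-pcie implementation:

/* Invented transport-private state, reached via trans->trans_specific. */
struct example_trans_priv {
	spinlock_t lock;
	int block_cnt;
};

static void example_block_txq_ptrs(struct iwl_trans *trans, bool block)
{
	struct example_trans_priv *priv = (void *)trans->trans_specific;
	bool toggle;

	spin_lock_bh(&priv->lock);
	if (block)
		toggle = (priv->block_cnt++ == 0);	/* first block */
	else
		toggle = (--priv->block_cnt == 0);	/* last unblock */
	spin_unlock_bh(&priv->lock);

	if (toggle) {
		/*
		 * Only the outermost block/unblock touches the hardware:
		 * stop or resume updating the TX queue write pointers here.
		 */
	}
}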
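Finally, the grab_nic_access() op loses its 'silent' argument, so iwl_trans_grab_nic_access() is now called with just the flags pointer. A sketch of the usual acquire/use/release pattern, assuming the unchanged iwl_trans_read32() and iwl_trans_release_nic_access() helpers elsewhere in this header; the wrapper name, register offset and fallback value are illustrative:

static u32 example_read_reg_locked(struct iwl_trans *trans, u32 ofs)
{
	unsigned long flags;
	u32 val = 0;

	/*
	 * 'silent' is gone: callers that used to pass it now decide on
	 * their own how to handle a failed grab (here, by returning 0).
	 */
	if (iwl_trans_grab_nic_access(trans, &flags)) {
		val = iwl_trans_read32(trans, ofs);
		iwl_trans_release_nic_access(trans, &flags);
	}

	return val;
}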