author		Jesse Brandeburg <jesse.brandeburg@intel.com>	2018-09-14 17:37:47 -0700
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2018-09-18 15:17:50 -0700
commit		0b6591e6463bf092080647e989bb541d0787eac6 (patch)
tree		5d2ad30ee7c5d5cfe06d1091eef566b45fd9baba
parent		129cf89e585676ea4cc4c096bad46ad73c46e21f (diff)
iavf: rename i40e_status to iavf_status
This is just a rename of the internal i40e_status type, but it was a
pretty big change and so deserved its own patch.
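As a rough illustration only (a minimal, stand-alone userspace sketch, not
part of the patch), the essence of the change is that the typedef in
i40e_osdep.h keeps pointing at the unchanged enum i40e_status_code but now
under the iavf_status name, and every function that spelled its return type
as i40e_status now spells it iavf_status. The stub function and the error
value below are placeholders invented for the example:

	/* sketch only: enum values other than I40E_SUCCESS are illustrative */
	#include <stdio.h>

	enum i40e_status_code {
		I40E_SUCCESS = 0,
		I40E_ERR_EXAMPLE = 1,	/* placeholder, not a real driver code */
	};

	/* before the patch: typedef enum i40e_status_code i40e_status; */
	typedef enum i40e_status_code iavf_status;

	/* driver signatures change spelling only, e.g. iavf_init_adminq();
	 * this stub stands in for such a function
	 */
	static iavf_status example_init(void)
	{
		return I40E_SUCCESS;
	}

	int main(void)
	{
		iavf_status ret = example_init();

		printf("status = %d\n", ret);
		return ret == I40E_SUCCESS ? 0 : 1;
	}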
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
-rw-r--r--	drivers/net/ethernet/intel/iavf/i40e_adminq.c		81
-rw-r--r--	drivers/net/ethernet/intel/iavf/i40e_alloc.h		18
-rw-r--r--	drivers/net/ethernet/intel/iavf/i40e_common.c		52
-rw-r--r--	drivers/net/ethernet/intel/iavf/i40e_osdep.h		2
-rw-r--r--	drivers/net/ethernet/intel/iavf/i40e_prototype.h	29
-rw-r--r--	drivers/net/ethernet/intel/iavf/i40evf.h		2
-rw-r--r--	drivers/net/ethernet/intel/iavf/i40evf_client.c		6
-rw-r--r--	drivers/net/ethernet/intel/iavf/i40evf_ethtool.c	8
-rw-r--r--	drivers/net/ethernet/intel/iavf/i40evf_main.c		14
-rw-r--r--	drivers/net/ethernet/intel/iavf/i40evf_virtchnl.c	39
10 files changed, 119 insertions(+), 132 deletions(-)
diff --git a/drivers/net/ethernet/intel/iavf/i40e_adminq.c b/drivers/net/ethernet/intel/iavf/i40e_adminq.c index 7b39971743d1..c6da41e65439 100644 --- a/drivers/net/ethernet/intel/iavf/i40e_adminq.c +++ b/drivers/net/ethernet/intel/iavf/i40e_adminq.c @@ -34,9 +34,9 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw) * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings * @hw: pointer to the hardware structure **/ -static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw) +static iavf_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw) { - i40e_status ret_code; + iavf_status ret_code; ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf, i40e_mem_atq_ring, @@ -61,9 +61,9 @@ static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw) * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings * @hw: pointer to the hardware structure **/ -static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw) +static iavf_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw) { - i40e_status ret_code; + iavf_status ret_code; ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf, i40e_mem_arq_ring, @@ -102,11 +102,11 @@ static void i40e_free_adminq_arq(struct i40e_hw *hw) * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue * @hw: pointer to the hardware structure **/ -static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw) +static iavf_status i40e_alloc_arq_bufs(struct i40e_hw *hw) { - i40e_status ret_code; struct i40e_aq_desc *desc; struct i40e_dma_mem *bi; + iavf_status ret_code; int i; /* We'll be allocating the buffer info memory first, then we can @@ -115,7 +115,8 @@ static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw) /* buffer_info structures do not need alignment */ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head, - (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem))); + (hw->aq.num_arq_entries * + sizeof(struct i40e_dma_mem))); if (ret_code) goto alloc_arq_bufs; hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va; @@ -169,15 +170,16 @@ unwind_alloc_arq_bufs: * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue * @hw: pointer to the hardware structure **/ -static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw) +static iavf_status i40e_alloc_asq_bufs(struct i40e_hw *hw) { - i40e_status ret_code; struct i40e_dma_mem *bi; + iavf_status ret_code; int i; /* No mapped memory needed yet, just the buffer info structures */ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head, - (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem))); + (hw->aq.num_asq_entries * + sizeof(struct i40e_dma_mem))); if (ret_code) goto alloc_asq_bufs; hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va; @@ -253,9 +255,9 @@ static void i40e_free_asq_bufs(struct i40e_hw *hw) * * Configure base address and length registers for the transmit queue **/ -static i40e_status i40e_config_asq_regs(struct i40e_hw *hw) +static iavf_status i40e_config_asq_regs(struct i40e_hw *hw) { - i40e_status ret_code = 0; + iavf_status ret_code = 0; u32 reg = 0; /* Clear Head and Tail */ @@ -282,9 +284,9 @@ static i40e_status i40e_config_asq_regs(struct i40e_hw *hw) * * Configure base address and length registers for the receive (event queue) **/ -static i40e_status i40e_config_arq_regs(struct i40e_hw *hw) +static iavf_status i40e_config_arq_regs(struct i40e_hw *hw) { - i40e_status ret_code = 0; + iavf_status ret_code = 0; u32 reg = 0; /* Clear Head and Tail */ @@ -321,9 +323,9 @@ static i40e_status 
i40e_config_arq_regs(struct i40e_hw *hw) * Do *NOT* hold the lock when calling this as the memory allocation routines * called are not going to be atomic context safe **/ -static i40e_status i40e_init_asq(struct i40e_hw *hw) +static iavf_status i40e_init_asq(struct i40e_hw *hw) { - i40e_status ret_code = 0; + iavf_status ret_code = 0; if (hw->aq.asq.count > 0) { /* queue already initialized */ @@ -380,9 +382,9 @@ init_adminq_exit: * Do *NOT* hold the lock when calling this as the memory allocation routines * called are not going to be atomic context safe **/ -static i40e_status i40e_init_arq(struct i40e_hw *hw) +static iavf_status i40e_init_arq(struct i40e_hw *hw) { - i40e_status ret_code = 0; + iavf_status ret_code = 0; if (hw->aq.arq.count > 0) { /* queue already initialized */ @@ -432,9 +434,9 @@ init_adminq_exit: * * The main shutdown routine for the Admin Send Queue **/ -static i40e_status i40e_shutdown_asq(struct i40e_hw *hw) +static iavf_status i40e_shutdown_asq(struct i40e_hw *hw) { - i40e_status ret_code = 0; + iavf_status ret_code = 0; mutex_lock(&hw->aq.asq_mutex); @@ -466,9 +468,9 @@ shutdown_asq_out: * * The main shutdown routine for the Admin Receive Queue **/ -static i40e_status i40e_shutdown_arq(struct i40e_hw *hw) +static iavf_status i40e_shutdown_arq(struct i40e_hw *hw) { - i40e_status ret_code = 0; + iavf_status ret_code = 0; mutex_lock(&hw->aq.arq_mutex); @@ -505,9 +507,9 @@ shutdown_arq_out: * - hw->aq.arq_buf_size * - hw->aq.asq_buf_size **/ -i40e_status iavf_init_adminq(struct i40e_hw *hw) +iavf_status iavf_init_adminq(struct i40e_hw *hw) { - i40e_status ret_code; + iavf_status ret_code; /* verify input for valid configuration */ if ((hw->aq.num_arq_entries == 0) || @@ -549,9 +551,9 @@ init_adminq_exit: * iavf_shutdown_adminq - shutdown routine for the Admin Queue * @hw: pointer to the hardware structure **/ -i40e_status iavf_shutdown_adminq(struct i40e_hw *hw) +iavf_status iavf_shutdown_adminq(struct i40e_hw *hw) { - i40e_status ret_code = 0; + iavf_status ret_code = 0; if (iavf_check_asq_alive(hw)) iavf_aq_queue_shutdown(hw, true); @@ -570,7 +572,7 @@ i40e_status iavf_shutdown_adminq(struct i40e_hw *hw) **/ static u16 i40e_clean_asq(struct i40e_hw *hw) { - struct i40e_adminq_ring *asq = &(hw->aq.asq); + struct i40e_adminq_ring *asq = &hw->aq.asq; struct i40e_asq_cmd_details *details; u16 ntc = asq->next_to_clean; struct i40e_aq_desc desc_cb; @@ -616,7 +618,6 @@ bool iavf_asq_done(struct i40e_hw *hw) * timing reliability than DD bit */ return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use; - } /** @@ -630,13 +631,13 @@ bool iavf_asq_done(struct i40e_hw *hw) * This is the main send command driver routine for the Admin Queue send * queue. 
It runs the queue, cleans the queue, etc **/ -i40e_status iavf_asq_send_command(struct i40e_hw *hw, +iavf_status iavf_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc, void *buff, /* can be NULL */ u16 buff_size, struct i40e_asq_cmd_details *cmd_details) { - i40e_status status = 0; + iavf_status status = 0; struct i40e_dma_mem *dma_buff = NULL; struct i40e_asq_cmd_details *details; struct i40e_aq_desc *desc_on_ring; @@ -724,8 +725,8 @@ i40e_status iavf_asq_send_command(struct i40e_hw *hw, *desc_on_ring = *desc; /* if buff is not NULL assume indirect command */ - if (buff != NULL) { - dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]); + if (buff) { + dma_buff = &hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]; /* copy the user buff into the respective DMA buff */ memcpy(dma_buff->va, buff, buff_size); desc_on_ring->datalen = cpu_to_le16(buff_size); @@ -769,7 +770,7 @@ i40e_status iavf_asq_send_command(struct i40e_hw *hw, /* if ready, copy the desc back to temp */ if (iavf_asq_done(hw)) { *desc = *desc_on_ring; - if (buff != NULL) + if (buff) memcpy(buff, dma_buff->va, buff_size); retval = le16_to_cpu(desc->retval); if (retval != 0) { @@ -793,8 +794,7 @@ i40e_status iavf_asq_send_command(struct i40e_hw *hw, i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer writeback:\n"); - iavf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, - buff_size); + iavf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size); /* save writeback aq if requested */ if (details->wb_desc) @@ -826,8 +826,7 @@ asq_send_command_error: * * Fill the desc with default values **/ -void iavf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, - u16 opcode) +void iavf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode) { /* zero out the desc */ memset((void *)desc, 0, sizeof(struct i40e_aq_desc)); @@ -845,13 +844,13 @@ void iavf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, * the contents through e. It can also return how many events are * left to process through 'pending' **/ -i40e_status iavf_clean_arq_element(struct i40e_hw *hw, +iavf_status iavf_clean_arq_element(struct i40e_hw *hw, struct i40e_arq_event_info *e, u16 *pending) { - i40e_status ret_code = 0; u16 ntc = hw->aq.arq.next_to_clean; struct i40e_aq_desc *desc; + iavf_status ret_code = 0; struct i40e_dma_mem *bi; u16 desc_idx; u16 datalen; @@ -897,7 +896,7 @@ i40e_status iavf_clean_arq_element(struct i40e_hw *hw, e->desc = *desc; datalen = le16_to_cpu(desc->datalen); e->msg_len = min(datalen, e->buf_len); - if (e->msg_buf != NULL && (e->msg_len != 0)) + if (e->msg_buf && (e->msg_len != 0)) memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va, e->msg_len); @@ -930,7 +929,7 @@ i40e_status iavf_clean_arq_element(struct i40e_hw *hw, clean_arq_element_out: /* Set pending if needed, unlock and return */ - if (pending != NULL) + if (pending) *pending = (ntc > ntu ? 
hw->aq.arq.count : 0) + (ntu - ntc); clean_arq_element_err: diff --git a/drivers/net/ethernet/intel/iavf/i40e_alloc.h b/drivers/net/ethernet/intel/iavf/i40e_alloc.h index cb8689222c8b..90c6988d1c8f 100644 --- a/drivers/net/ethernet/intel/iavf/i40e_alloc.h +++ b/drivers/net/ethernet/intel/iavf/i40e_alloc.h @@ -20,16 +20,12 @@ enum i40e_memory_type { }; /* prototype for functions used for dynamic memory allocation */ -i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw, - struct i40e_dma_mem *mem, - enum i40e_memory_type type, - u64 size, u32 alignment); -i40e_status i40e_free_dma_mem(struct i40e_hw *hw, - struct i40e_dma_mem *mem); -i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw, - struct i40e_virt_mem *mem, - u32 size); -i40e_status i40e_free_virt_mem(struct i40e_hw *hw, - struct i40e_virt_mem *mem); +iavf_status i40e_allocate_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem, + enum i40e_memory_type type, + u64 size, u32 alignment); +iavf_status i40e_free_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem); +iavf_status i40e_allocate_virt_mem(struct i40e_hw *hw, + struct i40e_virt_mem *mem, u32 size); +iavf_status i40e_free_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem); #endif /* _I40E_ALLOC_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/i40e_common.c b/drivers/net/ethernet/intel/iavf/i40e_common.c index cf2db9f8270c..96133efddf72 100644 --- a/drivers/net/ethernet/intel/iavf/i40e_common.c +++ b/drivers/net/ethernet/intel/iavf/i40e_common.c @@ -13,9 +13,9 @@ * This function sets the mac type of the adapter based on the * vendor ID and device ID stored in the hw structure. **/ -i40e_status i40e_set_mac_type(struct i40e_hw *hw) +iavf_status i40e_set_mac_type(struct i40e_hw *hw) { - i40e_status status = 0; + iavf_status status = 0; if (hw->vendor_id == PCI_VENDOR_ID_INTEL) { switch (hw->device_id) { @@ -56,8 +56,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw) status = I40E_ERR_DEVICE_NOT_SUPPORTED; } - hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n", - hw->mac.type, status); + hw_dbg(hw, "found mac: %d, returns: %d\n", hw->mac.type, status); return status; } @@ -126,7 +125,7 @@ const char *iavf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err) * @hw: pointer to the HW structure * @stat_err: the status error code to convert **/ -const char *iavf_stat_str(struct i40e_hw *hw, i40e_status stat_err) +const char *iavf_stat_str(struct i40e_hw *hw, iavf_status stat_err) { switch (stat_err) { case 0: @@ -285,7 +284,7 @@ void iavf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; u8 *buf = (u8 *)buffer; - if ((!(mask & hw->debug_mask)) || (desc == NULL)) + if ((!(mask & hw->debug_mask)) || !desc) return; i40e_debug(hw, mask, @@ -304,7 +303,7 @@ void iavf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, le32_to_cpu(aq_desc->params.external.addr_high), le32_to_cpu(aq_desc->params.external.addr_low)); - if ((buffer != NULL) && (aq_desc->datalen != 0)) { + if (buffer && aq_desc->datalen) { u16 len = le16_to_cpu(aq_desc->datalen); i40e_debug(hw, mask, "AQ CMD Buffer:\n"); @@ -349,16 +348,14 @@ bool iavf_check_asq_alive(struct i40e_hw *hw) * Tell the Firmware that we're shutting down the AdminQ and whether * or not the driver is unloading as well. 
**/ -i40e_status iavf_aq_queue_shutdown(struct i40e_hw *hw, - bool unloading) +iavf_status iavf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading) { struct i40e_aq_desc desc; struct i40e_aqc_queue_shutdown *cmd = (struct i40e_aqc_queue_shutdown *)&desc.params.raw; - i40e_status status; + iavf_status status; - iavf_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_queue_shutdown); + iavf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown); if (unloading) cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING); @@ -378,12 +375,12 @@ i40e_status iavf_aq_queue_shutdown(struct i40e_hw *hw, * * Internal function to get or set RSS look up table **/ -static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw, +static iavf_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw, u16 vsi_id, bool pf_lut, u8 *lut, u16 lut_size, bool set) { - i40e_status status; + iavf_status status; struct i40e_aq_desc desc; struct i40e_aqc_get_set_rss_lut *cmd_resp = (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw; @@ -431,7 +428,7 @@ static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw, * * get the RSS lookup table, PF or VSI type **/ -i40e_status iavf_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id, +iavf_status iavf_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id, bool pf_lut, u8 *lut, u16 lut_size) { return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, @@ -448,7 +445,7 @@ i40e_status iavf_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id, * * set the RSS lookup table, PF or VSI type **/ -i40e_status iavf_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id, +iavf_status iavf_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id, bool pf_lut, u8 *lut, u16 lut_size) { return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true); @@ -463,11 +460,12 @@ i40e_status iavf_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id, * * get the RSS key per VSI **/ -static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw, u16 vsi_id, - struct i40e_aqc_get_set_rss_key_data *key, - bool set) +static +iavf_status i40e_aq_get_set_rss_key(struct i40e_hw *hw, u16 vsi_id, + struct i40e_aqc_get_set_rss_key_data *key, + bool set) { - i40e_status status; + iavf_status status; struct i40e_aq_desc desc; struct i40e_aqc_get_set_rss_key *cmd_resp = (struct i40e_aqc_get_set_rss_key *)&desc.params.raw; @@ -502,7 +500,7 @@ static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw, u16 vsi_id, * @key: pointer to key info struct * **/ -i40e_status iavf_aq_get_rss_key(struct i40e_hw *hw, u16 vsi_id, +iavf_status iavf_aq_get_rss_key(struct i40e_hw *hw, u16 vsi_id, struct i40e_aqc_get_set_rss_key_data *key) { return i40e_aq_get_set_rss_key(hw, vsi_id, key, false); @@ -516,7 +514,7 @@ i40e_status iavf_aq_get_rss_key(struct i40e_hw *hw, u16 vsi_id, * * set the RSS key per VSI **/ -i40e_status iavf_aq_set_rss_key(struct i40e_hw *hw, u16 vsi_id, +iavf_status iavf_aq_set_rss_key(struct i40e_hw *hw, u16 vsi_id, struct i40e_aqc_get_set_rss_key_data *key) { return i40e_aq_get_set_rss_key(hw, vsi_id, key, true); @@ -900,14 +898,14 @@ struct i40e_rx_ptype_decoded iavf_ptype_lookup[] = { * is sent asynchronously, i.e. iavf_asq_send_command() does not wait for * completion before returning. 
**/ -i40e_status iavf_aq_send_msg_to_pf(struct i40e_hw *hw, +iavf_status iavf_aq_send_msg_to_pf(struct i40e_hw *hw, enum virtchnl_ops v_opcode, - i40e_status v_retval, u8 *msg, u16 msglen, + iavf_status v_retval, u8 *msg, u16 msglen, struct i40e_asq_cmd_details *cmd_details) { - struct i40e_aq_desc desc; struct i40e_asq_cmd_details details; - i40e_status status; + struct i40e_aq_desc desc; + iavf_status status; iavf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf); desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI); @@ -971,7 +969,7 @@ void iavf_vf_parse_hw_config(struct i40e_hw *hw, * as none will be forthcoming. Immediately after calling this function, * the admin queue should be shut down and (optionally) reinitialized. **/ -i40e_status iavf_vf_reset(struct i40e_hw *hw) +iavf_status iavf_vf_reset(struct i40e_hw *hw) { return iavf_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF, 0, NULL, 0, NULL); diff --git a/drivers/net/ethernet/intel/iavf/i40e_osdep.h b/drivers/net/ethernet/intel/iavf/i40e_osdep.h index 01e6babbdcd3..788a599dc26b 100644 --- a/drivers/net/ethernet/intel/iavf/i40e_osdep.h +++ b/drivers/net/ethernet/intel/iavf/i40e_osdep.h @@ -48,5 +48,5 @@ struct i40e_virt_mem { extern void iavf_debug_d(void *hw, u32 mask, char *fmt_str, ...) __attribute__ ((format(gnu_printf, 3, 4))); -typedef enum i40e_status_code i40e_status; +typedef enum i40e_status_code iavf_status; #endif /* _I40E_OSDEP_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/i40e_prototype.h b/drivers/net/ethernet/intel/iavf/i40e_prototype.h index 7fa574558e43..3f1ec69acf9b 100644 --- a/drivers/net/ethernet/intel/iavf/i40e_prototype.h +++ b/drivers/net/ethernet/intel/iavf/i40e_prototype.h @@ -16,14 +16,13 @@ */ /* adminq functions */ -i40e_status iavf_init_adminq(struct i40e_hw *hw); -i40e_status iavf_shutdown_adminq(struct i40e_hw *hw); +iavf_status iavf_init_adminq(struct i40e_hw *hw); +iavf_status iavf_shutdown_adminq(struct i40e_hw *hw); void i40e_adminq_init_ring_data(struct i40e_hw *hw); -i40e_status iavf_clean_arq_element(struct i40e_hw *hw, +iavf_status iavf_clean_arq_element(struct i40e_hw *hw, struct i40e_arq_event_info *e, u16 *events_pending); -i40e_status iavf_asq_send_command(struct i40e_hw *hw, - struct i40e_aq_desc *desc, +iavf_status iavf_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc, void *buff, /* can be NULL */ u16 buff_size, struct i40e_asq_cmd_details *cmd_details); @@ -36,20 +35,20 @@ void iavf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void i40e_idle_aq(struct i40e_hw *hw); void iavf_resume_aq(struct i40e_hw *hw); bool iavf_check_asq_alive(struct i40e_hw *hw); -i40e_status iavf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading); +iavf_status iavf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading); const char *iavf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err); -const char *iavf_stat_str(struct i40e_hw *hw, i40e_status stat_err); +const char *iavf_stat_str(struct i40e_hw *hw, iavf_status stat_err); -i40e_status iavf_aq_get_rss_lut(struct i40e_hw *hw, u16 seid, +iavf_status iavf_aq_get_rss_lut(struct i40e_hw *hw, u16 seid, bool pf_lut, u8 *lut, u16 lut_size); -i40e_status iavf_aq_set_rss_lut(struct i40e_hw *hw, u16 seid, +iavf_status iavf_aq_set_rss_lut(struct i40e_hw *hw, u16 seid, bool pf_lut, u8 *lut, u16 lut_size); -i40e_status iavf_aq_get_rss_key(struct i40e_hw *hw, u16 seid, +iavf_status iavf_aq_get_rss_key(struct i40e_hw *hw, u16 seid, struct i40e_aqc_get_set_rss_key_data *key); -i40e_status iavf_aq_set_rss_key(struct i40e_hw *hw, 
u16 seid, +iavf_status iavf_aq_set_rss_key(struct i40e_hw *hw, u16 seid, struct i40e_aqc_get_set_rss_key_data *key); -i40e_status i40e_set_mac_type(struct i40e_hw *hw); +iavf_status i40e_set_mac_type(struct i40e_hw *hw); extern struct i40e_rx_ptype_decoded iavf_ptype_lookup[]; @@ -61,9 +60,9 @@ static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) /* i40e_common for VF drivers*/ void iavf_vf_parse_hw_config(struct i40e_hw *hw, struct virtchnl_vf_resource *msg); -i40e_status iavf_vf_reset(struct i40e_hw *hw); -i40e_status iavf_aq_send_msg_to_pf(struct i40e_hw *hw, +iavf_status iavf_vf_reset(struct i40e_hw *hw); +iavf_status iavf_aq_send_msg_to_pf(struct i40e_hw *hw, enum virtchnl_ops v_opcode, - i40e_status v_retval, u8 *msg, u16 msglen, + iavf_status v_retval, u8 *msg, u16 msglen, struct i40e_asq_cmd_details *cmd_details); #endif /* _I40E_PROTOTYPE_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/i40evf.h b/drivers/net/ethernet/intel/iavf/i40evf.h index c7df8d54fff7..3be326499b8c 100644 --- a/drivers/net/ethernet/intel/iavf/i40evf.h +++ b/drivers/net/ethernet/intel/iavf/i40evf.h @@ -411,7 +411,7 @@ void iavf_enable_vlan_stripping(struct iavf_adapter *adapter); void iavf_disable_vlan_stripping(struct iavf_adapter *adapter); void iavf_virtchnl_completion(struct iavf_adapter *adapter, enum virtchnl_ops v_opcode, - i40e_status v_retval, u8 *msg, u16 msglen); + iavf_status v_retval, u8 *msg, u16 msglen); int iavf_config_rss(struct iavf_adapter *adapter); int iavf_lan_add_device(struct iavf_adapter *adapter); int iavf_lan_del_device(struct iavf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/iavf/i40evf_client.c b/drivers/net/ethernet/intel/iavf/i40evf_client.c index 49214797acaa..4c3e9b5d547b 100644 --- a/drivers/net/ethernet/intel/iavf/i40evf_client.c +++ b/drivers/net/ethernet/intel/iavf/i40evf_client.c @@ -135,7 +135,7 @@ void iavf_notify_client_open(struct i40e_vsi *vsi) static int iavf_client_release_qvlist(struct i40e_info *ldev) { struct iavf_adapter *adapter = ldev->vf; - i40e_status err; + iavf_status err; if (adapter->aq_required) return -EAGAIN; @@ -420,7 +420,7 @@ static u32 iavf_client_virtchnl_send(struct i40e_info *ldev, u8 *msg, u16 len) { struct iavf_adapter *adapter = ldev->vf; - i40e_status err; + iavf_status err; if (adapter->aq_required) return -EAGAIN; @@ -449,7 +449,7 @@ static int iavf_client_setup_qvlist(struct i40e_info *ldev, struct virtchnl_iwarp_qvlist_info *v_qvlist_info; struct iavf_adapter *adapter = ldev->vf; struct i40e_qv_info *qv_info; - i40e_status err; + iavf_status err; u32 v_idx, i; u32 msg_size; diff --git a/drivers/net/ethernet/intel/iavf/i40evf_ethtool.c b/drivers/net/ethernet/intel/iavf/i40evf_ethtool.c index ab95059804c4..b8b2db7ee8b8 100644 --- a/drivers/net/ethernet/intel/iavf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/i40evf_ethtool.c @@ -770,10 +770,7 @@ static int __iavf_set_coalesce(struct net_device *netdev, (ec->rx_coalesce_usecs > I40E_MAX_ITR)) { netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n"); return -EINVAL; - } - - else - if (ec->tx_coalesce_usecs == 0) { + } else if (ec->tx_coalesce_usecs == 0) { if (ec->use_adaptive_tx_coalesce) netif_info(adapter, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n"); } else if ((ec->tx_coalesce_usecs < I40E_MIN_ITR) || @@ -996,9 +993,8 @@ static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir, if (!indir) return 0; - if (key) { + if (key) memcpy(adapter->rss_key, key, 
adapter->rss_key_size); - } /* Each 32 bits pointed by 'indir' is stored with a lut entry */ for (i = 0; i < adapter->rss_lut_size; i++) diff --git a/drivers/net/ethernet/intel/iavf/i40evf_main.c b/drivers/net/ethernet/intel/iavf/i40evf_main.c index 69c0ac7650d8..46bdc59b9bed 100644 --- a/drivers/net/ethernet/intel/iavf/i40evf_main.c +++ b/drivers/net/ethernet/intel/iavf/i40evf_main.c @@ -66,7 +66,7 @@ static struct workqueue_struct *iavf_wq; * @size: size of memory requested * @alignment: what to align the allocation to **/ -i40e_status iavf_allocate_dma_mem_d(struct i40e_hw *hw, +iavf_status iavf_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem, u64 size, u32 alignment) { @@ -89,7 +89,7 @@ i40e_status iavf_allocate_dma_mem_d(struct i40e_hw *hw, * @hw: pointer to the HW structure * @mem: ptr to mem struct to free **/ -i40e_status iavf_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem) +iavf_status iavf_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem) { struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back; @@ -106,7 +106,7 @@ i40e_status iavf_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem) * @mem: ptr to mem struct to fill out * @size: size of memory requested **/ -i40e_status iavf_allocate_virt_mem_d(struct i40e_hw *hw, +iavf_status iavf_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem, u32 size) { if (!mem) @@ -126,7 +126,7 @@ i40e_status iavf_allocate_virt_mem_d(struct i40e_hw *hw, * @hw: pointer to the HW structure * @mem: ptr to mem struct to free **/ -i40e_status iavf_free_virt_mem_d(struct i40e_hw *hw, +iavf_status iavf_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem) { if (!mem) @@ -469,6 +469,7 @@ iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename) for (vector = 0; vector < q_vectors; vector++) { struct i40e_q_vector *q_vector = &adapter->q_vectors[vector]; + irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; if (q_vector->tx.ring && q_vector->rx.ring) { @@ -1427,6 +1428,7 @@ static void iavf_free_q_vectors(struct iavf_adapter *adapter) for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { struct i40e_q_vector *q_vector = &adapter->q_vectors[q_idx]; + if (q_idx < napi_vectors) netif_napi_del(&q_vector->napi); } @@ -2048,7 +2050,7 @@ static void iavf_adminq_task(struct work_struct *work) struct i40e_hw *hw = &adapter->hw; struct i40e_arq_event_info event; enum virtchnl_ops v_op; - i40e_status ret, v_ret; + iavf_status ret, v_ret; u32 val, oldval; u16 pending; @@ -2063,7 +2065,7 @@ static void iavf_adminq_task(struct work_struct *work) do { ret = iavf_clean_arq_element(hw, &event, &pending); v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); - v_ret = (i40e_status)le32_to_cpu(event.desc.cookie_low); + v_ret = (iavf_status)le32_to_cpu(event.desc.cookie_low); if (ret || !v_op) break; /* No event to process or error cleaning ARQ */ diff --git a/drivers/net/ethernet/intel/iavf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/iavf/i40evf_virtchnl.c index aa8b2badbb52..a1fa7a22c19a 100644 --- a/drivers/net/ethernet/intel/iavf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/i40evf_virtchnl.c @@ -22,7 +22,7 @@ static int iavf_send_pf_msg(struct iavf_adapter *adapter, enum virtchnl_ops op, u8 *msg, u16 len) { struct i40e_hw *hw = &adapter->hw; - i40e_status err; + iavf_status err; if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) return 0; /* nothing to see here, move along */ @@ -69,7 +69,7 @@ int iavf_verify_api_ver(struct iavf_adapter *adapter) struct i40e_hw 
*hw = &adapter->hw; struct i40e_arq_event_info event; enum virtchnl_ops op; - i40e_status err; + iavf_status err; event.buf_len = IAVF_MAX_AQ_BUF_SIZE; event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); @@ -92,7 +92,7 @@ int iavf_verify_api_ver(struct iavf_adapter *adapter) } - err = (i40e_status)le32_to_cpu(event.desc.cookie_low); + err = (iavf_status)le32_to_cpu(event.desc.cookie_low); if (err) goto out_alloc; @@ -144,13 +144,11 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter) adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES; adapter->aq_required &= ~IAVF_FLAG_AQ_GET_CONFIG; if (PF_IS_V11(adapter)) - return iavf_send_pf_msg(adapter, - VIRTCHNL_OP_GET_VF_RESOURCES, - (u8 *)&caps, sizeof(caps)); + return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES, + (u8 *)&caps, sizeof(caps)); else - return iavf_send_pf_msg(adapter, - VIRTCHNL_OP_GET_VF_RESOURCES, - NULL, 0); + return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES, + NULL, 0); } /** @@ -193,7 +191,7 @@ int iavf_get_vf_config(struct iavf_adapter *adapter) struct i40e_hw *hw = &adapter->hw; struct i40e_arq_event_info event; enum virtchnl_ops op; - i40e_status err; + iavf_status err; u16 len; len = sizeof(struct virtchnl_vf_resource) + @@ -218,7 +216,7 @@ int iavf_get_vf_config(struct iavf_adapter *adapter) break; } - err = (i40e_status)le32_to_cpu(event.desc.cookie_low); + err = (iavf_status)le32_to_cpu(event.desc.cookie_low); memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len)); /* some PFs send more queues than we should have so validate that @@ -493,8 +491,7 @@ void iavf_add_ether_addrs(struct iavf_adapter *adapter) spin_unlock_bh(&adapter->mac_vlan_list_lock); - iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, - (u8 *)veal, len); + iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)veal, len); kfree(veal); } @@ -565,8 +562,7 @@ void iavf_del_ether_addrs(struct iavf_adapter *adapter) spin_unlock_bh(&adapter->mac_vlan_list_lock); - iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, - (u8 *)veal, len); + iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)veal, len); kfree(veal); } @@ -776,8 +772,8 @@ void iavf_request_stats(struct iavf_adapter *adapter) adapter->current_op = VIRTCHNL_OP_GET_STATS; vqs.vsi_id = adapter->vsi_res->vsi_id; /* queue maps are ignored for this message - only the vsi is used */ - if (iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS, - (u8 *)&vqs, sizeof(vqs))) + if (iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS, (u8 *)&vqs, + sizeof(vqs))) /* if the request failed, don't lock out others */ adapter->current_op = VIRTCHNL_OP_UNKNOWN; } @@ -1188,8 +1184,7 @@ void iavf_request_reset(struct iavf_adapter *adapter) * This function handles the reply messages. 
**/ void iavf_virtchnl_completion(struct iavf_adapter *adapter, - enum virtchnl_ops v_opcode, - i40e_status v_retval, + enum virtchnl_ops v_opcode, iavf_status v_retval, u8 *msg, u16 msglen) { struct net_device *netdev = adapter->netdev; @@ -1198,6 +1193,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, struct virtchnl_pf_event *vpe = (struct virtchnl_pf_event *)msg; bool link_up = vpe->event_data.link_event.link_status; + switch (vpe->event) { case VIRTCHNL_EVENT_LINK_CHANGE: adapter->link_speed = @@ -1323,8 +1319,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, break; default: dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n", - v_retval, - iavf_stat_str(&adapter->hw, v_retval), + v_retval, iavf_stat_str(&adapter->hw, v_retval), v_opcode); } } @@ -1402,6 +1397,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, break; case VIRTCHNL_OP_GET_RSS_HENA_CAPS: { struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg; + if (msglen == sizeof(*vrh)) adapter->hena = vrh->hena; else @@ -1412,6 +1408,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, case VIRTCHNL_OP_REQUEST_QUEUES: { struct virtchnl_vf_res_request *vfres = (struct virtchnl_vf_res_request *)msg; + if (vfres->num_queue_pairs != adapter->num_req_queues) { dev_info(&adapter->pdev->dev, "Requested %d queues, PF can support %d\n", |