author     Giovanni Cabiddu <giovanni.cabiddu@intel.com>  2020-10-12 21:38:34 +0100
committer  Herbert Xu <herbert@gondor.apana.org.au>       2020-10-30 17:34:54 +1100
commit     070a34c9dfa3eeb0b18d63c6fec5dfe7861a43ac (patch)
tree       2d7d3a16cea4902a731ca8a48bef3bddb818444d /drivers/crypto
parent     dbf568755e7781a4ce8bd3c587c2c388600ae661 (diff)
crypto: qat - enable ring after pair is programmed
Enable arbitration on the TX ring only after the RX ring is programmed.

Before this change, arbitration was enabled on the TX ring before the RX ring
was programmed, allowing the HW to process a request before the ring pair was
configured. With this change, the arbitration logic is programmed only if the
TX half of the ring mask matches the RX half.

This change does not affect QAT GEN2 devices (c62x, c3xxx and dh895xcc), but
it is a must for QAT GEN4 devices, since the CSRs of the ring pair are locked
after arbitration is enabled on the TX ring.

Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Reviewed-by: Wojciech Ziemba <wojciech.ziemba@intel.com>
Reviewed-by: Maksim Lukoshkov <maksim.lukoshkov@intel.com>
Reviewed-by: Fiona Trahe <fiona.trahe@intel.com>
Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
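For illustration, a minimal standalone C sketch of the mask arithmetic
introduced in adf_update_ring_arb() follows. The mask values below
(tx_rings_mask, tx_rx_gap, ring_mask) are hypothetical stand-ins for the
per-device values normally held in adf_hw_device_data, chosen only to show
how the TX and RX halves of the ring mask are matched.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical layout: rings 0-7 are TX, RX partner = TX ring + 8 */
	uint32_t tx_rings_mask = 0x000000FF;
	uint32_t tx_rx_gap = 8;
	/* Bank state: TX ring 0 and its RX partner (ring 8) are programmed */
	uint32_t ring_mask = 0x00000101;

	uint32_t rx_rings_mask = tx_rings_mask << tx_rx_gap;
	uint32_t arben_tx = (ring_mask & tx_rings_mask) >> 0; /* ">> 0" kept for symmetry, as in the patch */
	uint32_t arben_rx = (ring_mask & rx_rings_mask) >> tx_rx_gap;
	uint32_t arben = arben_tx & arben_rx;

	/* Prints arben = 0x1: only the fully programmed pair gets arbitration */
	printf("arben = 0x%x\n", arben);
	return 0;
}

Dropping ring 8 from ring_mask (i.e. ring_mask = 0x00000001) makes arben_rx
zero, so arben is 0 and arbitration stays disabled until the RX half of the
pair is programmed as well.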
Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/qat/qat_common/adf_hw_arbiter.c  20
1 file changed, 19 insertions(+), 1 deletion(-)
diff --git a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
index 9dc9d58f6093..bd03c8f54eb4 100644
--- a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
+++ b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
@@ -55,9 +55,27 @@ EXPORT_SYMBOL_GPL(adf_init_arb);
 
 void adf_update_ring_arb(struct adf_etr_ring_data *ring)
 {
+	struct adf_accel_dev *accel_dev = ring->bank->accel_dev;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	u32 tx_ring_mask = hw_data->tx_rings_mask;
+	u32 shift = hw_data->tx_rx_gap;
+	u32 arben, arben_tx, arben_rx;
+	u32 rx_ring_mask;
+
+	/*
+	 * Enable arbitration on a ring only if the TX half of the ring mask
+	 * matches the RX part. This results in writes to CSR on both TX and
+	 * RX update - only one is necessary, but both are done for
+	 * simplicity.
+	 */
+	rx_ring_mask = tx_ring_mask << shift;
+	arben_tx = (ring->bank->ring_mask & tx_ring_mask) >> 0;
+	arben_rx = (ring->bank->ring_mask & rx_ring_mask) >> shift;
+	arben = arben_tx & arben_rx;
+
 	WRITE_CSR_ARB_RINGSRVARBEN(ring->bank->csr_addr,
 				   ring->bank->bank_number,
-				   ring->bank->ring_mask & 0xFF);
+				   arben);
 }
 
 void adf_exit_arb(struct adf_accel_dev *accel_dev)