author	Thomas Gleixner	2021-12-06 23:51:52 +0100
committer	Thomas Gleixner	2021-12-16 22:22:20 +0100
commit	cd6cf06590b9792340dceaa285138777f3cc4d90 (patch)
tree	7012825c2c5947ddc13516012f5d4bfb77f4487a /kernel/irq
parent	bf5e758f02fc739589dcc6a3395c3a3eb77b5c90 (diff)
genirq/msi: Convert storage to xarray
The current linked list storage for MSI descriptors is suboptimal in several ways:

 1) Looking up an MSI descriptor requires an O(n) list walk in the worst case.

 2) The upcoming support of runtime expansion of MSI-X vectors would need to do a full list walk to figure out whether a particular index is already associated.

 3) Runtime expansion of sparse allocations is even more complex as the current implementation assumes an ordered list (increasing MSI index).

Use an xarray, which solves all of the above problems nicely.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Michael Kelley <mikelley@microsoft.com>
Tested-by: Nishanth Menon <nm@ti.com>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/20211206210749.280627070@linutronix.de
Diffstat (limited to 'kernel/irq')
-rw-r--r--	kernel/irq/msi.c	169
1 file changed, 77 insertions(+), 92 deletions(-)
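For readers unfamiliar with the xarray API the patch adopts, here is a minimal sketch of the calls involved (kernel context assumed; the names store, store_desc, lookup_desc and remove_desc_range are illustrative, not part of the patch):

#include <linux/gfp.h>
#include <linux/xarray.h>

/* Sketch of the storage pattern: entries live at their MSI index. */
static DEFINE_XARRAY(store);

static int store_desc(unsigned long index, void *desc)
{
	/*
	 * xa_insert() fails with -EBUSY if the index is already occupied,
	 * which directly answers "is this index already associated?".
	 */
	return xa_insert(&store, index, desc, GFP_KERNEL);
}

static void *lookup_desc(unsigned long index)
{
	/* Direct lookup instead of an O(n) list walk. */
	return xa_load(&store, index);
}

static void remove_desc_range(unsigned long first, unsigned long last)
{
	unsigned long idx;
	void *desc;

	/* Sparse and unordered index ranges are handled naturally. */
	xa_for_each_range(&store, idx, desc, first, last)
		xa_erase(&store, idx);
}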
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index d290e09258bc..173bc04f9fe5 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -20,7 +20,6 @@
#include "internals.h"
static inline int msi_sysfs_create_group(struct device *dev);
-#define dev_to_msi_list(dev) (&(dev)->msi.data->list)
/**
* msi_alloc_desc - Allocate an initialized msi_desc
@@ -41,7 +40,6 @@ static struct msi_desc *msi_alloc_desc(struct device *dev, int nvec,
if (!desc)
return NULL;
- INIT_LIST_HEAD(&desc->list);
desc->dev = dev;
desc->nvec_used = nvec;
if (affinity) {
@@ -60,6 +58,17 @@ static void msi_free_desc(struct msi_desc *desc)
kfree(desc);
}
+static int msi_insert_desc(struct msi_device_data *md, struct msi_desc *desc, unsigned int index)
+{
+ int ret;
+
+ desc->msi_index = index;
+ ret = xa_insert(&md->__store, index, desc, GFP_KERNEL);
+ if (ret)
+ msi_free_desc(desc);
+ return ret;
+}
+
/**
* msi_add_msi_desc - Allocate and initialize a MSI descriptor
* @dev: Pointer to the device for which the descriptor is allocated
@@ -77,12 +86,9 @@ int msi_add_msi_desc(struct device *dev, struct msi_desc *init_desc)
if (!desc)
return -ENOMEM;
- /* Copy the MSI index and type specific data to the new descriptor. */
- desc->msi_index = init_desc->msi_index;
+ /* Copy type specific data to the new descriptor. */
desc->pci = init_desc->pci;
-
- list_add_tail(&desc->list, &dev->msi.data->list);
- return 0;
+ return msi_insert_desc(dev->msi.data, desc, init_desc->msi_index);
}
/**
@@ -95,28 +101,41 @@ int msi_add_msi_desc(struct device *dev, struct msi_desc *init_desc)
*/
static int msi_add_simple_msi_descs(struct device *dev, unsigned int index, unsigned int ndesc)
{
- struct msi_desc *desc, *tmp;
- LIST_HEAD(list);
- unsigned int i;
+ unsigned int idx, last = index + ndesc - 1;
+ struct msi_desc *desc;
+ int ret;
lockdep_assert_held(&dev->msi.data->mutex);
- for (i = 0; i < ndesc; i++) {
+ for (idx = index; idx <= last; idx++) {
desc = msi_alloc_desc(dev, 1, NULL);
if (!desc)
+ goto fail_mem;
+ ret = msi_insert_desc(dev->msi.data, desc, idx);
+ if (ret)
goto fail;
- desc->msi_index = index + i;
- list_add_tail(&desc->list, &list);
}
- list_splice_tail(&list, &dev->msi.data->list);
return 0;
+fail_mem:
+ ret = -ENOMEM;
fail:
- list_for_each_entry_safe(desc, tmp, &list, list) {
- list_del(&desc->list);
- msi_free_desc(desc);
+ msi_free_msi_descs_range(dev, MSI_DESC_NOTASSOCIATED, index, last);
+ return ret;
+}
+
+static bool msi_desc_match(struct msi_desc *desc, enum msi_desc_filter filter)
+{
+ switch (filter) {
+ case MSI_DESC_ALL:
+ return true;
+ case MSI_DESC_NOTASSOCIATED:
+ return !desc->irq;
+ case MSI_DESC_ASSOCIATED:
+ return !!desc->irq;
}
- return -ENOMEM;
+ WARN_ON_ONCE(1);
+ return false;
}
/**
@@ -129,19 +148,17 @@ fail:
void msi_free_msi_descs_range(struct device *dev, enum msi_desc_filter filter,
unsigned int first_index, unsigned int last_index)
{
+ struct xarray *xa = &dev->msi.data->__store;
struct msi_desc *desc;
+ unsigned long idx;
lockdep_assert_held(&dev->msi.data->mutex);
- msi_for_each_desc(desc, dev, filter) {
- /*
- * Stupid for now to handle MSI device domain until the
- * storage is switched over to an xarray.
- */
- if (desc->msi_index < first_index || desc->msi_index > last_index)
- continue;
- list_del(&desc->list);
- msi_free_desc(desc);
+ xa_for_each_range(xa, idx, desc, first_index, last_index) {
+ if (msi_desc_match(desc, filter)) {
+ xa_erase(xa, idx);
+ msi_free_desc(desc);
+ }
}
}
@@ -162,7 +179,8 @@ static void msi_device_data_release(struct device *dev, void *res)
{
struct msi_device_data *md = res;
- WARN_ON_ONCE(!list_empty(&md->list));
+ WARN_ON_ONCE(!xa_empty(&md->__store));
+ xa_destroy(&md->__store);
dev->msi.data = NULL;
}
@@ -194,7 +212,7 @@ int msi_setup_device_data(struct device *dev)
return ret;
}
- INIT_LIST_HEAD(&md->list);
+ xa_init(&md->__store);
mutex_init(&md->mutex);
dev->msi.data = md;
devres_add(dev, md);
@@ -217,34 +235,21 @@ EXPORT_SYMBOL_GPL(msi_lock_descs);
*/
void msi_unlock_descs(struct device *dev)
{
- /* Clear the next pointer which was cached by the iterator */
- dev->msi.data->__next = NULL;
+ /* Invalidate the index which was cached by the iterator */
+ dev->msi.data->__iter_idx = MSI_MAX_INDEX;
mutex_unlock(&dev->msi.data->mutex);
}
EXPORT_SYMBOL_GPL(msi_unlock_descs);
-static bool msi_desc_match(struct msi_desc *desc, enum msi_desc_filter filter)
-{
- switch (filter) {
- case MSI_DESC_ALL:
- return true;
- case MSI_DESC_NOTASSOCIATED:
- return !desc->irq;
- case MSI_DESC_ASSOCIATED:
- return !!desc->irq;
- }
- WARN_ON_ONCE(1);
- return false;
-}
-
-static struct msi_desc *msi_find_first_desc(struct device *dev, enum msi_desc_filter filter)
+static struct msi_desc *msi_find_desc(struct msi_device_data *md, enum msi_desc_filter filter)
{
struct msi_desc *desc;
- list_for_each_entry(desc, dev_to_msi_list(dev), list) {
+ xa_for_each_start(&md->__store, md->__iter_idx, desc, md->__iter_idx) {
if (msi_desc_match(desc, filter))
return desc;
}
+ md->__iter_idx = MSI_MAX_INDEX;
return NULL;
}
@@ -261,37 +266,24 @@ static struct msi_desc *msi_find_first_desc(struct device *dev, enum msi_desc_fi
*/
struct msi_desc *msi_first_desc(struct device *dev, enum msi_desc_filter filter)
{
- struct msi_desc *desc;
+ struct msi_device_data *md = dev->msi.data;
- if (WARN_ON_ONCE(!dev->msi.data))
+ if (WARN_ON_ONCE(!md))
return NULL;
- lockdep_assert_held(&dev->msi.data->mutex);
+ lockdep_assert_held(&md->mutex);
- desc = msi_find_first_desc(dev, filter);
- dev->msi.data->__next = desc ? list_next_entry(desc, list) : NULL;
- return desc;
+ md->__iter_idx = 0;
+ return msi_find_desc(md, filter);
}
EXPORT_SYMBOL_GPL(msi_first_desc);
-static struct msi_desc *__msi_next_desc(struct device *dev, enum msi_desc_filter filter,
- struct msi_desc *from)
-{
- struct msi_desc *desc = from;
-
- list_for_each_entry_from(desc, dev_to_msi_list(dev), list) {
- if (msi_desc_match(desc, filter))
- return desc;
- }
- return NULL;
-}
-
/**
* msi_next_desc - Get the next MSI descriptor of a device
* @dev: Device to operate on
*
 * The first invocation of msi_next_desc() has to be preceded by a
- * successful incovation of __msi_first_desc(). Consecutive invocations are
+ * successful invocation of __msi_first_desc(). Consecutive invocations are
* only valid if the previous one was successful. All these operations have
* to be done within the same MSI mutex held region.
*
@@ -300,20 +292,18 @@ static struct msi_desc *__msi_next_desc(struct device *dev, enum msi_desc_filter
*/
struct msi_desc *msi_next_desc(struct device *dev, enum msi_desc_filter filter)
{
- struct msi_device_data *data = dev->msi.data;
- struct msi_desc *desc;
+ struct msi_device_data *md = dev->msi.data;
- if (WARN_ON_ONCE(!data))
+ if (WARN_ON_ONCE(!md))
return NULL;
- lockdep_assert_held(&data->mutex);
+ lockdep_assert_held(&md->mutex);
- if (!data->__next)
+ if (md->__iter_idx >= (unsigned long)MSI_MAX_INDEX)
return NULL;
- desc = __msi_next_desc(dev, filter, data->__next);
- dev->msi.data->__next = desc ? list_next_entry(desc, list) : NULL;
- return desc;
+ md->__iter_idx++;
+ return msi_find_desc(md, filter);
}
EXPORT_SYMBOL_GPL(msi_next_desc);
@@ -336,21 +326,18 @@ unsigned int msi_get_virq(struct device *dev, unsigned int index)
pcimsi = dev_is_pci(dev) ? to_pci_dev(dev)->msi_enabled : false;
msi_lock_descs(dev);
- msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) {
- /* PCI-MSI has only one descriptor for multiple interrupts. */
- if (pcimsi) {
- if (index < desc->nvec_used)
- ret = desc->irq + index;
- break;
- }
-
+ desc = xa_load(&dev->msi.data->__store, pcimsi ? 0 : index);
+ if (desc && desc->irq) {
/*
+ * PCI-MSI has only one descriptor for multiple interrupts.
* PCI-MSIX and platform MSI use a descriptor per
* interrupt.
*/
- if (desc->msi_index == index) {
+ if (pcimsi) {
+ if (index < desc->nvec_used)
+ ret = desc->irq + index;
+ } else {
ret = desc->irq;
- break;
}
}
msi_unlock_descs(dev);
@@ -731,16 +718,13 @@ int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
int ret, virq;
msi_lock_descs(dev);
- for (virq = virq_base; virq < virq_base + nvec; virq++) {
- desc = msi_alloc_desc(dev, 1, NULL);
- if (!desc) {
- ret = -ENOMEM;
- goto fail;
- }
+ ret = msi_add_simple_msi_descs(dev, virq_base, nvec);
+ if (ret)
+ goto unlock;
- desc->msi_index = virq;
+ for (virq = virq_base; virq < virq_base + nvec; virq++) {
+ desc = xa_load(&dev->msi.data->__store, virq);
desc->irq = virq;
- list_add_tail(&desc->list, &dev->msi.data->list);
ops->set_desc(arg, desc);
ret = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
@@ -756,6 +740,7 @@ fail:
for (--virq; virq >= virq_base; virq--)
irq_domain_free_irqs_common(domain, virq, 1);
msi_free_msi_descs_range(dev, MSI_DESC_ALL, virq_base, virq_base + nvec - 1);
+unlock:
msi_unlock_descs(dev);
return ret;
}
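A usage note on the new iteration scheme (the caller below is hypothetical, not part of the patch): the iterator cursor now lives in msi_device_data::__iter_idx instead of a cached next pointer, so a filtered walk via msi_for_each_desc() must stay within a single msi_lock_descs()/msi_unlock_descs() region:

#include <linux/device.h>
#include <linux/msi.h>

/* Hypothetical caller sketching the iteration contract. */
static void walk_active_descs(struct device *dev)
{
	struct msi_desc *desc;

	msi_lock_descs(dev);
	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED)
		pr_info("MSI index %u maps to Linux irq %u\n",
			desc->msi_index, desc->irq);
	msi_unlock_descs(dev);
}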