path: root/drivers/base
author	James Bottomley	2005-08-22 10:06:19 -0500
committer	James Bottomley	2005-08-30 22:44:20 -0500
commit	53c165e0a6c8a4ff7df316557528fa7a52d20711 (patch)
tree	354c599a07c45c71da2b848a90bbe4a98c42d333 /drivers/base
parent	51490c89f95b8581782e9baa855da166441852be (diff)
[SCSI] correct attribute_container list usage
One of the changes in the attribute_container code in the scsi-misc tree was to add a lock to protect the list of devices per container. This, unfortunately, leads to potential scheduling-while-atomic problems if there's a sleep in the function called by a trigger. The correct solution is to use the kernel klist infrastructure instead, which allows lockless traversal of a list.

Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
Diffstat (limited to 'drivers/base')
-rw-r--r--	drivers/base/attribute_container.c	51
1 files changed, 29 insertions, 22 deletions
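
For context, the sketch below (not part of the patch) shows the shape of the traversal the diff introduces: entries are chained through an embedded struct klist_node, and a walk uses a klist_iter so that no spinlock is held while the per-entry callback runs, which is what removes the scheduling-while-atomic risk described above. The klist calls are the <linux/klist.h> API as used in this patch (including the one-argument klist_init() of that era; later kernels take get/put callbacks); the demo_* structure and function names are made up for illustration.

#include <linux/kernel.h>
#include <linux/klist.h>

/* Hypothetical container and entry types, for illustration only. */
struct demo_container {
	struct klist entries;		/* replaces list_head + containers_lock */
};

struct demo_entry {
	struct klist_node node;		/* replaces struct list_head node */
	int id;
};

static void demo_init(struct demo_container *c)
{
	klist_init(&c->entries);	/* one-argument form, as in this patch */
}

static void demo_add(struct demo_container *c, struct demo_entry *e)
{
	/* klist_add_tail() takes the klist's internal lock only for the
	 * insertion itself; the caller never holds it. */
	klist_add_tail(&e->node, &c->entries);
}

static void demo_for_each(struct demo_container *c,
			  void (*fn)(struct demo_entry *))
{
	struct klist_iter iter;
	struct klist_node *n;

	klist_iter_init(&c->entries, &iter);
	while ((n = klist_next(&iter))) {
		struct demo_entry *e = container_of(n, struct demo_entry, node);
		/* No lock is held here, so fn() is free to sleep. */
		fn(e);
	}
	klist_iter_exit(&iter);
}
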
diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
index ebcae5c34133..6c0f49340eb2 100644
--- a/drivers/base/attribute_container.c
+++ b/drivers/base/attribute_container.c
@@ -22,7 +22,7 @@
/* This is a private structure used to tie the classdev and the
* container .. it should never be visible outside this file */
struct internal_container {
- struct list_head node;
+ struct klist_node node;
struct attribute_container *cont;
struct class_device classdev;
};
@@ -57,8 +57,7 @@ int
attribute_container_register(struct attribute_container *cont)
{
INIT_LIST_HEAD(&cont->node);
- INIT_LIST_HEAD(&cont->containers);
- spin_lock_init(&cont->containers_lock);
+ klist_init(&cont->containers);
down(&attribute_container_mutex);
list_add_tail(&cont->node, &attribute_container_list);
@@ -78,13 +77,13 @@ attribute_container_unregister(struct attribute_container *cont)
{
int retval = -EBUSY;
down(&attribute_container_mutex);
- spin_lock(&cont->containers_lock);
- if (!list_empty(&cont->containers))
+ spin_lock(&cont->containers.k_lock);
+ if (!list_empty(&cont->containers.k_list))
goto out;
retval = 0;
list_del(&cont->node);
out:
- spin_unlock(&cont->containers_lock);
+ spin_unlock(&cont->containers.k_lock);
up(&attribute_container_mutex);
return retval;
@@ -143,7 +142,6 @@ attribute_container_add_device(struct device *dev,
continue;
}
memset(ic, 0, sizeof(struct internal_container));
- INIT_LIST_HEAD(&ic->node);
ic->cont = cont;
class_device_initialize(&ic->classdev);
ic->classdev.dev = get_device(dev);
@@ -154,13 +152,22 @@ attribute_container_add_device(struct device *dev,
fn(cont, dev, &ic->classdev);
else
attribute_container_add_class_device(&ic->classdev);
- spin_lock(&cont->containers_lock);
- list_add_tail(&ic->node, &cont->containers);
- spin_unlock(&cont->containers_lock);
+ klist_add_tail(&ic->node, &cont->containers);
}
up(&attribute_container_mutex);
}
+/* FIXME: can't break out of this unless klist_iter_exit is also
+ * called before doing the break
+ */
+#define klist_for_each_entry(pos, head, member, iter) \
+	for (klist_iter_init(head, iter); (pos = ({ \
+		struct klist_node *n = klist_next(iter); \
+		n ? container_of(n, typeof(*pos), member) : \
+			({ klist_iter_exit(iter); NULL; }); \
+	})) != NULL; )
+
+
/**
* attribute_container_remove_device - make device eligible for removal.
*
@@ -187,18 +194,19 @@ attribute_container_remove_device(struct device *dev,
down(&attribute_container_mutex);
list_for_each_entry(cont, &attribute_container_list, node) {
- struct internal_container *ic, *tmp;
+ struct internal_container *ic;
+ struct klist_iter iter;
if (attribute_container_no_classdevs(cont))
continue;
if (!cont->match(cont, dev))
continue;
- spin_lock(&cont->containers_lock);
- list_for_each_entry_safe(ic, tmp, &cont->containers, node) {
+
+ klist_for_each_entry(ic, &cont->containers, node, &iter) {
if (dev != ic->classdev.dev)
continue;
- list_del(&ic->node);
+ klist_remove(&ic->node);
if (fn)
fn(cont, dev, &ic->classdev);
else {
@@ -206,7 +214,6 @@ attribute_container_remove_device(struct device *dev,
class_device_unregister(&ic->classdev);
}
}
- spin_unlock(&cont->containers_lock);
}
up(&attribute_container_mutex);
}
@@ -232,7 +239,8 @@ attribute_container_device_trigger(struct device *dev,
down(&attribute_container_mutex);
list_for_each_entry(cont, &attribute_container_list, node) {
- struct internal_container *ic, *tmp;
+ struct internal_container *ic;
+ struct klist_iter iter;
if (!cont->match(cont, dev))
continue;
@@ -242,12 +250,10 @@ attribute_container_device_trigger(struct device *dev,
continue;
}
- spin_lock(&cont->containers_lock);
- list_for_each_entry_safe(ic, tmp, &cont->containers, node) {
+ klist_for_each_entry(ic, &cont->containers, node, &iter) {
if (dev == ic->classdev.dev)
fn(cont, dev, &ic->classdev);
}
- spin_unlock(&cont->containers_lock);
}
up(&attribute_container_mutex);
}
@@ -397,15 +403,16 @@ attribute_container_find_class_device(struct attribute_container *cont,
{
struct class_device *cdev = NULL;
struct internal_container *ic;
+ struct klist_iter iter;
- spin_lock(&cont->containers_lock);
- list_for_each_entry(ic, &cont->containers, node) {
+ klist_for_each_entry(ic, &cont->containers, node, &iter) {
if (ic->classdev.dev == dev) {
cdev = &ic->classdev;
+ /* FIXME: must exit iterator then break */
+ klist_iter_exit(&iter);
break;
}
}
- spin_unlock(&cont->containers_lock);
return cdev;
}
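
A note on the FIXME comments above: the new klist_for_each_entry() only calls klist_iter_exit() when klist_next() runs off the end of the list, so a caller that wants to stop early must exit the iterator (dropping the reference it holds on the current node) before breaking, exactly as attribute_container_find_class_device() does. A minimal sketch of that pattern, assuming the types from this file; the helper name is hypothetical:

static struct internal_container *
find_ic_for_dev(struct attribute_container *cont, struct device *dev)
{
	struct internal_container *ic, *found = NULL;
	struct klist_iter iter;

	klist_for_each_entry(ic, &cont->containers, node, &iter) {
		if (ic->classdev.dev == dev) {
			found = ic;
			klist_iter_exit(&iter);	/* release the iterator's node reference */
			break;			/* now safe to leave the loop */
		}
	}
	return found;
}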