author     NeilBrown                  2019-04-12 11:52:08 +1000
committer  David S. Miller            2019-04-12 17:34:45 -0700
commit     c5783311a1248c437614d438b69c5f31fe483ecb (patch)
tree       ad689580e015c966a1ed5b949e88e47b0eca8775 /include
parent     e4edbe3c1f44c84f319149aeb998e7e36b3b897f (diff)
rhashtable: reorder some inline functions and macros.
This patch only moves some code around; it doesn't change the code at all. A subsequent patch will benefit from this, as it needs to add calls to functions which are now defined before the call-site, but weren't before.

Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
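The reordering matters because of the usual C rule that a static inline helper in a header must be defined (or at least declared) before the first inline function that calls it. A minimal, hypothetical illustration of that constraint (the names below are not from the patch):

/* Hypothetical header fragment -- not rhashtable code. */
static inline void helper_op(unsigned long *word)
{
	/* Must appear before any inline caller further down the header. */
	*word |= 1UL;
}

static inline void caller_op(unsigned long *word)
{
	/*
	 * Compiles only because helper_op() is already defined above;
	 * with the two definitions in the opposite order the compiler
	 * would reject (or warn about) the implicit declaration.
	 */
	helper_op(word);
}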
Diffstat (limited to 'include')
-rw-r--r--   include/linux/rhashtable.h   142
1 file changed, 71 insertions(+), 71 deletions(-)
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 2711cbf01b64..c504cd820736 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -88,77 +88,6 @@ struct bucket_table {
};
/*
- * We lock a bucket by setting BIT(1) in the pointer - this is always
- * zero in real pointers and in the nulls marker.
- * bit_spin_locks do not handle contention well, but the whole point
- * of the hashtable design is to achieve minimum per-bucket contention.
- * A nested hash table might not have a bucket pointer. In that case
- * we cannot get a lock. For remove and replace the bucket cannot be
- * interesting and doesn't need locking.
- * For insert we allocate the bucket if this is the last bucket_table,
- * and then take the lock.
- * Sometimes we unlock a bucket by writing a new pointer there. In that
- * case we don't need to unlock, but we do need to reset state such as
- * local_bh. For that we have rht_assign_unlock(). As rcu_assign_pointer()
- * provides the same release semantics that bit_spin_unlock() provides,
- * this is safe.
- */
-
-static inline void rht_lock(struct bucket_table *tbl,
- struct rhash_lock_head **bkt)
-{
- local_bh_disable();
- bit_spin_lock(1, (unsigned long *)bkt);
- lock_map_acquire(&tbl->dep_map);
-}
-
-static inline void rht_lock_nested(struct bucket_table *tbl,
- struct rhash_lock_head **bucket,
- unsigned int subclass)
-{
- local_bh_disable();
- bit_spin_lock(1, (unsigned long *)bucket);
- lock_acquire_exclusive(&tbl->dep_map, subclass, 0, NULL, _THIS_IP_);
-}
-
-static inline void rht_unlock(struct bucket_table *tbl,
- struct rhash_lock_head **bkt)
-{
- lock_map_release(&tbl->dep_map);
- bit_spin_unlock(1, (unsigned long *)bkt);
- local_bh_enable();
-}
-
-static inline void rht_assign_unlock(struct bucket_table *tbl,
- struct rhash_lock_head __rcu **bkt,
- struct rhash_head *obj)
-{
- struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;
-
- lock_map_release(&tbl->dep_map);
- rcu_assign_pointer(*p, obj);
- preempt_enable();
- __release(bitlock);
- local_bh_enable();
-}
-
-/*
- * If 'p' is a bucket head and might be locked:
- * rht_ptr() returns the address without the lock bit.
- * rht_ptr_locked() returns the address WITH the lock bit.
- */
-static inline struct rhash_head __rcu *rht_ptr(const struct rhash_lock_head *p)
-{
- return (void *)(((unsigned long)p) & ~BIT(1));
-}
-
-static inline struct rhash_lock_head __rcu *rht_ptr_locked(const
- struct rhash_head *p)
-{
- return (void *)(((unsigned long)p) | BIT(1));
-}
-
-/*
* NULLS_MARKER() expects a hash value with the low
* bits mostly likely to be significant, and it discards
* the msb.
@@ -372,6 +301,77 @@ static inline struct rhash_lock_head __rcu **rht_bucket_insert(
&tbl->buckets[hash];
}
+/*
+ * We lock a bucket by setting BIT(1) in the pointer - this is always
+ * zero in real pointers and in the nulls marker.
+ * bit_spin_locks do not handle contention well, but the whole point
+ * of the hashtable design is to achieve minimum per-bucket contention.
+ * A nested hash table might not have a bucket pointer. In that case
+ * we cannot get a lock. For remove and replace the bucket cannot be
+ * interesting and doesn't need locking.
+ * For insert we allocate the bucket if this is the last bucket_table,
+ * and then take the lock.
+ * Sometimes we unlock a bucket by writing a new pointer there. In that
+ * case we don't need to unlock, but we do need to reset state such as
+ * local_bh. For that we have rht_assign_unlock(). As rcu_assign_pointer()
+ * provides the same release semantics that bit_spin_unlock() provides,
+ * this is safe.
+ */
+
+static inline void rht_lock(struct bucket_table *tbl,
+ struct rhash_lock_head **bkt)
+{
+ local_bh_disable();
+ bit_spin_lock(1, (unsigned long *)bkt);
+ lock_map_acquire(&tbl->dep_map);
+}
+
+static inline void rht_lock_nested(struct bucket_table *tbl,
+ struct rhash_lock_head **bucket,
+ unsigned int subclass)
+{
+ local_bh_disable();
+ bit_spin_lock(1, (unsigned long *)bucket);
+ lock_acquire_exclusive(&tbl->dep_map, subclass, 0, NULL, _THIS_IP_);
+}
+
+static inline void rht_unlock(struct bucket_table *tbl,
+ struct rhash_lock_head **bkt)
+{
+ lock_map_release(&tbl->dep_map);
+ bit_spin_unlock(1, (unsigned long *)bkt);
+ local_bh_enable();
+}
+
+/*
+ * If 'p' is a bucket head and might be locked:
+ * rht_ptr() returns the address without the lock bit.
+ * rht_ptr_locked() returns the address WITH the lock bit.
+ */
+static inline struct rhash_head __rcu *rht_ptr(const struct rhash_lock_head *p)
+{
+ return (void *)(((unsigned long)p) & ~BIT(1));
+}
+
+static inline struct rhash_lock_head __rcu *rht_ptr_locked(const
+ struct rhash_head *p)
+{
+ return (void *)(((unsigned long)p) | BIT(1));
+}
+
+static inline void rht_assign_unlock(struct bucket_table *tbl,
+ struct rhash_lock_head __rcu **bkt,
+ struct rhash_head *obj)
+{
+ struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;
+
+ lock_map_release(&tbl->dep_map);
+ rcu_assign_pointer(*p, obj);
+ preempt_enable();
+ __release(bitlock);
+ local_bh_enable();
+}
+
/**
* rht_for_each_from - iterate over hash chain from given head
* @pos: the &struct rhash_head to use as a loop cursor.
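For orientation, here is a hedged sketch (not part of this patch, and not actual kernel code) of how the relocated helpers compose on an insert-at-head path: the bucket pointer doubles as a bit spin lock via BIT(1), rht_ptr() masks that bit off to recover the chain head, and rht_assign_unlock() publishes a new head through rcu_assign_pointer(), whose release semantics stand in for bit_spin_unlock(). The function name below is made up, and sparse __rcu annotations are glossed over.

/* Illustrative sketch only -- mirrors the shape of an insert-at-head path. */
static inline void example_publish_head(struct bucket_table *tbl,
					struct rhash_lock_head __rcu **bkt,
					struct rhash_head *obj)
{
	/* Take the per-bucket bit lock (BIT(1) in the pointer), BH disabled. */
	rht_lock(tbl, (struct rhash_lock_head **)bkt);

	/*
	 * rht_ptr() strips the lock bit to yield the current chain head,
	 * which may be a nulls marker rather than a real rhash_head.
	 */
	RCU_INIT_POINTER(obj->next, rht_ptr(*bkt));

	/*
	 * Publishing the new head also releases the bucket:
	 * rcu_assign_pointer() inside rht_assign_unlock() provides the
	 * release ordering that bit_spin_unlock() would, and the helper
	 * restores the preempt/BH state that rht_lock() changed.
	 */
	rht_assign_unlock(tbl, bkt, obj);
}

/* A path that does not rewrite the head would instead end with rht_unlock(). */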