author     Linus Torvalds  2018-08-15 22:06:26 -0700
committer  Linus Torvalds  2018-08-15 22:06:26 -0700
commit     72f02ba66bd83b54054da20eae550123de84da6f (patch)
tree       96a8360400e040aa2e38e7352594dbbc485461db /lib
parent     db06f826ec12bf0701ea7fc0a3c0aa00b84417c8 (diff)
parent     51372570ac3c919b036e760f4ca449e81cf8e995 (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "This is mostly updates to the usual drivers: mpt3sas, lpfc, qla2xxx,
  hisi_sas, smartpqi, megaraid_sas, arcmsr. In addition, with the
  continuing absence of Nic we have target updates for tcmu and target
  core (all with reviews and acks).

  The biggest observable change is going to be that we're (again)
  trying to switch to multiqueue as the default (a user can still
  override the setting on the kernel command line). Other major core
  stuff is the removal of the remaining Microchannel drivers, an update
  of the internal timers and some reworks of completion and result
  handling"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (203 commits)
  scsi: core: use blk_mq_run_hw_queues in scsi_kick_queue
  scsi: ufs: remove unnecessary query(DM) UPIU trace
  scsi: qla2xxx: Fix issue reported by static checker for qla2x00_els_dcmd2_sp_done()
  scsi: aacraid: Spelling fix in comment
  scsi: mpt3sas: Fix calltrace observed while running IO & reset
  scsi: aic94xx: fix an error code in aic94xx_init()
  scsi: st: remove redundant pointer STbuffer
  scsi: qla2xxx: Update driver version to 10.00.00.08-k
  scsi: qla2xxx: Migrate NVME N2N handling into state machine
  scsi: qla2xxx: Save frame payload size from ICB
  scsi: qla2xxx: Fix stalled relogin
  scsi: qla2xxx: Fix race between switch cmd completion and timeout
  scsi: qla2xxx: Fix Management Server NPort handle reservation logic
  scsi: qla2xxx: Flush mailbox commands on chip reset
  scsi: qla2xxx: Fix unintended Logout
  scsi: qla2xxx: Fix session state stuck in Get Port DB
  scsi: qla2xxx: Fix redundant fc_rport registration
  scsi: qla2xxx: Silent erroneous message
  scsi: qla2xxx: Prevent sysfs access when chip is down
  scsi: qla2xxx: Add longer window for chip reset
  ...
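The "override on the kernel command line" mentioned above refers to the SCSI layer's blk-mq default (CONFIG_SCSI_MQ_DEFAULT). As an illustrative sketch only, assuming the scsi_mod.use_blk_mq module parameter of kernels from this generation (the parameter name is not part of the commit message itself), the setting can be forced either way at boot:

    scsi_mod.use_blk_mq=0    (keep the legacy single-queue request path)
    scsi_mod.use_blk_mq=1    (use the multiqueue path)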
Diffstat (limited to 'lib')
-rw-r--r--  lib/Makefile        2
-rw-r--r--  lib/klist.c        10
-rw-r--r--  lib/percpu_ida.c  370
3 files changed, 7 insertions, 375 deletions
diff --git a/lib/Makefile b/lib/Makefile
index ff3a397bbb12..d95bb2525101 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -37,7 +37,7 @@ obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
bsearch.o find_bit.o llist.o memweight.o kfifo.o \
- percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
+ percpu-refcount.o rhashtable.o reciprocal_div.o \
once.o refcount.o usercopy.o errseq.o bucket_locks.o
obj-$(CONFIG_STRING_SELFTEST) += test_string.o
obj-y += string_helpers.o
diff --git a/lib/klist.c b/lib/klist.c
index 0507fa5d84c5..f6b547812fe3 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -336,8 +336,9 @@ struct klist_node *klist_prev(struct klist_iter *i)
void (*put)(struct klist_node *) = i->i_klist->put;
struct klist_node *last = i->i_cur;
struct klist_node *prev;
+ unsigned long flags;
- spin_lock(&i->i_klist->k_lock);
+ spin_lock_irqsave(&i->i_klist->k_lock, flags);
if (last) {
prev = to_klist_node(last->n_node.prev);
@@ -356,7 +357,7 @@ struct klist_node *klist_prev(struct klist_iter *i)
prev = to_klist_node(prev->n_node.prev);
}
- spin_unlock(&i->i_klist->k_lock);
+ spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
if (put && last)
put(last);
@@ -377,8 +378,9 @@ struct klist_node *klist_next(struct klist_iter *i)
void (*put)(struct klist_node *) = i->i_klist->put;
struct klist_node *last = i->i_cur;
struct klist_node *next;
+ unsigned long flags;
- spin_lock(&i->i_klist->k_lock);
+ spin_lock_irqsave(&i->i_klist->k_lock, flags);
if (last) {
next = to_klist_node(last->n_node.next);
@@ -397,7 +399,7 @@ struct klist_node *klist_next(struct klist_iter *i)
next = to_klist_node(next->n_node.next);
}
- spin_unlock(&i->i_klist->k_lock);
+ spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
if (put && last)
put(last);
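Both klist.c hunks make the same change: klist_prev() and klist_next() now take the klist lock with the IRQ-saving variants instead of plain spin_lock()/spin_unlock(). In general this is the pattern that lets a lock be shared with interrupt context, or be taken by callers that already run with interrupts disabled, because the caller's interrupt state is saved on lock and restored exactly on unlock. A minimal sketch of the pattern, using a hypothetical list and lock of its own rather than the klist internals:

#include <linux/list.h>
#include <linux/spinlock.h>

static LIST_HEAD(my_list);              /* hypothetical data protected by my_lock */
static DEFINE_SPINLOCK(my_lock);

/*
 * spin_lock_irqsave() records the caller's interrupt state in 'flags' and
 * disables local interrupts; spin_unlock_irqrestore() puts that exact state
 * back.  Unlike spin_lock()/spin_unlock(), this is safe whether the caller
 * runs with interrupts enabled, with them already disabled, or in interrupt
 * context itself.
 */
static void my_add(struct list_head *node)
{
        unsigned long flags;

        spin_lock_irqsave(&my_lock, flags);
        list_add_tail(node, &my_list);
        spin_unlock_irqrestore(&my_lock, flags);
}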
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
deleted file mode 100644
index beb14839b41a..000000000000
--- a/lib/percpu_ida.c
+++ /dev/null
@@ -1,370 +0,0 @@
-/*
- * Percpu IDA library
- *
- * Copyright (C) 2013 Datera, Inc. Kent Overstreet
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-
-#include <linux/mm.h>
-#include <linux/bitmap.h>
-#include <linux/bitops.h>
-#include <linux/bug.h>
-#include <linux/err.h>
-#include <linux/export.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/percpu.h>
-#include <linux/sched/signal.h>
-#include <linux/string.h>
-#include <linux/spinlock.h>
-#include <linux/percpu_ida.h>
-
-struct percpu_ida_cpu {
- /*
- * Even though this is percpu, we need a lock for tag stealing by remote
- * CPUs:
- */
- spinlock_t lock;
-
- /* nr_free/freelist form a stack of free IDs */
- unsigned nr_free;
- unsigned freelist[];
-};
-
-static inline void move_tags(unsigned *dst, unsigned *dst_nr,
- unsigned *src, unsigned *src_nr,
- unsigned nr)
-{
- *src_nr -= nr;
- memcpy(dst + *dst_nr, src + *src_nr, sizeof(unsigned) * nr);
- *dst_nr += nr;
-}
-
-/*
- * Try to steal tags from a remote cpu's percpu freelist.
- *
- * We first check how many percpu freelists have tags
- *
- * Then we iterate through the cpus until we find some tags - we don't attempt
- * to find the "best" cpu to steal from, to keep cacheline bouncing to a
- * minimum.
- */
-static inline void steal_tags(struct percpu_ida *pool,
- struct percpu_ida_cpu *tags)
-{
- unsigned cpus_have_tags, cpu = pool->cpu_last_stolen;
- struct percpu_ida_cpu *remote;
-
- for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags);
- cpus_have_tags; cpus_have_tags--) {
- cpu = cpumask_next(cpu, &pool->cpus_have_tags);
-
- if (cpu >= nr_cpu_ids) {
- cpu = cpumask_first(&pool->cpus_have_tags);
- if (cpu >= nr_cpu_ids)
- BUG();
- }
-
- pool->cpu_last_stolen = cpu;
- remote = per_cpu_ptr(pool->tag_cpu, cpu);
-
- cpumask_clear_cpu(cpu, &pool->cpus_have_tags);
-
- if (remote == tags)
- continue;
-
- spin_lock(&remote->lock);
-
- if (remote->nr_free) {
- memcpy(tags->freelist,
- remote->freelist,
- sizeof(unsigned) * remote->nr_free);
-
- tags->nr_free = remote->nr_free;
- remote->nr_free = 0;
- }
-
- spin_unlock(&remote->lock);
-
- if (tags->nr_free)
- break;
- }
-}
-
-/*
- * Pop up to IDA_PCPU_BATCH_MOVE IDs off the global freelist, and push them onto
- * our percpu freelist:
- */
-static inline void alloc_global_tags(struct percpu_ida *pool,
- struct percpu_ida_cpu *tags)
-{
- move_tags(tags->freelist, &tags->nr_free,
- pool->freelist, &pool->nr_free,
- min(pool->nr_free, pool->percpu_batch_size));
-}
-
-/**
- * percpu_ida_alloc - allocate a tag
- * @pool: pool to allocate from
- * @state: task state for prepare_to_wait
- *
- * Returns a tag - an integer in the range [0..nr_tags) (passed to
- * tag_pool_init()), or otherwise -ENOSPC on allocation failure.
- *
- * Safe to be called from interrupt context (assuming it isn't passed
- * TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, of course).
- *
- * @gfp indicates whether or not to wait until a free id is available (it's not
- * used for internal memory allocations); thus if passed __GFP_RECLAIM we may sleep
- * however long it takes until another thread frees an id (same semantics as a
- * mempool).
- *
- * Will not fail if passed TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE.
- */
-int percpu_ida_alloc(struct percpu_ida *pool, int state)
-{
- DEFINE_WAIT(wait);
- struct percpu_ida_cpu *tags;
- unsigned long flags;
- int tag = -ENOSPC;
-
- tags = raw_cpu_ptr(pool->tag_cpu);
- spin_lock_irqsave(&tags->lock, flags);
-
- /* Fastpath */
- if (likely(tags->nr_free)) {
- tag = tags->freelist[--tags->nr_free];
- spin_unlock_irqrestore(&tags->lock, flags);
- return tag;
- }
- spin_unlock_irqrestore(&tags->lock, flags);
-
- while (1) {
- spin_lock_irqsave(&pool->lock, flags);
- tags = this_cpu_ptr(pool->tag_cpu);
-
- /*
- * prepare_to_wait() must come before steal_tags(), in case
- * percpu_ida_free() on another cpu flips a bit in
- * cpus_have_tags
- *
- * global lock held and irqs disabled, don't need percpu lock
- */
- if (state != TASK_RUNNING)
- prepare_to_wait(&pool->wait, &wait, state);
-
- if (!tags->nr_free)
- alloc_global_tags(pool, tags);
- if (!tags->nr_free)
- steal_tags(pool, tags);
-
- if (tags->nr_free) {
- tag = tags->freelist[--tags->nr_free];
- if (tags->nr_free)
- cpumask_set_cpu(smp_processor_id(),
- &pool->cpus_have_tags);
- }
-
- spin_unlock_irqrestore(&pool->lock, flags);
-
- if (tag >= 0 || state == TASK_RUNNING)
- break;
-
- if (signal_pending_state(state, current)) {
- tag = -ERESTARTSYS;
- break;
- }
-
- schedule();
- }
- if (state != TASK_RUNNING)
- finish_wait(&pool->wait, &wait);
-
- return tag;
-}
-EXPORT_SYMBOL_GPL(percpu_ida_alloc);
-
-/**
- * percpu_ida_free - free a tag
- * @pool: pool @tag was allocated from
- * @tag: a tag previously allocated with percpu_ida_alloc()
- *
- * Safe to be called from interrupt context.
- */
-void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
-{
- struct percpu_ida_cpu *tags;
- unsigned long flags;
- unsigned nr_free;
-
- BUG_ON(tag >= pool->nr_tags);
-
- tags = raw_cpu_ptr(pool->tag_cpu);
-
- spin_lock_irqsave(&tags->lock, flags);
- tags->freelist[tags->nr_free++] = tag;
-
- nr_free = tags->nr_free;
-
- if (nr_free == 1) {
- cpumask_set_cpu(smp_processor_id(),
- &pool->cpus_have_tags);
- wake_up(&pool->wait);
- }
- spin_unlock_irqrestore(&tags->lock, flags);
-
- if (nr_free == pool->percpu_max_size) {
- spin_lock_irqsave(&pool->lock, flags);
- spin_lock(&tags->lock);
-
- if (tags->nr_free == pool->percpu_max_size) {
- move_tags(pool->freelist, &pool->nr_free,
- tags->freelist, &tags->nr_free,
- pool->percpu_batch_size);
-
- wake_up(&pool->wait);
- }
- spin_unlock(&tags->lock);
- spin_unlock_irqrestore(&pool->lock, flags);
- }
-}
-EXPORT_SYMBOL_GPL(percpu_ida_free);
-
-/**
- * percpu_ida_destroy - release a tag pool's resources
- * @pool: pool to free
- *
- * Frees the resources allocated by percpu_ida_init().
- */
-void percpu_ida_destroy(struct percpu_ida *pool)
-{
- free_percpu(pool->tag_cpu);
- free_pages((unsigned long) pool->freelist,
- get_order(pool->nr_tags * sizeof(unsigned)));
-}
-EXPORT_SYMBOL_GPL(percpu_ida_destroy);
-
-/**
- * percpu_ida_init - initialize a percpu tag pool
- * @pool: pool to initialize
- * @nr_tags: number of tags that will be available for allocation
- *
- * Initializes @pool so that it can be used to allocate tags - integers in the
- * range [0, nr_tags). Typically, they'll be used by driver code to refer to a
- * preallocated array of tag structures.
- *
- * Allocation is percpu, but sharding is limited by nr_tags - for best
- * performance, the workload should not span more cpus than nr_tags / 128.
- */
-int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
- unsigned long max_size, unsigned long batch_size)
-{
- unsigned i, cpu, order;
-
- memset(pool, 0, sizeof(*pool));
-
- init_waitqueue_head(&pool->wait);
- spin_lock_init(&pool->lock);
- pool->nr_tags = nr_tags;
- pool->percpu_max_size = max_size;
- pool->percpu_batch_size = batch_size;
-
- /* Guard against overflow */
- if (nr_tags > (unsigned) INT_MAX + 1) {
- pr_err("percpu_ida_init(): nr_tags too large\n");
- return -EINVAL;
- }
-
- order = get_order(nr_tags * sizeof(unsigned));
- pool->freelist = (void *) __get_free_pages(GFP_KERNEL, order);
- if (!pool->freelist)
- return -ENOMEM;
-
- for (i = 0; i < nr_tags; i++)
- pool->freelist[i] = i;
-
- pool->nr_free = nr_tags;
-
- pool->tag_cpu = __alloc_percpu(sizeof(struct percpu_ida_cpu) +
- pool->percpu_max_size * sizeof(unsigned),
- sizeof(unsigned));
- if (!pool->tag_cpu)
- goto err;
-
- for_each_possible_cpu(cpu)
- spin_lock_init(&per_cpu_ptr(pool->tag_cpu, cpu)->lock);
-
- return 0;
-err:
- percpu_ida_destroy(pool);
- return -ENOMEM;
-}
-EXPORT_SYMBOL_GPL(__percpu_ida_init);
-
-/**
- * percpu_ida_for_each_free - iterate free ids of a pool
- * @pool: pool to iterate
- * @fn: interate callback function
- * @data: parameter for @fn
- *
- * Note, this doesn't guarantee to iterate all free ids restrictly. Some free
- * ids might be missed, some might be iterated duplicated, and some might
- * be iterated and not free soon.
- */
-int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
- void *data)
-{
- unsigned long flags;
- struct percpu_ida_cpu *remote;
- unsigned cpu, i, err = 0;
-
- for_each_possible_cpu(cpu) {
- remote = per_cpu_ptr(pool->tag_cpu, cpu);
- spin_lock_irqsave(&remote->lock, flags);
- for (i = 0; i < remote->nr_free; i++) {
- err = fn(remote->freelist[i], data);
- if (err)
- break;
- }
- spin_unlock_irqrestore(&remote->lock, flags);
- if (err)
- goto out;
- }
-
- spin_lock_irqsave(&pool->lock, flags);
- for (i = 0; i < pool->nr_free; i++) {
- err = fn(pool->freelist[i], data);
- if (err)
- break;
- }
- spin_unlock_irqrestore(&pool->lock, flags);
-out:
- return err;
-}
-EXPORT_SYMBOL_GPL(percpu_ida_for_each_free);
-
-/**
- * percpu_ida_free_tags - return free tags number of a specific cpu or global pool
- * @pool: pool related
- * @cpu: specific cpu or global pool if @cpu == nr_cpu_ids
- *
- * Note: this just returns a snapshot of free tags number.
- */
-unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu)
-{
- struct percpu_ida_cpu *remote;
- if (cpu == nr_cpu_ids)
- return pool->nr_free;
- remote = per_cpu_ptr(pool->tag_cpu, cpu);
- return remote->nr_free;
-}
-EXPORT_SYMBOL_GPL(percpu_ida_free_tags);
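With its last in-tree users gone (the target code in this same series moved its tag allocation to sbitmap), the whole percpu_ida library can be deleted. For context, here is a minimal usage sketch of the interface being removed, built only from the signatures visible in the diff above (__percpu_ida_init(), percpu_ida_alloc(), percpu_ida_free(), percpu_ida_destroy()); the pool name and sizes are illustrative, not values taken from the old header:

#include <linux/percpu_ida.h>
#include <linux/sched.h>

static struct percpu_ida my_tag_pool;   /* hypothetical pool */

static int my_pool_setup(void)
{
        /* 1024 tags total; per-cpu caches of at most 64, refilled 32 at a time */
        return __percpu_ida_init(&my_tag_pool, 1024, 64, 32);
}

static int my_submit(void)
{
        int tag;

        /*
         * With TASK_UNINTERRUPTIBLE this blocks until another context frees
         * a tag; with TASK_RUNNING it would instead return -ENOSPC
         * immediately when the pool is exhausted.
         */
        tag = percpu_ida_alloc(&my_tag_pool, TASK_UNINTERRUPTIBLE);
        if (tag < 0)
                return tag;

        /* ... use 'tag' as an index into a preallocated command array ... */

        percpu_ida_free(&my_tag_pool, tag);
        return 0;
}

static void my_pool_teardown(void)
{
        percpu_ida_destroy(&my_tag_pool);
}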