| author | Ming Lei | 2018-11-02 22:59:50 +0800 |
|---|---|---|
| committer | Thomas Gleixner | 2018-11-05 12:16:26 +0100 |
| commit | 060746d9e394084b7401e7532f2de528ecbfb521 (patch) | |
| tree | b7a3f816c1af23dd6b51bb857f9cbad952f7b1bc /kernel/irq | |
| parent | 5c903e108d0b005cf59904ca3520934fca4b9439 (diff) | |
genirq/affinity: Pass first vector to __irq_build_affinity_masks()
No functional change.
Prepares for support of allocating and affinitizing sets of interrupts, in
which each set of interrupts needs a full two-stage spreading. The first
vector argument is necessary for this, so that the affinitizing starts from
the first vector of each set.
[ tglx: Minor changelog tweaks ]
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: linux-block@vger.kernel.org
Cc: Hannes Reinecke <hare@suse.com>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Sagi Grimberg <sagi@grimberg.me>
Link: https://lkml.kernel.org/r/20181102145951.31979-4-ming.lei@redhat.com
Diffstat (limited to 'kernel/irq')
-rw-r--r-- | kernel/irq/affinity.c | 20 |
1 file changed, 11 insertions(+), 9 deletions(-)
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index 2f9812b6035e..e028b773e38a 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -95,14 +95,14 @@ static int get_nodes_in_cpumask(cpumask_var_t *node_to_cpumask,
 }
 
 static int __irq_build_affinity_masks(const struct irq_affinity *affd,
-				       int startvec, int numvecs,
+				       int startvec, int numvecs, int firstvec,
 				       cpumask_var_t *node_to_cpumask,
 				       const struct cpumask *cpu_mask,
 				       struct cpumask *nmsk,
 				       struct cpumask *masks)
 {
 	int n, nodes, cpus_per_vec, extra_vecs, done = 0;
-	int last_affv = affd->pre_vectors + numvecs;
+	int last_affv = firstvec + numvecs;
 	int curvec = startvec;
 	nodemask_t nodemsk = NODE_MASK_NONE;
 
@@ -119,7 +119,7 @@ static int __irq_build_affinity_masks(const struct irq_affinity *affd,
 		for_each_node_mask(n, nodemsk) {
 			cpumask_or(masks + curvec, masks + curvec, node_to_cpumask[n]);
 			if (++curvec == last_affv)
-				curvec = affd->pre_vectors;
+				curvec = firstvec;
 		}
 		done = numvecs;
 		goto out;
@@ -129,7 +129,7 @@ static int __irq_build_affinity_masks(const struct irq_affinity *affd,
 		int ncpus, v, vecs_to_assign, vecs_per_node;
 
 		/* Spread the vectors per node */
-		vecs_per_node = (numvecs - (curvec - affd->pre_vectors)) / nodes;
+		vecs_per_node = (numvecs - (curvec - firstvec)) / nodes;
 
 		/* Get the cpus on this node which are in the mask */
 		cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
@@ -157,7 +157,7 @@ static int __irq_build_affinity_masks(const struct irq_affinity *affd,
 		if (done >= numvecs)
 			break;
 		if (curvec >= last_affv)
-			curvec = affd->pre_vectors;
+			curvec = firstvec;
 		--nodes;
 	}
 
@@ -190,8 +190,9 @@ static int irq_build_affinity_masks(const struct irq_affinity *affd,
 
 	/* Spread on present CPUs starting from affd->pre_vectors */
 	usedvecs = __irq_build_affinity_masks(affd, curvec, numvecs,
-					      node_to_cpumask, cpu_present_mask,
-					      nmsk, masks);
+					      affd->pre_vectors,
+					      node_to_cpumask,
+					      cpu_present_mask, nmsk, masks);
 
 	/*
 	 * Spread on non present CPUs starting from the next vector to be
@@ -205,8 +206,9 @@ static int irq_build_affinity_masks(const struct irq_affinity *affd,
 		curvec = affd->pre_vectors + usedvecs;
 	cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask);
 	usedvecs += __irq_build_affinity_masks(affd, curvec, numvecs,
-					       node_to_cpumask, npresmsk,
-					       nmsk, masks);
+					       affd->pre_vectors,
+					       node_to_cpumask, npresmsk,
+					       nmsk, masks);
 	put_online_cpus();
 
 	free_cpumask_var(npresmsk);
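As the changelog notes, the extra firstvec argument only matters once several
interrupt sets are spread independently: the wrap-around has to return to the
set's own first vector rather than to affd->pre_vectors. The following
user-space sketch is illustrative only, not kernel code; the helper name
spread_set() and the vector layout (2 pre_vectors, then sets of 4 and 3
vectors) are invented for the example.

#include <stdio.h>

/*
 * Illustrative sketch only (not kernel code): round-robin 'numvecs'
 * vectors of one set across 'nodes' nodes, starting at 'startvec' and
 * wrapping back to 'firstvec' (the set's own first vector), mirroring
 * the wrap in __irq_build_affinity_masks().
 */
static void spread_set(int firstvec, int startvec, int numvecs, int nodes)
{
	int last_affv = firstvec + numvecs;
	int curvec = startvec;
	int n;

	for (n = 0; n < nodes; n++) {
		printf("node %d -> vector %d\n", n, curvec);
		if (++curvec == last_affv)
			curvec = firstvec;	/* stay inside this set */
	}
}

int main(void)
{
	int pre_vectors = 2;	/* hypothetical reserved vectors 0..1 */

	spread_set(pre_vectors, pre_vectors, 4, 6);		/* set 1: vectors 2..5 */
	spread_set(pre_vectors + 4, pre_vectors + 4, 3, 6);	/* set 2: vectors 6..8 */
	return 0;
}

If the second call wrapped back to pre_vectors instead, its assignments would
spill into vectors 2..5, which belong to the first set; wrapping to the set's
own first vector keeps each set self-contained.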