aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorTejun Heo2014-09-02 14:46:04 -0400
committerTejun Heo2014-09-02 14:46:04 -0400
commite04d320838f573d8fa989a0d7af0972f9b0142d9 (patch)
tree134ab3b64a07aca992f4d3b81f048b3791c12df4
parenta16037c8dfc2734c1a2c8e3ffd4766ed25f2a41d (diff)
percpu: indent the population block in pcpu_alloc()
The next patch will conditionalize the population block in pcpu_alloc() which will end up making a rather large indentation change obfuscating the actual logic change. This patch puts the block under "if (true)" so that the next patch can avoid indentation changes. The definitions of the local variables which are used only in the block are moved into the block. This patch is purely cosmetic. Signed-off-by: Tejun Heo <tj@kernel.org>
-rw-r--r--mm/percpu.c38
1 files changed, 21 insertions, 17 deletions
diff --git a/mm/percpu.c b/mm/percpu.c
index e18aa143aab1..577d84fb3002 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -742,7 +742,6 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
struct pcpu_chunk *chunk;
const char *err;
int slot, off, new_alloc, cpu, ret;
- int page_start, page_end, rs, re;
unsigned long flags;
void __percpu *ptr;
@@ -847,27 +846,32 @@ area_found:
spin_unlock_irqrestore(&pcpu_lock, flags);
/* populate if not all pages are already there */
- mutex_lock(&pcpu_alloc_mutex);
- page_start = PFN_DOWN(off);
- page_end = PFN_UP(off + size);
+ if (true) {
+ int page_start, page_end, rs, re;
- pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
- WARN_ON(chunk->immutable);
+ mutex_lock(&pcpu_alloc_mutex);
- ret = pcpu_populate_chunk(chunk, rs, re);
+ page_start = PFN_DOWN(off);
+ page_end = PFN_UP(off + size);
- spin_lock_irqsave(&pcpu_lock, flags);
- if (ret) {
- mutex_unlock(&pcpu_alloc_mutex);
- pcpu_free_area(chunk, off);
- err = "failed to populate";
- goto fail_unlock;
+ pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
+ WARN_ON(chunk->immutable);
+
+ ret = pcpu_populate_chunk(chunk, rs, re);
+
+ spin_lock_irqsave(&pcpu_lock, flags);
+ if (ret) {
+ mutex_unlock(&pcpu_alloc_mutex);
+ pcpu_free_area(chunk, off);
+ err = "failed to populate";
+ goto fail_unlock;
+ }
+ bitmap_set(chunk->populated, rs, re - rs);
+ spin_unlock_irqrestore(&pcpu_lock, flags);
}
- bitmap_set(chunk->populated, rs, re - rs);
- spin_unlock_irqrestore(&pcpu_lock, flags);
- }
- mutex_unlock(&pcpu_alloc_mutex);
+ mutex_unlock(&pcpu_alloc_mutex);
+ }
/* clear the areas and return address relative to base address */
for_each_possible_cpu(cpu)