author     Michael Holzheu <michael.holzheu@linux.vnet.ibm.com>    2009-09-22 22:58:53 +0200
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>             2009-09-22 22:58:46 +0200
commit     1aaf179d043856d80bbb354f9feaf706b9cfbcd3
tree       b477b19f229869f45fc9f331f308dcdb5e3b01a0  /arch/s390/kernel
parent     68d1e5f08b13132504752cad54169376739753db
[S390] hibernate: Do real CPU swap at resume time
Currently, when the physical resume CPU is not equal to the physical
suspend CPU, we swap the CPUs logically by modifying the logical/physical
CPU mapping. This has two major drawbacks: first, the change is visible
from user space (e.g. in the CPU sysfs files), and second, it is hard to
ensure that the physical CPU ID is not stored anywhere in the kernel
before suspend.
To fix this, we now really swap the physical CPUs if the resume CPU is
not the physical suspend CPU: we restart the suspend CPU and stop the
resume CPU using SIGP restart and SIGP stop. If the suspend CPU is no
longer available, we print a message and load a disabled wait PSW.
Signed-off-by: Michael Holzheu <michael.holzheu@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
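
[Editorial note] The pgm_check_entry path added to swsusp_arch_resume below is easier to follow
with its control flow written out in C. The following is a minimal user-space sketch of that
decision logic only; the sigp_*() helpers, the CPU addresses and the file itself are illustrative
stand-ins, not kernel code or kernel APIs.

/*
 * Sketch of the resume-time CPU swap decision (assumed model, not kernel code).
 * The sigp_*() functions stand in for the s390 SIGP instruction orders used
 * by the assembly (initial CPU reset, restart, stop).
 */
#include <stdbool.h>
#include <stdio.h>

static bool sigp_initial_cpu_reset(unsigned short cpu) { return cpu != 0xffff; }
static void sigp_restart(unsigned short cpu) { printf("SIGP restart -> CPU %#x\n", cpu); }
static void sigp_stop(unsigned short cpu)    { printf("SIGP stop    -> CPU %#x\n", cpu); }

static void resume_cpu_swap(unsigned short suspend_cpu, unsigned short resume_cpu)
{
	if (suspend_cpu == resume_cpu)
		return;			/* same physical CPU: go straight to register restore */

	if (!sigp_initial_cpu_reset(suspend_cpu)) {
		/* suspend CPU no longer available: message, then disabled wait */
		puts("Resume not possible because suspend CPU is no longer available");
		return;			/* the real code loads a disabled wait PSW here */
	}
	sigp_restart(suspend_cpu);	/* suspend CPU continues at restart_suspend */
	sigp_stop(resume_cpu);		/* current (resume) CPU parks itself */
}

int main(void)
{
	resume_cpu_swap(0x0, 0x1);	/* example CPU addresses */
	return 0;
}

In the real assembly the SIGP orders are additionally retried on the busy condition, and the
restarted suspend CPU first waits (restart_suspend) until the resume CPU is stopped before it
continues at restore_registers.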
Diffstat (limited to 'arch/s390/kernel')
-rw-r--r--  arch/s390/kernel/asm-offsets.c   |  7
-rw-r--r--  arch/s390/kernel/sclp.S          |  5
-rw-r--r--  arch/s390/kernel/smp.c           | 36
-rw-r--r--  arch/s390/kernel/swsusp_asm64.S  | 91
4 files changed, 70 insertions, 69 deletions
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index fa9905ce7d0b..63e46433e81d 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -7,6 +7,7 @@
 #include <linux/sched.h>
 #include <linux/kbuild.h>
 #include <asm/vdso.h>
+#include <asm/sigp.h>
 
 int main(void)
 {
@@ -59,6 +60,10 @@ int main(void)
 	DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
 	DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
 	DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
-
+	/* constants for SIGP */
+	DEFINE(__SIGP_STOP, sigp_stop);
+	DEFINE(__SIGP_RESTART, sigp_restart);
+	DEFINE(__SIGP_SENSE, sigp_sense);
+	DEFINE(__SIGP_INITIAL_CPU_RESET, sigp_initial_cpu_reset);
 	return 0;
 }
diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S
index 20639dfe0c42..e27ca63076d1 100644
--- a/arch/s390/kernel/sclp.S
+++ b/arch/s390/kernel/sclp.S
@@ -24,8 +24,6 @@ LC_EXT_INT_CODE = 0x86		# addr of ext int code
 #   R3 = external interruption parameter if R2=0
 #
 
-.section ".init.text","ax"
-
 _sclp_wait_int:
 	stm	%r6,%r15,24(%r15)		# save registers
 	basr	%r13,0				# get base register
@@ -318,9 +316,8 @@ _sclp_print_early:
 	.long	_sclp_work_area
 .Lascebc:
 	.long	_ascebc
-.previous
 
-.section ".init.data","a"
+.section .data,"aw",@progbits
 	.balign	4096
 _sclp_work_area:
 	.fill	4096
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 6f14734abe72..b4b6396e6cf0 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -1041,42 +1041,6 @@ out:
 static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show,
 			 dispatching_store);
 
-/*
- * If the resume kernel runs on another cpu than the suspended kernel,
- * we have to switch the cpu IDs in the logical map.
- */
-void smp_switch_boot_cpu_in_resume(u32 resume_phys_cpu_id,
-				   struct _lowcore *suspend_lowcore)
-{
-	int cpu, suspend_cpu_id, resume_cpu_id;
-	u32 suspend_phys_cpu_id;
-
-	suspend_phys_cpu_id = __cpu_logical_map[suspend_lowcore->cpu_nr];
-	suspend_cpu_id = suspend_lowcore->cpu_nr;
-
-	for_each_present_cpu(cpu) {
-		if (__cpu_logical_map[cpu] == resume_phys_cpu_id) {
-			resume_cpu_id = cpu;
-			goto found;
-		}
-	}
-	panic("Could not find resume cpu in logical map.\n");
-
-found:
-	printk("Resume cpu ID: %i/%i\n", resume_phys_cpu_id, resume_cpu_id);
-	printk("Suspend cpu ID: %i/%i\n", suspend_phys_cpu_id, suspend_cpu_id);
-
-	__cpu_logical_map[resume_cpu_id] = suspend_phys_cpu_id;
-	__cpu_logical_map[suspend_cpu_id] = resume_phys_cpu_id;
-
-	lowcore_ptr[suspend_cpu_id]->cpu_addr = resume_phys_cpu_id;
-}
-
-u32 smp_get_phys_cpu_id(void)
-{
-	return __cpu_logical_map[smp_processor_id()];
-}
-
 static int __init topology_init(void)
 {
 	int cpu;
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index fc056810a017..fe927d0bc20b 100644
--- a/arch/s390/kernel/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -9,6 +9,7 @@
 
 #include <asm/page.h>
 #include <asm/ptrace.h>
+#include <asm/thread_info.h>
 #include <asm/asm-offsets.h>
 
 /*
@@ -41,6 +42,9 @@ swsusp_arch_suspend:
 	/* Get pointer to save area */
 	lghi	%r1,0x1000
 
+	/* Save CPU address */
+	stap	__LC_CPU_ADDRESS(%r1)
+
 	/* Store registers */
 	mvc	0x318(4,%r1),__SF_EMPTY(%r15)	/* move prefix to lowcore */
 	stfpc	0x31c(%r1)			/* store fpu control */
@@ -105,12 +109,7 @@ swsusp_arch_resume:
 	/* Make all free pages stable */
 	lghi	%r2,1
 	brasl	%r14,arch_set_page_states
-#ifdef CONFIG_SMP
-	/* Save boot cpu number */
-	brasl	%r14,smp_get_phys_cpu_id
-	larl	%r1,saved_cpu_id
-	st	%r2,0(%r1)
-#endif
+
 	/* Deactivate DAT */
 	stnsm	__SF_EMPTY(%r15),0xfb
 
@@ -139,12 +138,10 @@ swsusp_arch_resume:
 
 	/* Reset System */
 	larl	%r1,restart_entry
-	larl	%r2,restart_psw
+	larl	%r2,.Lrestart_diag308_psw
 	og	%r1,0(%r2)
 	stg	%r1,0(%r0)
-	larl	%r1,saved_pgm_check_psw
-	mvc	0(16,%r1),__LC_PGM_NEW_PSW(%r0)
-	larl	%r1,new_pgm_check_psw
+	larl	%r1,.Lnew_pgm_check_psw
 	epsw	%r2,%r3
 	stm	%r2,%r3,0(%r1)
 	mvc	__LC_PGM_NEW_PSW(16,%r0),0(%r1)
@@ -154,12 +151,54 @@ restart_entry:
 	lhi	%r1,1
 	sigp	%r1,%r0,0x12
 	sam64
-	larl	%r1,new_pgm_check_psw
+	larl	%r1,.Lnew_pgm_check_psw
 	lpswe	0(%r1)
 
 pgm_check_entry:
-	larl	%r1,saved_pgm_check_psw
-	mvc	__LC_PGM_NEW_PSW(16,%r0),0(%r1)
+	/* Switch to original suspend CPU */
+	larl	%r1,.Lresume_cpu		/* Resume CPU address: r2 */
+	stap	0(%r1)
+	llgh	%r2,0(%r1)
+	lghi	%r3,0x1000
+	llgh	%r1,__LC_CPU_ADDRESS(%r3)	/* Suspend CPU address: r1 */
+	cgr	%r1,%r2
+	je	restore_registers		/* r1 = r2 -> nothing to do */
+	larl	%r4,.Lrestart_suspend_psw	/* Set new restart PSW */
+	mvc	__LC_RESTART_PSW(16,%r0),0(%r4)
+3:
+	sigp	%r9,%r1,__SIGP_INITIAL_CPU_RESET
+	brc	8,4f				/* accepted */
+	brc	2,3b				/* busy, try again */
+
+	/* Suspend CPU not available -> panic */
+	larl	%r15,init_thread_union
+	ahi	%r15,1<<(PAGE_SHIFT+THREAD_ORDER)
+	larl	%r2,.Lpanic_string
+	larl	%r3,_sclp_print_early
+	lghi	%r1,0
+	sam31
+	sigp	%r1,%r0,0x12
+	basr	%r14,%r3
+	larl	%r3,.Ldisabled_wait_31
+	lpsw	0(%r3)
+4:
+	/* Switch to suspend CPU */
+	sigp	%r9,%r1,__SIGP_RESTART		/* start suspend CPU */
+	brc	2,4b				/* busy, try again */
+5:
+	sigp	%r9,%r2,__SIGP_STOP		/* stop resume (current) CPU */
+6:	j	6b
+
+restart_suspend:
+	larl	%r1,.Lresume_cpu
+	llgh	%r2,0(%r1)
+7:
+	sigp	%r9,%r2,__SIGP_SENSE		/* Wait for resume CPU */
+	brc	2,7b				/* busy, try again */
+	tmll	%r9,0x40			/* Test if resume CPU is stopped */
+	jz	7b
+
+restore_registers:
 	/* Restore registers */
 	lghi	%r13,0x1000		/* %r1 = pointer to save arae */
 
@@ -193,13 +232,6 @@ pgm_check_entry:
 	/* Pointer to save area */
 	lghi	%r13,0x1000
 
-#ifdef CONFIG_SMP
-	/* Switch CPUs */
-	larl	%r1,saved_cpu_id
-	llgf	%r2,0(%r1)
-	llgf	%r3,0x318(%r13)
-	brasl	%r14,smp_switch_boot_cpu_in_resume
-#endif
 	/* Restore prefix register */
 	spx	0x318(%r13)
 
@@ -217,13 +249,16 @@ pgm_check_entry:
 
 	.section .data.nosave,"aw",@progbits
 	.align	8
-restart_psw:
+.Ldisabled_wait_31:
+	.long	0x000a0000,0x00000000
+.Lpanic_string:
+	.asciz	"Resume not possible because suspend CPU is no longer available"
+	.align	8
+.Lrestart_diag308_psw:
 	.long	0x00080000,0x80000000
-new_pgm_check_psw:
+.Lrestart_suspend_psw:
+	.quad	0x0000000180000000,restart_suspend
+.Lnew_pgm_check_psw:
 	.quad	0,pgm_check_entry
-saved_pgm_check_psw:
-	.quad	0,0
-#ifdef CONFIG_SMP
-saved_cpu_id:
-	.long	0
-#endif
+.Lresume_cpu:
+	.byte	0,0
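
[Editorial note] The asm-offsets.c hunk exists so that the new assembly code can refer to the SIGP
order codes symbolically: each DEFINE() emits a marker into the compiler's assembly output, which
the build post-processes into plain constants in the generated asm/asm-offsets.h that
swsusp_asm64.S includes. Below is a stand-alone sketch of that technique under assumptions: the
file, macro copy, symbol names and enum values here are illustrative, not taken from the kernel
tree.

/*
 * asm-offsets technique, stand-alone sketch. Compile with "cc -S" only; the
 * resulting assembly is never assembled, it is grepped/sed'ed into a header.
 */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

enum demo_sigp_order {			/* illustrative values only */
	demo_sigp_sense = 1,
	demo_sigp_stop  = 5,
};

int main(void)
{
	DEFINE(__DEMO_SIGP_SENSE, demo_sigp_sense);
	DEFINE(__DEMO_SIGP_STOP, demo_sigp_stop);
	return 0;
}

For example, "cc -S -o - demo.c | grep '^->'" prints one marker line per DEFINE(), which a small
script can rewrite into "#define __DEMO_SIGP_SENSE 1" and so on; the kernel's generated header is
produced the same way, which is what lets the patch write "sigp %r9,%r1,__SIGP_RESTART" instead of
a magic number.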