-rw-r--r--   arch/i386/kernel/process.c   |  4 ++--
-rw-r--r--   arch/i386/kernel/vm86.c      |  4 ++--
-rw-r--r--   arch/x86_64/kernel/process.c | 12 ++++++------
-rw-r--r--   include/asm-i386/system.h    |  6 +++---
4 files changed, 13 insertions, 13 deletions
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index b2203e21acb3..85bd56d44314 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -611,8 +611,8 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
 	 * Save away %fs and %gs. No need to save %es and %ds, as
 	 * those are always kernel segments while inside the kernel.
 	 */
-	asm volatile("movl %%fs,%0":"=m" (*(int *)&prev->fs));
-	asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->gs));
+	asm volatile("mov %%fs,%0":"=m" (prev->fs));
+	asm volatile("mov %%gs,%0":"=m" (prev->gs));
 
 	/*
 	 * Restore %fs and %gs if needed.
diff --git a/arch/i386/kernel/vm86.c b/arch/i386/kernel/vm86.c
index 2f3d52dacff7..d16cd3738a48 100644
--- a/arch/i386/kernel/vm86.c
+++ b/arch/i386/kernel/vm86.c
@@ -294,8 +294,8 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
 	 */
 	info->regs32->eax = 0;
 	tsk->thread.saved_esp0 = tsk->thread.esp0;
-	asm volatile("movl %%fs,%0":"=m" (tsk->thread.saved_fs));
-	asm volatile("movl %%gs,%0":"=m" (tsk->thread.saved_gs));
+	asm volatile("mov %%fs,%0":"=m" (tsk->thread.saved_fs));
+	asm volatile("mov %%gs,%0":"=m" (tsk->thread.saved_gs));
 
 	tss = &per_cpu(init_tss, get_cpu());
 	tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 9922d2ba24a3..761b6d35e338 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -402,10 +402,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
 	p->thread.fs = me->thread.fs;
 	p->thread.gs = me->thread.gs;
 
-	asm("movl %%gs,%0" : "=m" (p->thread.gsindex));
-	asm("movl %%fs,%0" : "=m" (p->thread.fsindex));
-	asm("movl %%es,%0" : "=m" (p->thread.es));
-	asm("movl %%ds,%0" : "=m" (p->thread.ds));
+	asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
+	asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
+	asm("mov %%es,%0" : "=m" (p->thread.es));
+	asm("mov %%ds,%0" : "=m" (p->thread.ds));
 
 	if (unlikely(me->thread.io_bitmap_ptr != NULL)) {
 		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
@@ -468,11 +468,11 @@ struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct *
 	 * Switch DS and ES.
 	 * This won't pick up thread selector changes, but I guess that is ok.
 	 */
-	asm volatile("movl %%es,%0" : "=m" (prev->es));
+	asm volatile("mov %%es,%0" : "=m" (prev->es));
 	if (unlikely(next->es | prev->es))
 		loadsegment(es, next->es);
 
-	asm volatile ("movl %%ds,%0" : "=m" (prev->ds));
+	asm volatile ("mov %%ds,%0" : "=m" (prev->ds));
 	if (unlikely(next->ds | prev->ds))
 		loadsegment(ds, next->ds);
 
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 6f74d4c44a0e..3db717a244f0 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -81,7 +81,7 @@ static inline unsigned long _get_base(char * addr)
 #define loadsegment(seg,value)			\
 	asm volatile("\n"			\
 		"1:\t"				\
-		"movl %0,%%" #seg "\n"		\
+		"mov %0,%%" #seg "\n"		\
 		"2:\n"				\
 		".section .fixup,\"ax\"\n"	\
 		"3:\t"				\
@@ -93,13 +93,13 @@ static inline unsigned long _get_base(char * addr)
 		".align 4\n\t"			\
 		".long 1b,3b\n"			\
 		".previous"			\
-		: :"m" (*(unsigned int *)&(value)))
+		: :"m" (value))
 
 /*
  * Save a segment register away
  */
 #define savesegment(seg, value) \
-	asm volatile("movl %%" #seg ",%0":"=m" (*(int *)&(value)))
+	asm volatile("mov %%" #seg ",%0":"=m" (value))
 
 /*
  * Clear and set 'TS' bit respectively
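
Note: the pattern this patch touches is the one wrapped up in the savesegment()/loadsegment() macros: moving a segment register to or from a memory operand. Below is a minimal user-space sketch (an illustration, not kernel code; the variable name and the choice of %cs are assumptions) of the savesegment() form after the change. With a plain "mov" the assembler sizes the store to the operand it is given, so a 16-bit destination gets a 16-bit store, whereas the old *(int *)& cast forced a 32-bit "movl" store.

#include <stdio.h>

int main(void)
{
	unsigned short sel;	/* segment selectors are 16 bits wide */

	/* Same shape as savesegment(cs, sel): "mov" lets the assembler
	 * emit a 16-bit store of the selector into the memory operand. */
	asm volatile("mov %%cs,%0" : "=m" (sel));

	printf("cs selector: %#x\n", (unsigned int)sel);
	return 0;
}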