Diffstat (limited to 'arch/x86/lib')
 -rw-r--r--   arch/x86/lib/copy_user_64.S            2
 -rw-r--r--   arch/x86/lib/copy_user_nocache_64.S    3
 -rw-r--r--   arch/x86/lib/msr-on-cpu.c             22
3 files changed, 14 insertions, 13 deletions
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index dfdf428975c0..f118c110af32 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -52,7 +52,7 @@
 	jnz 100b
 102:
 	.section .fixup,"ax"
-103:	addl %r8d,%edx			/* ecx is zerorest also */
+103:	addl %ecx,%edx			/* ecx is zerorest also */
 	jmp copy_user_handle_tail
 	.previous
diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
index 40e0e309d27e..cb0c112386fb 100644
--- a/arch/x86/lib/copy_user_nocache_64.S
+++ b/arch/x86/lib/copy_user_nocache_64.S
@@ -32,7 +32,7 @@
 	jnz 100b
 102:
 	.section .fixup,"ax"
-103:	addl %r8d,%edx			/* ecx is zerorest also */
+103:	addl %ecx,%edx			/* ecx is zerorest also */
 	jmp copy_user_handle_tail
 	.previous
@@ -108,7 +108,6 @@ ENTRY(__copy_user_nocache)
 	jmp 60f
 50:	movl %ecx,%edx
 60:	sfence
-	movl %r8d,%ecx
 	jmp copy_user_handle_tail
 	.previous
diff --git a/arch/x86/lib/msr-on-cpu.c b/arch/x86/lib/msr-on-cpu.c
index d5a2b39f882b..01b868ba82f8 100644
--- a/arch/x86/lib/msr-on-cpu.c
+++ b/arch/x86/lib/msr-on-cpu.c
@@ -30,10 +30,11 @@ static int _rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h, int safe)
 	rv.msr_no = msr_no;
 	if (safe) {
-		smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
-		err = rv.err;
+		err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu,
+					       &rv, 1);
+		err = err ? err : rv.err;
 	} else {
-		smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
+		err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
 	}
 	*l = rv.l;
 	*h = rv.h;
@@ -64,23 +65,24 @@ static int _wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h, int safe)
 	rv.l = l;
 	rv.h = h;
 	if (safe) {
-		smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
-		err = rv.err;
+		err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu,
+					       &rv, 1);
+		err = err ? err : rv.err;
 	} else {
-		smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
+		err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
 	}
 	return err;
 }
 
-void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 {
-	_wrmsr_on_cpu(cpu, msr_no, l, h, 0);
+	return _wrmsr_on_cpu(cpu, msr_no, l, h, 0);
 }
 
-void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
 {
-	_rdmsr_on_cpu(cpu, msr_no, l, h, 0);
+	return _rdmsr_on_cpu(cpu, msr_no, l, h, 0);
 }
 
 /* These "safe" variants are slower and should be used when the target MSR
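
With this patch rdmsr_on_cpu() and wrmsr_on_cpu() propagate the return value of smp_call_function_single() instead of returning void, so a caller can notice when the cross-CPU call itself failed rather than silently consuming stale data. A minimal sketch of how a caller might use the new return value follows; the helper example_read_msr() is hypothetical and not part of this patch:

#include <linux/types.h>
#include <asm/msr.h>

/*
 * Illustrative only: read an MSR on a specific CPU and fail cleanly if
 * the cross-CPU call could not be made (for instance when that CPU is
 * offline), instead of using whatever happened to be in lo/hi.
 */
static int example_read_msr(unsigned int cpu, u32 msr, u64 *val)
{
	u32 lo, hi;
	int err;

	err = rdmsr_on_cpu(cpu, msr, &lo, &hi);	/* now returns int */
	if (err)
		return err;

	*val = ((u64)hi << 32) | lo;
	return 0;
}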
