author     Rudolf Marek                               2007-05-08 17:22:01 +0200
committer  Jean Delvare                               2007-05-08 17:22:01 +0200
commit     4e9baad8f5cb2040e802eff484fad7e721b21c0b   (patch)
tree       35bd0bdbc9185b0a21d56b32fbd78390a06b1b41   /arch
parent     9ca8e40c8414d25e880b587cbd4d130750c49588   (diff)
i386: Add safe variants of rdmsr_on_cpu and wrmsr_on_cpu
Add safe (exception handled) variants of rdmsr_on_cpu and wrmsr_on_cpu.
You should use these when the target MSR may not actually exist, as
doing so could trigger an exception which the regular functions do not
handle. The safe variants are slower, though. The upcoming coretemp
hardware monitoring driver will need this.

Signed-off-by: Rudolf Marek <r.marek@assembler.cz>
Cc: Alexey Dobriyan <adobriyan@openvz.org>
Cc: Dave Jones <davej@redhat.com>
Signed-off-by: Jean Delvare <khali@linux-fr.org>
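For context, here is a minimal caller sketch (not part of this patch) showing how a
consumer such as the upcoming coretemp driver might use the new rdmsr_safe_on_cpu()
to probe an MSR that may be missing on some CPUs. The helper name and the choice of
IA32_THERM_STATUS (0x19c) are illustrative assumptions only, and the prototypes are
assumed to be made available through <asm/msr.h>.

/* Hypothetical caller sketch, not part of this patch. */
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/msr.h>

#define THERM_STATUS_MSR 0x19c	/* IA32_THERM_STATUS, used here as an example */

static int probe_therm_status(unsigned int cpu, u32 *lo, u32 *hi)
{
	/* A non-zero return means the rdmsr faulted, i.e. the MSR is absent. */
	if (rdmsr_safe_on_cpu(cpu, THERM_STATUS_MSR, lo, hi))
		return -ENODEV;	/* back off instead of taking an unhandled fault */

	return 0;
}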
Diffstat (limited to 'arch')
-rw-r--r--   arch/i386/lib/msr-on-cpu.c   73
1 file changed, 67 insertions, 6 deletions
diff --git a/arch/i386/lib/msr-on-cpu.c b/arch/i386/lib/msr-on-cpu.c
index 1c46bda409ff..7767962f25d3 100644
--- a/arch/i386/lib/msr-on-cpu.c
+++ b/arch/i386/lib/msr-on-cpu.c
@@ -6,6 +6,7 @@
 struct msr_info {
 	u32 msr_no;
 	u32 l, h;
+	int err;
 };
 
 static void __rdmsr_on_cpu(void *info)
@@ -15,20 +16,38 @@ static void __rdmsr_on_cpu(void *info)
 	rdmsr(rv->msr_no, rv->l, rv->h);
 }
 
-void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+static void __rdmsr_safe_on_cpu(void *info)
 {
+	struct msr_info *rv = info;
+
+	rv->err = rdmsr_safe(rv->msr_no, &rv->l, &rv->h);
+}
+
+static int _rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h, int safe)
+{
+	int err = 0;
 	preempt_disable();
 	if (smp_processor_id() == cpu)
-		rdmsr(msr_no, *l, *h);
+		if (safe)
+			err = rdmsr_safe(msr_no, l, h);
+		else
+			rdmsr(msr_no, *l, *h);
 	else {
 		struct msr_info rv;
 
 		rv.msr_no = msr_no;
-		smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 0, 1);
+		if (safe) {
+			smp_call_function_single(cpu, __rdmsr_safe_on_cpu,
+						 &rv, 0, 1);
+			err = rv.err;
+		} else {
+			smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 0, 1);
+		}
 		*l = rv.l;
 		*h = rv.h;
 	}
 	preempt_enable();
+	return err;
 }
 
 static void __wrmsr_on_cpu(void *info)
@@ -38,21 +57,63 @@ static void __wrmsr_on_cpu(void *info)
 	wrmsr(rv->msr_no, rv->l, rv->h);
 }
 
-void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+static void __wrmsr_safe_on_cpu(void *info)
 {
+	struct msr_info *rv = info;
+
+	rv->err = wrmsr_safe(rv->msr_no, rv->l, rv->h);
+}
+
+static int _wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h, int safe)
+{
+	int err = 0;
 	preempt_disable();
 	if (smp_processor_id() == cpu)
-		wrmsr(msr_no, l, h);
+		if (safe)
+			err = wrmsr_safe(msr_no, l, h);
+		else
+			wrmsr(msr_no, l, h);
 	else {
 		struct msr_info rv;
 
 		rv.msr_no = msr_no;
 		rv.l = l;
 		rv.h = h;
-		smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 0, 1);
+		if (safe) {
+			smp_call_function_single(cpu, __wrmsr_safe_on_cpu,
+						 &rv, 0, 1);
+			err = rv.err;
+		} else {
+			smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 0, 1);
+		}
 	}
 	preempt_enable();
+	return err;
+}
+
+void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+{
+	_wrmsr_on_cpu(cpu, msr_no, l, h, 0);
+}
+
+void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+{
+	_rdmsr_on_cpu(cpu, msr_no, l, h, 0);
+}
+
+/* These "safe" variants are slower and should be used when the target MSR
+   may not actually exist. */
+int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+{
+	return _wrmsr_on_cpu(cpu, msr_no, l, h, 1);
+}
+
+int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+{
+	return _rdmsr_on_cpu(cpu, msr_no, l, h, 1);
 }
 
 EXPORT_SYMBOL(rdmsr_on_cpu);
 EXPORT_SYMBOL(wrmsr_on_cpu);
+EXPORT_SYMBOL(rdmsr_safe_on_cpu);
+EXPORT_SYMBOL(wrmsr_safe_on_cpu);