author		Paul Burton	2017-06-02 15:38:02 -0700
committer	Ralf Baechle	2017-06-29 02:42:29 +0200
commit		f39878cc5b09c75d35eaf52131e920b872e3feb4 (patch)
tree		1e033d3bd1f29a4a59cd0fb2be4ec5d52989cb2a /arch
parent		e7bc8557428f069eaa613b3676ea6931c0f7fe43 (diff)
MIPS: Handle tlbex-tlbp race condition
In systems where there are multiple actors updating the TLB, the potential exists for a race condition wherein a CPU hits a TLB exception but by the time it reaches a TLBP instruction the affected TLB entry may have been replaced. This can happen if, for example, a CPU shares the TLB between hardware threads (VPs) within a core and one of them replaces the entry that another has just taken a TLB exception for.

We already handle this race in the case of the Hardware Table Walker (HTW) being the other actor, but didn't take into account the potential for multiple threads racing. Include the code for aborting TLB exception handling in the affected multi-threaded systems, those being the I6400 & I6500 CPUs which share TLB entries between VPs.

In the case of using RiXi without dedicated exceptions we have never handled this race, even for HTW. This patch adds WARN()s to these cases which ought never to be hit, because all CPUs with either HTW or shared FTLB RAMs also include dedicated RiXi exceptions, but the WARN()s will ensure this is always the case.

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/16203/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
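For context, the abort path emitted by build_r4000_tlbchange_handler_head() on affected CPUs amounts to roughly the following uasm fragment. Here p, r, wr and label_leave are the handler-generation state already in scope in that function; the bltz/nop tail is an assumption based on the surrounding handler code, since the first hunk below is truncated just after the mfc0:

	if (cpu_has_tlbex_tlbp_race()) {
		/* race condition happens, leaving */
		uasm_i_ehb(p);                          /* clear hazards so Index reflects the TLBP */
		uasm_i_mfc0(p, wr.r3, C0_INDEX);        /* read the Index written by the TLBP */
		uasm_il_bltz(p, r, wr.r3, label_leave); /* Index < 0: entry already replaced, bail */
		uasm_i_nop(p);                          /* branch delay slot */
	}

If Index is negative after the probe, the entry we took the exception for is gone, so the generated handler simply returns from the exception and lets the access be retaken rather than operating on whichever entry now occupies the slot.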
Diffstat (limited to 'arch')
-rw-r--r--	arch/mips/mm/tlbex.c	38
1 file changed, 37 insertions(+), 1 deletion(-)
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index ed1c5297547a..e6499209b81c 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -2015,6 +2015,26 @@ static void build_r3000_tlb_modify_handler(void)
}
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */

+static bool cpu_has_tlbex_tlbp_race(void)
+{
+	/*
+	 * When a Hardware Table Walker is running it can replace TLB entries
+	 * at any time, leading to a race between it & the CPU.
+	 */
+	if (cpu_has_htw)
+		return true;
+
+	/*
+	 * If the CPU shares FTLB RAM with its siblings then our entry may be
+	 * replaced at any time by a sibling performing a write to the FTLB.
+	 */
+	if (cpu_has_shared_ftlb_ram)
+		return true;
+
+	/* In all other cases there ought to be no race condition to handle */
+	return false;
+}
+
/*
 * R4000 style TLB load/store/modify handlers.
 */
@@ -2051,7 +2071,7 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
	iPTE_LW(p, wr.r1, wr.r2); /* get even pte */
	if (!m4kc_tlbp_war()) {
		build_tlb_probe_entry(p);
-		if (cpu_has_htw) {
+		if (cpu_has_tlbex_tlbp_race()) {
			/* race condition happens, leaving */
			uasm_i_ehb(p);
			uasm_i_mfc0(p, wr.r3, C0_INDEX);
@@ -2125,6 +2145,14 @@ static void build_r4000_tlb_load_handler(void)
		}
		uasm_i_nop(&p);

+		/*
+		 * Warn if something may race with us & replace the TLB entry
+		 * before we read it here. Everything with such races should
+		 * also have dedicated RiXi exception handlers, so this
+		 * shouldn't be hit.
+		 */
+		WARN(cpu_has_tlbex_tlbp_race(), "Unhandled race in RiXi path");
+
		uasm_i_tlbr(&p);

		switch (current_cpu_type()) {
@@ -2192,6 +2220,14 @@ static void build_r4000_tlb_load_handler(void)
		}
		uasm_i_nop(&p);

+		/*
+		 * Warn if something may race with us & replace the TLB entry
+		 * before we read it here. Everything with such races should
+		 * also have dedicated RiXi exception handlers, so this
+		 * shouldn't be hit.
+		 */
+		WARN(cpu_has_tlbex_tlbp_race(), "Unhandled race in RiXi path");
+
		uasm_i_tlbr(&p);

		switch (current_cpu_type()) {