author    Nicholas Piggin <npiggin@gmail.com>    2021-01-30 23:08:25 +1000
committer Michael Ellerman <mpe@ellerman.id.au>  2021-02-09 00:02:10 +1100
commit    bf0e2374aa7b4f8b01fd59fcb0746a9b6b05326a
tree      69dfed6db9b66e5d80134cd53fabf86c0942ffd9
parent    f4c03b0e520c5f56e569a8da3fce5ddbd0696742
powerpc/64s: split do_hash_fault
This is required for subsequent interrupt wrapper implementation.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210130130852.2952424-16-npiggin@gmail.com
Diffstat (limited to 'arch/powerpc/mm/book3s64')
-rw-r--r--  arch/powerpc/mm/book3s64/hash_utils.c | 56
1 file changed, 33 insertions(+), 23 deletions(-)
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 1a270cc37d97..d7d3a80a51d4 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -1512,7 +1512,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
 }
 EXPORT_SYMBOL_GPL(hash_page);

-long do_hash_fault(struct pt_regs *regs)
+static long __do_hash_fault(struct pt_regs *regs)
 {
         unsigned long ea = regs->dar;
         unsigned long dsisr = regs->dsisr;
@@ -1522,27 +1522,6 @@ long do_hash_fault(struct pt_regs *regs)
         unsigned int region_id;
         long err;

-        if (unlikely(dsisr & (DSISR_BAD_FAULT_64S | DSISR_KEYFAULT)))
-                goto page_fault;
-
-        /*
-         * If we are in an "NMI" (e.g., an interrupt when soft-disabled), then
-         * don't call hash_page, just fail the fault. This is required to
-         * prevent re-entrancy problems in the hash code, namely perf
-         * interrupts hitting while something holds H_PAGE_BUSY, and taking a
-         * hash fault. See the comment in hash_preload().
-         *
-         * We come here as a result of a DSI at a point where we don't want
-         * to call hash_page, such as when we are accessing memory (possibly
-         * user memory) inside a PMU interrupt that occurred while interrupts
-         * were soft-disabled. We want to invoke the exception handler for
-         * the access, or panic if there isn't a handler.
-         */
-        if (unlikely(in_nmi())) {
-                bad_page_fault(regs, SIGSEGV);
-                return 0;
-        }
-
         region_id = get_region_id(ea);
         if ((region_id == VMALLOC_REGION_ID) || (region_id == IO_REGION_ID))
                 mm = &init_mm;
@@ -1581,8 +1560,39 @@ long do_hash_fault(struct pt_regs *regs)
                         bad_page_fault(regs, SIGBUS);
                 }
                 err = 0;
+        }
+
+        return err;
+}
+
+long do_hash_fault(struct pt_regs *regs)
+{
+        unsigned long dsisr = regs->dsisr;
+        long err;
+
+        if (unlikely(dsisr & (DSISR_BAD_FAULT_64S | DSISR_KEYFAULT)))
+                goto page_fault;
+
+        /*
+         * If we are in an "NMI" (e.g., an interrupt when soft-disabled), then
+         * don't call hash_page, just fail the fault. This is required to
+         * prevent re-entrancy problems in the hash code, namely perf
+         * interrupts hitting while something holds H_PAGE_BUSY, and taking a
+         * hash fault. See the comment in hash_preload().
+         *
+         * We come here as a result of a DSI at a point where we don't want
+         * to call hash_page, such as when we are accessing memory (possibly
+         * user memory) inside a PMU interrupt that occurred while interrupts
+         * were soft-disabled. We want to invoke the exception handler for
+         * the access, or panic if there isn't a handler.
+         */
+        if (unlikely(in_nmi())) {
+                bad_page_fault(regs, SIGSEGV);
+                return 0;
+        }

-        } else if (err) {
+        err = __do_hash_fault(regs);
+        if (err) {
 page_fault:
                 err = do_page_fault(regs);
         }
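
For readability, here is the new outer handler as it looks once this patch is applied, reassembled from the hunks above. This is a sketch rather than a copy of the file: indentation is approximate, the body of __do_hash_fault() (the old fault-handling code minus the checks shown here) is elided, and the trailing "return err;" plus closing brace come from unchanged context below the last hunk.

/* Sketch: do_hash_fault() after this patch, reassembled from the diff above. */
long do_hash_fault(struct pt_regs *regs)
{
        unsigned long dsisr = regs->dsisr;
        long err;

        /* Bad faults and key faults go straight to the page fault path. */
        if (unlikely(dsisr & (DSISR_BAD_FAULT_64S | DSISR_KEYFAULT)))
                goto page_fault;

        /*
         * In an "NMI" context (e.g. a perf interrupt taken while
         * soft-disabled) the hash code must not be re-entered, so fail
         * the access instead of calling into it.
         */
        if (unlikely(in_nmi())) {
                bad_page_fault(regs, SIGSEGV);
                return 0;
        }

        err = __do_hash_fault(regs);    /* the split-out hash fault body */
        if (err) {
page_fault:
                err = do_page_fault(regs);
        }

        return err;     /* unchanged tail, outside the hunks above */
}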