From 23d5de8efb9aed48074a72bf3d43841e1556ca42 Mon Sep 17 00:00:00 2001
From: Paul Burton
Date: Tue, 22 Sep 2015 11:12:16 -0700
Subject: MIPS: CM: Introduce core-other locking functions

Introduce mips_cm_lock_other & mips_cm_unlock_other, mirroring the
existing CPC equivalents, in order to lock access from the current core
to another via the core-other GCR region. This hasn't been required in
the past but with CM3 the CPC starts using GCR_CL_OTHER rather than
CPC_CL_OTHER and this will be required for safety.

[ralf@linux-mips.org: Fix merge conflict.]

Signed-off-by: Paul Burton
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
Cc: James Hogan
Cc: Markos Chandras
Patchwork: https://patchwork.linux-mips.org/patch/11207/
Signed-off-by: Ralf Baechle
---
 arch/mips/kernel/mips-cm.c | 39 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 39 insertions(+)

(limited to 'arch/mips/kernel')

diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
index 02f7d7a41133..01908dbdf677 100644
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -9,6 +9,8 @@
  */
 
 #include <linux/errno.h>
+#include <linux/percpu.h>
+#include <linux/spinlock.h>
 
 #include <asm/mips-cm.h>
 #include <asm/mipsregs.h>
@@ -136,6 +138,9 @@ static char *cm3_causes[32] = {
 	"0x19", "0x1a", "0x1b", "0x1c", "0x1d", "0x1e", "0x1f"
 };
 
+static DEFINE_PER_CPU_ALIGNED(spinlock_t, cm_core_lock);
+static DEFINE_PER_CPU_ALIGNED(unsigned long, cm_core_lock_flags);
+
 phys_addr_t __mips_cm_phys_base(void)
 {
 	u32 config3 = read_c0_config3();
@@ -200,6 +205,7 @@ int mips_cm_probe(void)
 {
 	phys_addr_t addr;
 	u32 base_reg;
+	unsigned cpu;
 
 	/*
 	 * No need to probe again if we have already been
@@ -247,9 +253,42 @@ int mips_cm_probe(void)
 	/* determine register width for this CM */
 	mips_cm_is64 = config_enabled(CONFIG_64BIT) && (mips_cm_revision() >= CM_REV_CM3);
 
+	for_each_possible_cpu(cpu)
+		spin_lock_init(&per_cpu(cm_core_lock, cpu));
+
 	return 0;
 }
 
+void mips_cm_lock_other(unsigned int core, unsigned int vp)
+{
+	unsigned curr_core;
+	u32 val;
+
+	preempt_disable();
+	curr_core = current_cpu_data.core;
+	spin_lock_irqsave(&per_cpu(cm_core_lock, curr_core),
+			  per_cpu(cm_core_lock_flags, curr_core));
+
+	if (mips_cm_revision() >= CM_REV_CM3) {
+		val = core << CM3_GCR_Cx_OTHER_CORE_SHF;
+		val |= vp << CM3_GCR_Cx_OTHER_VP_SHF;
+	} else {
+		BUG_ON(vp != 0);
+		val = core << CM_GCR_Cx_OTHER_CORENUM_SHF;
+	}
+
+	write_gcr_cl_other(val);
+}
+
+void mips_cm_unlock_other(void)
+{
+	unsigned curr_core = current_cpu_data.core;
+
+	spin_unlock_irqrestore(&per_cpu(cm_core_lock, curr_core),
+			       per_cpu(cm_core_lock_flags, curr_core));
+	preempt_enable();
+}
+
 void mips_cm_error_report(void)
 {
 	u64 cm_error, cm_addr, cm_other;
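
Usage note: the intent is that every access made through the core-other GCR
window is bracketed by the new pair, so GCR_CL_OTHER cannot be reprogrammed
underneath a concurrent user on the same core. Below is a minimal sketch of a
caller, not taken from this patch: it assumes the mips_cm_lock_other() /
mips_cm_unlock_other() prototypes are exported via asm/mips-cm.h (that header
change falls outside the arch/mips/kernel filter shown above) and that a
core-other accessor such as write_gcr_co_reset_base() is generated by the GCR
accessor macros in that header. The function name and entry address are purely
illustrative.

#include <asm/mips-cm.h>

/*
 * Illustrative sketch only: program another core's reset vector while
 * holding the core-other lock introduced by this patch.
 */
static void example_set_core_reset_base(unsigned int core,
					unsigned long entry)
{
	/*
	 * Disable preemption, take this core's cm_core_lock and program
	 * GCR_CL_OTHER to target 'core' (VP 0 on pre-CM3 systems).
	 */
	mips_cm_lock_other(core, 0);

	/* Accesses via the core-other window now target 'core'. */
	write_gcr_co_reset_base(entry);

	/* Release the lock and re-enable preemption. */
	mips_cm_unlock_other();
}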