author		Vincenzo Frascino <vincenzo.frascino@arm.com>	2020-12-22 12:01:42 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-12-22 12:55:07 -0800
commit		620954a67bcec6ca6b902baaaa1e3f2601b371a7 (patch)
tree		e259f490abfa25e9eddc4d19f1349918390bc6d5 /arch/arm64
parent		bfc62c5985274e926ee959dd3aaf999d4bdfbb1d (diff)
arm64: mte: convert gcr_user into an exclude mask
The gcr_user mask is a per-thread mask that represents the tags that are
excluded from random generation when the Memory Tagging Extension is
present and an 'irg' instruction is invoked. gcr_user affects the
behavior on EL0 only.

Currently that mask is an include mask, controlled by the user via
prctl(), while GCR_EL1 accepts an exclude mask. Convert the include mask
into an exclude one to make the register setting easier.

Note: this change will also affect gcr_kernel (for EL1), introduced with
a future patch.

Link: https://lkml.kernel.org/r/946dd31be833b660334c4f93410acf6d6c4cf3c4.1606161801.git.andreyknvl@google.com
Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Tested-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Branislav Rankov <Branislav.Rankov@arm.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Kevin Brodsky <kevin.brodsky@arm.com>
Cc: Marco Elver <elver@google.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
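[Editor's illustration, not part of the commit: a minimal standalone sketch of the include-to-exclude conversion this patch applies. The SYS_GCR_EL1_EXCL_MASK value here is an assumption matching the arm64 definition of the 16-bit exclude field.]

/*
 * Sketch of the conversion performed by set_mte_ctrl() after this patch:
 * the complement of the user's include mask, clamped to the 16 valid
 * tag bits, becomes the GCR_EL1 exclude mask.
 */
#include <stdint.h>
#include <stdio.h>

#define SYS_GCR_EL1_EXCL_MASK 0xffffULL	/* assumed: 16-bit tag field */

static uint64_t incl_to_excl(uint64_t incl)
{
	/* Tags the user asked to include drop out of the exclude set. */
	return ~incl & SYS_GCR_EL1_EXCL_MASK;
}

int main(void)
{
	/* Including only tags 0 and 1 excludes tags 2..15 (0xfffc). */
	printf("excl = 0x%llx\n", (unsigned long long)incl_to_excl(0x3));
	return 0;
}

This also explains the flush_mte_state() hunk below: under the exclude representation, the reset value becomes SYS_GCR_EL1_EXCL_MASK (exclude all tags), which is equivalent to the old include mask of 0.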
Diffstat (limited to 'arch/arm64')
-rw-r--r--	arch/arm64/include/asm/processor.h	2
-rw-r--r--	arch/arm64/kernel/mte.c	29
2 files changed, 16 insertions, 15 deletions
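[Editor's context, not part of the commit: a hedged sketch of the userspace side that supplies the include mask which set_mte_ctrl() in the diff below now inverts. The PR_MTE_* fallback values are assumptions matching the v5.10 linux/prctl.h definitions.]

/*
 * Hypothetical EL0 caller: enable tagged addressing with synchronous
 * MTE tag checking and ask for tags 0..3 to be included in 'irg'
 * random tag generation.
 */
#include <sys/prctl.h>

#ifndef PR_SET_TAGGED_ADDR_CTRL
#define PR_SET_TAGGED_ADDR_CTRL	55		/* assumed fallback */
#endif
#ifndef PR_TAGGED_ADDR_ENABLE
#define PR_TAGGED_ADDR_ENABLE	(1UL << 0)	/* assumed fallback */
#endif
#ifndef PR_MTE_TCF_SYNC
#define PR_MTE_TCF_SYNC		(1UL << 1)	/* assumed fallback */
#endif
#ifndef PR_MTE_TAG_SHIFT
#define PR_MTE_TAG_SHIFT	3		/* assumed fallback */
#endif

int enable_mte_sync(void)
{
	unsigned long incl = 0xfUL;	/* include tags 0..3 */

	return prctl(PR_SET_TAGGED_ADDR_CTRL,
		     PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
		     (incl << PR_MTE_TAG_SHIFT),
		     0, 0, 0);
}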
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 724249f37af5..ca2cd75d3286 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -152,7 +152,7 @@ struct thread_struct {
#endif
#ifdef CONFIG_ARM64_MTE
u64 sctlr_tcf0;
- u64 gcr_user_incl;
+ u64 gcr_user_excl;
#endif
};
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index fd88fe98bc10..2e1b1d14c0db 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -156,23 +156,22 @@ static void set_sctlr_el1_tcf0(u64 tcf0)
preempt_enable();
}
-static void update_gcr_el1_excl(u64 incl)
+static void update_gcr_el1_excl(u64 excl)
{
- u64 excl = ~incl & SYS_GCR_EL1_EXCL_MASK;
/*
- * Note that 'incl' is an include mask (controlled by the user via
- * prctl()) while GCR_EL1 accepts an exclude mask.
+ * Note that the mask controlled by the user via prctl() is an
+ * include while GCR_EL1 accepts an exclude mask.
* No need for ISB since this only affects EL0 currently, implicit
* with ERET.
*/
sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, excl);
}
-static void set_gcr_el1_excl(u64 incl)
+static void set_gcr_el1_excl(u64 excl)
{
- current->thread.gcr_user_incl = incl;
- update_gcr_el1_excl(incl);
+ current->thread.gcr_user_excl = excl;
+ update_gcr_el1_excl(excl);
}
void flush_mte_state(void)
@@ -187,7 +186,7 @@ void flush_mte_state(void)
/* disable tag checking */
set_sctlr_el1_tcf0(SCTLR_EL1_TCF0_NONE);
/* reset tag generation mask */
- set_gcr_el1_excl(0);
+ set_gcr_el1_excl(SYS_GCR_EL1_EXCL_MASK);
}
void mte_thread_switch(struct task_struct *next)
@@ -198,7 +197,7 @@ void mte_thread_switch(struct task_struct *next)
/* avoid expensive SCTLR_EL1 accesses if no change */
if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0)
update_sctlr_el1_tcf0(next->thread.sctlr_tcf0);
- update_gcr_el1_excl(next->thread.gcr_user_incl);
+ update_gcr_el1_excl(next->thread.gcr_user_excl);
}
void mte_suspend_exit(void)
@@ -206,13 +205,14 @@ void mte_suspend_exit(void)
if (!system_supports_mte())
return;
- update_gcr_el1_excl(current->thread.gcr_user_incl);
+ update_gcr_el1_excl(current->thread.gcr_user_excl);
}
long set_mte_ctrl(struct task_struct *task, unsigned long arg)
{
u64 tcf0;
- u64 gcr_incl = (arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT;
+ u64 gcr_excl = ~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
+ SYS_GCR_EL1_EXCL_MASK;
if (!system_supports_mte())
return 0;
@@ -233,10 +233,10 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg)
if (task != current) {
task->thread.sctlr_tcf0 = tcf0;
- task->thread.gcr_user_incl = gcr_incl;
+ task->thread.gcr_user_excl = gcr_excl;
} else {
set_sctlr_el1_tcf0(tcf0);
- set_gcr_el1_excl(gcr_incl);
+ set_gcr_el1_excl(gcr_excl);
}
return 0;
@@ -245,11 +245,12 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg)
long get_mte_ctrl(struct task_struct *task)
{
unsigned long ret;
+ u64 incl = ~task->thread.gcr_user_excl & SYS_GCR_EL1_EXCL_MASK;
if (!system_supports_mte())
return 0;
- ret = task->thread.gcr_user_incl << PR_MTE_TAG_SHIFT;
+ ret = incl << PR_MTE_TAG_SHIFT;
switch (task->thread.sctlr_tcf0) {
case SCTLR_EL1_TCF0_NONE: