author		Khalid Aziz <khalid.aziz@oracle.com>	2018-02-23 15:46:41 -0700
committer	David S. Miller <davem@davemloft.net>	2018-03-18 07:38:48 -0700
commit		74a04967482faa7144b93dae3b2e913870dd421c (patch)
tree		387f2497cfd053d99504a00ff8ccce0dd5fbd647 /arch/sparc/include/asm
parent		a4602b62d9fdea41412ba765bbf32ecfc2b6a94c (diff)
sparc64: Add support for ADI (Application Data Integrity)
ADI is a new feature supported on SPARC M7 and newer processors to allow
hardware to catch rogue accesses to memory. ADI is supported for data
fetches only and not instruction fetches. An app can enable ADI on its
data pages, set version tags on them and use versioned addresses to
access the data pages. Upper bits of the address contain the version tag.
On M7 processors, upper four bits (bits 63-60) contain the version tag.
If a rogue app attempts to access ADI enabled data pages, its access is
blocked and processor generates an exception. Please see
Documentation/sparc/adi.txt for further details.

This patch extends mprotect to enable ADI (TSTATE.mcde), enable/disable
MCD (Memory Corruption Detection) on selected memory ranges, enable
TTE.mcd in PTEs, return ADI parameters to userspace and save/restore ADI
version tags on page swap out/in or migration. ADI is not enabled by
default for any task. A task must explicitly enable ADI on a memory range
and set version tag for ADI to be effective for the task.

Signed-off-by: Khalid Aziz <khalid.aziz@oracle.com>
Cc: Khalid Aziz <khalid@gonehiking.org>
Reviewed-by: Anthony Yznaga <anthony.yznaga@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
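For context, a minimal userspace sketch (not part of this patch) of the usage
flow described above: enable ADI on a mapping via mprotect() and build a
versioned address with the tag in bits 63-60. PROT_ADI and the tag position
come from the description; the mapping size, the tag value 0xa, the fallback
PROT_ADI value and the error handling are illustrative assumptions, and the
actual tag-setting step with MCD store instructions (see
Documentation/sparc/adi.txt) is omitted.

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

#ifndef PROT_ADI
#define PROT_ADI	0x10	/* assumed fallback; provided by the sparc uapi mman.h */
#endif

int main(void)
{
	size_t len = 2 * 1024 * 1024;	/* illustrative size */
	unsigned long tag = 0xa;	/* illustrative 4-bit version tag */
	char *buf;

	/* ADI works on regular anonymous mappings, not on PFN-mapped or
	 * mergeable pages (see sparc_validate_prot() below).
	 */
	buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;

	/* Enable ADI on the range; this is where the new
	 * arch_validate_prot()/arch_calc_vm_prot_bits() hooks run.
	 */
	if (mprotect(buf, len, PROT_READ | PROT_WRITE | PROT_ADI)) {
		perror("mprotect(PROT_ADI)");
		return 1;
	}

	/* Version tags would now be set on the pages with the MCD store
	 * instructions documented in Documentation/sparc/adi.txt (omitted
	 * here). Accesses then use a versioned address with the tag in
	 * bits 63-60:
	 */
	char *vaddr = (char *)((unsigned long)buf | (tag << 60));
	(void)vaddr;	/* dereference only after tags have been set */

	return 0;
}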
Diffstat (limited to 'arch/sparc/include/asm')
-rw-r--r--  arch/sparc/include/asm/mman.h            | 84
-rw-r--r--  arch/sparc/include/asm/mmu_64.h          | 17
-rw-r--r--  arch/sparc/include/asm/mmu_context_64.h  | 51
-rw-r--r--  arch/sparc/include/asm/page_64.h         |  6
-rw-r--r--  arch/sparc/include/asm/pgtable_64.h      | 46
-rw-r--r--  arch/sparc/include/asm/thread_info_64.h  |  2
-rw-r--r--  arch/sparc/include/asm/trap_block.h      |  2
7 files changed, 206 insertions(+), 2 deletions(-)
diff --git a/arch/sparc/include/asm/mman.h b/arch/sparc/include/asm/mman.h
index 7e9472143f9b..f94532f25db1 100644
--- a/arch/sparc/include/asm/mman.h
+++ b/arch/sparc/include/asm/mman.h
@@ -7,5 +7,87 @@
#ifndef __ASSEMBLY__
#define arch_mmap_check(addr,len,flags) sparc_mmap_check(addr,len)
int sparc_mmap_check(unsigned long addr, unsigned long len);
-#endif
+
+#ifdef CONFIG_SPARC64
+#include <asm/adi_64.h>
+
+static inline void ipi_set_tstate_mcde(void *arg)
+{
+	struct mm_struct *mm = arg;
+
+	/* Set TSTATE_MCDE for the task using address map that ADI has been
+	 * enabled on if the task is running. If not, it will be set
+	 * automatically at the next context switch
+	 */
+	if (current->mm == mm) {
+		struct pt_regs *regs;
+
+		regs = task_pt_regs(current);
+		regs->tstate |= TSTATE_MCDE;
+	}
+}
+
+#define arch_calc_vm_prot_bits(prot, pkey) sparc_calc_vm_prot_bits(prot)
+static inline unsigned long sparc_calc_vm_prot_bits(unsigned long prot)
+{
+	if (adi_capable() && (prot & PROT_ADI)) {
+		struct pt_regs *regs;
+
+		if (!current->mm->context.adi) {
+			regs = task_pt_regs(current);
+			regs->tstate |= TSTATE_MCDE;
+			current->mm->context.adi = true;
+			on_each_cpu_mask(mm_cpumask(current->mm),
+					 ipi_set_tstate_mcde, current->mm, 0);
+		}
+		return VM_SPARC_ADI;
+	} else {
+		return 0;
+	}
+}
+
+#define arch_vm_get_page_prot(vm_flags) sparc_vm_get_page_prot(vm_flags)
+static inline pgprot_t sparc_vm_get_page_prot(unsigned long vm_flags)
+{
+	return (vm_flags & VM_SPARC_ADI) ? __pgprot(_PAGE_MCD_4V) : __pgprot(0);
+}
+
+#define arch_validate_prot(prot, addr) sparc_validate_prot(prot, addr)
+static inline int sparc_validate_prot(unsigned long prot, unsigned long addr)
+{
+	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_ADI))
+		return 0;
+	if (prot & PROT_ADI) {
+		if (!adi_capable())
+			return 0;
+
+		if (addr) {
+			struct vm_area_struct *vma;
+
+			vma = find_vma(current->mm, addr);
+			if (vma) {
+				/* ADI can not be enabled on PFN
+				 * mapped pages
+				 */
+				if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
+					return 0;
+
+				/* Mergeable pages can become unmergeable
+				 * if ADI is enabled on them even if they
+				 * have identical data on them. This can be
+				 * because ADI enabled pages with identical
+				 * data may still not have identical ADI
+				 * tags on them. Disallow ADI on mergeable
+				 * pages.
+				 */
+				if (vma->vm_flags & VM_MERGEABLE)
+					return 0;
+			}
+		}
+	}
+	return 1;
+}
+#endif /* CONFIG_SPARC64 */
+
+#endif /* __ASSEMBLY__ */
#endif /* __SPARC_MMAN_H__ */
diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h
index ad4fb93508ba..7e2704c770e9 100644
--- a/arch/sparc/include/asm/mmu_64.h
+++ b/arch/sparc/include/asm/mmu_64.h
@@ -90,6 +90,20 @@ struct tsb_config {
#define MM_NUM_TSBS 1
#endif
+/* ADI tags are stored when a page is swapped out and the storage for
+ * tags is allocated dynamically. There is a tag storage descriptor
+ * associated with each set of tag storage pages. Tag storage descriptors
+ * are allocated dynamically. Since kernel will allocate a full page for
+ * each tag storage descriptor, we can store up to
+ * PAGE_SIZE/sizeof(tag storage descriptor) descriptors on that page.
+ */
+typedef struct {
+	unsigned long start;		/* Start address for this tag storage */
+	unsigned long end;		/* Last address for tag storage */
+	unsigned char *tags;		/* Where the tags are */
+	unsigned long tag_users;	/* number of references to descriptor */
+} tag_storage_desc_t;
+
typedef struct {
	spinlock_t lock;
	unsigned long sparc64_ctx_val;
@@ -98,6 +112,9 @@ typedef struct {
	struct tsb_config tsb_block[MM_NUM_TSBS];
	struct hv_tsb_descr tsb_descr[MM_NUM_TSBS];
	void *vdso;
+	bool adi;
+	tag_storage_desc_t *tag_store;
+	spinlock_t tag_lock;
} mm_context_t;
#endif /* !__ASSEMBLY__ */
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index b361702ef52a..312fcee8df2b 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -9,8 +9,10 @@
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <linux/smp.h>
+#include <linux/sched.h>
#include <asm/spitfire.h>
+#include <asm/adi_64.h>
#include <asm-generic/mm_hooks.h>
#include <asm/percpu.h>
@@ -136,6 +138,55 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
#define deactivate_mm(tsk,mm) do { } while (0)
#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL)
+
+#define __HAVE_ARCH_START_CONTEXT_SWITCH
+static inline void arch_start_context_switch(struct task_struct *prev)
+{
+	/* Save the current state of MCDPER register for the process
+	 * we are switching from
+	 */
+	if (adi_capable()) {
+		register unsigned long tmp_mcdper;
+
+		__asm__ __volatile__(
+			".word 0x83438000\n\t"	/* rd %mcdper, %g1 */
+			"mov %%g1, %0\n\t"
+			: "=r" (tmp_mcdper)
+			:
+			: "g1");
+		if (tmp_mcdper)
+			set_tsk_thread_flag(prev, TIF_MCDPER);
+		else
+			clear_tsk_thread_flag(prev, TIF_MCDPER);
+	}
+}
+
+#define finish_arch_post_lock_switch finish_arch_post_lock_switch
+static inline void finish_arch_post_lock_switch(void)
+{
+	/* Restore the state of MCDPER register for the new process
+	 * just switched to.
+	 */
+	if (adi_capable()) {
+		register unsigned long tmp_mcdper;
+
+		tmp_mcdper = test_thread_flag(TIF_MCDPER);
+		__asm__ __volatile__(
+			"mov %0, %%g1\n\t"
+			".word 0x9d800001\n\t"	/* wr %g0, %g1, %mcdper */
+			".word 0xaf902001\n\t"	/* wrpr %g0, 1, %pmcdper */
+			:
+			: "ir" (tmp_mcdper)
+			: "g1");
+		if (current && current->mm && current->mm->context.adi) {
+			struct pt_regs *regs;
+
+			regs = task_pt_regs(current);
+			regs->tstate |= TSTATE_MCDE;
+		}
+	}
+}
+
#endif /* !(__ASSEMBLY__) */
#endif /* !(__SPARC64_MMU_CONTEXT_H) */
diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h
index c28379b1b0fc..e80f2d5bf62f 100644
--- a/arch/sparc/include/asm/page_64.h
+++ b/arch/sparc/include/asm/page_64.h
@@ -48,6 +48,12 @@ struct page;
void clear_user_page(void *addr, unsigned long vaddr, struct page *page);
#define copy_page(X,Y) memcpy((void *)(X), (void *)(Y), PAGE_SIZE)
void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *topage);
+#define __HAVE_ARCH_COPY_USER_HIGHPAGE
+struct vm_area_struct;
+void copy_user_highpage(struct page *to, struct page *from,
+			unsigned long vaddr, struct vm_area_struct *vma);
+#define __HAVE_ARCH_COPY_HIGHPAGE
+void copy_highpage(struct page *to, struct page *from);
/* Unlike sparc32, sparc64's parameter passing API is more
 * sane in that structures which as small enough are passed
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 619332a44402..44d6ac47e035 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -19,6 +19,7 @@
#include <asm/types.h>
#include <asm/spitfire.h>
#include <asm/asi.h>
+#include <asm/adi.h>
#include <asm/page.h>
#include <asm/processor.h>
@@ -606,6 +607,18 @@ static inline pte_t pte_mkspecial(pte_t pte)
	return pte;
}
+static inline pte_t pte_mkmcd(pte_t pte)
+{
+	pte_val(pte) |= _PAGE_MCD_4V;
+	return pte;
+}
+
+static inline pte_t pte_mknotmcd(pte_t pte)
+{
+	pte_val(pte) &= ~_PAGE_MCD_4V;
+	return pte;
+}
+
static inline unsigned long pte_young(pte_t pte)
{
	unsigned long mask;
@@ -1048,6 +1061,39 @@ int page_in_phys_avail(unsigned long paddr);
int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
		    unsigned long, pgprot_t);
+void adi_restore_tags(struct mm_struct *mm, struct vm_area_struct *vma,
+		      unsigned long addr, pte_t pte);
+
+int adi_save_tags(struct mm_struct *mm, struct vm_area_struct *vma,
+		  unsigned long addr, pte_t oldpte);
+
+#define __HAVE_ARCH_DO_SWAP_PAGE
+static inline void arch_do_swap_page(struct mm_struct *mm,
+				     struct vm_area_struct *vma,
+				     unsigned long addr,
+				     pte_t pte, pte_t oldpte)
+{
+	/* If this is a new page being mapped in, there can be no
+	 * ADI tags stored away for this page. Skip looking for
+	 * stored tags
+	 */
+	if (pte_none(oldpte))
+		return;
+
+	if (adi_state.enabled && (pte_val(pte) & _PAGE_MCD_4V))
+		adi_restore_tags(mm, vma, addr, pte);
+}
+
+#define __HAVE_ARCH_UNMAP_ONE
+static inline int arch_unmap_one(struct mm_struct *mm,
+				 struct vm_area_struct *vma,
+				 unsigned long addr, pte_t oldpte)
+{
+	if (adi_state.enabled && (pte_val(oldpte) & _PAGE_MCD_4V))
+		return adi_save_tags(mm, vma, addr, oldpte);
+	return 0;
+}
+
static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long from, unsigned long pfn,
				     unsigned long size, pgprot_t prot)
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index f7e7b0baec9f..7fb676360928 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -188,7 +188,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
 * in using in assembly, else we can't use the mask as
 * an immediate value in instructions such as andcc.
 */
-/* flag bit 12 is available */
+#define TIF_MCDPER 12 /* Precise MCD exception */
#define TIF_MEMDIE 13 /* is terminating due to OOM killer */
#define TIF_POLLING_NRFLAG 14
diff --git a/arch/sparc/include/asm/trap_block.h b/arch/sparc/include/asm/trap_block.h
index 6a4c8652ad67..0f6d0c4f6683 100644
--- a/arch/sparc/include/asm/trap_block.h
+++ b/arch/sparc/include/asm/trap_block.h
@@ -76,6 +76,8 @@ extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch,
	__sun4v_1insn_patch_end;
extern struct sun4v_1insn_patch_entry __fast_win_ctrl_1insn_patch,
	__fast_win_ctrl_1insn_patch_end;
+extern struct sun4v_1insn_patch_entry __sun_m7_1insn_patch,
+	__sun_m7_1insn_patch_end;
struct sun4v_2insn_patch_entry {
	unsigned int addr;