Diffstat (limited to 'arch/powerpc/include')
54 files changed, 755 insertions, 458 deletions
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index d0b832cbbec8..939f3c94c8f3 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -56,35 +56,6 @@ int exit_vmx_usercopy(void);
 int enter_vmx_ops(void);
 void *exit_vmx_ops(void *dest);
 
-/* Traps */
-long machine_check_early(struct pt_regs *regs);
-long hmi_exception_realmode(struct pt_regs *regs);
-void SMIException(struct pt_regs *regs);
-void handle_hmi_exception(struct pt_regs *regs);
-void instruction_breakpoint_exception(struct pt_regs *regs);
-void RunModeException(struct pt_regs *regs);
-void single_step_exception(struct pt_regs *regs);
-void program_check_exception(struct pt_regs *regs);
-void alignment_exception(struct pt_regs *regs);
-void StackOverflow(struct pt_regs *regs);
-void stack_overflow_exception(struct pt_regs *regs);
-void kernel_fp_unavailable_exception(struct pt_regs *regs);
-void altivec_unavailable_exception(struct pt_regs *regs);
-void vsx_unavailable_exception(struct pt_regs *regs);
-void fp_unavailable_tm(struct pt_regs *regs);
-void altivec_unavailable_tm(struct pt_regs *regs);
-void vsx_unavailable_tm(struct pt_regs *regs);
-void facility_unavailable_exception(struct pt_regs *regs);
-void TAUException(struct pt_regs *regs);
-void altivec_assist_exception(struct pt_regs *regs);
-void unrecoverable_exception(struct pt_regs *regs);
-void kernel_bad_stack(struct pt_regs *regs);
-void system_reset_exception(struct pt_regs *regs);
-void machine_check_exception(struct pt_regs *regs);
-void emulation_assist_interrupt(struct pt_regs *regs);
-long do_slb_fault(struct pt_regs *regs, unsigned long ea);
-void do_bad_slb_fault(struct pt_regs *regs, unsigned long ea, long err);
-
 /* signals, syscalls and interrupts */
 long sys_swapcontext(struct ucontext __user *old_ctx,
 		     struct ucontext __user *new_ctx,
diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h
index a0117a9d5b06..73bc5d2c431d 100644
--- a/arch/powerpc/include/asm/book3s/32/kup.h
+++ b/arch/powerpc/include/asm/book3s/32/kup.h
@@ -95,12 +95,12 @@ static inline void kuap_update_sr(u32 sr, u32 addr, u32 end)
 	addr &= 0xf0000000;	/* align addr to start of segment */
 	barrier();	/* make sure thread.kuap is updated before playing with SRs */
 	while (addr < end) {
-		mtsrin(sr, addr);
+		mtsr(sr, addr);
 		sr += 0x111;		/* next VSID */
 		sr &= 0xf0ffffff;	/* clear VSID overflow */
 		addr += 0x10000000;	/* address of next segment */
 	}
-	isync();	/* Context sync required after mtsrin() */
+	isync();	/* Context sync required after mtsr() */
 }
 
 static __always_inline void allow_user_access(void __user *to, const void __user *from,
@@ -122,7 +122,7 @@ static __always_inline void allow_user_access(void __user *to, const void __user
 	end = min(addr + size, TASK_SIZE);
 
 	current->thread.kuap = (addr & 0xf0000000) | ((((end - 1) >> 28) + 1) & 0xf);
-	kuap_update_sr(mfsrin(addr) & ~SR_KS, addr, end);	/* Clear Ks */
+	kuap_update_sr(mfsr(addr) & ~SR_KS, addr, end);	/* Clear Ks */
 }
 
 static __always_inline void prevent_user_access(void __user *to, const void __user *from,
@@ -151,7 +151,7 @@ static __always_inline void prevent_user_access(void __user *to, const void __us
 	}
 
 	current->thread.kuap = 0;
-	kuap_update_sr(mfsrin(addr) | SR_KS, addr, end);	/* set Ks */
+	kuap_update_sr(mfsr(addr) | SR_KS, addr, end);	/* set Ks */
 }
 
 static inline unsigned long prevent_user_access_return(void)
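
As an illustrative aside (not part of the commit): the thread.kuap encoding used by allow_user_access() above is compact enough to demo standalone. The segment geometry is taken from the hunk; the sample addresses are invented.

/*
 * Sketch only: reproduces the thread.kuap encoding from the hunk above.
 * The high bits keep the 256MB-segment-aligned start address, the low
 * nibble holds the index of the first segment past the opened range.
 */
#include <stdio.h>

static unsigned int kuap_encode(unsigned int addr, unsigned int end)
{
	return (addr & 0xf0000000) | ((((end - 1) >> 28) + 1) & 0xf);
}

int main(void)
{
	/* A 3-byte access at 0x10004000 opens segment 1 only. */
	printf("kuap = 0x%08x\n", kuap_encode(0x10004000, 0x10004003));
	return 0;	/* prints kuap = 0x10000002 */
}
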
diff --git a/arch/powerpc/include/asm/book3s/32/mmu-hash.h b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
index 685c589e723f..b85f8e114a9c 100644
--- a/arch/powerpc/include/asm/book3s/32/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
@@ -94,7 +94,7 @@ typedef struct {
 } mm_context_t;
 
 void update_bats(void);
-static inline void cleanup_cpu_mmu_context(void) { };
+static inline void cleanup_cpu_mmu_context(void) { }
 
 /* patch sites */
 extern s32 patch__hash_page_A0, patch__hash_page_A1, patch__hash_page_A2;
diff --git a/arch/powerpc/include/asm/book3s/64/kup.h b/arch/powerpc/include/asm/book3s/64/kup.h
index f50f72e535aa..8bd905050896 100644
--- a/arch/powerpc/include/asm/book3s/64/kup.h
+++ b/arch/powerpc/include/asm/book3s/64/kup.h
@@ -199,25 +199,31 @@ DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);
 
 #ifdef CONFIG_PPC_PKEY
 
+extern u64 __ro_after_init default_uamor;
+extern u64 __ro_after_init default_amr;
+extern u64 __ro_after_init default_iamr;
+
 #include <asm/mmu.h>
 #include <asm/ptrace.h>
 
-/*
- * For kernel thread that doesn't have thread.regs return
- * default AMR/IAMR values.
+/* usage of kthread_use_mm() should inherit the
+ * AMR value of the operating address space. But, the AMR value is
+ * thread-specific and we inherit the address space and not thread
+ * access restrictions. Because of this ignore AMR value when accessing
+ * userspace via kernel thread.
  */
 static inline u64 current_thread_amr(void)
 {
 	if (current->thread.regs)
 		return current->thread.regs->amr;
-	return AMR_KUAP_BLOCKED;
+	return default_amr;
 }
 
 static inline u64 current_thread_iamr(void)
 {
 	if (current->thread.regs)
 		return current->thread.regs->iamr;
-	return AMR_KUEP_BLOCKED;
+	return default_iamr;
 }
 #endif /* CONFIG_PPC_PKEY */
 
@@ -333,7 +339,7 @@ static inline unsigned long get_kuap(void)
 	 * This has no effect in terms of actually blocking things on hash,
 	 * so it doesn't break anything.
 	 */
-	if (!early_mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
+	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
 		return AMR_KUAP_BLOCKED;
 
 	return mfspr(SPRN_AMR);
@@ -341,7 +347,7 @@ static inline void set_kuap(unsigned long value)
 {
-	if (!early_mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
+	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
 		return;
 
 	/*
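
As an illustrative aside (not part of the commit): the default_amr/default_iamr fallbacks matter for exactly the pattern the replacement comment describes. A minimal sketch, assuming a worker and user address that are invented here; kthread_use_mm() and copy_from_user() are the real interfaces.

/*
 * Sketch only: a kernel thread borrowing a user mm. It has no
 * thread.regs, so current_thread_amr() now yields default_amr rather
 * than AMR_KUAP_BLOCKED, and the user copy can proceed.
 */
static int demo_io_worker(void *mm_arg)
{
	struct mm_struct *mm = mm_arg;	/* mm of the issuing process */
	char buf[16];
	const void __user *uptr = (const void __user *)0x10000000UL;

	kthread_use_mm(mm);		/* inherit the address space */
	if (copy_from_user(buf, uptr, sizeof(buf)))
		pr_warn("demo: user copy failed\n");
	kthread_unuse_mm(mm);
	return 0;
}
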
*/ - if (!early_mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) + if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) return AMR_KUAP_BLOCKED; return mfspr(SPRN_AMR); @@ -341,7 +347,7 @@ static inline unsigned long get_kuap(void) static inline void set_kuap(unsigned long value) { - if (!early_mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) + if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) return; /* diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h index 066b1d34c7bc..f911bdb68d8b 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h +++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h @@ -454,6 +454,8 @@ static inline unsigned long hpt_hash(unsigned long vpn, #define HPTE_NOHPTE_UPDATE 0x2 #define HPTE_USE_KERNEL_KEY 0x4 +long hpte_insert_repeating(unsigned long hash, unsigned long vpn, unsigned long pa, + unsigned long rlags, unsigned long vflags, int psize, int ssize); extern int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid, pte_t *ptep, unsigned long trap, unsigned long flags, int ssize, int subpage_prot); @@ -467,6 +469,8 @@ extern int hash_page_mm(struct mm_struct *mm, unsigned long ea, unsigned long flags); extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap, unsigned long dsisr); +void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc); +int __hash_page(unsigned long trap, unsigned long ea, unsigned long dsisr, unsigned long msr); int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid, pte_t *ptep, unsigned long trap, unsigned long flags, int ssize, unsigned int shift, unsigned int mmu_psize); @@ -521,6 +525,7 @@ void slb_dump_contents(struct slb_entry *slb_ptr); extern void slb_vmalloc_update(void); extern void slb_set_size(u16 size); +void preload_new_slb_context(unsigned long start, unsigned long sp); #endif /* __ASSEMBLY__ */ /* diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h index 995bbcdd0ef8..eace8c3f7b0a 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu.h +++ b/arch/powerpc/include/asm/book3s/64/mmu.h @@ -239,7 +239,7 @@ static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base, #ifdef CONFIG_PPC_PSERIES extern void radix_init_pseries(void); #else -static inline void radix_init_pseries(void) { }; +static inline void radix_init_pseries(void) { } #endif #ifdef CONFIG_HOTPLUG_CPU diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index a39886681629..058601efbc8a 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -388,11 +388,28 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm, #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG #define ptep_test_and_clear_young(__vma, __addr, __ptep) \ ({ \ - int __r; \ - __r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \ - __r; \ + __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \ }) +/* + * On Book3S CPUs, clearing the accessed bit without a TLB flush + * doesn't cause data corruption. [ It could cause incorrect + * page aging and the (mistaken) reclaim of hot pages, but the + * chance of that should be relatively low. ] + * + * So as a performance optimization don't flush the TLB when + * clearing the accessed bit, it will eventually be flushed by + * a context switch or a VM operation anyway. 
diff --git a/arch/powerpc/include/asm/book3s/64/pkeys.h b/arch/powerpc/include/asm/book3s/64/pkeys.h
index 3b8640498f5b..5b178139f3c0 100644
--- a/arch/powerpc/include/asm/book3s/64/pkeys.h
+++ b/arch/powerpc/include/asm/book3s/64/pkeys.h
@@ -5,10 +5,6 @@
 
 #include <asm/book3s/64/hash-pkey.h>
 
-extern u64 __ro_after_init default_uamor;
-extern u64 __ro_after_init default_amr;
-extern u64 __ro_after_init default_iamr;
-
 static inline u64 vmflag_to_pte_pkey_bits(u64 vm_flags)
 {
 	if (!mmu_has_feature(MMU_FTR_PKEY))
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index 94439e0cefc9..8b33601cdb9d 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -35,7 +35,7 @@ extern void radix__flush_pwc_lpid(unsigned int lpid);
 extern void radix__flush_all_lpid(unsigned int lpid);
 extern void radix__flush_all_lpid_guest(unsigned int lpid);
 #else
-static inline void radix__tlbiel_all(unsigned int action) { WARN_ON(1); };
+static inline void radix__tlbiel_all(unsigned int action) { WARN_ON(1); }
 static inline void radix__flush_tlb_lpid_page(unsigned int lpid,
 					      unsigned long addr,
 					      unsigned long page_size)
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
index dcb5c3839d2f..215973b4cb26 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
@@ -31,7 +31,7 @@ static inline void tlbiel_all(void)
 		hash__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
 }
 #else
-static inline void tlbiel_all(void) { BUG(); };
+static inline void tlbiel_all(void) { BUG(); }
 #endif
 
 static inline void tlbiel_all_lpid(bool radix)
diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
index 464f8ca8a5c9..d1635ffbb179 100644
--- a/arch/powerpc/include/asm/bug.h
+++ b/arch/powerpc/include/asm/bug.h
@@ -111,12 +111,15 @@
 #ifndef __ASSEMBLY__
 
 struct pt_regs;
-extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
-extern void bad_page_fault(struct pt_regs *, unsigned long, int);
-void __bad_page_fault(struct pt_regs *regs, unsigned long address, int sig);
+long do_page_fault(struct pt_regs *);
+long hash__do_page_fault(struct pt_regs *);
+void bad_page_fault(struct pt_regs *, int);
+void __bad_page_fault(struct pt_regs *regs, int sig);
+void do_bad_page_fault_segv(struct pt_regs *regs);
 extern void _exception(int, struct pt_regs *, int, unsigned long);
 extern void _exception_pkey(struct pt_regs *, unsigned long, int);
 extern void die(const char *, struct pt_regs *, long);
+void die_mce(const char *str, struct pt_regs *regs, long err);
 extern bool die_will_crash(void);
 extern void panic_flush_kmsg_start(void);
 extern void panic_flush_kmsg_end(void);
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index 138e46d8c04e..f63495109f63 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -8,6 +8,12 @@
 #include <asm/cputable.h>
 #include <asm/cpu_has_feature.h>
 
+/*
+ * This flag is used to indicate that the page pointed to by a pte is clean
+ * and does not require cleaning before returning it to the user.
+ */
+#define PG_dcache_clean PG_arch_1
+
 #ifdef CONFIG_PPC_BOOK3S_64
 /*
  * Book3s has no ptesync after setting a pte, so without this ptesync it's
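
As an illustrative aside (not part of the commit): PG_dcache_clean gives the existing convention a name. The canonical use is flush-once-then-mark; the same shape appears verbatim in the kvm_ppc.h hunk further down. Helper name invented:

/*
 * Sketch only: flush a page's D/I caches on first use, then record it.
 */
static void demo_make_page_coherent(struct page *page)
{
	if (!test_bit(PG_dcache_clean, &page->flags)) {
		flush_dcache_icache_page(page);
		set_bit(PG_dcache_clean, &page->flags);
	}
}
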
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 5f21a5bab467..e85c849214a2 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -17,16 +17,6 @@ struct cpu_spec;
 typedef void (*cpu_setup_t)(unsigned long offset, struct cpu_spec* spec);
 typedef void (*cpu_restore_t)(void);
 
-enum powerpc_oprofile_type {
-	PPC_OPROFILE_INVALID = 0,
-	PPC_OPROFILE_RS64 = 1,
-	PPC_OPROFILE_POWER4 = 2,
-	PPC_OPROFILE_G4 = 3,
-	PPC_OPROFILE_FSL_EMB = 4,
-	PPC_OPROFILE_CELL = 5,
-	PPC_OPROFILE_PA6T = 6,
-};
-
 enum powerpc_pmc_type {
 	PPC_PMC_DEFAULT = 0,
 	PPC_PMC_IBM = 1,
@@ -83,16 +73,6 @@ struct cpu_spec {
 	/* Used by oprofile userspace to select the right counters */
 	char		*oprofile_cpu_type;
 
-	/* Processor specific oprofile operations */
-	enum powerpc_oprofile_type oprofile_type;
-
-	/* Bit locations inside the mmcra change */
-	unsigned long	oprofile_mmcra_sihv;
-	unsigned long	oprofile_mmcra_sipr;
-
-	/* Bits to clear during an oprofile exception */
-	unsigned long	oprofile_mmcra_clear;
-
 	/* Name of processor class, for the ELF AT_PLATFORM entry */
 	char		*platform;
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index ed75d1c318e3..504f7fe6711a 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -87,6 +87,17 @@ static notrace inline void account_cpu_user_exit(void)
 	acct->starttime_user = tb;
 }
 
+static notrace inline void account_stolen_time(void)
+{
+#ifdef CONFIG_PPC_SPLPAR
+	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
+		struct lppaca *lp = local_paca->lppaca_ptr;
+
+		if (unlikely(local_paca->dtl_ridx != be64_to_cpu(lp->dtl_idx)))
+			accumulate_stolen_time();
+	}
+#endif
+}
 
 #endif /* __KERNEL__ */
 #else /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
@@ -96,5 +107,8 @@ static inline void account_cpu_user_entry(void)
 static inline void account_cpu_user_exit(void)
 {
 }
+static notrace inline void account_stolen_time(void)
+{
+}
 #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 #endif /* __POWERPC_CPUTIME_H */
diff --git a/arch/powerpc/include/asm/debug.h b/arch/powerpc/include/asm/debug.h
index ec57daf87f40..86a14736c76c 100644
--- a/arch/powerpc/include/asm/debug.h
+++ b/arch/powerpc/include/asm/debug.h
@@ -50,10 +50,6 @@ bool ppc_breakpoint_available(void);
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
 extern void do_send_trap(struct pt_regs *regs, unsigned long address,
 			 unsigned long error_code, int brkpt);
-#else
-
-extern void do_break(struct pt_regs *regs, unsigned long address,
-		     unsigned long error_code);
 #endif
 
 #endif /* _ASM_POWERPC_DEBUG_H */
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
index aa6a5ef5d483..7604673787d6 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -137,7 +137,7 @@ extern unsigned int __start___fw_ftr_fixup, __stop___fw_ftr_fixup;
 #ifdef CONFIG_PPC_PSERIES
 void pseries_probe_fw_features(void);
 #else
-static inline void pseries_probe_fw_features(void) { };
+static inline void pseries_probe_fw_features(void) { }
 #endif
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 013165e62618..f18c543bc01d 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -17,8 +17,6 @@ extern bool hugetlb_disabled;
 
 void hugetlbpage_init_default(void);
 
-void flush_dcache_icache_hugepage(struct page *page);
-
 int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
 				 unsigned long len);
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index c98f5141e3fc..ed6086d57b22 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -535,9 +535,12 @@ struct h_cpu_char_result {
 	u64 behaviour;
 };
 
-/* Register state for entering a nested guest with H_ENTER_NESTED */
+/*
+ * Register state for entering a nested guest with H_ENTER_NESTED.
+ * New member must be added at the end.
+ */
 struct hv_guest_state {
-	u64 version;		/* version of this structure layout */
+	u64 version;		/* version of this structure layout, must be first */
 	u32 lpid;
 	u32 vcpu_token;
 	/* These registers are hypervisor privileged (at least for writing) */
@@ -566,10 +569,26 @@ struct hv_guest_state {
 	u64 pidr;
 	u64 cfar;
 	u64 ppr;
+	/* Version 1 ends here */
+	u64 dawr1;
+	u64 dawrx1;
+	/* Version 2 ends here */
 };
 
 /* Latest version of hv_guest_state structure */
-#define HV_GUEST_STATE_VERSION	1
+#define HV_GUEST_STATE_VERSION	2
+
+static inline int hv_guest_state_size(unsigned int version)
+{
+	switch (version) {
+	case 1:
+		return offsetofend(struct hv_guest_state, ppr);
+	case 2:
+		return offsetofend(struct hv_guest_state, dawrx1);
+	default:
+		return -1;
+	}
+}
 
 /*
  * From the document "H_GetPerformanceCounterInfo Interface" v1.07
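
As an illustrative aside (not part of the commit): hv_guest_state_size() is what makes the "new members at the end" rule workable, since each version's layout is a prefix of the next. A sketch of a consumer, with the helper name invented:

/*
 * Sketch only: a host accepting an hv_guest_state from an L1 that may
 * speak an older version copies only the bytes that version defines
 * and zero-fills the rest.
 */
static int demo_load_guest_state(struct hv_guest_state *dst,
				 const struct hv_guest_state *src)
{
	int size = hv_guest_state_size(src->version);

	if (size < 0)
		return -EINVAL;		/* version newer than this host */

	memset(dst, 0, sizeof(*dst));
	memcpy(dst, src, size);		/* old layout is a prefix of new */
	return 0;
}
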
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 0363734ff56e..56a98936a6a9 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -38,6 +38,8 @@
 #define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE)
 #endif
 
+#endif /* CONFIG_PPC64 */
+
 /*
  * flags for paca->irq_soft_mask
  */
@@ -46,18 +48,56 @@
 #define IRQS_PMI_DISABLED	2
 #define IRQS_ALL_DISABLED	(IRQS_DISABLED | IRQS_PMI_DISABLED)
 
-#endif /* CONFIG_PPC64 */
-
 #ifndef __ASSEMBLY__
 
-extern void replay_system_reset(void);
-extern void replay_soft_interrupts(void);
+static inline void __hard_irq_enable(void)
+{
+	if (IS_ENABLED(CONFIG_BOOKE) || IS_ENABLED(CONFIG_40x))
+		wrtee(MSR_EE);
+	else if (IS_ENABLED(CONFIG_PPC_8xx))
+		wrtspr(SPRN_EIE);
+	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
+		__mtmsrd(MSR_EE | MSR_RI, 1);
+	else
+		mtmsr(mfmsr() | MSR_EE);
+}
 
-extern void timer_interrupt(struct pt_regs *);
-extern void timer_broadcast_interrupt(void);
-extern void performance_monitor_exception(struct pt_regs *regs);
-extern void WatchdogException(struct pt_regs *regs);
-extern void unknown_exception(struct pt_regs *regs);
+static inline void __hard_irq_disable(void)
+{
+	if (IS_ENABLED(CONFIG_BOOKE) || IS_ENABLED(CONFIG_40x))
+		wrtee(0);
+	else if (IS_ENABLED(CONFIG_PPC_8xx))
+		wrtspr(SPRN_EID);
+	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
+		__mtmsrd(MSR_RI, 1);
+	else
+		mtmsr(mfmsr() & ~MSR_EE);
+}
+
+static inline void __hard_EE_RI_disable(void)
+{
+	if (IS_ENABLED(CONFIG_BOOKE) || IS_ENABLED(CONFIG_40x))
+		wrtee(0);
+	else if (IS_ENABLED(CONFIG_PPC_8xx))
+		wrtspr(SPRN_NRI);
+	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
+		__mtmsrd(0, 1);
+	else
+		mtmsr(mfmsr() & ~(MSR_EE | MSR_RI));
+}
+
+static inline void __hard_RI_enable(void)
+{
+	if (IS_ENABLED(CONFIG_BOOKE) || IS_ENABLED(CONFIG_40x))
+		return;
+
+	if (IS_ENABLED(CONFIG_PPC_8xx))
+		wrtspr(SPRN_EID);
+	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
+		__mtmsrd(MSR_RI, 1);
+	else
+		mtmsr(mfmsr() | MSR_RI);
+}
 
 #ifdef CONFIG_PPC64
 #include <asm/paca.h>
@@ -221,18 +261,6 @@ static inline bool arch_irqs_disabled(void)
 
 #endif /* CONFIG_PPC_BOOK3S */
 
-#ifdef CONFIG_PPC_BOOK3E
-#define __hard_irq_enable()	wrtee(MSR_EE)
-#define __hard_irq_disable()	wrtee(0)
-#define __hard_EE_RI_disable()	wrtee(0)
-#define __hard_RI_enable()	do { } while (0)
-#else
-#define __hard_irq_enable()	__mtmsrd(MSR_EE|MSR_RI, 1)
-#define __hard_irq_disable()	__mtmsrd(MSR_RI, 1)
-#define __hard_EE_RI_disable()	__mtmsrd(0, 1)
-#define __hard_RI_enable()	__mtmsrd(MSR_RI, 1)
-#endif
-
 #define hard_irq_disable()	do {					\
 	unsigned long flags;						\
 	__hard_irq_disable();						\
@@ -296,8 +324,17 @@ extern void irq_set_pending_from_srr1(unsigned long srr1);
 
 extern void force_external_irq_replay(void);
 
+static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
+{
+	regs->softe = val;
+}
 #else /* CONFIG_PPC64 */
 
+static inline notrace unsigned long irq_soft_mask_return(void)
+{
+	return 0;
+}
+
 static inline unsigned long arch_local_save_flags(void)
 {
 	return mfmsr();
@@ -327,22 +364,12 @@ static inline unsigned long arch_local_irq_save(void)
 
 static inline void arch_local_irq_disable(void)
 {
-	if (IS_ENABLED(CONFIG_BOOKE))
-		wrtee(0);
-	else if (IS_ENABLED(CONFIG_PPC_8xx))
-		wrtspr(SPRN_EID);
-	else
-		mtmsr(mfmsr() & ~MSR_EE);
+	__hard_irq_disable();
 }
 
 static inline void arch_local_irq_enable(void)
 {
-	if (IS_ENABLED(CONFIG_BOOKE))
-		wrtee(MSR_EE);
-	else if (IS_ENABLED(CONFIG_PPC_8xx))
-		wrtspr(SPRN_EIE);
-	else
-		mtmsr(mfmsr() | MSR_EE);
+	__hard_irq_enable();
 }
 
 static inline bool arch_irqs_disabled_flags(unsigned long flags)
@@ -364,6 +391,9 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
 
 static inline void may_hard_irq_enable(void) { }
 
+static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
+{
+}
 #endif /* CONFIG_PPC64 */
 
 #define ARCH_IRQ_INIT_FLAGS	IRQ_NOREQUEST
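
As an illustrative aside (not part of the commit): it may look odd that __hard_irq_disable() and __hard_RI_enable() both write SPRN_EID on 8xx. The 8xx "special" SPRs set MSR[EE]/MSR[RI] as a side effect of the write; the definitions below are quoted from reg_8xx.h for context (they are not part of this diff):

#define SPRN_EIE	80	/* External interrupt enable (EE=1, RI=1) */
#define SPRN_EID	81	/* External interrupt disable (EE=0, RI=1) */
#define SPRN_NRI	82	/* Non recoverable interrupt (EE=0, RI=0) */

So a single EID write both masks external interrupts and marks the state recoverable, which is exactly what each helper wants. Because the IS_ENABLED() conditions are compile-time constants, each helper also collapses to one instruction sequence per platform, matching the removed per-platform #defines.
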
diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
new file mode 100644
index 000000000000..aedfba29e43a
--- /dev/null
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -0,0 +1,449 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_INTERRUPT_H
+#define _ASM_POWERPC_INTERRUPT_H
+
+#include <linux/context_tracking.h>
+#include <linux/hardirq.h>
+#include <asm/cputime.h>
+#include <asm/ftrace.h>
+#include <asm/kprobes.h>
+#include <asm/runlatch.h>
+
+struct interrupt_state {
+#ifdef CONFIG_PPC_BOOK3E_64
+	enum ctx_state ctx_state;
+#endif
+};
+
+static inline void booke_restore_dbcr0(void)
+{
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+	unsigned long dbcr0 = current->thread.debug.dbcr0;
+
+	if (IS_ENABLED(CONFIG_PPC32) && unlikely(dbcr0 & DBCR0_IDM)) {
+		mtspr(SPRN_DBSR, -1);
+		mtspr(SPRN_DBCR0, global_dbcr0[smp_processor_id()]);
+	}
+#endif
+}
+
+static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
+{
+	/*
+	 * Book3E reconciles irq soft mask in asm
+	 */
+#ifdef CONFIG_PPC_BOOK3S_64
+	if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
+		trace_hardirqs_off();
+	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
+	if (user_mode(regs)) {
+		CT_WARN_ON(ct_state() != CONTEXT_USER);
+		user_exit_irqoff();
+
+		account_cpu_user_entry();
+		account_stolen_time();
+	} else {
+		/*
+		 * CT_WARN_ON comes here via program_check_exception,
+		 * so avoid recursion.
+		 */
+		if (TRAP(regs) != 0x700)
+			CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
+	}
+#endif
+
+#ifdef CONFIG_PPC_BOOK3E_64
+	state->ctx_state = exception_enter();
+	if (user_mode(regs))
+		account_cpu_user_entry();
+#endif
+}
+
+/*
+ * Care should be taken to note that interrupt_exit_prepare and
+ * interrupt_async_exit_prepare do not necessarily return immediately to
+ * regs context (e.g., if regs is usermode, we don't necessarily return to
+ * user mode). Other interrupts might be taken between here and return,
+ * context switch / preemption may occur in the exit path after this, or a
+ * signal may be delivered, etc.
+ *
+ * The real interrupt exit code is platform specific, e.g.,
+ * interrupt_exit_user_prepare / interrupt_exit_kernel_prepare for 64s.
+ *
+ * However interrupt_nmi_exit_prepare does return directly to regs, because
+ * NMIs do not do "exit work" or replay soft-masked interrupts.
+ */
+static inline void interrupt_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
+{
+#ifdef CONFIG_PPC_BOOK3E_64
+	exception_exit(state->ctx_state);
+#endif
+
+	/*
+	 * Book3S exits to user via interrupt_exit_user_prepare(), which does
+	 * context tracking, which is a cleaner way to handle PREEMPT=y
+	 * and avoid context entry/exit in e.g., preempt_schedule_irq()),
+	 * which is likely to be where the core code wants to end up.
+	 *
+	 * The above comment explains why we can't do the
+	 *
+	 *     if (user_mode(regs))
+	 *         user_exit_irqoff();
+	 *
+	 * sequence here.
+	 */
+}
+
+static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
+{
+#ifdef CONFIG_PPC_BOOK3S_64
+	if (cpu_has_feature(CPU_FTR_CTRL) &&
+	    !test_thread_local_flags(_TLF_RUNLATCH))
+		__ppc64_runlatch_on();
+#endif
+
+	interrupt_enter_prepare(regs, state);
+	irq_enter();
+}
+
+static inline void interrupt_async_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
+{
+	irq_exit();
+	interrupt_exit_prepare(regs, state);
+}
+
+struct interrupt_nmi_state {
+#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S_64
+	u8 irq_soft_mask;
+	u8 irq_happened;
+#endif
+	u8 ftrace_enabled;
+#endif
+};
+
+static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
+{
+#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S_64
+	state->irq_soft_mask = local_paca->irq_soft_mask;
+	state->irq_happened = local_paca->irq_happened;
+
+	/*
+	 * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does
+	 * the right thing, and set IRQ_HARD_DIS. We do not want to reconcile
+	 * because that goes through irq tracing which we don't want in NMI.
+	 */
+	local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
+	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
+	/* Don't do any per-CPU operations until interrupt state is fixed */
+#endif
+	/* Allow DEC and PMI to be traced when they are soft-NMI */
+	if (TRAP(regs) != 0x900 && TRAP(regs) != 0xf00 && TRAP(regs) != 0x260) {
+		state->ftrace_enabled = this_cpu_get_ftrace_enabled();
+		this_cpu_set_ftrace_enabled(0);
+	}
+#endif
+
+	/*
+	 * Do not use nmi_enter() for pseries hash guest taking a real-mode
+	 * NMI because not everything it touches is within the RMA limit.
+	 */
+	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
+	    !firmware_has_feature(FW_FEATURE_LPAR) ||
+	    radix_enabled() || (mfmsr() & MSR_DR))
+		nmi_enter();
+}
+
+static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
+{
+	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
+	    !firmware_has_feature(FW_FEATURE_LPAR) ||
+	    radix_enabled() || (mfmsr() & MSR_DR))
+		nmi_exit();
+
+#ifdef CONFIG_PPC64
+	if (TRAP(regs) != 0x900 && TRAP(regs) != 0xf00 && TRAP(regs) != 0x260)
+		this_cpu_set_ftrace_enabled(state->ftrace_enabled);
+
+#ifdef CONFIG_PPC_BOOK3S_64
+	/* Check we didn't change the pending interrupt mask. */
+	WARN_ON_ONCE((state->irq_happened | PACA_IRQ_HARD_DIS) != local_paca->irq_happened);
+	local_paca->irq_happened = state->irq_happened;
+	local_paca->irq_soft_mask = state->irq_soft_mask;
+#endif
+#endif
+}
+
+/*
+ * Don't use noinstr here like x86, but rather add NOKPROBE_SYMBOL to each
+ * function definition. The reason for this is the noinstr section is placed
+ * after the main text section, i.e., very far away from the interrupt entry
+ * asm. That creates problems with fitting linker stubs when building large
+ * kernels.
+ */
+#define interrupt_handler __visible noinline notrace __no_kcsan __no_sanitize_address
+
+/**
+ * DECLARE_INTERRUPT_HANDLER_RAW - Declare raw interrupt handler function
+ * @func:	Function name of the entry point
+ * @returns:	Returns a value back to asm caller
+ */
+#define DECLARE_INTERRUPT_HANDLER_RAW(func)			\
+	__visible long func(struct pt_regs *regs)
+
+/**
+ * DEFINE_INTERRUPT_HANDLER_RAW - Define raw interrupt handler function
+ * @func:	Function name of the entry point
+ * @returns:	Returns a value back to asm caller
+ *
+ * @func is called from ASM entry code.
+ *
+ * This is a plain function which does no tracing, reconciling, etc.
+ * The macro is written so it acts as function definition. Append the
+ * body with a pair of curly brackets.
+ *
+ * raw interrupt handlers must not enable or disable interrupts, or
+ * schedule, tracing and instrumentation (ftrace, lockdep, etc) would
+ * not be advisable either, although may be possible in a pinch, the
+ * trace will look odd at least.
+ *
+ * A raw handler may call one of the other interrupt handler functions
+ * to be converted into that interrupt context without these restrictions.
+ *
+ * On PPC64, _RAW handlers may return with fast_interrupt_return.
+ *
+ * Specific handlers may have additional restrictions.
+ */
+#define DEFINE_INTERRUPT_HANDLER_RAW(func)			\
+static __always_inline long ____##func(struct pt_regs *regs);	\
+								\
+interrupt_handler long func(struct pt_regs *regs)		\
+{								\
+	long ret;						\
+								\
+	ret = ____##func (regs);				\
+								\
+	return ret;						\
+}								\
+NOKPROBE_SYMBOL(func);						\
+								\
+static __always_inline long ____##func(struct pt_regs *regs)
+
+/**
+ * DECLARE_INTERRUPT_HANDLER - Declare synchronous interrupt handler function
+ * @func:	Function name of the entry point
+ */
+#define DECLARE_INTERRUPT_HANDLER(func)				\
+	__visible void func(struct pt_regs *regs)
+
+/**
+ * DEFINE_INTERRUPT_HANDLER - Define synchronous interrupt handler function
+ * @func:	Function name of the entry point
+ *
+ * @func is called from ASM entry code.
+ *
+ * The macro is written so it acts as function definition. Append the
+ * body with a pair of curly brackets.
+ */
+#define DEFINE_INTERRUPT_HANDLER(func)				\
+static __always_inline void ____##func(struct pt_regs *regs);	\
+								\
+interrupt_handler void func(struct pt_regs *regs)		\
+{								\
+	struct interrupt_state state;				\
+								\
+	interrupt_enter_prepare(regs, &state);			\
+								\
+	____##func (regs);					\
+								\
+	interrupt_exit_prepare(regs, &state);			\
+}								\
+NOKPROBE_SYMBOL(func);						\
+								\
+static __always_inline void ____##func(struct pt_regs *regs)
+
+/**
+ * DECLARE_INTERRUPT_HANDLER_RET - Declare synchronous interrupt handler function
+ * @func:	Function name of the entry point
+ * @returns:	Returns a value back to asm caller
+ */
+#define DECLARE_INTERRUPT_HANDLER_RET(func)			\
+	__visible long func(struct pt_regs *regs)
+
+/**
+ * DEFINE_INTERRUPT_HANDLER_RET - Define synchronous interrupt handler function
+ * @func:	Function name of the entry point
+ * @returns:	Returns a value back to asm caller
+ *
+ * @func is called from ASM entry code.
+ *
+ * The macro is written so it acts as function definition. Append the
+ * body with a pair of curly brackets.
+ */
+#define DEFINE_INTERRUPT_HANDLER_RET(func)			\
+static __always_inline long ____##func(struct pt_regs *regs);	\
+								\
+interrupt_handler long func(struct pt_regs *regs)		\
+{								\
+	struct interrupt_state state;				\
+	long ret;						\
+								\
+	interrupt_enter_prepare(regs, &state);			\
+								\
+	ret = ____##func (regs);				\
+								\
+	interrupt_exit_prepare(regs, &state);			\
+								\
+	return ret;						\
+}								\
+NOKPROBE_SYMBOL(func);						\
+								\
+static __always_inline long ____##func(struct pt_regs *regs)
+
+/**
+ * DECLARE_INTERRUPT_HANDLER_ASYNC - Declare asynchronous interrupt handler function
+ * @func:	Function name of the entry point
+ */
+#define DECLARE_INTERRUPT_HANDLER_ASYNC(func)			\
+	__visible void func(struct pt_regs *regs)
+
+/**
+ * DEFINE_INTERRUPT_HANDLER_ASYNC - Define asynchronous interrupt handler function
+ * @func:	Function name of the entry point
+ *
+ * @func is called from ASM entry code.
+ *
+ * The macro is written so it acts as function definition. Append the
+ * body with a pair of curly brackets.
+ */
+#define DEFINE_INTERRUPT_HANDLER_ASYNC(func)			\
+static __always_inline void ____##func(struct pt_regs *regs);	\
+								\
+interrupt_handler void func(struct pt_regs *regs)		\
+{								\
+	struct interrupt_state state;				\
+								\
+	interrupt_async_enter_prepare(regs, &state);		\
+								\
+	____##func (regs);					\
+								\
+	interrupt_async_exit_prepare(regs, &state);		\
+}								\
+NOKPROBE_SYMBOL(func);						\
+								\
+static __always_inline void ____##func(struct pt_regs *regs)
+
+/**
+ * DECLARE_INTERRUPT_HANDLER_NMI - Declare NMI interrupt handler function
+ * @func:	Function name of the entry point
+ * @returns:	Returns a value back to asm caller
+ */
+#define DECLARE_INTERRUPT_HANDLER_NMI(func)			\
+	__visible long func(struct pt_regs *regs)
+
+/**
+ * DEFINE_INTERRUPT_HANDLER_NMI - Define NMI interrupt handler function
+ * @func:	Function name of the entry point
+ * @returns:	Returns a value back to asm caller
+ *
+ * @func is called from ASM entry code.
+ *
+ * The macro is written so it acts as function definition. Append the
+ * body with a pair of curly brackets.
+ */
+#define DEFINE_INTERRUPT_HANDLER_NMI(func)			\
+static __always_inline long ____##func(struct pt_regs *regs);	\
+								\
+interrupt_handler long func(struct pt_regs *regs)		\
+{								\
+	struct interrupt_nmi_state state;			\
+	long ret;						\
+								\
+	interrupt_nmi_enter_prepare(regs, &state);		\
+								\
+	ret = ____##func (regs);				\
+								\
+	interrupt_nmi_exit_prepare(regs, &state);		\
+								\
+	return ret;						\
+}								\
+NOKPROBE_SYMBOL(func);						\
+								\
+static __always_inline long ____##func(struct pt_regs *regs)
+
+
+/* Interrupt handlers */
+/* kernel/traps.c */
+DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
+#ifdef CONFIG_PPC_BOOK3S_64
+DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception);
+#else
+DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
+#endif
+DECLARE_INTERRUPT_HANDLER(SMIException);
+DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
+DECLARE_INTERRUPT_HANDLER(unknown_exception);
+DECLARE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception);
+DECLARE_INTERRUPT_HANDLER(instruction_breakpoint_exception);
+DECLARE_INTERRUPT_HANDLER(RunModeException);
+DECLARE_INTERRUPT_HANDLER(single_step_exception);
+DECLARE_INTERRUPT_HANDLER(program_check_exception);
+DECLARE_INTERRUPT_HANDLER(emulation_assist_interrupt);
+DECLARE_INTERRUPT_HANDLER(alignment_exception);
+DECLARE_INTERRUPT_HANDLER(StackOverflow);
+DECLARE_INTERRUPT_HANDLER(stack_overflow_exception);
+DECLARE_INTERRUPT_HANDLER(kernel_fp_unavailable_exception);
+DECLARE_INTERRUPT_HANDLER(altivec_unavailable_exception);
+DECLARE_INTERRUPT_HANDLER(vsx_unavailable_exception);
+DECLARE_INTERRUPT_HANDLER(facility_unavailable_exception);
+DECLARE_INTERRUPT_HANDLER(fp_unavailable_tm);
+DECLARE_INTERRUPT_HANDLER(altivec_unavailable_tm);
+DECLARE_INTERRUPT_HANDLER(vsx_unavailable_tm);
+DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi);
+DECLARE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async);
+DECLARE_INTERRUPT_HANDLER_RAW(performance_monitor_exception);
+DECLARE_INTERRUPT_HANDLER(DebugException);
+DECLARE_INTERRUPT_HANDLER(altivec_assist_exception);
+DECLARE_INTERRUPT_HANDLER(CacheLockingException);
+DECLARE_INTERRUPT_HANDLER(SPEFloatingPointException);
+DECLARE_INTERRUPT_HANDLER(SPEFloatingPointRoundException);
+DECLARE_INTERRUPT_HANDLER(unrecoverable_exception);
+DECLARE_INTERRUPT_HANDLER(WatchdogException);
+DECLARE_INTERRUPT_HANDLER(kernel_bad_stack);
+
+/* slb.c */
+DECLARE_INTERRUPT_HANDLER_RAW(do_slb_fault);
+DECLARE_INTERRUPT_HANDLER(do_bad_slb_fault);
+
+/* hash_utils.c */
+DECLARE_INTERRUPT_HANDLER_RAW(do_hash_fault);
+
+/* fault.c */
+DECLARE_INTERRUPT_HANDLER_RET(do_page_fault);
+DECLARE_INTERRUPT_HANDLER(do_bad_page_fault_segv);
+
+/* process.c */
+DECLARE_INTERRUPT_HANDLER(do_break);
+
+/* time.c */
+DECLARE_INTERRUPT_HANDLER_ASYNC(timer_interrupt);
+
+/* mce.c */
+DECLARE_INTERRUPT_HANDLER_NMI(machine_check_early);
+DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);
+
+DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);
+
+void replay_system_reset(void);
+void replay_soft_interrupts(void);
+
+static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs)
+{
+	if (!arch_irq_disabled_regs(regs))
+		local_irq_enable();
+}
+
+#endif /* _ASM_POWERPC_INTERRUPT_H */
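
As an illustrative aside (not part of the commit): the DECLARE_/DEFINE_ pairs are used like ordinary function definitions, following the macro doc's "append the body with a pair of curly brackets" convention. The handler name below is invented; the wrapper supplies regs plus the enter/exit bookkeeping.

/*
 * Usage sketch: the macro emits the visible, NOKPROBE'd entry point,
 * and this body becomes the always-inline ____demo_exception(regs).
 */
DEFINE_INTERRUPT_HANDLER(demo_exception)
{
	if (user_mode(regs))
		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
	else
		die("demo exception in kernel mode", regs, SIGTRAP);
}

A matching DECLARE_INTERRUPT_HANDLER(demo_exception); would live in a header so the prototype called from the asm entry code is visible.
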
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index 55d6ede30c19..9ab344d29a54 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -136,6 +136,7 @@ int load_crashdump_segments_ppc64(struct kimage *image,
 int setup_purgatory_ppc64(struct kimage *image, const void *slave_code,
 			  const void *fdt, unsigned long kernel_load_addr,
 			  unsigned long fdt_load_addr);
+unsigned int kexec_fdt_totalsize_ppc64(struct kimage *image);
 int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
 			unsigned long initrd_load_addr,
 			unsigned long initrd_len, const char *cmdline);
diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h
index bf221a2a523e..7ec21af49a45 100644
--- a/arch/powerpc/include/asm/kup.h
+++ b/arch/powerpc/include/asm/kup.h
@@ -91,6 +91,7 @@ static __always_inline void setup_kup(void)
 
 static inline void allow_read_from_user(const void __user *from, unsigned long size)
 {
+	barrier_nospec();
 	allow_user_access(NULL, from, size, KUAP_READ);
 }
 
@@ -102,6 +103,7 @@ static inline void allow_write_to_user(void __user *to, unsigned long size)
 static inline void allow_read_write_user(void __user *to, const void __user *from,
 					 unsigned long size)
 {
+	barrier_nospec();
 	allow_user_access(to, from, size, KUAP_READ_WRITE);
 }
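
As an illustrative aside (not part of the commit): moving barrier_nospec() into allow_read_from_user()/allow_read_write_user() pairs with the removals from uaccess.h at the end of this diff. The speculation barrier is now issued once, where the user-access window opens, instead of at every call site. A sketch of the resulting call shape, with the helper name invented:

/*
 * Sketch only: a raw user read after this change. No per-call
 * barrier_nospec() is needed; opening the access window supplies it.
 */
static inline unsigned long demo_copy_in(void *dst, const void __user *src,
					 unsigned long n)
{
	unsigned long ret;

	allow_read_from_user(src, n);	/* includes barrier_nospec() */
	ret = __copy_tofrom_user((__force void __user *)dst, src, n);
	prevent_read_from_user(src, n);
	return ret;
}
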
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index d32ec9ae73bd..2f5f919f6cd3 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -277,6 +277,13 @@ extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
 extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
 extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);
 
+long kvmppc_read_intr(void);
+void kvmppc_bad_interrupt(struct pt_regs *regs);
+void kvmhv_p9_set_lpcr(struct kvm_split_mode *sip);
+void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip);
+void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr);
+void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
+
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu);
 void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu);
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 078f4648ea27..b6d31bff5209 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -74,16 +74,6 @@ struct kvm_split_mode {
 	u8		do_nap;
 	u8		napped[MAX_SMT_THREADS];
 	struct kvmppc_vcore *vc[MAX_SUBCORES];
-	/* Bits for changing lpcr on P9 */
-	unsigned long	lpcr_req;
-	unsigned long	lpidr_req;
-	unsigned long	host_lpcr;
-	u32		do_set;
-	u32		do_restore;
-	union {
-		u32	allphases;
-		u8	phase[4];
-	} lpcr_sync;
 };
 
 /*
@@ -110,7 +100,6 @@ struct kvmppc_host_state {
 	u8 hwthread_state;
 	u8 host_ipi;
 	u8 ptid;		/* thread number within subcore when split */
-	u8 tid;			/* thread number within whole core */
 	u8 fake_suspend;
 	struct kvm_vcpu *kvm_vcpu;
 	struct kvmppc_vcore *kvm_vcore;
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index d67a470e95a3..05fb00d37609 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -28,7 +28,6 @@
 
 #define KVM_MAX_VCPUS		NR_CPUS
 #define KVM_MAX_VCORES		NR_CPUS
-#define KVM_USER_MEM_SLOTS	512
 
 #include <asm/cputhreads.h>
 
@@ -307,6 +306,7 @@ struct kvm_arch {
 	u8 svm_enabled;
 	bool threads_indep;
 	bool nested_enable;
+	bool dawr1_enabled;
 	pgd_t *pgtable;
 	u64 process_table;
 	struct dentry *debugfs_dir;
@@ -584,8 +584,10 @@ struct kvm_vcpu_arch {
 	u32 ctrl;
 	u32 dabrx;
 	ulong dabr;
-	ulong dawr;
-	ulong dawrx;
+	ulong dawr0;
+	ulong dawrx0;
+	ulong dawr1;
+	ulong dawrx1;
 	ulong ciabr;
 	ulong cfar;
 	ulong ppr;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 0a056c64c317..8aacd76bb702 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -314,6 +314,8 @@ struct kvmppc_ops {
 			      int size);
 	int (*enable_svm)(struct kvm *kvm);
 	int (*svm_off)(struct kvm *kvm);
+	int (*enable_dawr1)(struct kvm *kvm);
+	bool (*hash_v3_possible)(void);
 };
 
 extern struct kvmppc_ops *kvmppc_hv_ops;
@@ -627,9 +629,9 @@ extern int h_ipi_redirect;
 static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
 				struct kvm *kvm)
 	{ return NULL; }
-static inline void kvmppc_alloc_host_rm_ops(void) {};
-static inline void kvmppc_free_host_rm_ops(void) {};
-static inline void kvmppc_free_pimap(struct kvm *kvm) {};
+static inline void kvmppc_alloc_host_rm_ops(void) {}
+static inline void kvmppc_free_host_rm_ops(void) {}
+static inline void kvmppc_free_pimap(struct kvm *kvm) {}
 static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
 	{ return 0; }
 static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
@@ -881,9 +883,9 @@ static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
 
 		/* Clear i-cache for new pages */
 		page = pfn_to_page(pfn);
-		if (!test_bit(PG_arch_1, &page->flags)) {
+		if (!test_bit(PG_dcache_clean, &page->flags)) {
 			flush_dcache_icache_page(page);
-			set_bit(PG_arch_1, &page->flags);
+			set_bit(PG_dcache_clean, &page->flags);
 		}
 }
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index cf6ebbc16cb4..764f2732a821 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -59,6 +59,9 @@ struct machdep_calls {
 	int		(*pcibios_root_bridge_prepare)(struct pci_host_bridge
 				*bridge);
 
+	/* finds all the pci_controllers present at boot */
+	void		(*discover_phbs)(void);
+
 	/* To setup PHBs when using automatic OF platform driver for PCI */
 	int		(*pci_setup_phb)(struct pci_controller *host);
diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h
index e6c27ae843dc..331d944280b8 100644
--- a/arch/powerpc/include/asm/mce.h
+++ b/arch/powerpc/include/asm/mce.h
@@ -204,7 +204,18 @@ struct mce_error_info {
 	bool		ignore_event;
 };
 
-#define MAX_MC_EVT 100
+#define MAX_MC_EVT	10
+
+struct mce_info {
+	int mce_nest_count;
+	struct machine_check_event mce_event[MAX_MC_EVT];
+	/* Queue for delayed MCE events. */
+	int mce_queue_count;
+	struct machine_check_event mce_event_queue[MAX_MC_EVT];
+	/* Queue for delayed MCE UE events. */
+	int mce_ue_count;
+	struct machine_check_event mce_ue_event_queue[MAX_MC_EVT];
+};
 
 /* Release flags for get_mce_event() */
 #define MCE_EVENT_RELEASE	true
@@ -234,4 +245,11 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs);
 long __machine_check_early_realmode_p9(struct pt_regs *regs);
 long __machine_check_early_realmode_p10(struct pt_regs *regs);
 #endif /* CONFIG_PPC_BOOK3S_64 */
+
+#ifdef CONFIG_PPC_BOOK3S_64
+void mce_init(void);
+#else
+static inline void mce_init(void) { };
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
 #endif /* __ASM_PPC64_MCE_H__ */
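
As an illustrative aside (not part of the commit): with MAX_MC_EVT shrunk and the arrays gathered into struct mce_info, per-CPU machine-check state is now reached through the paca (the paca.h hunk below adds struct mce_info *mce_info). A sketch of a bounded enqueue, with the helper name invented:

/*
 * Sketch only: queueing a delayed MCE event through the new indirection.
 */
static bool demo_queue_mce_event(const struct machine_check_event *evt)
{
	struct mce_info *info = local_paca->mce_info;

	if (info->mce_queue_count >= MAX_MC_EVT)
		return false;				/* queue full: drop */
	info->mce_event_queue[info->mce_queue_count++] = *evt;
	return true;
}
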
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index d5821834dba9..652ce85f9410 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -282,9 +282,6 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
 }
 
 #define pkey_mm_init(mm)
-#define thread_pkey_regs_save(thread)
-#define thread_pkey_regs_restore(new_thread, old_thread)
-#define thread_pkey_regs_init(thread)
 #define arch_dup_pkeys(oldmm, mm)
 
 static inline u64 pte_to_hpte_pkey_bits(u64 pteflags, unsigned long flags)
diff --git a/arch/powerpc/include/asm/nmi.h b/arch/powerpc/include/asm/nmi.h
index 84b4cfe73edd..160abcb8e9fa 100644
--- a/arch/powerpc/include/asm/nmi.h
+++ b/arch/powerpc/include/asm/nmi.h
@@ -4,6 +4,7 @@
 
 #ifdef CONFIG_PPC_WATCHDOG
 extern void arch_touch_nmi_watchdog(void);
+long soft_nmi_interrupt(struct pt_regs *regs);
 #else
 static inline void arch_touch_nmi_watchdog(void) {}
 #endif
diff --git a/arch/powerpc/include/asm/oprofile_impl.h b/arch/powerpc/include/asm/oprofile_impl.h
deleted file mode 100644
index 2a166c297f97..000000000000
--- a/arch/powerpc/include/asm/oprofile_impl.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
- *
- * Based on alpha version.
- */
-
-#ifndef _ASM_POWERPC_OPROFILE_IMPL_H
-#define _ASM_POWERPC_OPROFILE_IMPL_H
-#ifdef __KERNEL__
-
-#define OP_MAX_COUNTER 8
-
-/* Per-counter configuration as set via oprofilefs. */
-struct op_counter_config {
-	unsigned long enabled;
-	unsigned long event;
-	unsigned long count;
-	/* Classic doesn't support per-counter user/kernel selection */
-	unsigned long kernel;
-	unsigned long user;
-	unsigned long unit_mask;
-};
-
-/* System-wide configuration as set via oprofilefs. */
-struct op_system_config {
-#ifdef CONFIG_PPC64
-	unsigned long mmcr0;
-	unsigned long mmcr1;
-	unsigned long mmcra;
-#ifdef CONFIG_OPROFILE_CELL
-	/* Register for oprofile user tool to check cell kernel profiling
-	 * support.
-	 */
-	unsigned long cell_support;
-#endif
-#endif
-	unsigned long enable_kernel;
-	unsigned long enable_user;
-};
-
-/* Per-arch configuration */
-struct op_powerpc_model {
-	int (*reg_setup) (struct op_counter_config *,
-			  struct op_system_config *,
-			  int num_counters);
-	int (*cpu_setup) (struct op_counter_config *);
-	int (*start) (struct op_counter_config *);
-	int (*global_start) (struct op_counter_config *);
-	void (*stop) (void);
-	void (*global_stop) (void);
-	int (*sync_start)(void);
-	int (*sync_stop)(void);
-	void (*handle_interrupt) (struct pt_regs *,
-				  struct op_counter_config *);
-	int num_counters;
-};
-
-extern struct op_powerpc_model op_model_fsl_emb;
-extern struct op_powerpc_model op_model_power4;
-extern struct op_powerpc_model op_model_7450;
-extern struct op_powerpc_model op_model_cell;
-extern struct op_powerpc_model op_model_pa6t;
-
-
-/* All the classic PPC parts use these */
-static inline unsigned int classic_ctr_read(unsigned int i)
-{
-	switch(i) {
-	case 0:
-		return mfspr(SPRN_PMC1);
-	case 1:
-		return mfspr(SPRN_PMC2);
-	case 2:
-		return mfspr(SPRN_PMC3);
-	case 3:
-		return mfspr(SPRN_PMC4);
-	case 4:
-		return mfspr(SPRN_PMC5);
-	case 5:
-		return mfspr(SPRN_PMC6);
-
-/* No PPC32 chip has more than 6 so far */
-#ifdef CONFIG_PPC64
-	case 6:
-		return mfspr(SPRN_PMC7);
-	case 7:
-		return mfspr(SPRN_PMC8);
-#endif
-	default:
-		return 0;
-	}
-}
-
-static inline void classic_ctr_write(unsigned int i, unsigned int val)
-{
-	switch(i) {
-	case 0:
-		mtspr(SPRN_PMC1, val);
-		break;
-	case 1:
-		mtspr(SPRN_PMC2, val);
-		break;
-	case 2:
-		mtspr(SPRN_PMC3, val);
-		break;
-	case 3:
-		mtspr(SPRN_PMC4, val);
-		break;
-	case 4:
-		mtspr(SPRN_PMC5, val);
-		break;
-	case 5:
-		mtspr(SPRN_PMC6, val);
-		break;
-
-/* No PPC32 chip has more than 6, yet */
-#ifdef CONFIG_PPC64
-	case 6:
-		mtspr(SPRN_PMC7, val);
-		break;
-	case 7:
-		mtspr(SPRN_PMC8, val);
-		break;
-#endif
-	default:
-		break;
-	}
-}
-
-
-extern void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth);
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_OPROFILE_IMPL_H */
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 9454d29ff4b4..ec18ac818e3a 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -29,6 +29,7 @@
 #include <asm/hmi.h>
 #include <asm/cpuidle.h>
 #include <asm/atomic.h>
+#include <asm/mce.h>
 
 #include <asm-generic/mmiowb_types.h>
 
@@ -108,8 +109,7 @@ struct paca_struct {
 	 */
 	/* used for most interrupts/exceptions */
 	u64 exgen[EX_SIZE] __attribute__((aligned(0x80)));
-	u64 exslb[EX_SIZE];	/* used for SLB/segment table misses
-				 * on the linear mapping */
+
 	/* SLB related definitions */
 	u16 vmalloc_sllp;
 	u8 slb_cache_ptr;
@@ -273,6 +273,9 @@ struct paca_struct {
 #ifdef CONFIG_MMIOWB
 	struct mmiowb_state mmiowb_state;
 #endif
+#ifdef CONFIG_PPC_BOOK3S_64
+	struct mce_info *mce_info;
+#endif /* CONFIG_PPC_BOOK3S_64 */
 } ____cacheline_aligned;
 
 extern void copy_mm_to_paca(struct mm_struct *mm);
@@ -285,9 +288,9 @@ extern void free_unused_pacas(void);
 
 #else /* CONFIG_PPC64 */
 
-static inline void allocate_paca_ptrs(void) { };
-static inline void allocate_paca(int cpu) { };
-static inline void free_unused_pacas(void) { };
+static inline void allocate_paca_ptrs(void) { }
+static inline void allocate_paca(int cpu) { }
+static inline void free_unused_pacas(void) { }
 
 #endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/include/asm/paravirt.h b/arch/powerpc/include/asm/paravirt.h
index edc08f04aef7..5d1726bb28e7 100644
--- a/arch/powerpc/include/asm/paravirt.h
+++ b/arch/powerpc/include/asm/paravirt.h
@@ -10,6 +10,7 @@
 #endif
 
 #ifdef CONFIG_PPC_SPLPAR
+#include <linux/smp.h>
 #include <asm/kvm_guest.h>
 #include <asm/cputhreads.h>
diff --git a/arch/powerpc/include/asm/perf_event.h b/arch/powerpc/include/asm/perf_event.h
index daec64d41b44..164e910bf654 100644
--- a/arch/powerpc/include/asm/perf_event.h
+++ b/arch/powerpc/include/asm/perf_event.h
@@ -14,6 +14,7 @@
 #include <asm/perf_event_server.h>
 #else
 static inline bool is_sier_available(void) { return false; }
+static inline unsigned long get_pmcs_ext_regs(int idx) { return 0; }
 #endif
 
 #ifdef CONFIG_FSL_EMB_PERF_EVENT
@@ -40,6 +41,7 @@ static inline bool is_sier_available(void) { return false; }
 
 /* To support perf_regs sier update */
 extern bool is_sier_available(void);
+extern unsigned long get_pmcs_ext_regs(int idx);
 /* To define perf extended regs mask value */
 extern u64 PERF_REG_EXTENDED_MASK;
 #define PERF_REG_EXTENDED_MASK PERF_REG_EXTENDED_MASK
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index 3b7baba01c92..00e7e671bb4b 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -36,9 +36,9 @@ struct power_pmu {
 	unsigned long	test_adder;
 	int		(*compute_mmcr)(u64 events[], int n_ev,
 				unsigned int hwc[], struct mmcr_regs *mmcr,
-				struct perf_event *pevents[]);
+				struct perf_event *pevents[], u32 flags);
 	int		(*get_constraint)(u64 event_id, unsigned long *mskp,
-				unsigned long *valp);
+				unsigned long *valp, u64 event_config1);
 	int		(*get_alternatives)(u64 event_id, unsigned int flags,
 				u64 alt[]);
 	void		(*get_mem_data_src)(union perf_mem_data_src *dsrc,
@@ -83,6 +83,7 @@ struct power_pmu {
 #define PPMU_NO_SIAR		0x00000100 /* Do not use SIAR */
 #define PPMU_ARCH_31		0x00000200 /* Has MMCR3, SIER2 and SIER3 */
 #define PPMU_P10_DD1		0x00000400 /* Is power10 DD1 processor version */
+#define PPMU_HAS_ATTR_CONFIG1	0x00000800 /* Using config1 attribute */
 
 /*
  * Values for flags to get_alternatives()
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index f7613f43c9cf..4eed82172e33 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -162,6 +162,9 @@ static inline bool is_ioremap_addr(const void *x)
 
 	return addr >= IOREMAP_BASE && addr < IOREMAP_END;
 }
+
+struct seq_file;
+void arch_report_meminfo(struct seq_file *m);
 #endif /* CONFIG_PPC64 */
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/pkeys.h b/arch/powerpc/include/asm/pkeys.h
index a7951049e129..59a2c7dbc78f 100644
--- a/arch/powerpc/include/asm/pkeys.h
+++ b/arch/powerpc/include/asm/pkeys.h
@@ -169,10 +169,4 @@ static inline bool arch_pkeys_enabled(void)
 }
 
 extern void pkey_mm_init(struct mm_struct *mm);
-extern bool arch_supports_pkeys(int cap);
-extern unsigned int arch_usable_pkeys(void);
-extern void thread_pkey_regs_save(struct thread_struct *thread);
-extern void thread_pkey_regs_restore(struct thread_struct *new_thread,
-				     struct thread_struct *old_thread);
-extern void thread_pkey_regs_init(struct thread_struct *thread);
 #endif /*_ASM_POWERPC_KEYS_H */
diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h
index 7f4be5a05eb3..2b9edbf6e929 100644
--- a/arch/powerpc/include/asm/ppc-pci.h
+++ b/arch/powerpc/include/asm/ppc-pci.h
@@ -13,10 +13,6 @@
 
 extern unsigned long isa_io_base;
 
-extern void pci_setup_phb_io(struct pci_controller *hose, int primary);
-extern void pci_setup_phb_io_dynamic(struct pci_controller *hose, int primary);
-
-
 extern struct list_head hose_list;
 extern struct pci_dev *isa_bridge_pcidev;	/* may be NULL if no ISA bus */
 
@@ -32,9 +28,6 @@ struct pci_dn;
 void *pci_traverse_device_nodes(struct device_node *start,
 				void *(*fn)(struct device_node *, void *),
 				void *data);
-void *traverse_pci_dn(struct pci_dn *root,
-		      void *(*fn)(struct pci_dn *, void *),
-		      void *data);
 extern void pci_devs_phb_init_dynamic(struct pci_controller *phb);
 
 /* From rtas_pci.h */
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index cc1bca571332..3dceb64fc9af 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -25,7 +25,6 @@
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 #define ACCOUNT_CPU_USER_ENTRY(ptr, ra, rb)
 #define ACCOUNT_CPU_USER_EXIT(ptr, ra, rb)
-#define ACCOUNT_STOLEN_TIME
 #else
 #define ACCOUNT_CPU_USER_ENTRY(ptr, ra, rb)				\
 	MFTB(ra);			/* get timebase */		\
@@ -44,29 +43,6 @@
 	PPC_LL	ra, ACCOUNT_SYSTEM_TIME(ptr);				\
 	add	ra,ra,rb;		/* add on to system time */	\
 	PPC_STL	ra, ACCOUNT_SYSTEM_TIME(ptr)
-
-#ifdef CONFIG_PPC_SPLPAR
-#define ACCOUNT_STOLEN_TIME						\
-BEGIN_FW_FTR_SECTION;							\
-	beq	33f;							\
-	/* from user - see if there are any DTL entries to process */	\
-	ld	r10,PACALPPACAPTR(r13);	/* get ptr to VPA */		\
-	ld	r11,PACA_DTL_RIDX(r13);	/* get log read index */	\
-	addi	r10,r10,LPPACA_DTLIDX;					\
-	LDX_BE	r10,0,r10;		/* get log write index */	\
-	cmpd	cr1,r11,r10;						\
-	beq+	cr1,33f;						\
-	bl	accumulate_stolen_time;					\
-	ld	r12,_MSR(r1);						\
-	andi.	r10,r12,MSR_PR;		/* Restore cr0 (coming from user) */ \
-33:									\
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
-
-#else  /* CONFIG_PPC_SPLPAR */
-#define ACCOUNT_STOLEN_TIME
-
-#endif /* CONFIG_PPC_SPLPAR */
-
 #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 
 /*
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index 58f9dc060a7b..975ba260006a 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -70,6 +70,9 @@ struct pt_regs
 	};
 };
 #endif
+
+#define STACK_FRAME_WITH_PT_REGS (STACK_FRAME_OVERHEAD + sizeof(struct pt_regs))
+
 #ifdef __powerpc64__
 
 /*
@@ -229,6 +232,11 @@ static inline bool trap_is_scv(struct pt_regs *regs)
 	return (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && TRAP(regs) == 0x3000);
 }
 
+static inline bool trap_is_unsupported_scv(struct pt_regs *regs)
+{
+	return IS_ENABLED(CONFIG_PPC_BOOK3S_64) && TRAP(regs) == 0x7ff0;
+}
+
 static inline bool trap_is_syscall(struct pt_regs *regs)
 {
 	return (trap_is_scv(regs) || TRAP(regs) == 0xc00);
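
As an illustrative aside (not part of the commit): trap_is_unsupported_scv() keys off the synthetic 0x7ff0 trap value, letting a trap handler single the case out. The caller below is a sketch of that shape, not code from this diff:

/*
 * Sketch only: a program-check style handler can pick out an
 * unsupported scv vector and deliver SIGILL directly.
 */
static void demo_handle_trap(struct pt_regs *regs)
{
	if (trap_is_unsupported_scv(regs)) {
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}
	/* ... remaining trap handling ... */
}
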
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index e40a921d78f9..da103e92c112 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1375,6 +1375,7 @@
 #define mtmsr(v)	asm volatile("mtmsr %0" : \
 				     : "r" ((unsigned long)(v)) \
 				     : "memory")
+#define __mtmsrd(v, l)	BUILD_BUG()
 #define __MTMSR		"mtmsr"
 #endif
 
@@ -1413,13 +1414,24 @@ static inline void msr_check_and_clear(unsigned long bits)
 }
 
 #ifdef CONFIG_PPC32
-#define mfsrin(v)	({unsigned int rval; \
-			asm volatile("mfsrin %0,%1" : "=r" (rval) : "r" (v)); \
-			rval;})
+static inline u32 mfsr(u32 idx)
+{
+	u32 val;
+
+	if (__builtin_constant_p(idx))
+		asm volatile("mfsr %0, %1" : "=r" (val): "i" (idx >> 28));
+	else
+		asm volatile("mfsrin %0, %1" : "=r" (val): "r" (idx));
 
-static inline void mtsrin(u32 val, u32 idx)
+	return val;
+}
+
+static inline void mtsr(u32 val, u32 idx)
 {
-	asm volatile("mtsrin %0, %1" : : "r" (val), "r" (idx));
+	if (__builtin_constant_p(idx))
+		asm volatile("mtsr %1, %0" : : "r" (val), "i" (idx >> 28));
+	else
+		asm volatile("mtsrin %0, %1" : : "r" (val), "r" (idx));
 }
 #endif
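
As an illustrative aside (not part of the commit): the __builtin_constant_p() split lets the compiler emit the direct-form instruction, which encodes the segment number as an immediate, while runtime-computed addresses fall back to the indirect mfsrin/mtsrin form that the old macros always used. Call sites below are invented:

/*
 * Sketch only: constant vs. computed segment indexes.
 */
static inline void demo_sr(u32 addr)
{
	u32 a = mfsr(0x10000000);	/* constant: emits "mfsr rD, 1" */
	u32 b = mfsr(addr);		/* variable: emits "mfsrin rD, rB" */

	mtsr(a | b, addr);		/* variable index: "mtsrin" */
}
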
diff --git a/arch/powerpc/include/asm/spu.h b/arch/powerpc/include/asm/spu.h
index 9666491bcb8a..8a2d11ba0dae 100644
--- a/arch/powerpc/include/asm/spu.h
+++ b/arch/powerpc/include/asm/spu.h
@@ -201,20 +201,6 @@ int spu_64k_pages_available(void);
 struct mm_struct;
 extern void spu_flush_all_slbs(struct mm_struct *mm);
 
-/* This interface allows a profiler (e.g., OProfile) to store a ref
- * to spu context information that it creates. This caching technique
- * avoids the need to recreate this information after a save/restore operation.
- *
- * Assumes the caller has already incremented the ref count to
- * profile_info; then spu_context_destroy must call kref_put
- * on prof_info_kref.
- */
-void spu_set_profile_private_kref(struct spu_context *ctx,
-				  struct kref *prof_info_kref,
-				  void ( * prof_info_release) (struct kref *kref));
-
-void *spu_get_profile_private_kref(struct spu_context *ctx);
-
 /* system callbacks from the SPU */
 struct spu_syscall_block {
	u64 nr_ret;
@@ -266,25 +252,6 @@ void spu_remove_dev_attr(struct device_attribute *attr);
 int spu_add_dev_attr_group(struct attribute_group *attrs);
 void spu_remove_dev_attr_group(struct attribute_group *attrs);
 
-/*
- * Notifier blocks:
- *
- * oprofile can get notified when a context switch is performed
- * on an spe. The notifer function that gets called is passed
- * a pointer to the SPU structure as well as the object-id that
- * identifies the binary running on that SPU now.
- *
- * For a context save, the object-id that is passed is zero,
- * identifying that the kernel will run from that moment on.
- *
- * For a context restore, the object-id is the value written
- * to object-id spufs file from user space and the notifer
- * function can assume that spu->ctx is valid.
- */
-struct notifier_block;
-int spu_switch_event_register(struct notifier_block * n);
-int spu_switch_event_unregister(struct notifier_block * n);
-
 extern void notify_spus_active(void);
 extern void do_notify_spus_active(void);
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 3d8a47af7a25..386d576673a1 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -94,7 +94,6 @@ void arch_setup_new_exec(void);
 #define TIF_PATCH_PENDING	6	/* pending live patching update */
 #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
 #define TIF_SINGLESTEP		8	/* singlestepping active */
-#define TIF_NOHZ		9	/* in adaptive nohz mode */
 #define TIF_SECCOMP		10	/* secure computing */
 #define TIF_RESTOREALL		11	/* Restore all regs (implies NOERROR) */
 #define TIF_NOERROR		12	/* Force successful syscall return */
@@ -128,11 +127,10 @@ void arch_setup_new_exec(void);
 #define _TIF_UPROBE		(1<<TIF_UPROBE)
 #define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
 #define _TIF_EMULATE_STACK_STORE	(1<<TIF_EMULATE_STACK_STORE)
-#define _TIF_NOHZ		(1<<TIF_NOHZ)
 #define _TIF_SYSCALL_EMU	(1<<TIF_SYSCALL_EMU)
 #define _TIF_SYSCALL_DOTRACE	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
				 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
-				 _TIF_NOHZ | _TIF_SYSCALL_EMU)
+				 _TIF_SYSCALL_EMU)
 
 #define _TIF_USER_WORK_MASK	(_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
				 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
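Note on the thread_info.h hunk: TIF_NOHZ is removed (context tracking no longer requires a per-task flag here), so it is also dropped from the _TIF_SYSCALL_DOTRACE mask that syscall entry tests to choose between the fast path and the trace slow path. A minimal sketch of that test; the helper name is an illustrative assumption, not from this series:

	/*
	 * Illustrative only: one mask test decides whether any of the
	 * syscall-trace facilities (ptrace, audit, seccomp, tracepoints,
	 * syscall emulation) needs the slow path.
	 */
	static inline bool syscall_needs_trace(struct thread_info *ti)
	{
		return (ti->flags & _TIF_SYSCALL_DOTRACE) != 0;
	}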
 unsigned long long tb_to_ns(unsigned long long tb_ticks);
 
+void timer_broadcast_interrupt(void);
+
 /* SPLPAR */
 void accumulate_stolen_time(void);
 
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 501c9a79038c..78e2a3990eab 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -52,8 +52,6 @@ static inline bool __access_ok(unsigned long addr, unsigned long size)
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), true)
 #define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
-#define __put_user_goto(x, ptr, label) \
-	__put_user_nocheck_goto((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)
 
 #define __get_user_allowed(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), false)
@@ -110,22 +108,18 @@ static inline bool __access_ok(unsigned long addr, unsigned long size)
 
 extern long __put_user_bad(void);
 
-#define __put_user_size_allowed(x, ptr, size, retval)		\
+#define __put_user_size(x, ptr, size, retval)			\
 do {								\
	__label__ __pu_failed;					\
								\
	retval = 0;						\
+	allow_write_to_user(ptr, size);				\
	__put_user_size_goto(x, ptr, size, __pu_failed);	\
+	prevent_write_to_user(ptr, size);			\
	break;							\
								\
 __pu_failed:							\
	retval = -EFAULT;					\
-} while (0)
-
-#define __put_user_size(x, ptr, size, retval)			\
-do {								\
-	allow_write_to_user(ptr, size);				\
-	__put_user_size_allowed(x, ptr, size, retval);		\
	prevent_write_to_user(ptr, size);			\
 } while (0)
@@ -213,11 +207,9 @@ do {								\
	}							\
 } while (0)
 
-#define __put_user_nocheck_goto(x, ptr, size, label)		\
+#define __unsafe_put_user_goto(x, ptr, size, label)		\
 do {								\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
-	if (!is_kernel_addr((unsigned long)__pu_addr))		\
-		might_fault();					\
	__chk_user_ptr(ptr);					\
	__put_user_size_goto((x), __pu_addr, (size), label);	\
 } while (0)
@@ -313,9 +305,8 @@ do {								\
	__typeof__(size) __gu_size = (size);			\
								\
	__chk_user_ptr(__gu_addr);				\
-	if (!is_kernel_addr((unsigned long)__gu_addr))		\
+	if (do_allow && !is_kernel_addr((unsigned long)__gu_addr)) \
		might_fault();					\
-	barrier_nospec();					\
	if (do_allow)						\
		__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err);	\
	else							\
@@ -333,10 +324,8 @@ do {								\
	__typeof__(size) __gu_size = (size);			\
								\
	might_fault();						\
-	if (access_ok(__gu_addr, __gu_size)) {			\
-		barrier_nospec();				\
+	if (access_ok(__gu_addr, __gu_size))			\
		__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err);	\
-	}							\
	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
								\
	__gu_err;						\
@@ -350,7 +339,6 @@ do {								\
	__typeof__(size) __gu_size = (size);			\
								\
	__chk_user_ptr(__gu_addr);				\
-	barrier_nospec();					\
	__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
								\
@@ -395,7 +383,6 @@ raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
	unsigned long ret;
 
-	barrier_nospec();
	allow_read_write_user(to, from, n);
	ret = __copy_tofrom_user(to, from, n);
	prevent_read_write_user(to, from, n);
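Note on the uaccess.h hunks so far: __put_user_size_allowed() is folded into __put_user_size(), which now opens and closes the KUAP user-access window itself on both the success and the fault path, and the explicit barrier_nospec() calls are dropped from these inline fast paths. A sketch of what the merged macro gives a caller; the wrapper function is illustrative, not part of the diff:

	/*
	 * Illustrative only: a single macro invocation brackets the store
	 * with allow/prevent_write_to_user() and routes a fault through
	 * the local __pu_failed label, yielding -EFAULT in 'err'.
	 */
	static inline int put_u32_example(u32 __user *p, u32 val)
	{
		int err;

		__put_user_size(val, p, sizeof(val), err);
		return err;	/* 0 on success, -EFAULT on fault */
	}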
@@ -407,32 +394,7 @@ static inline unsigned long
 raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
	unsigned long ret;
-	if (__builtin_constant_p(n) && (n <= 8)) {
-		ret = 1;
-
-		switch (n) {
-		case 1:
-			barrier_nospec();
-			__get_user_size(*(u8 *)to, from, 1, ret);
-			break;
-		case 2:
-			barrier_nospec();
-			__get_user_size(*(u16 *)to, from, 2, ret);
-			break;
-		case 4:
-			barrier_nospec();
-			__get_user_size(*(u32 *)to, from, 4, ret);
-			break;
-		case 8:
-			barrier_nospec();
-			__get_user_size(*(u64 *)to, from, 8, ret);
-			break;
-		}
-		if (ret == 0)
-			return 0;
-	}
-
-	barrier_nospec();
	allow_read_from_user(from, n);
	ret = __copy_tofrom_user((__force void __user *)to, from, n);
	prevent_read_from_user(from, n);
@@ -440,39 +402,12 @@ static inline unsigned long raw_copy_from_user(void *to,
 }
 
 static inline unsigned long
-raw_copy_to_user_allowed(void __user *to, const void *from, unsigned long n)
-{
-	if (__builtin_constant_p(n) && (n <= 8)) {
-		unsigned long ret = 1;
-
-		switch (n) {
-		case 1:
-			__put_user_size_allowed(*(u8 *)from, (u8 __user *)to, 1, ret);
-			break;
-		case 2:
-			__put_user_size_allowed(*(u16 *)from, (u16 __user *)to, 2, ret);
-			break;
-		case 4:
-			__put_user_size_allowed(*(u32 *)from, (u32 __user *)to, 4, ret);
-			break;
-		case 8:
-			__put_user_size_allowed(*(u64 *)from, (u64 __user *)to, 8, ret);
-			break;
-		}
-		if (ret == 0)
-			return 0;
-	}
-
-	return __copy_tofrom_user(to, (__force const void __user *)from, n);
-}
-
-static inline unsigned long
 raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
	unsigned long ret;
 
	allow_write_to_user(to, n);
-	ret = raw_copy_to_user_allowed(to, from, n);
+	ret = __copy_tofrom_user(to, (__force const void __user *)from, n);
	prevent_write_to_user(to, n);
	return ret;
 }
@@ -508,6 +443,9 @@ static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
 {
	if (unlikely(!access_ok(ptr, len)))
		return false;
+
+	might_fault();
+
	allow_read_write_user((void __user *)ptr, ptr, len);
	return true;
 }
@@ -521,6 +459,9 @@ user_read_access_begin(const void __user *ptr, size_t len)
 {
	if (unlikely(!access_ok(ptr, len)))
		return false;
+
+	might_fault();
+
	allow_read_from_user(ptr, len);
	return true;
 }
@@ -532,6 +473,9 @@ user_write_access_begin(const void __user *ptr, size_t len)
 {
	if (unlikely(!access_ok(ptr, len)))
		return false;
+
+	might_fault();
+
	allow_write_to_user((void __user *)ptr, len);
	return true;
 }
@@ -540,7 +484,8 @@ user_write_access_begin(const void __user *ptr, size_t len)
 
 #define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
 #define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e)
-#define unsafe_put_user(x, p, e) __put_user_goto(x, p, e)
+#define unsafe_put_user(x, p, e) \
+	__unsafe_put_user_goto((__typeof__(*(p)))(x), (p), sizeof(*(p)), e)
 
 #define unsafe_copy_to_user(d, s, l, e) \
 do { \
@@ -550,17 +495,17 @@ do { \
	int _i; \
	\
	for (_i = 0; _i < (_len & ~(sizeof(long) - 1)); _i += sizeof(long)) \
-		__put_user_goto(*(long*)(_src + _i), (long __user *)(_dst + _i), e);\
+		unsafe_put_user(*(long*)(_src + _i), (long __user *)(_dst + _i), e); \
	if (IS_ENABLED(CONFIG_PPC64) && (_len & 4)) { \
-		__put_user_goto(*(u32*)(_src + _i), (u32 __user *)(_dst + _i), e); \
+		unsafe_put_user(*(u32*)(_src + _i), (u32 __user *)(_dst + _i), e); \
		_i += 4; \
	} \
	if (_len & 2) { \
-		__put_user_goto(*(u16*)(_src + _i), (u16 __user *)(_dst + _i), e); \
+		unsafe_put_user(*(u16*)(_src + _i), (u16 __user *)(_dst + _i), e); \
		_i += 2; \
	} \
	if (_len & 1) \
-		__put_user_goto(*(u8*)(_src + _i), (u8 __user *)(_dst + _i), e);\
+		unsafe_put_user(*(u8*)(_src + _i), (u8 __user *)(_dst + _i), e); \
 } while (0)
 
 #define HAVE_GET_KERNEL_NOFAULT
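Note on the user_access_begin() family: might_fault() moves into the *_access_begin() helpers, which are also where the KUAP window is opened, so unsafe_get_user()/unsafe_put_user() stay as lean goto-based accessors. The canonical usage pattern looks like the sketch below; the function itself is illustrative, only the begin/end pairing is what this diff relies on:

	/*
	 * Illustrative only: open the access window once, perform several
	 * unsafe accesses, and close the window on both the success and
	 * the fault path.
	 */
	static int fill_pair(u32 __user *p)
	{
		if (!user_write_access_begin(p, 2 * sizeof(u32)))
			return -EFAULT;
		unsafe_put_user(1, &p[0], failed);
		unsafe_put_user(2, &p[1], failed);
		user_write_access_end();
		return 0;

	failed:
		user_write_access_end();
		return -EFAULT;
	}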
diff --git a/arch/powerpc/include/asm/vdso/timebase.h b/arch/powerpc/include/asm/vdso/timebase.h
index 881f655caa0a..891c9d5eaabe 100644
--- a/arch/powerpc/include/asm/vdso/timebase.h
+++ b/arch/powerpc/include/asm/vdso/timebase.h
@@ -43,12 +43,6 @@
 #define mttbl(v)	asm volatile("mttbl %0":: "r"(v))
 #define mttbu(v)	asm volatile("mttbu %0":: "r"(v))
 
-/* For compatibility, get_tbl() is defined as get_tb() on ppc64 */
-static inline unsigned long get_tbl(void)
-{
-	return mftb();
-}
-
 static __always_inline u64 get_tb(void)
 {
	unsigned int tbhi, tblo, tbhi2;
diff --git a/arch/powerpc/include/asm/xmon.h b/arch/powerpc/include/asm/xmon.h
index 454a7fc6113b..68bfb2361f03 100644
--- a/arch/powerpc/include/asm/xmon.h
+++ b/arch/powerpc/include/asm/xmon.h
@@ -17,8 +17,8 @@ struct pt_regs;
 extern int xmon(struct pt_regs *excp);
 extern irqreturn_t xmon_irq(int, void *);
 #else
-static inline void xmon_setup(void) { };
-static inline void xmon_register_spus(struct list_head *list) { };
+static inline void xmon_setup(void) { }
+static inline void xmon_register_spus(struct list_head *list) { }
 #endif
 
 #if defined(CONFIG_XMON) && defined(CONFIG_SMP)
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index c3af3f324c5a..9f18fa090f1f 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -644,6 +644,8 @@ struct kvm_ppc_cpu_char {
 #define KVM_REG_PPC_MMCR3	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc1)
 #define KVM_REG_PPC_SIER2	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc2)
 #define KVM_REG_PPC_SIER3	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc3)
+#define KVM_REG_PPC_DAWR1	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc4)
+#define KVM_REG_PPC_DAWRX1	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc5)
 
 /* Transactional Memory checkpointed state:
  * This is all GPRs, all VSX regs and a subset of SPRs
diff --git a/arch/powerpc/include/uapi/asm/perf_regs.h b/arch/powerpc/include/uapi/asm/perf_regs.h
index bdf5f10f8b9f..578b3ee86105 100644
--- a/arch/powerpc/include/uapi/asm/perf_regs.h
+++ b/arch/powerpc/include/uapi/asm/perf_regs.h
@@ -55,17 +55,33 @@ enum perf_event_powerpc_regs {
	PERF_REG_POWERPC_MMCR3,
	PERF_REG_POWERPC_SIER2,
	PERF_REG_POWERPC_SIER3,
+	PERF_REG_POWERPC_PMC1,
+	PERF_REG_POWERPC_PMC2,
+	PERF_REG_POWERPC_PMC3,
+	PERF_REG_POWERPC_PMC4,
+	PERF_REG_POWERPC_PMC5,
+	PERF_REG_POWERPC_PMC6,
	/* Max regs without the extended regs */
	PERF_REG_POWERPC_MAX = PERF_REG_POWERPC_MMCRA + 1,
 };
 
 #define PERF_REG_PMU_MASK	((1ULL << PERF_REG_POWERPC_MAX) - 1)
 
-/* PERF_REG_EXTENDED_MASK value for CPU_FTR_ARCH_300 */
-#define PERF_REG_PMU_MASK_300	(((1ULL << (PERF_REG_POWERPC_MMCR2 + 1)) - 1) - PERF_REG_PMU_MASK)
-/* PERF_REG_EXTENDED_MASK value for CPU_FTR_ARCH_31 */
-#define PERF_REG_PMU_MASK_31	(((1ULL << (PERF_REG_POWERPC_SIER3 + 1)) - 1) - PERF_REG_PMU_MASK)
+/* Exclude MMCR3, SIER2, SIER3 for CPU_FTR_ARCH_300 */
+#define PERF_EXCLUDE_REG_EXT_300	(7ULL << PERF_REG_POWERPC_MMCR3)
 
-#define PERF_REG_MAX_ISA_300	(PERF_REG_POWERPC_MMCR2 + 1)
-#define PERF_REG_MAX_ISA_31	(PERF_REG_POWERPC_SIER3 + 1)
+/*
+ * PERF_REG_EXTENDED_MASK value for CPU_FTR_ARCH_300
+ * includes 9 SPRS from MMCR0 to PMC6 excluding the
+ * unsupported SPRS in PERF_EXCLUDE_REG_EXT_300.
+ */
+#define PERF_REG_PMU_MASK_300	((0xfffULL << PERF_REG_POWERPC_MMCR0) - PERF_EXCLUDE_REG_EXT_300)
+
+/*
+ * PERF_REG_EXTENDED_MASK value for CPU_FTR_ARCH_31
+ * includes 12 SPRs from MMCR0 to PMC6.
+ */
+#define PERF_REG_PMU_MASK_31	(0xfffULL << PERF_REG_POWERPC_MMCR0)
+
+#define PERF_REG_EXTENDED_MAX	(PERF_REG_POWERPC_PMC6 + 1)
 
 #endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */
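Note on the perf_regs.h rework: the extended registers MMCR0 through PMC6 are 12 consecutive enum values, so a 12-bit run (0xfffULL << PERF_REG_POWERPC_MMCR0) covers all of them for ISA v3.1, and the ISA v3.00 mask is that same run minus the three unsupported bits MMCR3/SIER2/SIER3 (7ULL << PERF_REG_POWERPC_MMCR3). A couple of compile-time checks make the relationship explicit; these BUILD_BUG_ON()s are illustrative and not part of the patch:

	/* Illustrative only: sanity checks on the new mask layout. */
	static inline void perf_regs_mask_selftest(void)
	{
		/* MMCR0..PMC6 must remain a contiguous 12-entry run */
		BUILD_BUG_ON(PERF_REG_POWERPC_PMC6 - PERF_REG_POWERPC_MMCR0 != 11);
		/* ISA 3.00 mask == ISA 3.1 mask with MMCR3/SIER2/SIER3 cleared */
		BUILD_BUG_ON(PERF_REG_PMU_MASK_300 !=
			     (PERF_REG_PMU_MASK_31 & ~(7ULL << PERF_REG_POWERPC_MMCR3)));
	}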