author     Linus Torvalds <torvalds@linux-foundation.org>   2021-07-09 09:29:13 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2021-07-09 09:29:13 -0700
commit     bd9c3506032983d7bc3245015951db0aad9e6e3d
tree       042acc990cc83b7aeebfda0bde11401fe41257d8 /include/linux
parent     f55966571d5eb2876a11e48e798b4592fa1ffbb7
parent     feac00aad12373b994ff4d340ff818792e833a0a
Merge branch 'akpm' (patches from Andrew)
Pull yet more updates from Andrew Morton:
"54 patches.
Subsystems affected by this patch series: lib, mm (slub, secretmem,
cleanups, init, pagemap, and mremap), and debug"
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (54 commits)
powerpc/mm: enable HAVE_MOVE_PMD support
powerpc/book3s64/mm: update flush_tlb_range to flush page walk cache
mm/mremap: allow arch runtime override
mm/mremap: hold the rmap lock in write mode when moving page table entries.
mm/mremap: use pmd/pud_populate to update page table entries
mm/mremap: don't enable optimized PUD move if page table levels is 2
mm/mremap: convert huge PUD move to separate helper
selftest/mremap_test: avoid crash with static build
selftest/mremap_test: update the test to handle pagesize other than 4K
mm: rename p4d_page_vaddr to p4d_pgtable and make it return pud_t *
mm: rename pud_page_vaddr to pud_pgtable and make it return pmd_t *
kdump: use vmlinux_build_id to simplify
buildid: fix kernel-doc notation
buildid: mark some arguments const
scripts/decode_stacktrace.sh: indicate 'auto' can be used for base path
scripts/decode_stacktrace.sh: silence stderr messages from addr2line/nm
scripts/decode_stacktrace.sh: support debuginfod
x86/dumpstack: use %pSb/%pBb for backtrace printing
arm64: stacktrace: use %pSb for backtrace printing
module: add printk formats to add module build ID to stacktraces
...
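Several commits in the list above wire ELF build IDs into stack traces (the %pSb/%pBb printk formats used by the x86 and arm64 dumpstack patches). As a rough, hypothetical sketch of what the new specifiers enable, not code from the series itself:

/*
 * Illustrative fragment only: %pS prints an address as
 * symbol+offset/size [module]; the new %pSb variant appends the module
 * build ID when CONFIG_STACKTRACE_BUILD_ID is enabled, and %pBb does
 * the same for %pB-style (return address) lookups.
 */
void report_caller(void)
{
	printk(KERN_INFO "caller: %pS\n", __builtin_return_address(0));
	printk(KERN_INFO "caller: %pSb\n", __builtin_return_address(0));
}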
Diffstat (limited to 'include/linux')
 include/linux/bootconfig.h       |  4
 include/linux/buildid.h          |  8
 include/linux/compaction.h       |  4
 include/linux/cpumask.h          |  2
 include/linux/crash_core.h       | 12
 include/linux/debugobjects.h     |  2
 include/linux/hmm.h              |  2
 include/linux/hugetlb.h          |  6
 include/linux/kallsyms.h         | 21
 include/linux/list_lru.h         |  4
 include/linux/lru_cache.h        |  8
 include/linux/mm.h               |  3
 include/linux/mmu_notifier.h     |  8
 include/linux/module.h           |  9
 include/linux/nodemask.h         |  6
 include/linux/percpu-defs.h      |  2
 include/linux/percpu-refcount.h  |  2
 include/linux/pgtable.h          |  4
 include/linux/scatterlist.h      |  2
 include/linux/secretmem.h        | 54
 include/linux/set_memory.h       | 12
 include/linux/shrinker.h         |  2
 include/linux/syscalls.h         |  1
 include/linux/vmalloc.h          |  4
 24 files changed, 142 insertions(+), 40 deletions(-)
diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h
index 6bdd94cff4e2..abe089c27529 100644
--- a/include/linux/bootconfig.h
+++ b/include/linux/bootconfig.h
@@ -214,10 +214,10 @@ static inline struct xbc_node * __init xbc_node_get_subkey(struct xbc_node *node
  * @value: Iterated value of array entry.
  *
  * Iterate array entries of given @key under @node. Each array entry node
- * is stroed to @anode and @value. If the @node doesn't have @key node,
+ * is stored to @anode and @value. If the @node doesn't have @key node,
  * it does nothing.
  * Note that even if the found key node has only one value (not array)
- * this executes block once. Hoever, if the found key node has no value
+ * this executes block once. However, if the found key node has no value
  * (key-only node), this does nothing. So don't use this for testing the
  * key-value pair existence.
  */
diff --git a/include/linux/buildid.h b/include/linux/buildid.h
index 40232f90db6e..3b7a0ff4642f 100644
--- a/include/linux/buildid.h
+++ b/include/linux/buildid.h
@@ -8,5 +8,13 @@
 
 int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
 		   __u32 *size);
+int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size);
+
+#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_CRASH_CORE)
+extern unsigned char vmlinux_build_id[BUILD_ID_SIZE_MAX];
+void init_vmlinux_build_id(void);
+#else
+static inline void init_vmlinux_build_id(void) { }
+#endif
 
 #endif
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 4221888bdcd6..c24098c7acca 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -35,12 +35,12 @@ enum compact_result {
 	COMPACT_CONTINUE,
 
 	/*
-	 * The full zone was compacted scanned but wasn't successfull to compact
+	 * The full zone was compacted scanned but wasn't successful to compact
	 * suitable pages.
	 */
 	COMPACT_COMPLETE,
 	/*
-	 * direct compaction has scanned part of the zone but wasn't successfull
+	 * direct compaction has scanned part of the zone but wasn't successful
	 * to compact suitable pages.
	 */
 	COMPACT_PARTIAL_SKIPPED,
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index bfc4690de4f4..f3689a52bfd0 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -259,7 +259,7 @@ extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool
 /**
  * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
  * @cpu: the (optionally unsigned) integer iterator
- * @mask: the cpumask poiter
+ * @mask: the cpumask pointer
  * @start: the start location
  *
  * The implementation does not assume any bit in @mask is set (including @start).
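For orientation, a minimal, hypothetical sketch of how the new buildid.h interfaces above could be used once init_vmlinux_build_id() has stashed the kernel's own ELF build ID at boot; %phN is the existing printk specifier for dumping raw bytes as hex, which the crash_core.h hunk below also relies on:

#include <linux/buildid.h>
#include <linux/printk.h>

/* Hypothetical helper, not part of this patch: dump the running kernel's
 * build ID.  vmlinux_build_id[] holds BUILD_ID_SIZE_MAX (20) raw bytes,
 * filled from the ELF note by init_vmlinux_build_id() early in boot. */
static void print_kernel_build_id(void)
{
	pr_info("vmlinux build ID: %20phN\n", vmlinux_build_id);
}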
diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h
index 206bde8308b2..de62a722431e 100644
--- a/include/linux/crash_core.h
+++ b/include/linux/crash_core.h
@@ -38,8 +38,12 @@ phys_addr_t paddr_vmcoreinfo_note(void);
 
 #define VMCOREINFO_OSRELEASE(value) \
 	vmcoreinfo_append_str("OSRELEASE=%s\n", value)
-#define VMCOREINFO_BUILD_ID(value) \
-	vmcoreinfo_append_str("BUILD-ID=%s\n", value)
+#define VMCOREINFO_BUILD_ID()						\
+	({								\
+		static_assert(sizeof(vmlinux_build_id) == 20);		\
+		vmcoreinfo_append_str("BUILD-ID=%20phN\n", vmlinux_build_id); \
+	})
+
 #define VMCOREINFO_PAGESIZE(value) \
 	vmcoreinfo_append_str("PAGESIZE=%ld\n", value)
 #define VMCOREINFO_SYMBOL(name) \
@@ -69,10 +73,6 @@ extern unsigned char *vmcoreinfo_data;
 extern size_t vmcoreinfo_size;
 extern u32 *vmcoreinfo_note;
 
-/* raw contents of kernel .notes section */
-extern const void __start_notes __weak;
-extern const void __stop_notes __weak;
-
 Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type,
 			  void *data, size_t data_len);
 void final_note(Elf_Word *buf);
diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h
index 8d2dde23e9fb..32444686b6ff 100644
--- a/include/linux/debugobjects.h
+++ b/include/linux/debugobjects.h
@@ -18,7 +18,7 @@ enum debug_obj_state {
 struct debug_obj_descr;
 
 /**
- * struct debug_obj - representaion of an tracked object
+ * struct debug_obj - representation of an tracked object
  * @node:	hlist node to link the object into the tracker list
  * @state:	tracked object state
  * @astate:	current active state
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index 866a0fa104c4..2fd2e91d5107 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -113,7 +113,7 @@ int hmm_range_fault(struct hmm_range *range);
  * HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range
  *
  * When waiting for mmu notifiers we need some kind of time out otherwise we
- * could potentialy wait for ever, 1000ms ie 1s sounds like a long time to
+ * could potentially wait for ever, 1000ms ie 1s sounds like a long time to
  * wait already.
  */
 #define HMM_RANGE_DEFAULT_TIMEOUT 1000
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 8e0f32f935bd..f7ca1a3870ea 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -51,7 +51,7 @@ struct hugepage_subpool {
 	long count;
 	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
 	long used_hpages;	/* Used count against maximum, includes */
-				/* both alloced and reserved pages. */
+				/* both allocated and reserved pages. */
 	struct hstate *hstate;
 	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
 	long rsv_hpages;	/* Pages reserved against global pool to */
@@ -85,7 +85,7 @@ struct resv_map {
  * by a resv_map's lock.  The set of regions within the resv_map represent
  * reservations for huge pages, or huge pages that have already been
  * instantiated within the map.  The from and to elements are huge page
- * indicies into the associated mapping.  from indicates the starting index
+ * indices into the associated mapping.  from indicates the starting index
  * of the region.  to represents the first index past the end of the region.
  *
  * For example, a file region structure with from == 0 and to == 4 represents
@@ -797,7 +797,7 @@ static inline bool hugepage_migration_supported(struct hstate *h)
  * It determines whether or not a huge page should be placed on
  * movable zone or not. Movability of any huge page should be
  * required only if huge page size is supported for migration.
- * There wont be any reason for the huge page to be movable if
+ * There won't be any reason for the huge page to be movable if
  * it is not migratable to start with. Also the size of the huge
  * page should be large enough to be placed under a movable zone
  * and still feasible enough to be migratable. Just the presence
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 465060acc981..a1d6fc82d7f0 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -7,6 +7,7 @@
 #define _LINUX_KALLSYMS_H
 
 #include <linux/errno.h>
+#include <linux/buildid.h>
 #include <linux/kernel.h>
 #include <linux/stddef.h>
 #include <linux/mm.h>
@@ -15,8 +16,10 @@
 #include <asm/sections.h>
 
 #define KSYM_NAME_LEN 128
-#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \
-			 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1)
+#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s %s]") + \
+			(KSYM_NAME_LEN - 1) + \
+			2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + \
+			(BUILD_ID_SIZE_MAX * 2) + 1)
 
 struct cred;
 struct module;
@@ -91,8 +94,10 @@ const char *kallsyms_lookup(unsigned long addr,
 
 /* Look up a kernel symbol and return it in a text buffer. */
 extern int sprint_symbol(char *buffer, unsigned long address);
+extern int sprint_symbol_build_id(char *buffer, unsigned long address);
 extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
 extern int sprint_backtrace(char *buffer, unsigned long address);
+extern int sprint_backtrace_build_id(char *buffer, unsigned long address);
 
 int lookup_symbol_name(unsigned long addr, char *symname);
 int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);
@@ -128,6 +133,12 @@ static inline int sprint_symbol(char *buffer, unsigned long addr)
 	return 0;
 }
 
+static inline int sprint_symbol_build_id(char *buffer, unsigned long address)
+{
+	*buffer = '\0';
+	return 0;
+}
+
 static inline int sprint_symbol_no_offset(char *buffer, unsigned long addr)
 {
 	*buffer = '\0';
@@ -140,6 +151,12 @@ static inline int sprint_backtrace(char *buffer, unsigned long addr)
 	return 0;
 }
 
+static inline int sprint_backtrace_build_id(char *buffer, unsigned long addr)
+{
+	*buffer = '\0';
+	return 0;
+}
+
 static inline int lookup_symbol_name(unsigned long addr, char *symname)
 {
 	return -ERANGE;
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index 9dcaa3e582c9..1b5fceb565df 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -146,7 +146,7 @@ typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
  * @lru: the lru pointer.
  * @nid: the node id to scan from.
  * @memcg: the cgroup to scan from.
- * @isolate: callback function that is resposible for deciding what to do with
+ * @isolate: callback function that is responsible for deciding what to do with
  *	the item currently being scanned
  * @cb_arg: opaque type that will be passed to @isolate
  * @nr_to_walk: how many items to scan.
@@ -172,7 +172,7 @@ unsigned long list_lru_walk_one(struct list_lru *lru,
  * @lru: the lru pointer.
  * @nid: the node id to scan from.
  * @memcg: the cgroup to scan from.
- * @isolate: callback function that is resposible for deciding what to do with
+ * @isolate: callback function that is responsible for deciding what to do with
  *	the item currently being scanned
  * @cb_arg: opaque type that will be passed to @isolate
  * @nr_to_walk: how many items to scan.
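A small hypothetical caller for the sprint_symbol_build_id()/sprint_backtrace_build_id() declarations above; KSYM_SYMBOL_LEN was enlarged precisely so such a buffer can hold the extra 2 * BUILD_ID_SIZE_MAX hex characters:

#include <linux/kallsyms.h>

/* Sketch only: format an address as "symbol+off/size [module buildid]".
 * With !CONFIG_KALLSYMS the stub above leaves an empty string, so the
 * caller needs no special casing. */
static void show_addr(unsigned long addr)
{
	char sym[KSYM_SYMBOL_LEN];

	sprint_symbol_build_id(sym, addr);
	pr_info("%s\n", sym);
}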
diff --git a/include/linux/lru_cache.h b/include/linux/lru_cache.h
index 429d67d815ce..07add7882a5d 100644
--- a/include/linux/lru_cache.h
+++ b/include/linux/lru_cache.h
@@ -32,7 +32,7 @@ This header file (and its .c file; kernel-doc of functions see there)
 	Because of this later property, it is called "lru_cache".
 	As it actually Tracks Objects in an Active SeT, we could also call it
 	toast (incidentally that is what may happen to the data on the
-	backend storage uppon next resync, if we don't get it right).
+	backend storage upon next resync, if we don't get it right).
 
 What for?
 
@@ -152,7 +152,7 @@ struct lc_element {
 	 * for paranoia, and for "lc_element_to_index" */
 	unsigned lc_index;
 	/* if we want to track a larger set of objects,
-	 * it needs to become arch independend u64 */
+	 * it needs to become an architecture independent u64 */
 	unsigned lc_number;
 	/* special label when on free list */
 #define LC_FREE	(~0U)
@@ -263,7 +263,7 @@ extern void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char
 *
 * Allows (expects) the set to be "dirty".  Note that the reference counts and
 * order on the active and lru lists may still change.  Used to serialize
- * changing transactions. Returns true if we aquired the lock.
+ * changing transactions. Returns true if we acquired the lock.
 */
static inline int lc_try_lock_for_transaction(struct lru_cache *lc)
{
@@ -275,7 +275,7 @@ static inline int lc_try_lock_for_transaction(struct lru_cache *lc)
 * @lc: the lru cache to operate on
 *
 * Note that the reference counts and order on the active and lru lists may
- * still change.  Only works on a "clean" set.  Returns true if we aquired the
+ * still change.  Only works on a "clean" set.  Returns true if we acquired the
 * lock, which means there are no pending changes, and any further attempt to
 * change the set will not succeed until the next lc_unlock().
 */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 788a0b1323d0..57453dba41b9 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -238,6 +238,9 @@ int __add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 
 #define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
 
+void setup_initial_init_mm(void *start_code, void *end_code,
+			   void *end_data, void *brk);
+
 /*
  * Linux kernel virtual memory manager primitives.
  * The idea being to have a "virtual" mm in the same way
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 6692da8d121d..45fc2c81e370 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -33,7 +33,7 @@ struct mmu_interval_notifier;
 *
 * @MMU_NOTIFY_SOFT_DIRTY: soft dirty accounting (still same page and same
 * access flags). User should soft dirty the page in the end callback to make
- * sure that anyone relying on soft dirtyness catch pages that might be written
+ * sure that anyone relying on soft dirtiness catch pages that might be written
 * through non CPU mappings.
 *
 * @MMU_NOTIFY_RELEASE: used during mmu_interval_notifier invalidate to signal
@@ -167,7 +167,7 @@ struct mmu_notifier_ops {
	 * decrease the refcount. If the refcount is decreased on
	 * invalidate_range_start() then the VM can free pages as page
	 * table entries are removed. If the refcount is only
-	 * droppped on invalidate_range_end() then the driver itself
+	 * dropped on invalidate_range_end() then the driver itself
	 * will drop the last refcount but it must take care to flush
	 * any secondary tlb before doing the final free on the
	 * page. Pages will no longer be referenced by the linux
@@ -196,7 +196,7 @@ struct mmu_notifier_ops {
	 * If invalidate_range() is used to manage a non-CPU TLB with
	 * shared page-tables, it not necessary to implement the
	 * invalidate_range_start()/end() notifiers, as
-	 * invalidate_range() alread catches the points in time when an
+	 * invalidate_range() already catches the points in time when an
	 * external TLB range needs to be flushed. For more in depth
	 * discussion on this see Documentation/vm/mmu_notifier.rst
	 *
@@ -369,7 +369,7 @@ mmu_interval_read_retry(struct mmu_interval_notifier *interval_sub,
 * mmu_interval_read_retry() will return true.
 *
 * False is not reliable and only suggests a collision may not have
- * occured. It can be called many times and does not have to hold the user
+ * occurred. It can be called many times and does not have to hold the user
 * provided lock.
 *
 * This call can be used as part of loops and other expensive operations to
diff --git a/include/linux/module.h b/include/linux/module.h
index 8100bb477d86..8a298d820dbc 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -11,6 +11,7 @@
 #include <linux/list.h>
 #include <linux/stat.h>
+#include <linux/buildid.h>
 #include <linux/compiler.h>
 #include <linux/cache.h>
 #include <linux/kmod.h>
@@ -369,6 +370,11 @@ struct module {
 	/* Unique handle for this module */
 	char name[MODULE_NAME_LEN];
 
+#ifdef CONFIG_STACKTRACE_BUILD_ID
+	/* Module build ID */
+	unsigned char build_id[BUILD_ID_SIZE_MAX];
+#endif
+
 	/* Sysfs stuff. */
 	struct module_kobject mkobj;
 	struct module_attribute *modinfo_attrs;
@@ -636,7 +642,7 @@ void *dereference_module_function_descriptor(struct module *mod, void *ptr);
 const char *module_address_lookup(unsigned long addr,
 			    unsigned long *symbolsize,
 			    unsigned long *offset,
-			    char **modname,
+			    char **modname, const unsigned char **modbuildid,
 			    char *namebuf);
 int lookup_module_symbol_name(unsigned long addr, char *symname);
 int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);
@@ -740,6 +746,7 @@ static inline const char *module_address_lookup(unsigned long addr,
 					unsigned long *symbolsize,
 					unsigned long *offset,
 					char **modname,
+					const unsigned char **modbuildid,
 					char *namebuf)
 {
 	return NULL;
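The setup_initial_init_mm() declaration in the mm.h hunk above lets architectures replace their open-coded init_mm field assignments. A sketch of the intended call site, using the conventional linker-script symbols (exact symbols vary by architecture; this is illustrative, not a quote from the series):

/* Hypothetical arch setup path: seed init_mm's code/data/brk bounds. */
void __init setup_arch(char **cmdline_p)
{
	setup_initial_init_mm(_stext, _etext, _edata, _end);
	/* ... remaining arch-specific setup ... */
}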
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index ac398e143c9a..567c3ddba2c4 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -119,7 +119,7 @@ static inline const unsigned long *__nodemask_pr_bits(const nodemask_t *m)
 * The inline keyword gives the compiler room to decide to inline, or
 * not inline a function as it sees best.  However, as these functions
 * are called in both __init and non-__init functions, if they are not
- * inlined we will end up with a section mis-match error (of the type of
+ * inlined we will end up with a section mismatch error (of the type of
 * freeable items not being freed).  So we must use __always_inline here
 * to fix the problem.  If other functions in the future also end up in
 * this situation they will also need to be annotated as __always_inline
@@ -515,7 +515,7 @@ static inline int node_random(const nodemask_t *mask)
 #define for_each_online_node(node) for_each_node_state(node, N_ONLINE)
 
 /*
- * For nodemask scrach area.
+ * For nodemask scratch area.
  * NODEMASK_ALLOC(type, name) allocates an object with a specified type and
  * name.
  */
@@ -528,7 +528,7 @@ static inline int node_random(const nodemask_t *mask)
 #define NODEMASK_FREE(m)			do {} while (0)
 #endif
 
-/* A example struture for using NODEMASK_ALLOC, used in mempolicy. */
+/* Example structure for using NODEMASK_ALLOC, used in mempolicy. */
 struct nodemask_scratch {
 	nodemask_t mask1;
 	nodemask_t mask2;
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index dff7040f629a..af1071535de8 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -412,7 +412,7 @@ do {									\
 * instead.
 *
 * If there is no other protection through preempt disable and/or disabling
- * interupts then one of these RMW operations can show unexpected behavior
+ * interrupts then one of these RMW operations can show unexpected behavior
 * because the execution thread was rescheduled on another processor or an
 * interrupt occurred and the same percpu variable was modified from the
 * interrupt context.
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 16c35a728b4c..ae16a9856305 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -213,7 +213,7 @@ static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
 * percpu_ref_get - increment a percpu refcount
 * @ref: percpu_ref to get
 *
- * Analagous to atomic_long_inc().
+ * Analogous to atomic_long_inc().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index e82660f7b9e4..d147480cdefc 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -106,7 +106,7 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
 #ifndef pmd_offset
 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
 {
-	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
+	return pud_pgtable(*pud) + pmd_index(address);
 }
 #define pmd_offset pmd_offset
 #endif
@@ -114,7 +114,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
 #ifndef pud_offset
 static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
 {
-	return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address);
+	return p4d_pgtable(*p4d) + pud_index(address);
 }
 #define pud_offset pud_offset
 #endif
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 6f70572b2938..ecf87484814f 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -474,7 +474,7 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter)
 * Iterates over sg entries mapping page-by-page.  On each successful
 * iteration, @miter->page points to the mapped page and
 * @miter->length bytes of data can be accessed at @miter->addr.  As
- * long as an interation is enclosed between start and stop, the user
+ * long as an iteration is enclosed between start and stop, the user
 * is free to choose control structure and when to stop.
 *
 * @miter->consumed is set to @miter->length on each iteration.  It
diff --git a/include/linux/secretmem.h b/include/linux/secretmem.h
new file mode 100644
index 000000000000..21c3771e6a56
--- /dev/null
+++ b/include/linux/secretmem.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _LINUX_SECRETMEM_H
+#define _LINUX_SECRETMEM_H
+
+#ifdef CONFIG_SECRETMEM
+
+extern const struct address_space_operations secretmem_aops;
+
+static inline bool page_is_secretmem(struct page *page)
+{
+	struct address_space *mapping;
+
+	/*
+	 * Using page_mapping() is quite slow because of the actual call
+	 * instruction and repeated compound_head(page) inside the
+	 * page_mapping() function.
+	 * We know that secretmem pages are not compound and LRU so we can
+	 * save a couple of cycles here.
+	 */
+	if (PageCompound(page) || !PageLRU(page))
+		return false;
+
+	mapping = (struct address_space *)
+		((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
+
+	if (mapping != page->mapping)
+		return false;
+
+	return mapping->a_ops == &secretmem_aops;
+}
+
+bool vma_is_secretmem(struct vm_area_struct *vma);
+bool secretmem_active(void);
+
+#else
+
+static inline bool vma_is_secretmem(struct vm_area_struct *vma)
+{
+	return false;
+}
+
+static inline bool page_is_secretmem(struct page *page)
+{
+	return false;
+}
+
+static inline bool secretmem_active(void)
+{
+	return false;
+}
+
+#endif /* CONFIG_SECRETMEM */
+
+#endif /* _LINUX_SECRETMEM_H */
diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h
index fe1aa4e54680..f36be5166c19 100644
--- a/include/linux/set_memory.h
+++ b/include/linux/set_memory.h
@@ -28,7 +28,19 @@ static inline bool kernel_page_present(struct page *page)
 {
 	return true;
 }
+#else /* CONFIG_ARCH_HAS_SET_DIRECT_MAP */
+/*
+ * Some architectures, e.g. ARM64 can disable direct map modifications at
+ * boot time. Let them overrive this query.
+ */
+#ifndef can_set_direct_map
+static inline bool can_set_direct_map(void)
+{
+	return true;
+}
+#define can_set_direct_map can_set_direct_map
 #endif
+#endif /* CONFIG_ARCH_HAS_SET_DIRECT_MAP */
 
 #ifndef set_mce_nospec
 static inline int set_mce_nospec(unsigned long pfn, bool unmap)
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 1eac79ce57d4..9814fff58a69 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -4,7 +4,7 @@
 
 /*
 * This struct is used to pass information from page reclaim to the shrinkers.
- * We consolidate the values for easier extention later.
+ * We consolidate the values for easier extension later.
 *
 * The 'gfpmask' refers to the allocation we are currently trying to
 * fulfil.
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 586128d5c3b8..69c9a7010081 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -1050,6 +1050,7 @@ asmlinkage long sys_landlock_create_ruleset(const struct landlock_ruleset_attr _
 asmlinkage long sys_landlock_add_rule(int ruleset_fd, enum landlock_rule_type rule_type,
 		const void __user *rule_attr, __u32 flags);
 asmlinkage long sys_landlock_restrict_self(int ruleset_fd, __u32 flags);
+asmlinkage long sys_memfd_secret(unsigned int flags);
 
 /*
  * Architecture-specific system calls
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 1dabd6f22486..2644425b6dce 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -29,7 +29,7 @@ struct notifier_block;		/* in notifier.h */
 #define VM_NO_HUGE_VMAP		0x00000400	/* force PAGE_SIZE pte mapping */
 
 /*
- * VM_KASAN is used slighly differently depending on CONFIG_KASAN_VMALLOC.
+ * VM_KASAN is used slightly differently depending on CONFIG_KASAN_VMALLOC.
 *
 * If IS_ENABLED(CONFIG_KASAN_VMALLOC), VM_KASAN is set on a vm_struct after
 * shadow memory has been mapped. It's used to handle allocation errors so that
@@ -247,7 +247,7 @@ static inline void set_vm_flush_reset_perms(void *addr)
 extern long vread(char *buf, char *addr, unsigned long count);
 
 /*
- * Internals.  Dont't use..
+ * Internals.  Don't use..
 */
extern struct list_head vmap_area_list;
extern __init void vm_area_add_early(struct vm_struct *vm);
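The secretmem.h and syscalls.h additions above are the kernel side of the new memfd_secret() system call: pages backing such a mapping are removed from the kernel's direct map, making their contents inaccessible to most kernel code. A hedged userspace sketch of the intended usage (assumes the installed headers define __NR_memfd_secret; note the feature can be disabled at boot):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	int fd = syscall(__NR_memfd_secret, 0);	/* no flags defined yet */

	if (fd < 0 || ftruncate(fd, page) < 0)
		return 1;

	char *p = mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	strcpy(p, "not in the direct map");	/* visible only via this mapping */
	munmap(p, page);
	close(fd);
	return 0;
}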