author | Vijayanand Jitta <vjitta@codeaurora.org> | 2021-02-25 17:21:27 -0800
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2021-02-26 09:41:04 -0800
commit | e1fdc403349c64fa58f4c163f4bf9b860b4db808 (patch)
tree | 18d2e4c6908552dccc5069f3aa393e39f0ca9655 /lib
parent | d262093656a0eec6d6114a3178a9d887fddd0ded (diff)
lib: stackdepot: add support to disable stack depot
Add a kernel parameter, stack_depot_disable, to disable the stack depot, so
that the stack hash table does not consume any memory when the stack depot
is disabled.
The use case is a kernel built with CONFIG_PAGE_OWNER but run without
page_owner=on. Without this patch, stackdepot still consumes the memory for
the hash table; by default that is 8M, which is not trivial.
With this option, on a CONFIG_PAGE_OWNER system booted with page_owner=off
and stack_depot_disable on the kernel command line, the memory otherwise
wasted on the hash table is saved.
[akpm@linux-foundation.org: fix CONFIG_STACKDEPOT=n build]
Link: https://lkml.kernel.org/r/1611749198-24316-2-git-send-email-vjitta@codeaurora.org
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
Signed-off-by: Vijayanand Jitta <vjitta@codeaurora.org>
Cc: Alexander Potapenko <glider@google.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Yogesh Lal <ylal@codeaurora.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
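The diffstat below is limited to lib/, so the call site of the new stack_depot_init() is not part of this view. The sketch below is only an illustration under that assumption (the function name example_early_mm_setup is hypothetical, not from this commit): since the hash table is now allocated with memblock_alloc(), early boot code is expected to call stack_depot_init() while memblock is still usable, and the call skips the allocation entirely when the user booted with stack_depot_disable.

```c
/*
 * Hypothetical early-boot call site (an assumption for illustration;
 * the actual wiring is outside the lib/ diff shown below).
 */
#include <linux/init.h>
#include <linux/stackdepot.h>

static void __init example_early_mm_setup(void)
{
	/*
	 * Allocates the stack hash table from memblock, or does nothing
	 * if stack_depot_disable was passed on the kernel command line.
	 */
	stack_depot_init();
}
```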
Diffstat (limited to 'lib')
-rw-r--r-- | lib/stackdepot.c | 32 |
1 file changed, 28 insertions, 4 deletions
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 4b9715470e87..cc21116512a7 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -31,6 +31,7 @@
 #include <linux/stackdepot.h>
 #include <linux/string.h>
 #include <linux/types.h>
+#include <linux/memblock.h>
 
 #define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)
 
@@ -145,9 +146,32 @@ static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
 #define STACK_HASH_MASK (STACK_HASH_SIZE - 1)
 #define STACK_HASH_SEED 0x9747b28c
 
-static struct stack_record *stack_table[STACK_HASH_SIZE] = {
-	[0 ... STACK_HASH_SIZE - 1] = NULL
-};
+static bool stack_depot_disable;
+static struct stack_record **stack_table;
+
+static int __init is_stack_depot_disabled(char *str)
+{
+	kstrtobool(str, &stack_depot_disable);
+	if (stack_depot_disable) {
+		pr_info("Stack Depot is disabled\n");
+		stack_table = NULL;
+	}
+	return 0;
+}
+early_param("stack_depot_disable", is_stack_depot_disabled);
+
+int __init stack_depot_init(void)
+{
+	if (!stack_depot_disable) {
+		size_t size = (STACK_HASH_SIZE * sizeof(struct stack_record *));
+		int i;
+
+		stack_table = memblock_alloc(size, size);
+		for (i = 0; i < STACK_HASH_SIZE; i++)
+			stack_table[i] = NULL;
+	}
+	return 0;
+}
 
 /* Calculate hash for a stack */
 static inline u32 hash_stack(unsigned long *entries, unsigned int size)
@@ -241,7 +265,7 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
 	unsigned long flags;
 	u32 hash;
 
-	if (unlikely(nr_entries == 0))
+	if (unlikely(nr_entries == 0) || stack_depot_disable)
 		goto fast_exit;
 
 	hash = hash_stack(entries, nr_entries);
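For context, here is a minimal, hypothetical caller sketch (the helper record_current_stack() is not from this patch) showing why the one-line change in stack_depot_save() is enough for existing callers: when the depot is disabled, the function takes the fast_exit path and returns a zero handle (retval is initialized to zero), so a caller only has to treat handle 0 as "no stack recorded" rather than risking a dereference of the never-allocated stack_table.

```c
/*
 * Hypothetical caller (not taken from this patch): capture the current
 * stack and store it in the depot, tolerating stack_depot_disable.
 */
#include <linux/kernel.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/gfp.h>

static depot_stack_handle_t record_current_stack(gfp_t gfp)
{
	unsigned long entries[16];
	unsigned int nr;

	/* Capture up to 16 frames, skipping this helper and its caller. */
	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 2);

	/*
	 * Returns 0 when the depot is disabled (or nothing could be
	 * saved), so a zero handle must be treated as "no stack".
	 */
	return stack_depot_save(entries, nr, gfp);
}
```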