stackdepot: Enable context analysis

Enable context analysis for stackdepot: mark the data protected by
pool_lock with __guarded_by()/__pt_guarded_by(), annotate the locking
contract of the internal helpers with __must_hold()/__must_not_hold(),
and mark the one intentionally lock-free read of stack_pools[] in
depot_fetch_stack() as context_unsafe(). This requires moving the
definition of pool_lock above the variables whose annotations refer to
it.

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://patch.msgid.link/20251219154418.3592607-32-elver@google.com
Authored by Marco Elver on 2025-12-19 16:40:20 +01:00; committed by Peter Zijlstra
parent 0eaa911f89
commit c3d3023f1c
2 changed files with 15 additions and 6 deletions
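
For context: the __guarded_by(), __pt_guarded_by() and __must_hold()/__must_not_hold() annotations used below are assumed to map onto Clang's thread-safety (capability) attributes. As a rough illustration of what the analysis enforces, here is a hypothetical user-space sketch, with all names and wrappers illustrative rather than the kernel's, that can be compiled with clang -Wthread-safety:

/* Hypothetical user-space analogue; compile with: clang -Wthread-safety -c ctx.c */
#include <pthread.h>
#include <stddef.h>

#define CAPABILITY(x)		__attribute__((capability(x)))
#define GUARDED_BY(x)		__attribute__((guarded_by(x)))
#define PT_GUARDED_BY(x)	__attribute__((pt_guarded_by(x)))
#define ACQUIRE(x)		__attribute__((acquire_capability(x)))
#define RELEASE(x)		__attribute__((release_capability(x)))

struct CAPABILITY("mutex") lock {
	pthread_mutex_t mu;
};

/* The lock must be declared before the variables whose annotations name it;
 * this is why the patch moves pool_lock above stack_pools. */
static struct lock pool_lock = { PTHREAD_MUTEX_INITIALIZER };

/* Pointee guarded: dereferencing needs the lock, reading the pointer doesn't. */
static void **stack_pools PT_GUARDED_BY(pool_lock);
/* Fully guarded: every read and write needs the lock. */
static size_t pool_offset GUARDED_BY(pool_lock);

static void pool_lock_acquire(void) ACQUIRE(pool_lock)
{
	pthread_mutex_lock(&pool_lock.mu);
}

static void pool_lock_release(void) RELEASE(pool_lock)
{
	pthread_mutex_unlock(&pool_lock.mu);
}

void ok_path(void)
{
	pool_lock_acquire();
	pool_offset = 0;	/* OK: pool_lock is held */
	pool_lock_release();
}

void bad_path(void)
{
	pool_offset = 0;	/* -Wthread-safety: requires holding 'pool_lock' */
}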

lib/Makefile

@@ -250,6 +250,7 @@ obj-$(CONFIG_POLYNOMIAL) += polynomial.o
 # Prevent the compiler from calling builtins like memcmp() or bcmp() from this
 # file.
 CFLAGS_stackdepot.o += -fno-builtin
+CONTEXT_ANALYSIS_stackdepot.o := y
 obj-$(CONFIG_STACKDEPOT) += stackdepot.o
 KASAN_SANITIZE_stackdepot.o := n
 # In particular, instrumenting stackdepot.c with KMSAN will result in infinite

lib/stackdepot.c

@@ -61,18 +61,18 @@ static unsigned int stack_bucket_number_order;
 /* Hash mask for indexing the table. */
 static unsigned int stack_hash_mask;
 
+/* The lock must be held when performing pool or freelist modifications. */
+static DEFINE_RAW_SPINLOCK(pool_lock);
 /* Array of memory regions that store stack records. */
-static void **stack_pools;
+static void **stack_pools __pt_guarded_by(&pool_lock);
 /* Newly allocated pool that is not yet added to stack_pools. */
 static void *new_pool;
 /* Number of pools in stack_pools. */
 static int pools_num;
 /* Offset to the unused space in the currently used pool. */
-static size_t pool_offset = DEPOT_POOL_SIZE;
+static size_t pool_offset __guarded_by(&pool_lock) = DEPOT_POOL_SIZE;
 /* Freelist of stack records within stack_pools. */
-static LIST_HEAD(free_stacks);
-/* The lock must be held when performing pool or freelist modifications. */
-static DEFINE_RAW_SPINLOCK(pool_lock);
+static __guarded_by(&pool_lock) LIST_HEAD(free_stacks);
 
 /* Statistics counters for debugfs. */
 enum depot_counter_id {
@@ -291,6 +291,7 @@ EXPORT_SYMBOL_GPL(stack_depot_init);
  * Initializes new stack pool, and updates the list of pools.
  */
 static bool depot_init_pool(void **prealloc)
+	__must_hold(&pool_lock)
 {
 	lockdep_assert_held(&pool_lock);
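
__must_hold(&pool_lock) turns the existing runtime lockdep_assert_held() check into a compile-time contract: lockdep only catches a violating caller if that path is actually exercised, while the annotation rejects it at build time. Continuing the hypothetical user-space sketch from above (requires_capability is Clang's analogue; names illustrative):

#define REQUIRES(x)	__attribute__((requires_capability(x)))

/* Analogue of __must_hold(): the caller must already own pool_lock. */
static bool init_pool(void **prealloc) REQUIRES(pool_lock)
{
	pool_offset = 0;	/* OK: lock guaranteed held by contract */
	return true;
}

void caller(void)
{
	void *prealloc = NULL;

	pool_lock_acquire();
	init_pool(&prealloc);	/* OK */
	pool_lock_release();
	init_pool(&prealloc);	/* -Wthread-safety: requires holding 'pool_lock' */
}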
@@ -338,6 +339,7 @@ static bool depot_init_pool(void **prealloc)
 /* Keeps the preallocated memory to be used for a new stack depot pool. */
 static void depot_keep_new_pool(void **prealloc)
+	__must_hold(&pool_lock)
 {
 	lockdep_assert_held(&pool_lock);
@@ -357,6 +359,7 @@ static void depot_keep_new_pool(void **prealloc)
  * the current pre-allocation.
  */
 static struct stack_record *depot_pop_free_pool(void **prealloc, size_t size)
+	__must_hold(&pool_lock)
 {
 	struct stack_record *stack;
 	void *current_pool;
@@ -391,6 +394,7 @@ static struct stack_record *depot_pop_free_pool(void **prealloc, size_t size)
 /* Try to find next free usable entry from the freelist. */
 static struct stack_record *depot_pop_free(void)
+	__must_hold(&pool_lock)
 {
 	struct stack_record *stack;
@@ -428,6 +432,7 @@ static inline size_t depot_stack_record_size(struct stack_record *s, unsigned int nr_entries)
 /* Allocates a new stack in a stack depot pool. */
 static struct stack_record *
 depot_alloc_stack(unsigned long *entries, unsigned int nr_entries, u32 hash, depot_flags_t flags, void **prealloc)
+	__must_hold(&pool_lock)
 {
 	struct stack_record *stack = NULL;
 	size_t record_size;
@@ -486,6 +491,7 @@ depot_alloc_stack(unsigned long *entries, unsigned int nr_entries, u32 hash, depot_flags_t flags, void **prealloc)
 }
 
 static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
+	__must_not_hold(&pool_lock)
 {
 	const int pools_num_cached = READ_ONCE(pools_num);
 	union handle_parts parts = { .handle = handle };
@@ -502,7 +508,8 @@ static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
 		return NULL;
 	}
 
-	pool = stack_pools[pool_index];
+	/* @pool_index either valid, or user passed in corrupted value. */
+	pool = context_unsafe(stack_pools[pool_index]);
 	if (WARN_ON(!pool))
 		return NULL;
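
context_unsafe() is the escape hatch for an access the analysis cannot prove safe but that is correct for out-of-band reasons: pool_index has just been range-checked against pools_num, and published pools are never freed, so the unlocked read is deliberate. In the user-space sketch, the closest Clang equivalent is confining such a read to a helper that opts out of the analysis (helper name hypothetical):

#define NO_ANALYSIS	__attribute__((no_thread_safety_analysis))

/* Analogue of context_unsafe(): one deliberate, documented unlocked read,
 * rather than weakening the annotation on stack_pools itself. */
static void *read_pool_unchecked(int pool_index) NO_ANALYSIS
{
	/* Caller has validated @pool_index; pools are never torn down. */
	return stack_pools[pool_index];
}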
@@ -515,6 +522,7 @@ static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
 /* Links stack into the freelist. */
 static void depot_free_stack(struct stack_record *stack)
+	__must_not_hold(&pool_lock)
 {
 	unsigned long flags;
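
Conversely, depot_fetch_stack() and depot_free_stack() acquire pool_lock themselves, so __must_not_hold(&pool_lock) documents that calling them with the lock already held would self-deadlock on the raw spinlock. Clang's analogue is the locks_excluded attribute; a final addition to the hypothetical sketch:

#define EXCLUDES(x)	__attribute__((locks_excluded(x)))

/* Analogue of __must_not_hold(): the function takes pool_lock itself. */
static void free_stack(void *stack) EXCLUDES(pool_lock)
{
	pool_lock_acquire();
	/* ... link the record back onto the freelist ... */
	pool_lock_release();
}

void bad_caller(void)
{
	pool_lock_acquire();
	free_stack(NULL);	/* -Wthread-safety: cannot call while 'pool_lock' is held */
	pool_lock_release();
}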