From ad6fa1e1ab1b8164f1ba296b1b4dc556a483bcad Mon Sep 17 00:00:00 2001
From: Joe Damato <jdamato@fastly.com>
Date: Tue, 1 Mar 2022 23:55:48 -0800
Subject: [PATCH 2/3] page_pool: Add recycle stats

Add per-cpu stats tracking page pool recycling events:
	- cached: recycling placed page in the page pool cache
	- cache_full: page pool cache was full
	- ring: page placed into the ptr ring
	- ring_full: page released from page pool because the ptr ring was full
	- released_refcnt: page released (and not recycled) because refcnt > 1
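
The counters are plain u64 fields in a per-cpu struct, so any consumer has
to sum them across CPUs. As a read-side illustration only (the helper name
below is hypothetical and not part of this patch; a later stats interface
could do something along these lines):

/* Hypothetical read-side helper (illustration only, not in this patch):
 * sum the per-cpu recycle counters into one struct for reporting.
 */
static void page_pool_sum_recycle_stats(const struct page_pool *pool,
					struct page_pool_recycle_stats *total)
{
	int cpu;

	memset(total, 0, sizeof(*total));
	for_each_possible_cpu(cpu) {
		const struct page_pool_recycle_stats *s =
			per_cpu_ptr(pool->recycle_stats, cpu);

		total->cached		+= s->cached;
		total->cache_full	+= s->cache_full;
		total->ring		+= s->ring;
		total->ring_full	+= s->ring_full;
		total->released_refcnt	+= s->released_refcnt;
	}
}

Summing at read time keeps the write side lock-free: each CPU only ever
touches its own copy of the counters.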
Signed-off-by: Joe Damato <jdamato@fastly.com>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/net/page_pool.h | 16 ++++++++++++++++
 net/core/page_pool.c    | 30 ++++++++++++++++++++++++++++--
 2 files changed, 44 insertions(+), 2 deletions(-)

--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -93,6 +93,18 @@ struct page_pool_alloc_stats {
 	u64 refill; /* allocations via successful refill */
 	u64 waive; /* failed refills due to numa zone mismatch */
 };
+
+struct page_pool_recycle_stats {
+	u64 cached;	/* recycling placed page in the cache. */
+	u64 cache_full; /* cache was full */
+	u64 ring;	/* recycling placed page back into ptr ring */
+	u64 ring_full;	/* page was released from page-pool because
+			 * PTR ring was full.
+			 */
+	u64 released_refcnt; /* page released because of elevated
+			      * refcnt
+			      */
+};
 #endif

 struct page_pool {
@@ -136,6 +148,10 @@ struct page_pool {
+#ifdef CONFIG_PAGE_POOL_STATS
+	/* recycle stats are per-cpu to avoid locking */
+	struct page_pool_recycle_stats __percpu *recycle_stats;
+#endif
 	atomic_t pages_state_release_cnt;

 	/* A page_pool is strictly tied to a single RX-queue being
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
 #ifdef CONFIG_PAGE_POOL_STATS
 /* alloc_stat_inc is intended to be used in softirq context */
 #define alloc_stat_inc(pool, __stat)	(pool->alloc_stats.__stat++)
+/* recycle_stat_inc is safe to use when preemption is possible. */
+#define recycle_stat_inc(pool, __stat)							\
+	do {										\
+		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats;	\
+		this_cpu_inc(s->__stat);						\
+	} while (0)
 #else
 #define alloc_stat_inc(pool, __stat)
+#define recycle_stat_inc(pool, __stat)
 #endif

 static int page_pool_init(struct page_pool *pool,
@@ -80,6 +87,12 @@ static int page_pool_init(struct page_po
 	    pool->p.flags & PP_FLAG_PAGE_FRAG)
 		return -EINVAL;

+#ifdef CONFIG_PAGE_POOL_STATS
+	pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
+	if (!pool->recycle_stats)
+		return -ENOMEM;
+#endif
+
 	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
 		return -ENOMEM;
@@ -412,7 +425,12 @@ static bool page_pool_recycle_in_ring(st
 	else
 		ret = ptr_ring_produce_bh(&pool->ring, page);

-	return (ret == 0) ? true : false;
+	if (!ret) {
+		recycle_stat_inc(pool, ring);
+		return true;
+	}
+
+	return false;
 }

 /* Only allow direct recycling in special circumstances, into the
@@ -423,11 +441,14 @@ static bool page_pool_recycle_in_ring(st
 static bool page_pool_recycle_in_cache(struct page *page,
 				       struct page_pool *pool)
 {
-	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE))
+	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) {
+		recycle_stat_inc(pool, cache_full);
 		return false;
+	}

 	/* Caller MUST have verified/know (page_ref_count(page) == 1) */
 	pool->alloc.cache[pool->alloc.count++] = page;
+	recycle_stat_inc(pool, cached);
 	return true;
 }
@@ -482,6 +503,7 @@ __page_pool_put_page(struct page_pool *p
 	 * doing refcnt based recycle tricks, meaning another process
 	 * will be invoking put_page.
 	 */
+	recycle_stat_inc(pool, released_refcnt);
 	/* Do not replace this with page_pool_return_page() */
 	page_pool_release_page(pool, page);
 	put_page(page);
@@ -495,6 +517,7 @@ void page_pool_put_page(struct page_pool
 	page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
 	if (page && !page_pool_recycle_in_ring(pool, page)) {
 		/* Cache full, fallback to free pages */
+		recycle_stat_inc(pool, ring_full);
 		page_pool_return_page(pool, page);
 	}
 }
@@ -641,6 +664,9 @@ static void page_pool_free(struct page_p
 	if (pool->p.flags & PP_FLAG_DMA_MAP)
 		put_device(pool->p.dev);

+#ifdef CONFIG_PAGE_POOL_STATS
+	free_percpu(pool->recycle_stats);
+#endif
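
Because the !CONFIG_PAGE_POOL_STATS branch defines recycle_stat_inc() as an
empty macro, call sites need no #ifdef guards and the counters cost nothing
when the option is off. An illustrative sketch of the intended call-site
shape (hypothetical function, not taken from this patch):

/* Illustration only: hypothetical caller showing the call-site shape.
 * With CONFIG_PAGE_POOL_STATS=n both recycle_stat_inc() calls expand to
 * nothing, so the fast path is unchanged.
 */
static bool example_recycle_to_ring(struct page_pool *pool, struct page *page)
{
	if (ptr_ring_produce_bh(&pool->ring, page) == 0) {
		recycle_stat_inc(pool, ring);		/* per-cpu, preempt-safe */
		return true;
	}

	recycle_stat_inc(pool, ring_full);		/* no room in the ptr ring */
	return false;
}

this_cpu_inc() bumps the local CPU's copy of the counter, which is why the
macro is safe to use even where preemption is possible.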