From ad6fa1e1ab1b8164f1ba296b1b4dc556a483bcad Mon Sep 17 00:00:00 2001
From: Joe Damato <jdamato@fastly.com>
Date: Tue, 1 Mar 2022 23:55:48 -0800
Subject: [PATCH 2/3] page_pool: Add recycle stats

Add per-cpu stats tracking page pool recycling events:
	- cached: recycling placed page in the page pool cache
	- cache_full: page pool cache was full
	- ring: page placed into the ptr ring
	- ring_full: page released from page pool because the ptr ring was full
	- released_refcnt: page released (and not recycled) because refcnt > 1

Signed-off-by: Joe Damato <jdamato@fastly.com>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/net/page_pool.h | 16 ++++++++++++++++
 net/core/page_pool.c    | 30 ++++++++++++++++++++++++++++--
 2 files changed, 44 insertions(+), 2 deletions(-)

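[ Illustrative note, not part of the change below: the recycle counters are
  per-cpu, so any reader has to sum them over all CPUs before reporting.  A
  minimal read-side sketch, assuming CONFIG_PAGE_POOL_STATS=y; the helper
  name pp_sum_recycle_stats() is made up for illustration and is not a
  kernel API:

	#include <linux/cpumask.h>
	#include <linux/percpu.h>
	#include <linux/string.h>
	#include <net/page_pool.h>

	/* Sketch only: fold pool->recycle_stats into one plain struct. */
	static void pp_sum_recycle_stats(const struct page_pool *pool,
					 struct page_pool_recycle_stats *total)
	{
		int cpu;

		memset(total, 0, sizeof(*total));
		for_each_possible_cpu(cpu) {
			const struct page_pool_recycle_stats *s =
				per_cpu_ptr(pool->recycle_stats, cpu);

			total->cached           += s->cached;
			total->cache_full       += s->cache_full;
			total->ring             += s->ring;
			total->ring_full        += s->ring_full;
			total->released_refcnt  += s->released_refcnt;
		}
	}
]
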
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index 1f27e8a48830..298af95bbf96 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -93,6 +93,18 @@ struct page_pool_alloc_stats {
 	u64 refill; /* allocations via successful refill */
 	u64 waive;  /* failed refills due to numa zone mismatch */
 };
+
+struct page_pool_recycle_stats {
+	u64 cached;	/* recycling placed page in the cache. */
+	u64 cache_full; /* cache was full */
+	u64 ring;	/* recycling placed page back into ptr ring */
+	u64 ring_full;	/* page was released from page-pool because
+			 * PTR ring was full.
+			 */
+	u64 released_refcnt; /* page released because of elevated
+			      * refcnt
+			      */
+};
 #endif
 
 struct page_pool {
@@ -136,6 +148,10 @@ struct page_pool {
 	 */
 	struct ptr_ring ring;
 
+#ifdef CONFIG_PAGE_POOL_STATS
+	/* recycle stats are per-cpu to avoid locking */
+	struct page_pool_recycle_stats __percpu *recycle_stats;
+#endif
 	atomic_t pages_state_release_cnt;
 
 	/* A page_pool is strictly tied to a single RX-queue being
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -29,8 +29,15 @@
 #ifdef CONFIG_PAGE_POOL_STATS
 /* alloc_stat_inc is intended to be used in softirq context */
 #define alloc_stat_inc(pool, __stat)	(pool->alloc_stats.__stat++)
+/* recycle_stat_inc is safe to use when preemption is possible. */
+#define recycle_stat_inc(pool, __stat)						\
+	do {									\
+		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
+		this_cpu_inc(s->__stat);					\
+	} while (0)
 #else
 #define alloc_stat_inc(pool, __stat)
+#define recycle_stat_inc(pool, __stat)
 #endif
 
 static int page_pool_init(struct page_pool *pool,
@@ -80,6 +87,12 @@ static int page_pool_init(struct page_po
 	    pool->p.flags & PP_FLAG_PAGE_FRAG)
 		return -EINVAL;
 
+#ifdef CONFIG_PAGE_POOL_STATS
+	pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
+	if (!pool->recycle_stats)
+		return -ENOMEM;
+#endif
+
 	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
 		return -ENOMEM;
 
@@ -412,7 +425,12 @@ static bool page_pool_recycle_in_ring(st
 	else
 		ret = ptr_ring_produce_bh(&pool->ring, page);
 
-	return (ret == 0) ? true : false;
+	if (!ret) {
+		recycle_stat_inc(pool, ring);
+		return true;
+	}
+
+	return false;
 }
 
 /* Only allow direct recycling in special circumstances, into the
@@ -423,11 +441,14 @@ static bool page_pool_recycle_in_ring(st
 static bool page_pool_recycle_in_cache(struct page *page,
 				       struct page_pool *pool)
 {
-	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE))
+	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) {
+		recycle_stat_inc(pool, cache_full);
 		return false;
+	}
 
 	/* Caller MUST have verified/know (page_ref_count(page) == 1) */
 	pool->alloc.cache[pool->alloc.count++] = page;
+	recycle_stat_inc(pool, cached);
 	return true;
 }
 
@@ -482,6 +503,7 @@ __page_pool_put_page(struct page_pool *p
 	 * doing refcnt based recycle tricks, meaning another process
 	 * will be invoking put_page.
 	 */
+	recycle_stat_inc(pool, released_refcnt);
 	/* Do not replace this with page_pool_return_page() */
 	page_pool_release_page(pool, page);
 	put_page(page);
@@ -495,6 +517,7 @@ void page_pool_put_page(struct page_pool
 	page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
 	if (page && !page_pool_recycle_in_ring(pool, page)) {
 		/* Cache full, fallback to free pages */
+		recycle_stat_inc(pool, ring_full);
 		page_pool_return_page(pool, page);
 	}
 }
@@ -641,6 +664,9 @@ static void page_pool_free(struct page_p
 	if (pool->p.flags & PP_FLAG_DMA_MAP)
 		put_device(pool->p.dev);
 
+#ifdef CONFIG_PAGE_POOL_STATS
+	free_percpu(pool->recycle_stats);
+#endif
 	kfree(pool);
 }
 
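[ Usage sketch, not part of this patch: no driver changes are needed to feed
  these counters; any pool created while CONFIG_PAGE_POOL_STATS=y gets the
  per-cpu recycle_stats, and the normal recycle paths patched above bump
  them.  The rxq_* helper names below are hypothetical; page_pool_create()
  and page_pool_put_full_page() are the existing page pool API:

	#include <linux/device.h>
	#include <linux/dma-direction.h>
	#include <linux/numa.h>
	#include <net/page_pool.h>

	/* Hypothetical driver helper: the page_pool_create() ->
	 * page_pool_init() path patched above allocates recycle_stats.
	 */
	static struct page_pool *rxq_create_pool(struct device *dev)
	{
		struct page_pool_params pp_params = {
			.order		= 0,
			.pool_size	= 256,
			.nid		= NUMA_NO_NODE,
			.dev		= dev,
			.dma_dir	= DMA_FROM_DEVICE,
		};

		return page_pool_create(&pp_params);
	}

	/* Hypothetical RX completion path: returning the page takes one of
	 * the recycle paths above and bumps cached, cache_full, ring,
	 * ring_full or released_refcnt accordingly.
	 */
	static void rxq_recycle_page(struct page_pool *pool, struct page *page)
	{
		page_pool_put_full_page(pool, page, false);
	}
]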