target/linux/generic/backport-5.15/600-v5.18-page_pool-Add-allocation-stats.patch
From 8610037e8106b48c79cfe0afb92b2b2466e51c3d Mon Sep 17 00:00:00 2001
From: Joe Damato <jdamato@fastly.com>
Date: Tue, 1 Mar 2022 23:55:47 -0800
Subject: [PATCH] page_pool: Add allocation stats

Add per-pool statistics counters for the allocation path of a page pool.
These stats are incremented in softirq context, so no locking or per-cpu
variables are needed.
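
For illustration only (this sketch is not part of the change itself): the
allocation path of a given pool runs in that pool's NAPI softirq on one CPU,
so a plain field increment cannot race with itself, and the alloc_stat_inc()
helper added below needs neither a lock nor an atomic. Condensed from the
patched __page_pool_get_cached() in net/core/page_pool.c:

	static struct page *get_cached_sketch(struct page_pool *pool)
	{
		struct page *page;

		if (likely(pool->alloc.count)) {
			/* Fast path: pop a page from the per-pool cache */
			page = pool->alloc.cache[--pool->alloc.count];
			alloc_stat_inc(pool, fast); /* plain ++, softirq-safe */
		} else {
			/* Cache miss: refill from the ptr ring; the empty,
			 * refill and waive counters are bumped in that path.
			 */
			page = page_pool_refill_alloc_cache(pool);
		}

		return page;
	}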

These statistics are disabled by default, and a kernel config option is
provided for users who wish to enable them.

The statistics added are:
 - fast: successful fast path allocations
 - slow: slow path order-0 allocations
 - slow_high_order: slow path high order allocations
 - empty: ptr ring is empty, so a slow path allocation was forced.
 - refill: an allocation which triggered a refill of the cache
 - waive: pages obtained from the ptr ring that cannot be added to
   the cache due to a NUMA mismatch.
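
As a rough reading guide (again, not part of the change itself): fast and
refill count pages handed out from the pool's cache or ptr ring, slow and
slow_high_order count pages that came straight from the page allocator, and
empty and waive count events rather than pages. A hypothetical helper,
assuming CONFIG_PAGE_POOL_STATS=y and access to the struct added below, could
sum the successful allocations like this:

	static inline u64 pool_alloc_total(const struct page_pool_alloc_stats *s)
	{
		/* Cache hits plus page-allocator fallbacks; empty and waive
		 * are excluded because they do not hand out a page.
		 */
		return s->fast + s->refill + s->slow + s->slow_high_order;
	}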

Signed-off-by: Joe Damato <jdamato@fastly.com>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/net/page_pool.h | 18 ++++++++++++++++++
 net/Kconfig             | 13 +++++++++++++
 net/core/page_pool.c    | 24 ++++++++++++++++++++----
 3 files changed, 51 insertions(+), 4 deletions(-)

diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index 97c3c19872ff..1f27e8a48830 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -82,6 +82,19 @@ struct page_pool_params {
 unsigned int offset; /* DMA addr offset */
 };

+#ifdef CONFIG_PAGE_POOL_STATS
+struct page_pool_alloc_stats {
+ u64 fast; /* fast path allocations */
+ u64 slow; /* slow-path order 0 allocations */
+ u64 slow_high_order; /* slow-path high order allocations */
+ u64 empty; /* failed refills due to empty ptr ring, forcing
+ * slow path allocation
+ */
+ u64 refill; /* allocations via successful refill */
+ u64 waive; /* failed refills due to numa zone mismatch */
+};
+#endif
+
 struct page_pool {
 struct page_pool_params p;

@@ -132,6 +145,11 @@ struct page_pool {
 refcount_t user_cnt;

 u64 destroy_cnt;
+
+#ifdef CONFIG_PAGE_POOL_STATS
+ /* these stats are incremented while in softirq context */
+ struct page_pool_alloc_stats alloc_stats;
+#endif
 };

 struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -434,6 +434,19 @@ config NET_DEVLINK
 config PAGE_POOL
 bool

+config PAGE_POOL_STATS
+ default n
+ bool "Page pool stats"
+ depends on PAGE_POOL
+ help
+ Enable page pool statistics to track page allocation and recycling
+ in page pools. This option incurs additional CPU cost in allocation
+ and recycle paths and additional memory cost to store the statistics.
+ These statistics are only available if this option is enabled and if
+ the driver using the page pool supports exporting this data.
+
+ If unsure, say N.
+
 config FAILOVER
 tristate "Generic failover module"
 help
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -26,6 +26,13 @@

 #define BIAS_MAX LONG_MAX

+#ifdef CONFIG_PAGE_POOL_STATS
+/* alloc_stat_inc is intended to be used in softirq context */
+#define alloc_stat_inc(pool, __stat) (pool->alloc_stats.__stat++)
+#else
+#define alloc_stat_inc(pool, __stat)
+#endif
+
 static int page_pool_init(struct page_pool *pool,
 const struct page_pool_params *params)
 {
@@ -117,8 +124,10 @@ static struct page *page_pool_refill_all
 int pref_nid; /* preferred NUMA node */

 /* Quicker fallback, avoid locks when ring is empty */
- if (__ptr_ring_empty(r))
+ if (__ptr_ring_empty(r)) {
+ alloc_stat_inc(pool, empty);
 return NULL;
+ }

 /* Softirq guarantee CPU and thus NUMA node is stable. This,
 * assumes CPU refilling driver RX-ring will also run RX-NAPI.
@@ -148,14 +157,17 @@ static struct page *page_pool_refill_all
 * This limit stress on page buddy alloactor.
 */
 page_pool_return_page(pool, page);
+ alloc_stat_inc(pool, waive);
 page = NULL;
 break;
 }
 } while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);

 /* Return last page */
- if (likely(pool->alloc.count > 0))
+ if (likely(pool->alloc.count > 0)) {
 page = pool->alloc.cache[--pool->alloc.count];
+ alloc_stat_inc(pool, refill);
+ }

 spin_unlock(&r->consumer_lock);
 return page;
@@ -170,6 +182,7 @@ static struct page *__page_pool_get_cach
 if (likely(pool->alloc.count)) {
 /* Fast-path */
 page = pool->alloc.cache[--pool->alloc.count];
+ alloc_stat_inc(pool, fast);
 } else {
 page = page_pool_refill_alloc_cache(pool);
 }
@@ -241,6 +254,7 @@ static struct page *__page_pool_alloc_pa
 return NULL;
 }

+ alloc_stat_inc(pool, slow_high_order);
 page_pool_set_pp_info(pool, page);

 /* Track how many pages are held 'in-flight' */
@@ -295,10 +309,12 @@ static struct page *__page_pool_alloc_pa
 }

 /* Return last page */
- if (likely(pool->alloc.count > 0))
+ if (likely(pool->alloc.count > 0)) {
 page = pool->alloc.cache[--pool->alloc.count];
- else
+ alloc_stat_inc(pool, slow);
+ } else {
 page = NULL;
+ }

 /* When page just alloc'ed is should/must have refcnt 1. */
 return page;