commit 8610037e8106b48c79cfe0afb92b2b2466e51c3d
Author: Joe Damato <jdamato@fastly.com>
Date:   Tue Mar 1 23:55:47 2022 -0800

    page_pool: Add allocation stats

    Add per-pool statistics counters for the allocation path of a page pool.
    These stats are incremented in softirq context, so no locking or per-cpu
    variables are needed.

    This code is disabled by default and a kernel config option is provided for
    users who wish to enable them.

    The statistics added are:
        - fast: successful fast path allocations
        - slow: slow path order-0 allocations
        - slow_high_order: slow path high order allocations
        - empty: ptr ring is empty, so a slow path allocation was forced.
        - refill: an allocation which triggered a refill of the cache
        - waive: pages obtained from the ptr ring that cannot be added to
          the cache due to a NUMA mismatch.

    Signed-off-by: Joe Damato <jdamato@fastly.com>
    Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
    Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
    Signed-off-by: David S. Miller <davem@davemloft.net>

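Illustration (not part of the upstream change): with CONFIG_PAGE_POOL_STATS=y the
counters live in pool->alloc_stats, but they are only visible if the driver that
owns the pool exports them. A minimal sketch of how such a driver might dump the
counters, assuming <net/page_pool.h> is included; the helper name and pr_info
format are invented for this example only:

#ifdef CONFIG_PAGE_POOL_STATS
/* Hypothetical debug helper, not part of this patch: print the
 * allocation counters of a page pool that the caller owns.
 */
static void pool_dump_alloc_stats(const struct page_pool *pool)
{
	const struct page_pool_alloc_stats *s = &pool->alloc_stats;

	pr_info("page_pool: fast=%llu slow=%llu slow_high_order=%llu empty=%llu refill=%llu waive=%llu\n",
		s->fast, s->slow, s->slow_high_order,
		s->empty, s->refill, s->waive);
}
#endif
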
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -82,6 +82,19 @@ struct page_pool_params {
 	unsigned int	offset;  /* DMA addr offset */
 };
 
+#ifdef CONFIG_PAGE_POOL_STATS
+struct page_pool_alloc_stats {
+	u64 fast; /* fast path allocations */
+	u64 slow; /* slow-path order 0 allocations */
+	u64 slow_high_order; /* slow-path high order allocations */
+	u64 empty; /* failed refills due to empty ptr ring, forcing
+		    * slow path allocation
+		    */
+	u64 refill; /* allocations via successful refill */
+	u64 waive; /* failed refills due to numa zone mismatch */
+};
+#endif
+
 struct page_pool {
 	struct page_pool_params p;
 
@@ -132,6 +145,11 @@ struct page_pool {
 	refcount_t user_cnt;
 
 	u64 destroy_cnt;
+
+#ifdef CONFIG_PAGE_POOL_STATS
+	/* these stats are incremented while in softirq context */
+	struct page_pool_alloc_stats alloc_stats;
+#endif
 };
 
 struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -434,6 +434,19 @@ config NET_DEVLINK
 config PAGE_POOL
 	bool
 
+config PAGE_POOL_STATS
+	default n
+	bool "Page pool stats"
+	depends on PAGE_POOL
+	help
+	  Enable page pool statistics to track page allocation and recycling
+	  in page pools. This option incurs additional CPU cost in allocation
+	  and recycle paths and additional memory cost to store the statistics.
+	  These statistics are only available if this option is enabled and if
+	  the driver using the page pool supports exporting this data.
+
+	  If unsure, say N.
+
 config FAILOVER
 	tristate "Generic failover module"
 	help
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -26,6 +26,13 @@
 
 #define BIAS_MAX	LONG_MAX
 
+#ifdef CONFIG_PAGE_POOL_STATS
+/* alloc_stat_inc is intended to be used in softirq context */
+#define alloc_stat_inc(pool, __stat)	(pool->alloc_stats.__stat++)
+#else
+#define alloc_stat_inc(pool, __stat)
+#endif
+
 static int page_pool_init(struct page_pool *pool,
 			  const struct page_pool_params *params)
 {
@@ -117,8 +124,10 @@ static struct page *page_pool_refill_all
 	int pref_nid; /* preferred NUMA node */
 
 	/* Quicker fallback, avoid locks when ring is empty */
-	if (__ptr_ring_empty(r))
+	if (__ptr_ring_empty(r)) {
+		alloc_stat_inc(pool, empty);
 		return NULL;
+	}
 
 	/* Softirq guarantee CPU and thus NUMA node is stable. This,
 	 * assumes CPU refilling driver RX-ring will also run RX-NAPI.
@@ -148,14 +157,17 @@ static struct page *page_pool_refill_all
 			 * This limit stress on page buddy alloactor.
 			 */
 			page_pool_return_page(pool, page);
+			alloc_stat_inc(pool, waive);
 			page = NULL;
 			break;
 		}
 	} while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);
 
 	/* Return last page */
-	if (likely(pool->alloc.count > 0))
+	if (likely(pool->alloc.count > 0)) {
 		page = pool->alloc.cache[--pool->alloc.count];
+		alloc_stat_inc(pool, refill);
+	}
 
 	spin_unlock(&r->consumer_lock);
 	return page;
@@ -170,6 +182,7 @@ static struct page *__page_pool_get_cach
 	if (likely(pool->alloc.count)) {
 		/* Fast-path */
 		page = pool->alloc.cache[--pool->alloc.count];
+		alloc_stat_inc(pool, fast);
 	} else {
 		page = page_pool_refill_alloc_cache(pool);
 	}
@@ -241,6 +254,7 @@ static struct page *__page_pool_alloc_pa
 		return NULL;
 	}
 
+	alloc_stat_inc(pool, slow_high_order);
 	page_pool_set_pp_info(pool, page);
 
 	/* Track how many pages are held 'in-flight' */
@@ -295,10 +309,12 @@ static struct page *__page_pool_alloc_pa
 	}
 
 	/* Return last page */
-	if (likely(pool->alloc.count > 0))
+	if (likely(pool->alloc.count > 0)) {
 		page = pool->alloc.cache[--pool->alloc.count];
-	else
+		alloc_stat_inc(pool, slow);
+	} else {
 		page = NULL;
+	}
 
 	/* When page just alloc'ed is should/must have refcnt 1. */
 	return page;