1 From 8610037e8106b48c79cfe0afb92b2b2466e51c3d Mon Sep 17 00:00:00 2001
2 From: Joe Damato <jdamato@fastly.com>
3 Date: Tue, 1 Mar 2022 23:55:47 -0800
4 Subject: [PATCH] page_pool: Add allocation stats
6 Add per-pool statistics counters for the allocation path of a page pool.
7 These stats are incremented in softirq context, so no locking or per-cpu
8 variables are necessary.
10 This code is disabled by default and a kernel config option is provided for
11 users who wish to enable them.
13 The statistics added are:
14 - fast: successful fast path allocations
15 - slow: slow path order-0 allocations
16 - slow_high_order: slow path high order allocations
17 - empty: ptr ring is empty, so a slow path allocation was forced.
18 - refill: an allocation which triggered a refill of the cache
19 - waive: pages obtained from the ptr ring that cannot be added to
20 the cache due to a NUMA mismatch.
22 Signed-off-by: Joe Damato <jdamato@fastly.com>
23 Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
24 Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
25 Signed-off-by: David S. Miller <davem@davemloft.net>
27 include/net/page_pool.h | 18 ++++++++++++++++++
28 net/Kconfig | 13 +++++++++++++
29 net/core/page_pool.c | 24 ++++++++++++++++++++----
30 3 files changed, 51 insertions(+), 4 deletions(-)
32 diff --git a/include/net/page_pool.h b/include/net/page_pool.h
33 index 97c3c19872ff..1f27e8a48830 100644
34 --- a/include/net/page_pool.h
35 +++ b/include/net/page_pool.h
36 @@ -82,6 +82,19 @@ struct page_pool_params {
37 unsigned int offset; /* DMA addr offset */
40 +#ifdef CONFIG_PAGE_POOL_STATS
41 +struct page_pool_alloc_stats {
42 + u64 fast; /* fast path allocations */
43 + u64 slow; /* slow-path order 0 allocations */
44 + u64 slow_high_order; /* slow-path high order allocations */
45 + u64 empty; /* failed refills due to empty ptr ring, forcing
46 + * slow path allocation
48 + u64 refill; /* allocations via successful refill */
49 + u64 waive; /* failed refills due to numa zone mismatch */
54 struct page_pool_params p;
56 @@ -132,6 +145,11 @@ struct page_pool {
61 +#ifdef CONFIG_PAGE_POOL_STATS
62 + /* these stats are incremented while in softirq context */
63 + struct page_pool_alloc_stats alloc_stats;
67 struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
70 @@ -434,6 +434,19 @@ config NET_DEVLINK
74 +config PAGE_POOL_STATS
76 + bool "Page pool stats"
77 + depends on PAGE_POOL
79 + Enable page pool statistics to track page allocation and recycling
80 + in page pools. This option incurs additional CPU cost in allocation
81 + and recycle paths and additional memory cost to store the statistics.
82 + These statistics are only available if this option is enabled and if
83 + the driver using the page pool supports exporting this data.
88 tristate "Generic failover module"
90 --- a/net/core/page_pool.c
91 +++ b/net/core/page_pool.c
94 #define BIAS_MAX LONG_MAX
96 +#ifdef CONFIG_PAGE_POOL_STATS
97 +/* alloc_stat_inc is intended to be used in softirq context */
98 +#define alloc_stat_inc(pool, __stat) (pool->alloc_stats.__stat++)
100 +#define alloc_stat_inc(pool, __stat)
103 static int page_pool_init(struct page_pool *pool,
104 const struct page_pool_params *params)
106 @@ -117,8 +124,10 @@ static struct page *page_pool_refill_all
107 int pref_nid; /* preferred NUMA node */
109 /* Quicker fallback, avoid locks when ring is empty */
110 - if (__ptr_ring_empty(r))
111 + if (__ptr_ring_empty(r)) {
112 + alloc_stat_inc(pool, empty);
116 /* Softirq guarantee CPU and thus NUMA node is stable. This,
117 * assumes CPU refilling driver RX-ring will also run RX-NAPI.
118 @@ -148,14 +157,17 @@ static struct page *page_pool_refill_all
119 * This limit stress on page buddy alloactor.
121 page_pool_return_page(pool, page);
122 + alloc_stat_inc(pool, waive);
126 } while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);
128 /* Return last page */
129 - if (likely(pool->alloc.count > 0))
130 + if (likely(pool->alloc.count > 0)) {
131 page = pool->alloc.cache[--pool->alloc.count];
132 + alloc_stat_inc(pool, refill);
135 spin_unlock(&r->consumer_lock);
137 @@ -170,6 +182,7 @@ static struct page *__page_pool_get_cach
138 if (likely(pool->alloc.count)) {
140 page = pool->alloc.cache[--pool->alloc.count];
141 + alloc_stat_inc(pool, fast);
143 page = page_pool_refill_alloc_cache(pool);
145 @@ -241,6 +254,7 @@ static struct page *__page_pool_alloc_pa
149 + alloc_stat_inc(pool, slow_high_order);
150 page_pool_set_pp_info(pool, page);
152 /* Track how many pages are held 'in-flight' */
153 @@ -295,10 +309,12 @@ static struct page *__page_pool_alloc_pa
156 /* Return last page */
157 - if (likely(pool->alloc.count > 0))
158 + if (likely(pool->alloc.count > 0)) {
159 page = pool->alloc.cache[--pool->alloc.count];
161 + alloc_stat_inc(pool, slow);
166 /* When page just alloc'ed is should/must have refcnt 1. */