From ad6fa1e1ab1b8164f1ba296b1b4dc556a483bcad Mon Sep 17 00:00:00 2001
From: Joe Damato <[email protected]>
Date: Tue, 1 Mar 2022 23:55:48 -0800
Subject: [PATCH 2/3] page_pool: Add recycle stats

Add per-cpu stats tracking page pool recycling events:
	- cached: recycling placed page in the page pool cache
	- cache_full: page pool cache was full
	- ring: page placed into the ptr ring
	- ring_full: page released from page pool because the ptr ring was full
	- released_refcnt: page released (and not recycled) because refcnt > 1

Signed-off-by: Joe Damato <[email protected]>
Acked-by: Jesper Dangaard Brouer <[email protected]>
Reviewed-by: Ilias Apalodimas <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
---
 include/net/page_pool.h | 16 ++++++++++++++++
 net/core/page_pool.c    | 30 ++++++++++++++++++++++++++++--
 2 files changed, 44 insertions(+), 2 deletions(-)
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -93,6 +93,18 @@ struct page_pool_alloc_stats {
 	u64 refill; /* allocations via successful refill */
 	u64 waive;  /* failed refills due to numa zone mismatch */
 };
+
+struct page_pool_recycle_stats {
+	u64 cached;	/* recycling placed page in the cache. */
+	u64 cache_full; /* cache was full */
+	u64 ring;	/* recycling placed page back into ptr ring */
+	u64 ring_full;	/* page was released from page-pool because
+			 * PTR ring was full.
+			 */
+	u64 released_refcnt; /* page released because of elevated
+			      * refcnt
+			      */
+};
 #endif

 struct page_pool {
@@ -136,6 +148,10 @@ struct page_pool {
 	 */
 	struct ptr_ring ring;

+#ifdef CONFIG_PAGE_POOL_STATS
+	/* recycle stats are per-cpu to avoid locking */
+	struct page_pool_recycle_stats __percpu *recycle_stats;
+#endif
 	atomic_t pages_state_release_cnt;

 	/* A page_pool is strictly tied to a single RX-queue being
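
For context, nothing in this series reads the counters back yet; a consumer
has to fold the per-cpu slots into totals itself. A minimal sketch of such an
aggregation, assuming CONFIG_PAGE_POOL_STATS is enabled (the helper name
page_pool_sum_recycle_stats is hypothetical, not part of this patch):

#ifdef CONFIG_PAGE_POOL_STATS
/* Hypothetical read-side helper: sum the per-cpu recycle counters.
 * Writers use this_cpu_inc() with no further synchronization, so a
 * reader may miss an in-flight increment; that is acceptable for
 * statistics and keeps the hot path free of locks and atomics.
 */
static void page_pool_sum_recycle_stats(const struct page_pool *pool,
					struct page_pool_recycle_stats *total)
{
	int cpu;

	memset(total, 0, sizeof(*total));
	for_each_possible_cpu(cpu) {
		const struct page_pool_recycle_stats *s =
			per_cpu_ptr(pool->recycle_stats, cpu);

		total->cached += s->cached;
		total->cache_full += s->cache_full;
		total->ring += s->ring;
		total->ring_full += s->ring_full;
		total->released_refcnt += s->released_refcnt;
	}
}
#endif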
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -29,8 +29,15 @@
 #ifdef CONFIG_PAGE_POOL_STATS
 /* alloc_stat_inc is intended to be used in softirq context */
 #define alloc_stat_inc(pool, __stat)	(pool->alloc_stats.__stat++)
+/* recycle_stat_inc is safe to use when preemption is possible. */
+#define recycle_stat_inc(pool, __stat)						\
+	do {									\
+		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
+		this_cpu_inc(s->__stat);					\
+	} while (0)
 #else
 #define alloc_stat_inc(pool, __stat)
+#define recycle_stat_inc(pool, __stat)
 #endif

 static int page_pool_init(struct page_pool *pool,
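
The asymmetry between the two macros above is deliberate: alloc_stat_inc()
only ever runs in softirq context, where a plain non-atomic increment of a
counter embedded in the pool is safe, while recycling can also happen from
preemptible process context, so recycle_stat_inc() goes through
this_cpu_inc(). A sketch of why that matters (illustrative only, not code
from the patch):

/* Racy from preemptible context: the task can migrate to another CPU
 * between resolving this CPU's slot and storing the incremented value,
 * so two tasks may clobber the same slot's read-modify-write.
 */
this_cpu_ptr(pool->recycle_stats)->ring++;

/* Safe: this_cpu_inc() keeps the slot lookup and the increment together
 * as one preemption-safe operation (a single instruction on x86).
 */
this_cpu_inc(pool->recycle_stats->ring);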
@@ -80,6 +87,12 @@ static int page_pool_init(struct page_po
 	    pool->p.flags & PP_FLAG_PAGE_FRAG)
 		return -EINVAL;

+#ifdef CONFIG_PAGE_POOL_STATS
+	pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
+	if (!pool->recycle_stats)
+		return -ENOMEM;
+#endif
+
 	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
 		return -ENOMEM;

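
Two details worth noting here: alloc_percpu() hands back zero-filled memory,
so every CPU's counters start at zero without an explicit init loop; and
because this allocation happens before ptr_ring_init(), a ptr ring failure
returns without freeing it (page_pool_free() is never reached on this early
error path). A possible hardened shape for that branch, not part of this
patch:

	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) {
#ifdef CONFIG_PAGE_POOL_STATS
		free_percpu(pool->recycle_stats);	/* don't leak the counters */
#endif
		return -ENOMEM;
	}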
@@ -412,7 +425,12 @@ static bool page_pool_recycle_in_ring(st
 	else
 		ret = ptr_ring_produce_bh(&pool->ring, page);

-	return (ret == 0) ? true : false;
+	if (!ret) {
+		recycle_stat_inc(pool, ring);
+		return true;
+	}
+
+	return false;
 }

 /* Only allow direct recycling in special circumstances, into the
@@ -423,11 +441,14 @@ static bool page_pool_recycle_in_ring(st
 static bool page_pool_recycle_in_cache(struct page *page,
				       struct page_pool *pool)
 {
-	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE))
+	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) {
+		recycle_stat_inc(pool, cache_full);
 		return false;
+	}

 	/* Caller MUST have verified/know (page_ref_count(page) == 1) */
 	pool->alloc.cache[pool->alloc.count++] = page;
+	recycle_stat_inc(pool, cached);
 	return true;
 }

@@ -482,6 +503,7 @@ __page_pool_put_page(struct page_pool *p
 	 * doing refcnt based recycle tricks, meaning another process
 	 * will be invoking put_page.
 	 */
+	recycle_stat_inc(pool, released_refcnt);
 	/* Do not replace this with page_pool_return_page() */
 	page_pool_release_page(pool, page);
 	put_page(page);
@@ -495,6 +517,7 @@ void page_pool_put_page(struct page_pool
 	page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
 	if (page && !page_pool_recycle_in_ring(pool, page)) {
 		/* Cache full, fallback to free pages */
+		recycle_stat_inc(pool, ring_full);
 		page_pool_return_page(pool, page);
 	}
 }
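
Note the split in counting sites: "ring" is bumped inside
page_pool_recycle_in_ring() on a successful produce, while "ring_full" is
bumped by this caller when the produce fails. Drivers reach the whole path
through the ordinary put API; a hypothetical driver fragment, where only
mydrv_reuse_page() is invented:

/* Hand a page back to its pool. allow_direct=true is only legal from the
 * pool's own NAPI/softirq context and lets the page land in the lockless
 * cache ("cached" / "cache_full"); otherwise the page goes to the ptr
 * ring ("ring", or "ring_full" on overflow).
 */
static void mydrv_reuse_page(struct page_pool *pool, struct page *page,
			     bool in_napi)
{
	page_pool_put_full_page(pool, page, in_napi);
}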
@@ -641,6 +664,9 @@ static void page_pool_free(struct page_p
 	if (pool->p.flags & PP_FLAG_DMA_MAP)
 		put_device(pool->p.dev);

+#ifdef CONFIG_PAGE_POOL_STATS
+	free_percpu(pool->recycle_stats);
+#endif
 	kfree(pool);
 }

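
Since the series stops at collecting the counters, observing them still
takes kernel-side code. A hypothetical debugging dump built on the
page_pool_sum_recycle_stats() sketch shown earlier (again, not an API this
patch provides):

static void page_pool_dump_recycle_stats(const struct page_pool *pool)
{
	struct page_pool_recycle_stats total;

	page_pool_sum_recycle_stats(pool, &total);
	pr_info("page_pool: cached=%llu cache_full=%llu ring=%llu ring_full=%llu released_refcnt=%llu\n",
		total.cached, total.cache_full, total.ring,
		total.ring_full, total.released_refcnt);
}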