600-v5.18-page_pool-Add-allocation-stats.patch

From 8610037e8106b48c79cfe0afb92b2b2466e51c3d Mon Sep 17 00:00:00 2001
From: Joe Damato <[email protected]>
Date: Tue, 1 Mar 2022 23:55:47 -0800
Subject: [PATCH] page_pool: Add allocation stats

Add per-pool statistics counters for the allocation path of a page pool.
These stats are incremented in softirq context, so no locking or per-cpu
variables are needed.

These statistics are disabled by default, and a kernel config option is
provided for users who wish to enable them.

The statistics added are:
	- fast: successful fast path allocations
	- slow: slow path order-0 allocations
	- slow_high_order: slow path high order allocations
	- empty: ptr ring is empty, so a slow path allocation was forced.
	- refill: an allocation which triggered a refill of the cache
	- waive: pages obtained from the ptr ring that cannot be added to
	  the cache due to a NUMA mismatch.

Signed-off-by: Joe Damato <[email protected]>
Acked-by: Jesper Dangaard Brouer <[email protected]>
Reviewed-by: Ilias Apalodimas <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
---
 include/net/page_pool.h | 18 ++++++++++++++++++
 net/Kconfig             | 13 +++++++++++++
 net/core/page_pool.c    | 24 ++++++++++++++++++++----
 3 files changed, 51 insertions(+), 4 deletions(-)
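
A note on consuming these counters: this patch only embeds struct page_pool_alloc_stats
inside struct page_pool; it does not add an API for reading the values back, so a driver
built with CONFIG_PAGE_POOL_STATS=y would read the fields of a pool it owns directly
(for example from an ethtool stats callback). The sketch below is a minimal illustration
of that, not part of the patch; the helper name is hypothetical.

/* Hypothetical driver-side helper (illustrative, not part of this patch):
 * snapshot the allocation counters of a pool the driver owns into a flat
 * array, e.g. for an ethtool stats callback. Only compiles when
 * CONFIG_PAGE_POOL_STATS=y, since alloc_stats does not exist otherwise.
 */
#include <net/page_pool.h>

#ifdef CONFIG_PAGE_POOL_STATS
static void demo_copy_pool_alloc_stats(const struct page_pool *pool,
				       u64 dst[6])
{
	/* u64 counters written only from softirq context; a plain read
	 * mirrors how the pool itself updates them.
	 */
	dst[0] = pool->alloc_stats.fast;
	dst[1] = pool->alloc_stats.slow;
	dst[2] = pool->alloc_stats.slow_high_order;
	dst[3] = pool->alloc_stats.empty;
	dst[4] = pool->alloc_stats.refill;
	dst[5] = pool->alloc_stats.waive;
}
#endif /* CONFIG_PAGE_POOL_STATS */
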
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -82,6 +82,19 @@ struct page_pool_params {
 	unsigned int	offset;  /* DMA addr offset */
 };
 
+#ifdef CONFIG_PAGE_POOL_STATS
+struct page_pool_alloc_stats {
+	u64 fast; /* fast path allocations */
+	u64 slow; /* slow-path order 0 allocations */
+	u64 slow_high_order; /* slow-path high order allocations */
+	u64 empty; /* failed refills due to empty ptr ring, forcing
+		    * slow path allocation
+		    */
+	u64 refill; /* allocations via successful refill */
+	u64 waive;  /* failed refills due to numa zone mismatch */
+};
+#endif
+
 struct page_pool {
 	struct page_pool_params p;
 
@@ -132,6 +145,11 @@ struct page_pool {
 	refcount_t user_cnt;
 	u64 destroy_cnt;
+
+#ifdef CONFIG_PAGE_POOL_STATS
+	/* these stats are incremented while in softirq context */
+	struct page_pool_alloc_stats alloc_stats;
+#endif
 };
 
 struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -434,6 +434,19 @@ config NET_DEVLINK
 config PAGE_POOL
 	bool
 
+config PAGE_POOL_STATS
+	default n
+	bool "Page pool stats"
+	depends on PAGE_POOL
+	help
+	  Enable page pool statistics to track page allocation and recycling
+	  in page pools. This option incurs additional CPU cost in allocation
+	  and recycle paths and additional memory cost to store the statistics.
+	  These statistics are only available if this option is enabled and if
+	  the driver using the page pool supports exporting this data.
+
+	  If unsure, say N.
+
 config FAILOVER
 	tristate "Generic failover module"
 	help
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -49,6 +49,13 @@ static void page_pool_producer_unlock(st
 		spin_unlock_bh(&pool->ring.producer_lock);
 }
 
+#ifdef CONFIG_PAGE_POOL_STATS
+/* alloc_stat_inc is intended to be used in softirq context */
+#define alloc_stat_inc(pool, __stat)	(pool->alloc_stats.__stat++)
+#else
+#define alloc_stat_inc(pool, __stat)
+#endif
+
 static int page_pool_init(struct page_pool *pool,
 			  const struct page_pool_params *params)
 {
@@ -140,8 +147,10 @@ static struct page *page_pool_refill_all
 	int pref_nid; /* preferred NUMA node */
 
 	/* Quicker fallback, avoid locks when ring is empty */
-	if (__ptr_ring_empty(r))
+	if (__ptr_ring_empty(r)) {
+		alloc_stat_inc(pool, empty);
 		return NULL;
+	}
 
 	/* Softirq guarantee CPU and thus NUMA node is stable. This,
 	 * assumes CPU refilling driver RX-ring will also run RX-NAPI.
@@ -171,14 +180,17 @@ static struct page *page_pool_refill_all
 			 * This limit stress on page buddy alloactor.
 			 */
 			page_pool_return_page(pool, page);
+			alloc_stat_inc(pool, waive);
 			page = NULL;
 			break;
 		}
 	} while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);
 
 	/* Return last page */
-	if (likely(pool->alloc.count > 0))
+	if (likely(pool->alloc.count > 0)) {
 		page = pool->alloc.cache[--pool->alloc.count];
+		alloc_stat_inc(pool, refill);
+	}
 
 	spin_unlock(&r->consumer_lock);
 	return page;
@@ -193,6 +205,7 @@ static struct page *__page_pool_get_cach
 	if (likely(pool->alloc.count)) {
 		/* Fast-path */
 		page = pool->alloc.cache[--pool->alloc.count];
+		alloc_stat_inc(pool, fast);
 	} else {
 		page = page_pool_refill_alloc_cache(pool);
 	}
@@ -264,6 +277,7 @@ static struct page *__page_pool_alloc_pa
 		return NULL;
 	}
 
+	alloc_stat_inc(pool, slow_high_order);
 	page_pool_set_pp_info(pool, page);
 
 	/* Track how many pages are held 'in-flight' */
@@ -318,10 +332,12 @@ static struct page *__page_pool_alloc_pa
 	}
 
 	/* Return last page */
-	if (likely(pool->alloc.count > 0))
+	if (likely(pool->alloc.count > 0)) {
 		page = pool->alloc.cache[--pool->alloc.count];
-	else
+		alloc_stat_inc(pool, slow);
+	} else {
 		page = NULL;
+	}
 
 	/* When page just alloc'ed is should/must have refcnt 1. */
 	return page;
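
Design note on the pattern used in net/core/page_pool.c above: because alloc_stat_inc()
expands to nothing when CONFIG_PAGE_POOL_STATS is unset, call sites on the allocation hot
paths need no #ifdef of their own and the disabled build carries no extra cost. A
standalone, userspace-compilable sketch of the same compile-time on/off counter pattern
(all names here are illustrative, not kernel code):

/* Illustration of the alloc_stat_inc() pattern: a counter bump that
 * compiles away entirely when the build-time knob is off, so callers
 * never need to wrap the macro in #ifdef.
 */
#include <stdio.h>

struct demo_stats { unsigned long long fast, slow; };
struct demo_pool { struct demo_stats alloc_stats; };

#define DEMO_STATS_ENABLED 1	/* flip to 0 to compile the counters out */

#if DEMO_STATS_ENABLED
#define demo_stat_inc(pool, __stat)	((pool)->alloc_stats.__stat++)
#else
#define demo_stat_inc(pool, __stat)	/* expands to nothing */
#endif

int main(void)
{
	struct demo_pool pool = { { 0, 0 } };

	demo_stat_inc(&pool, fast);	/* e.g. a fast-path cache hit */
	demo_stat_inc(&pool, slow);	/* e.g. a slow-path allocation */

	printf("fast=%llu slow=%llu\n",
	       pool.alloc_stats.fast, pool.alloc_stats.slow);
	return 0;
}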