600-v5.18-page_pool-Add-allocation-stats.patch 4.8 KB

commit 8610037e8106b48c79cfe0afb92b2b2466e51c3d
Author: Joe Damato <[email protected]>
Date:   Tue Mar 1 23:55:47 2022 -0800

    page_pool: Add allocation stats

    Add per-pool statistics counters for the allocation path of a page pool.
    These stats are incremented in softirq context, so no locking or per-cpu
    variables are needed.

    This code is disabled by default and a kernel config option is provided for
    users who wish to enable them.

    The statistics added are:
      - fast: successful fast path allocations
      - slow: slow path order-0 allocations
      - slow_high_order: slow path high order allocations
      - empty: ptr ring is empty, so a slow path allocation was forced.
      - refill: an allocation which triggered a refill of the cache
      - waive: pages obtained from the ptr ring that cannot be added to
        the cache due to a NUMA mismatch.

    Signed-off-by: Joe Damato <[email protected]>
    Acked-by: Jesper Dangaard Brouer <[email protected]>
    Reviewed-by: Ilias Apalodimas <[email protected]>
    Signed-off-by: David S. Miller <[email protected]>
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -82,6 +82,19 @@ struct page_pool_params {
 	unsigned int	offset; /* DMA addr offset */
 };
 
+#ifdef CONFIG_PAGE_POOL_STATS
+struct page_pool_alloc_stats {
+	u64 fast; /* fast path allocations */
+	u64 slow; /* slow-path order 0 allocations */
+	u64 slow_high_order; /* slow-path high order allocations */
+	u64 empty; /* failed refills due to empty ptr ring, forcing
+		    * slow path allocation
+		    */
+	u64 refill; /* allocations via successful refill */
+	u64 waive; /* failed refills due to numa zone mismatch */
+};
+#endif
+
 struct page_pool {
 	struct page_pool_params p;
 
@@ -132,6 +145,11 @@ struct page_pool {
 	refcount_t user_cnt;
 	u64 destroy_cnt;
+
+#ifdef CONFIG_PAGE_POOL_STATS
+	/* these stats are incremented while in softirq context */
+	struct page_pool_alloc_stats alloc_stats;
+#endif
 };
 
 struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
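
A quick illustration (not part of this patch): the new counters live directly in struct page_pool, so code that already holds a pool pointer could total them as sketched below. The helper name page_pool_alloc_stats_total is hypothetical; this patch itself adds no accessor or export mechanism.

#ifdef CONFIG_PAGE_POOL_STATS
/* Hypothetical helper: sum the per-pool allocation counters defined
 * above. The fields are plain u64s bumped from softirq context, so a
 * reader in that same context sees consistent values without locking.
 */
static u64 page_pool_alloc_stats_total(const struct page_pool *pool)
{
	const struct page_pool_alloc_stats *stats = &pool->alloc_stats;

	return stats->fast + stats->slow + stats->slow_high_order +
	       stats->empty + stats->refill + stats->waive;
}
#endif /* CONFIG_PAGE_POOL_STATS */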
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -434,6 +434,19 @@ config NET_DEVLINK
 config PAGE_POOL
 	bool
 
+config PAGE_POOL_STATS
+	default n
+	bool "Page pool stats"
+	depends on PAGE_POOL
+	help
+	  Enable page pool statistics to track page allocation and recycling
+	  in page pools. This option incurs additional CPU cost in allocation
+	  and recycle paths and additional memory cost to store the statistics.
+	  These statistics are only available if this option is enabled and if
+	  the driver using the page pool supports exporting this data.
+
+	  If unsure, say N.
+
 config FAILOVER
 	tristate "Generic failover module"
 	help
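
For reference (not part of the diff): PAGE_POOL itself is a non-visible symbol selected by drivers, so on a kernel where such a driver is enabled, the counters are compiled in by setting the new option in the build configuration, for example:

CONFIG_PAGE_POOL_STATS=y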
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -26,6 +26,13 @@
 #define BIAS_MAX	LONG_MAX
 
+#ifdef CONFIG_PAGE_POOL_STATS
+/* alloc_stat_inc is intended to be used in softirq context */
+#define alloc_stat_inc(pool, __stat)	(pool->alloc_stats.__stat++)
+#else
+#define alloc_stat_inc(pool, __stat)
+#endif
+
 static int page_pool_init(struct page_pool *pool,
 			  const struct page_pool_params *params)
 {
@@ -117,8 +124,10 @@ static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
 	int pref_nid; /* preferred NUMA node */
 
 	/* Quicker fallback, avoid locks when ring is empty */
-	if (__ptr_ring_empty(r))
+	if (__ptr_ring_empty(r)) {
+		alloc_stat_inc(pool, empty);
 		return NULL;
+	}
 
 	/* Softirq guarantee CPU and thus NUMA node is stable. This,
 	 * assumes CPU refilling driver RX-ring will also run RX-NAPI.
@@ -148,14 +157,17 @@ static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
 			 * This limit stress on page buddy alloactor.
 			 */
 			page_pool_return_page(pool, page);
+			alloc_stat_inc(pool, waive);
 			page = NULL;
 			break;
 		}
 	} while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);
 
 	/* Return last page */
-	if (likely(pool->alloc.count > 0))
+	if (likely(pool->alloc.count > 0)) {
 		page = pool->alloc.cache[--pool->alloc.count];
+		alloc_stat_inc(pool, refill);
+	}
 
 	spin_unlock(&r->consumer_lock);
 	return page;
@@ -170,6 +182,7 @@ static struct page *__page_pool_get_cached(struct page_pool *pool)
 	if (likely(pool->alloc.count)) {
 		/* Fast-path */
 		page = pool->alloc.cache[--pool->alloc.count];
+		alloc_stat_inc(pool, fast);
 	} else {
 		page = page_pool_refill_alloc_cache(pool);
 	}
@@ -241,6 +254,7 @@ static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
 		return NULL;
 	}
 
+	alloc_stat_inc(pool, slow_high_order);
 	page_pool_set_pp_info(pool, page);
 
 	/* Track how many pages are held 'in-flight' */
@@ -295,10 +309,12 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
 	}
 
 	/* Return last page */
-	if (likely(pool->alloc.count > 0))
+	if (likely(pool->alloc.count > 0)) {
 		page = pool->alloc.cache[--pool->alloc.count];
-	else
+		alloc_stat_inc(pool, slow);
+	} else {
 		page = NULL;
+	}
 
 	/* When page just alloc'ed is should/must have refcnt 1. */
 	return page;
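
Taken together, the hunks above count each successful allocation exactly once: "fast" when the page comes straight from the pool's cache, "refill" when the ptr ring refills the cache, and "slow"/"slow_high_order" when the slow path falls back to the page allocator; "empty" and "waive" record why a refill attempt produced nothing usable. A rough sketch of how those counters could be grouped when read (hypothetical helper, assuming only the fields added by this patch):

#ifdef CONFIG_PAGE_POOL_STATS
/* Hypothetical helper: split allocations served from the pool's cache
 * (fast path or after a ring refill) from those that required a call
 * into the page allocator (order-0 or high-order slow path).
 */
static void page_pool_alloc_stats_split(const struct page_pool *pool,
					u64 *from_cache, u64 *from_buddy)
{
	const struct page_pool_alloc_stats *s = &pool->alloc_stats;

	*from_cache = s->fast + s->refill;
	*from_buddy = s->slow + s->slow_high_order;
}
#endif /* CONFIG_PAGE_POOL_STATS */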