// bufferpool.go
  1. // Copyright (C) 2016 The Protocol Authors.
  2. package protocol
  3. import (
  4. "fmt"
  5. "sync"
  6. "sync/atomic"
  7. )
// BufferPool is the global pool to get buffers from. It requires BlockSizes
// to be initialised, and is therefore initialized in the same init() as
// BlockSizes.
var BufferPool bufferPool
// bufferPool is a set of sync.Pools, one per entry in BlockSizes, plus
// counters for instrumentation.
type bufferPool struct {
	puts   atomic.Int64 // buffers accepted back by Put
	skips  atomic.Int64 // buffers not pooled because their size/cap is outside the pooled range
	misses atomic.Int64 // Get calls that found no pooled buffer and had to allocate
	pools  []sync.Pool  // one pool per block size; holds *[]byte values
	hits   []atomic.Int64 // per-bucket count of Get calls served from a pool
}
  18. func newBufferPool() bufferPool {
  19. return bufferPool{
  20. pools: make([]sync.Pool, len(BlockSizes)),
  21. hits: make([]atomic.Int64, len(BlockSizes)),
  22. }
  23. }
  24. func (p *bufferPool) Get(size int) []byte {
  25. // Too big, isn't pooled
  26. if size > MaxBlockSize {
  27. p.skips.Add(1)
  28. return make([]byte, size)
  29. }
  30. // Try the fitting and all bigger pools
  31. bkt := getBucketForLen(size)
  32. for j := bkt; j < len(BlockSizes); j++ {
  33. if intf := p.pools[j].Get(); intf != nil {
  34. p.hits[j].Add(1)
  35. bs := *intf.(*[]byte)
  36. return bs[:size]
  37. }
  38. }
  39. p.misses.Add(1)
  40. // All pools are empty, must allocate. For very small slices where we
  41. // didn't have a block to reuse, just allocate a small slice instead of
  42. // a large one. We won't be able to reuse it, but avoid some overhead.
  43. if size < MinBlockSize/64 {
  44. return make([]byte, size)
  45. }
  46. return make([]byte, BlockSizes[bkt])[:size]
  47. }
  48. // Put makes the given byte slice available again in the global pool.
  49. // You must only Put() slices that were returned by Get() or Upgrade().
  50. func (p *bufferPool) Put(bs []byte) {
  51. // Don't buffer slices outside of our pool range
  52. if cap(bs) > MaxBlockSize || cap(bs) < MinBlockSize {
  53. p.skips.Add(1)
  54. return
  55. }
  56. p.puts.Add(1)
  57. bkt := putBucketForCap(cap(bs))
  58. p.pools[bkt].Put(&bs)
  59. }
  60. // Upgrade grows the buffer to the requested size, while attempting to reuse
  61. // it if possible.
  62. func (p *bufferPool) Upgrade(bs []byte, size int) []byte {
  63. if cap(bs) >= size {
  64. // Reslicing is enough, lets go!
  65. return bs[:size]
  66. }
  67. // It was too small. But it pack into the pool and try to get another
  68. // buffer.
  69. p.Put(bs)
  70. return p.Get(size)
  71. }
  72. // getBucketForLen returns the bucket where we should get a slice of a
  73. // certain length. Each bucket is guaranteed to hold slices that are
  74. // precisely the block size for that bucket, so if the block size is larger
  75. // than our size we are good.
  76. func getBucketForLen(len int) int {
  77. for i, blockSize := range BlockSizes {
  78. if len <= blockSize {
  79. return i
  80. }
  81. }
  82. panic(fmt.Sprintf("bug: tried to get impossible block len %d", len))
  83. }
  84. // putBucketForCap returns the bucket where we should put a slice of a
  85. // certain capacity. Each bucket is guaranteed to hold slices that are
  86. // precisely the block size for that bucket, so we just find the matching
  87. // one.
  88. func putBucketForCap(cap int) int {
  89. for i, blockSize := range BlockSizes {
  90. if cap == blockSize {
  91. return i
  92. }
  93. }
  94. panic(fmt.Sprintf("bug: tried to put impossible block cap %d", cap))
  95. }