cache.go 7.6 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293
  1. // Copyright (C) 2015 The Syncthing Authors.
  2. //
  3. // This Source Code Form is subject to the terms of the Mozilla Public
  4. // License, v. 2.0. If a copy of the MPL was not distributed with this file,
  5. // You can obtain one at http://mozilla.org/MPL/2.0/.
  6. package discover
  7. import (
  8. "sort"
  9. stdsync "sync"
  10. "time"
  11. "github.com/syncthing/syncthing/lib/protocol"
  12. "github.com/syncthing/syncthing/lib/sync"
  13. "github.com/thejerf/suture"
  14. )
// The CachingMux aggregates results from multiple Finders. Each Finder has
// an associated cache time and negative cache time. The cache time sets how
// long we cache and return successful lookup results, the negative cache
// time sets how long we refrain from asking about the same device ID after
// receiving a negative answer. The value of zero disables caching (positive
// or negative).
type CachingMux interface {
	FinderService

	// Add registers a new Finder with its cache timeouts and priority.
	// Lower priority values sort first among returned direct addresses.
	Add(finder Finder, cacheTime, negCacheTime time.Duration, priority int)

	// ChildErrors returns the current error state of each added Finder,
	// keyed by the Finder's String() representation.
	ChildErrors() map[string]error
}
// cachingMux is the concrete CachingMux implementation. It supervises added
// Finders that are also suture services (via the embedded Supervisor) and
// keeps one cache per Finder at matching slice indices.
type cachingMux struct {
	*suture.Supervisor
	finders []cachedFinder // guarded by mut
	caches  []*cache       // caches[i] belongs to finders[i]; guarded by mut
	mut     sync.RWMutex
}
// A cachedFinder is a Finder with associated cache timeouts.
type cachedFinder struct {
	Finder
	cacheTime    time.Duration // how long positive lookup results remain valid
	negCacheTime time.Duration // how long a negative result suppresses re-lookup
	priority     int           // sort key for returned addresses (ascending)
}
// A prioritizedAddress is what we use to sort addresses returned from
// different sources with different priorities.
type prioritizedAddress struct {
	priority int    // the originating finder's priority
	addr     string // the direct address itself
}
// An error may implement cachedError, in which case it will be interrogated
// to see how long we should cache the error. This overrides the default
// negative cache time.
type cachedError interface {
	// CacheFor returns how long the error should be negatively cached.
	CacheFor() time.Duration
}
  51. func NewCachingMux() CachingMux {
  52. return &cachingMux{
  53. Supervisor: suture.NewSimple("discover.cachingMux"),
  54. mut: sync.NewRWMutex(),
  55. }
  56. }
  57. // Add registers a new Finder, with associated cache timeouts.
  58. func (m *cachingMux) Add(finder Finder, cacheTime, negCacheTime time.Duration, priority int) {
  59. m.mut.Lock()
  60. m.finders = append(m.finders, cachedFinder{finder, cacheTime, negCacheTime, priority})
  61. m.caches = append(m.caches, newCache())
  62. m.mut.Unlock()
  63. if service, ok := finder.(suture.Service); ok {
  64. m.Supervisor.Add(service)
  65. }
  66. }
// Lookup attempts to resolve the device ID using any of the added Finders,
// while obeying the cache settings.
//
// Direct addresses from all finders are deduplicated and ordered by finder
// priority; relays are deduplicated and sorted by URL. The returned error is
// always nil: an individual finder failure is recorded as a negative cache
// entry rather than propagated to the caller.
func (m *cachingMux) Lookup(deviceID protocol.DeviceID) (direct []string, relays []Relay, err error) {
	var pdirect []prioritizedAddress

	m.mut.RLock()
	for i, finder := range m.finders {
		if cacheEntry, ok := m.caches[i].Get(deviceID); ok {
			// We have a cache entry. Let's see what it says.

			if cacheEntry.found && time.Since(cacheEntry.when) < finder.cacheTime {
				// It's a positive, valid entry. Use it.
				l.Debugln("cached discovery entry for", deviceID, "at", finder)
				l.Debugln(" cache:", cacheEntry)
				for _, addr := range cacheEntry.Direct {
					pdirect = append(pdirect, prioritizedAddress{finder.priority, addr})
				}
				relays = append(relays, cacheEntry.Relays...)
				continue
			}

			// A negative entry is valid either until its explicit expiry
			// time (set via cachedError.CacheFor below) or for the finder's
			// default negative cache time since the entry was recorded.
			valid := time.Now().Before(cacheEntry.validUntil) || time.Since(cacheEntry.when) < finder.negCacheTime
			if !cacheEntry.found && valid {
				// It's a negative, valid entry. We should not make another
				// attempt right now.
				l.Debugln("negative cache entry for", deviceID, "at", finder, "valid until", cacheEntry.when.Add(finder.negCacheTime), "or", cacheEntry.validUntil)
				continue
			}

			// It's expired. Ignore and continue.
		}

		// Perform the actual lookup and cache the result.
		if td, tr, err := finder.Lookup(deviceID); err == nil {
			l.Debugln("lookup for", deviceID, "at", finder)
			l.Debugln(" direct:", td)
			l.Debugln(" relays:", tr)
			for _, addr := range td {
				pdirect = append(pdirect, prioritizedAddress{finder.priority, addr})
			}
			relays = append(relays, tr...)
			// An empty-but-successful answer is cached as a negative entry
			// (found is false when neither addresses nor relays came back).
			m.caches[i].Set(deviceID, CacheEntry{
				Direct: td,
				Relays: tr,
				when:   time.Now(),
				found:  len(td)+len(tr) > 0,
			})
		} else {
			// Lookup returned error, add a negative cache entry.
			entry := CacheEntry{
				when:  time.Now(),
				found: false,
			}

			// The error may carry its own cache duration, overriding the
			// finder's default negative cache time. Note that err here
			// deliberately shadows the outer named return.
			if err, ok := err.(cachedError); ok {
				entry.validUntil = time.Now().Add(err.CacheFor())
			}

			m.caches[i].Set(deviceID, entry)
		}
	}
	m.mut.RUnlock()

	direct = uniqueSortedAddrs(pdirect)
	relays = uniqueSortedRelays(relays)

	l.Debugln("lookup results for", deviceID)
	l.Debugln(" direct: ", direct)
	l.Debugln(" relays: ", relays)

	return direct, relays, nil
}
  129. func (m *cachingMux) String() string {
  130. return "discovery cache"
  131. }
  132. func (m *cachingMux) Error() error {
  133. return nil
  134. }
  135. func (m *cachingMux) ChildErrors() map[string]error {
  136. children := make(map[string]error, len(m.finders))
  137. m.mut.RLock()
  138. for _, f := range m.finders {
  139. children[f.String()] = f.Error()
  140. }
  141. m.mut.RUnlock()
  142. return children
  143. }
  144. func (m *cachingMux) Cache() map[protocol.DeviceID]CacheEntry {
  145. // Res will be the "total" cache, i.e. the union of our cache and all our
  146. // children's caches.
  147. res := make(map[protocol.DeviceID]CacheEntry)
  148. m.mut.RLock()
  149. for i := range m.finders {
  150. // Each finder[i] has a corresponding cache at cache[i]. Go through it
  151. // and populate the total, if it's newer than what's already in there.
  152. // We skip any negative cache entries.
  153. for k, v := range m.caches[i].Cache() {
  154. if v.found && v.when.After(res[k].when) {
  155. res[k] = v
  156. }
  157. }
  158. // Then ask the finder itself for it's cache and do the same. If this
  159. // finder is a global discovery client, it will have no cache. If it's
  160. // a local discovery client, this will be it's current state.
  161. for k, v := range m.finders[i].Cache() {
  162. if v.found && v.when.After(res[k].when) {
  163. res[k] = v
  164. }
  165. }
  166. }
  167. m.mut.RUnlock()
  168. return res
  169. }
// A cache can be embedded wherever useful. It is a concurrency-safe map
// from device ID to the most recently stored CacheEntry.
type cache struct {
	entries map[protocol.DeviceID]CacheEntry // guarded by mut
	mut     stdsync.Mutex
}
  175. func newCache() *cache {
  176. return &cache{
  177. entries: make(map[protocol.DeviceID]CacheEntry),
  178. }
  179. }
  180. func (c *cache) Set(id protocol.DeviceID, ce CacheEntry) {
  181. c.mut.Lock()
  182. c.entries[id] = ce
  183. c.mut.Unlock()
  184. }
  185. func (c *cache) Get(id protocol.DeviceID) (CacheEntry, bool) {
  186. c.mut.Lock()
  187. ce, ok := c.entries[id]
  188. c.mut.Unlock()
  189. return ce, ok
  190. }
  191. func (c *cache) Cache() map[protocol.DeviceID]CacheEntry {
  192. c.mut.Lock()
  193. m := make(map[protocol.DeviceID]CacheEntry, len(c.entries))
  194. for k, v := range c.entries {
  195. m[k] = v
  196. }
  197. c.mut.Unlock()
  198. return m
  199. }
  200. func uniqueSortedAddrs(ss []prioritizedAddress) []string {
  201. // We sort the addresses by priority, then filter them based on seen
  202. // (first time seen is the on kept, so we retain priority).
  203. sort.Sort(prioritizedAddressList(ss))
  204. filtered := make([]string, 0, len(ss))
  205. seen := make(map[string]struct{}, len(ss))
  206. for _, s := range ss {
  207. if _, ok := seen[s.addr]; !ok {
  208. filtered = append(filtered, s.addr)
  209. seen[s.addr] = struct{}{}
  210. }
  211. }
  212. return filtered
  213. }
  214. func uniqueSortedRelays(rs []Relay) []Relay {
  215. m := make(map[string]Relay, len(rs))
  216. for _, r := range rs {
  217. m[r.URL] = r
  218. }
  219. var ur = make([]Relay, 0, len(m))
  220. for _, r := range m {
  221. ur = append(ur, r)
  222. }
  223. sort.Sort(relayList(ur))
  224. return ur
  225. }
  226. type relayList []Relay
  227. func (l relayList) Len() int {
  228. return len(l)
  229. }
  230. func (l relayList) Swap(a, b int) {
  231. l[a], l[b] = l[b], l[a]
  232. }
  233. func (l relayList) Less(a, b int) bool {
  234. return l[a].URL < l[b].URL
  235. }
  236. type prioritizedAddressList []prioritizedAddress
  237. func (l prioritizedAddressList) Len() int {
  238. return len(l)
  239. }
  240. func (l prioritizedAddressList) Swap(a, b int) {
  241. l[a], l[b] = l[b], l[a]
  242. }
  243. func (l prioritizedAddressList) Less(a, b int) bool {
  244. if l[a].priority != l[b].priority {
  245. return l[a].priority < l[b].priority
  246. }
  247. return l[a].addr < l[b].addr
  248. }