/*
 * ZeroTier One - Network Virtualization Everywhere
 * Copyright (C) 2011-2015 ZeroTier, Inc.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * --
 *
 * ZeroTier may be used and distributed under the terms of the GPLv3, which
 * are available at: http://www.gnu.org/licenses/gpl-3.0.html
 *
 * If you would like to embed ZeroTier into a commercial application or
 * redistribute it in a modified binary form, please contact ZeroTier Networks
 * LLC. Start here: http://www.zerotier.com/
 */

#include "Constants.hpp"

#include <algorithm> // for std::find() (may also arrive transitively via Topology.hpp)

#include "Topology.hpp"
#include "RuntimeEnvironment.hpp"
#include "Node.hpp"
#include "Network.hpp"
#include "NetworkConfig.hpp"
#include "Buffer.hpp"

namespace ZeroTier {
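
// ZT_DEFAULT_WORLD is the compiled-in default World definition: the set of root servers
// this build trusts when no newer World has been cached. It is an opaque serialized blob
// consumed by World::deserialize() in the constructor below; it is generated offline and
// is not meant to be edited by hand. (Description inferred from how the blob is used in
// this file.)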
#define ZT_DEFAULT_WORLD_LENGTH 494
static const unsigned char ZT_DEFAULT_WORLD[ZT_DEFAULT_WORLD_LENGTH] = {0x01,0x00,0x00,0x00,0x00,0x08,0xea,0xc9,0x0a,0x00,0x00,0x01,0x4f,0xdf,0xbf,0xfc,0xbb,0x6c,0x7e,0x15,0x67,0x85,0x1b,0xb4,0x65,0x04,0x01,0xaf,0x56,0xbf,0xe7,0x63,0x9d,0x77,0xef,0xa4,0x1e,0x61,0x53,0x88,0xcb,0x8d,0x78,0xe5,0x47,0x38,0x98,0x5a,0x6c,0x8a,0xdd,0xe6,0x9c,0x65,0xdf,0x1a,0x80,0x63,0xce,0x2e,0x4d,0x48,0x24,0x3d,0x68,0x87,0x96,0x13,0x89,0xba,0x25,0x6f,0xc9,0xb0,0x9f,0x20,0xc5,0x4c,0x51,0x7b,0x30,0xb7,0x5f,0xba,0xca,0xa4,0xc5,0x48,0xa3,0x15,0xab,0x2f,0x1d,0x64,0xe8,0x04,0x42,0xb3,0x1c,0x51,0x8b,0x2a,0x04,0x01,0xf8,0xe1,0x81,0xaf,0x60,0x2f,0x70,0x3e,0xcd,0x0b,0x21,0x38,0x19,0x62,0x02,0xbd,0x0e,0x33,0x1d,0x0a,0x7b,0xf1,0xec,0xad,0xef,0x54,0xb3,0x7b,0x17,0x84,0xaa,0xda,0x0a,0x85,0x5d,0x0b,0x1c,0x05,0x83,0xb9,0x0e,0x3e,0xe3,0xb4,0xd1,0x8b,0x5b,0x64,0xf7,0xcf,0xe1,0xff,0x5d,0xc2,0x2a,0xcf,0x60,0x7b,0x09,0xb4,0xa3,0x86,0x3c,0x5a,0x7e,0x31,0xa0,0xc7,0xb4,0x86,0xe3,0x41,0x33,0x04,0x7e,0x19,0x87,0x6a,0xba,0x00,0x2a,0x6e,0x2b,0x23,0x18,0x93,0x0f,0x60,0xeb,0x09,0x7f,0x70,0xd0,0xf4,0xb0,0x28,0xb2,0xcd,0x6d,0x3d,0x0c,0x63,0xc0,0x14,0xb9,0x03,0x9f,0xf3,0x53,0x90,0xe4,0x11,0x81,0xf2,0x16,0xfb,0x2e,0x6f,0xa8,0xd9,0x5c,0x1e,0xe9,0x66,0x71,0x56,0x41,0x19,0x05,0xc3,0xdc,0xcf,0xea,0x78,0xd8,0xc6,0xdf,0xaf,0xba,0x68,0x81,0x70,0xb3,0xfa,0x00,0x01,0x04,0xc6,0xc7,0x61,0xdc,0x27,0x09,0x88,0x41,0x40,0x8a,0x2e,0x00,0xbb,0x1d,0x31,0xf2,0xc3,0x23,0xe2,0x64,0xe9,0xe6,0x41,0x72,0xc1,0xa7,0x4f,0x77,0x89,0x95,0x55,0xed,0x10,0x75,0x1c,0xd5,0x6e,0x86,0x40,0x5c,0xde,0x11,0x8d,0x02,0xdf,0xfe,0x55,0x5d,0x46,0x2c,0xcf,0x6a,0x85,0xb5,0x63,0x1c,0x12,0x35,0x0c,0x8d,0x5d,0xc4,0x09,0xba,0x10,0xb9,0x02,0x5d,0x0f,0x44,0x5c,0xf4,0x49,0xd9,0x2b,0x1c,0x00,0x01,0x04,0x6b,0xbf,0x2e,0xd2,0x27,0x09,0x8a,0xcf,0x05,0x9f,0xe3,0x00,0x48,0x2f,0x6e,0xe5,0xdf,0xe9,0x02,0x31,0x9b,0x41,0x9d,0xe5,0xbd,0xc7,0x65,0x20,0x9c,0x0e,0xcd,0xa3,0x8c,0x4d,0x6e,0x4f,0xcf,0x0d,0x33,0x65,0x83,0x98,0xb4,0x52,0x7d,0xcd,0x22,0xf9,0x31,0x12,0xfb,0x9b,0xef,0xd0,0x2f,0xd7,0x8b,0xf7,0x26,0x1b,0x33,0x3f,0xc1,0x05,0xd1,0x92,0xa6,0x23,0xca,0x9e,0x50,0xfc,0x60,0xb3,0x74,0xa5,0x00,0x01,0x04,0xa2,0xf3,0x4d,0x6f,0x27,0x09,0x9d,0x21,0x90,0x39,0xf3,0x00,0x01,0xf0,0x92,0x2a,0x98,0xe3,0xb3,0x4e,0xbc,0xbf,0xf3,0x33,0x26,0x9d,0xc2,0x65,0xd7,0xa0,0x20,0xaa,0xb6,0x9d,0x72,0xbe,0x4d,0x4a,0xcc,0x9c,0x8c,0x92,0x94,0x78,0x57,0x71,0x25,0x6c,0xd1,0xd9,0x42,0xa9,0x0d,0x1b,0xd1,0xd2,0xdc,0xa3,0xea,0x84,0xef,0x7d,0x85,0xaf,0xe6,0x61,0x1f,0xb4,0x3f,0xf0,0xb7,0x41,0x26,0xd9,0x0a,0x6e,0x00,0x01,0x04,0x80,0xc7,0xc5,0xd9,0x27,0x09};
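
// On construction the topology reloads any peers saved by the previous run ("peers.save"),
// then selects the World to use: the cached "world" object from the data store if it is
// still current, otherwise the compiled-in ZT_DEFAULT_WORLD above.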
Topology::Topology(const RuntimeEnvironment *renv) :
	RR(renv),
	_amRoot(false)
{
	std::string alls(RR->node->dataStoreGet("peers.save"));
	const uint8_t *all = reinterpret_cast<const uint8_t *>(alls.data());
	RR->node->dataStoreDelete("peers.save");

	unsigned int ptr = 0;
	while ((ptr + 4) < alls.size()) {
		try {
			const unsigned int reclen = ( // each serialized Peer record is prefixed by a 32-bit big-endian record length
				((((unsigned int)all[ptr]) & 0xff) << 24) |
				((((unsigned int)all[ptr + 1]) & 0xff) << 16) |
				((((unsigned int)all[ptr + 2]) & 0xff) << 8) |
				(((unsigned int)all[ptr + 3]) & 0xff)
			);
			unsigned int pos = 0;
			SharedPtr<Peer> p(Peer::deserializeNew(RR->identity,Buffer<ZT_PEER_SUGGESTED_SERIALIZATION_BUFFER_SIZE>(all + ptr,reclen + 4),pos));
			ptr += pos;
			if (!p)
				break; // stop on invalid records
			if (p->address() != RR->identity.address())
				_peers[p->address()] = p;
		} catch ( ... ) {
			break; // stop on invalid records
		}
	}

	clean(RR->node->now()); // drop any reloaded peers that are already stale

	// Load the cached World (if present and parseable)...
	std::string dsWorld(RR->node->dataStoreGet("world"));
	World cachedWorld;
	try {
		Buffer<ZT_WORLD_MAX_SERIALIZED_LENGTH> dswtmp(dsWorld.data(),dsWorld.length());
		cachedWorld.deserialize(dswtmp,0);
	} catch ( ... ) {
		cachedWorld = World(); // clear if cached world is invalid
	}

	// ...and the compiled-in default World.
	World defaultWorld;
	{
		Buffer<ZT_DEFAULT_WORLD_LENGTH> wtmp(ZT_DEFAULT_WORLD,ZT_DEFAULT_WORLD_LENGTH);
		defaultWorld.deserialize(wtmp,0); // throws on error, which would indicate a bad static variable up top
	}

	// Use whichever World is newer; if the compiled-in default wins, drop the stale cache.
	if (cachedWorld.shouldBeReplacedBy(defaultWorld,false)) {
		_setWorld(defaultWorld);
		if (dsWorld.length() > 0)
			RR->node->dataStoreDelete("world");
	} else _setWorld(cachedWorld);
}
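
// The "peers.save" blob written by the destructor below (and parsed back by the constructor
// above) is a simple concatenation of records:
//
//   [4-byte big-endian length of the serialized Peer] [serialized Peer data]
//
// The destructor appends each serialized Peer verbatim, so the length prefix the
// constructor's loop expects is evidently written by Peer::serialize() itself. (This is an
// inference from how the two ends of the round trip behave in this file, not a separate
// format specification.)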
Topology::~Topology()
{
	// Serialize all non-root peers to "peers.save" so they can be reloaded on restart.
	Buffer<ZT_PEER_SUGGESTED_SERIALIZATION_BUFFER_SIZE> pbuf;
	std::string all;

	Address *a = (Address *)0;
	SharedPtr<Peer> *p = (SharedPtr<Peer> *)0;
	Hashtable< Address,SharedPtr<Peer> >::Iterator i(_peers);
	while (i.next(a,p)) {
		if (std::find(_rootAddresses.begin(),_rootAddresses.end(),*a) == _rootAddresses.end()) {
			pbuf.clear();
			try {
				(*p)->serialize(pbuf);
				try {
					all.append((const char *)pbuf.data(),pbuf.size());
				} catch ( ... ) {
					return; // out of memory? give up without writing the save file
				}
			} catch ( ... ) {} // peer too big? shouldn't happen, but if it does just skip that peer
		}
	}

	RR->node->dataStorePut("peers.save",all,true);
}
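
// Hypothetical caller sketch (illustrative only, not part of this file): after a peer's
// Identity has been received and verified, it would typically be wrapped in a Peer and
// registered here. If a Peer with the same address is already tracked, addPeer() returns
// the existing instance rather than the new one.
//
//   SharedPtr<Peer> candidate(new Peer(RR->identity,verifiedIdentity)); // `verifiedIdentity` is a placeholder name
//   candidate = RR->topology->addPeer(candidate); // may return a previously tracked Peer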
SharedPtr<Peer> Topology::addPeer(const SharedPtr<Peer> &peer)
{
	if (peer->address() == RR->identity.address()) {
		TRACE("BUG: addPeer() caught and ignored attempt to add peer for self");
		throw std::logic_error("cannot add peer for self");
	}

	SharedPtr<Peer> np;
	{
		Mutex::Lock _l(_lock);
		SharedPtr<Peer> &hp = _peers[peer->address()];
		if (!hp)
			hp = peer;
		np = hp; // if a Peer with this address was already tracked, keep and return the existing one
	}

	np->use(RR->node->now());
	saveIdentity(np->identity());

	return np;
}
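
// Hypothetical caller sketch (illustrative only): look up a peer by its 40-bit ZeroTier
// address. A null SharedPtr means the peer is neither in memory nor recoverable from the
// on-disk identity cache.
//
//   SharedPtr<Peer> p(RR->topology->getPeer(destinationAddress)); // `destinationAddress` is a placeholder
//   if (p) {
//     // peer is known; its last-used time has already been updated by getPeer()
//   }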
SharedPtr<Peer> Topology::getPeer(const Address &zta)
{
	if (zta == RR->identity.address()) {
		TRACE("BUG: ignored attempt to getPeer() for self, returned NULL");
		return SharedPtr<Peer>();
	}

	Mutex::Lock _l(_lock);

	SharedPtr<Peer> &ap = _peers[zta];
	if (ap) {
		ap->use(RR->node->now());
		return ap;
	}

	// Not in memory -- try to recreate the Peer from its cached identity (if any) on disk.
	Identity id(_getIdentity(zta));
	if (id) {
		try {
			ap = SharedPtr<Peer>(new Peer(RR->identity,id));
			ap->use(RR->node->now());
			return ap;
		} catch ( ... ) {} // invalid identity?
	}

	// If we get here it means we read an invalid cached identity or had some other error
	_peers.erase(zta);
	return SharedPtr<Peer>();
}

Identity Topology::getIdentity(const Address &zta)
{
	{
		Mutex::Lock _l(_lock);
		// Use get() rather than operator[] so a lookup miss does not insert an empty entry.
		SharedPtr<Peer> *ap = _peers.get(zta);
		if ((ap)&&(*ap))
			return (*ap)->identity();
	}
	return _getIdentity(zta);
}
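
// Identities are cached in the data store under "iddb.d/" keyed by the peer's 10-hex-digit
// (40-bit) ZeroTier address, as produced by the "%.10llx" format below. For example, an
// address whose numeric value is 0x0123456789 would be stored at "iddb.d/0123456789".
// (The example address is illustrative only.)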
void Topology::saveIdentity(const Identity &id)
{
	if (id) {
		char p[128];
		Utils::snprintf(p,sizeof(p),"iddb.d/%.10llx",(unsigned long long)id.address().toInt());
		RR->node->dataStorePut(p,id.toString(false),false);
	}
}
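
// Hypothetical caller sketch (illustrative only): ask for the current best root/relay,
// optionally excluding roots that have already been tried. Passing strictAvoid=false lets
// the call fall back to an avoided root rather than returning nothing.
//
//   Address alreadyTried[1]; // placeholder list of roots to skip
//   alreadyTried[0] = someRootAddress;
//   SharedPtr<Peer> relay(RR->topology->getBestRoot(alreadyTried,1,false));
//   if (!relay) {
//     // no usable root was found (e.g. none currently has an active direct path)
//   }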
SharedPtr<Peer> Topology::getBestRoot(const Address *avoid,unsigned int avoidCount,bool strictAvoid)
{
	SharedPtr<Peer> bestRoot;
	const uint64_t now = RR->node->now();
	Mutex::Lock _l(_lock);

	if (_amRoot) {
		/* If I am a root server, the "best" root server is the one whose address
		 * is numerically greater than mine (with wrap at top of list). This
		 * causes packets searching for a route to pretty much literally
		 * circumnavigate the globe rather than bouncing between just two. */

		if (_rootAddresses.size() > 1) { // gotta be one other than me for this to work
			std::vector<Address>::const_iterator sna(std::find(_rootAddresses.begin(),_rootAddresses.end(),RR->identity.address()));
			if (sna != _rootAddresses.end()) { // sanity check -- _amRoot should've been false in this case
				// Scan at most one full cycle of the root list so this cannot spin forever
				// if no other root currently has an active direct path.
				for(unsigned long k=0;k<(unsigned long)_rootAddresses.size();++k) {
					if (++sna == _rootAddresses.end())
						sna = _rootAddresses.begin(); // wrap around at end
					if (*sna != RR->identity.address()) { // pick one other than us -- starting from me+1 in sorted set order
						SharedPtr<Peer> *p = _peers.get(*sna);
						if ((p)&&((*p)->hasActiveDirectPath(now))) {
							bestRoot = *p;
							break;
						}
					}
				}
			}
		}
	} else {
		/* If I am not a root server, the best root server is the active one with
		 * the lowest latency. */

		unsigned int l,bestLatency = 65536;
		uint64_t lds,ldr;

		// First look for a best root by comparing latencies, but exclude
		// root servers that have not responded to direct messages in order to
		// try to exclude any that are dead or unreachable.
		for(std::vector< SharedPtr<Peer> >::const_iterator sn(_rootPeers.begin());sn!=_rootPeers.end();) {
			// Skip explicitly avoided relays
			for(unsigned int i=0;i<avoidCount;++i) {
				if (avoid[i] == (*sn)->address())
					goto keep_searching_for_roots;
			}

			// Skip possibly comatose or unreachable relays
			lds = (*sn)->lastDirectSend();
			ldr = (*sn)->lastDirectReceive();
			if ((lds)&&(lds > ldr)&&((lds - ldr) > ZT_PEER_RELAY_CONVERSATION_LATENCY_THRESHOLD))
				goto keep_searching_for_roots;

			if ((*sn)->hasActiveDirectPath(now)) {
				l = (*sn)->latency();
				if (bestRoot) {
					if ((l)&&(l < bestLatency)) {
						bestLatency = l;
						bestRoot = *sn;
					}
				} else {
					if (l)
						bestLatency = l;
					bestRoot = *sn;
				}
			}

keep_searching_for_roots:
			++sn;
		}

		if (bestRoot) {
			bestRoot->use(now);
			return bestRoot;
		} else if (strictAvoid)
			return SharedPtr<Peer>();

		// If we have nothing from above, just pick one without avoidance criteria.
		for(std::vector< SharedPtr<Peer> >::const_iterator sn=_rootPeers.begin();sn!=_rootPeers.end();++sn) {
			if ((*sn)->hasActiveDirectPath(now)) {
				unsigned int l = (*sn)->latency();
				if (bestRoot) {
					if ((l)&&(l < bestLatency)) {
						bestLatency = l;
						bestRoot = *sn;
					}
				} else {
					if (l)
						bestLatency = l;
					bestRoot = *sn;
				}
			}
		}
	}

	if (bestRoot)
		bestRoot->use(now);
	return bestRoot;
}

bool Topology::isUpstream(const Identity &id) const
{
	// An "upstream" is either a root server or a relay designated by one of our networks.
	if (isRoot(id))
		return true;

	std::vector< SharedPtr<Network> > nws(RR->node->allNetworks());
	for(std::vector< SharedPtr<Network> >::const_iterator nw(nws.begin());nw!=nws.end();++nw) {
		SharedPtr<NetworkConfig> nc((*nw)->config2());
		if (nc) {
			for(std::vector< std::pair<Address,InetAddress> >::const_iterator r(nc->relays().begin());r!=nc->relays().end();++r) {
				if (r->first == id.address())
					return true;
			}
		}
	}

	return false;
}
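
// Hypothetical caller sketch (illustrative only): a World received over the wire would be
// deserialized and offered here. shouldBeReplacedBy() decides whether the candidate
// actually supersedes the current World; accepted updates are also cached to the "world"
// data store object so they survive restarts.
//
//   World candidate;
//   candidate.deserialize(packetBuffer,payloadOffset); // `packetBuffer`/`payloadOffset` are placeholder names
//   if (RR->topology->worldUpdateIfValid(candidate)) {
//     // the node is now using the newer World
//   }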
bool Topology::worldUpdateIfValid(const World &newWorld)
{
	Mutex::Lock _l(_lock);
	if (_world.shouldBeReplacedBy(newWorld,true)) {
		_setWorld(newWorld);
		try {
			Buffer<ZT_WORLD_MAX_SERIALIZED_LENGTH> dswtmp;
			newWorld.serialize(dswtmp,false);
			RR->node->dataStorePut("world",dswtmp.data(),dswtmp.size(),false);
		} catch ( ... ) {
			RR->node->dataStoreDelete("world");
		}
		return true;
	}
	return false;
}
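
// clean() is housekeeping: it drops peers that have not been used for at least
// ZT_PEER_IN_MEMORY_EXPIRATION (root servers are never dropped) and asks each surviving
// Peer to clean its own state. The constructor invokes it once after reloading saved
// peers; it is presumably also intended to be called periodically by the node's background
// maintenance (not shown in this file).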
void Topology::clean(uint64_t now)
{
	Mutex::Lock _l(_lock);
	Hashtable< Address,SharedPtr<Peer> >::Iterator i(_peers);
	Address *a = (Address *)0;
	SharedPtr<Peer> *p = (SharedPtr<Peer> *)0;
	while (i.next(a,p)) {
		if (((now - (*p)->lastUsed()) >= ZT_PEER_IN_MEMORY_EXPIRATION)&&(std::find(_rootAddresses.begin(),_rootAddresses.end(),*a) == _rootAddresses.end())) {
			_peers.erase(*a);
		} else {
			(*p)->clean(RR,now);
		}
	}
}

Identity Topology::_getIdentity(const Address &zta)
{
	char p[128];
	Utils::snprintf(p,sizeof(p),"iddb.d/%.10llx",(unsigned long long)zta.toInt());
	std::string ids(RR->node->dataStoreGet(p));
	if (ids.length() > 0) {
		try {
			return Identity(ids);
		} catch ( ... ) {} // ignore invalid IDs
	}
	return Identity();
}

void Topology::_setWorld(const World &newWorld)
{
	// assumed _lock is locked (or in constructor)
	_world = newWorld;
	_amRoot = false;
	_rootAddresses.clear();
	_rootPeers.clear();
	for(std::vector<World::Root>::const_iterator r(_world.roots().begin());r!=_world.roots().end();++r) {
		_rootAddresses.push_back(r->identity.address());
		if (r->identity.address() == RR->identity.address()) {
			_amRoot = true;
		} else {
			SharedPtr<Peer> *rp = _peers.get(r->identity.address());
			if (rp) {
				_rootPeers.push_back(*rp);
			} else {
				SharedPtr<Peer> newrp(new Peer(RR->identity,r->identity));
				_peers.set(r->identity.address(),newrp);
				_rootPeers.push_back(newrp);
			}
		}
	}
}

} // namespace ZeroTier