Peer.cpp 36 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128
  1. /*
  2. * ZeroTier One - Network Virtualization Everywhere
  3. * Copyright (C) 2011-2019 ZeroTier, Inc. https://www.zerotier.com/
  4. *
  5. * This program is free software: you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License as published by
  7. * the Free Software Foundation, either version 3 of the License, or
  8. * (at your option) any later version.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  17. *
  18. * --
  19. *
  20. * You can be released from the requirements of the license by purchasing
  21. * a commercial license. Buying such a license is mandatory as soon as you
  22. * develop commercial closed-source software that incorporates or links
  23. * directly against ZeroTier software without disclosing the source code
  24. * of your own application.
  25. */
  26. #include "../version.h"
  27. #include "Constants.hpp"
  28. #include "Peer.hpp"
  29. #include "Node.hpp"
  30. #include "Switch.hpp"
  31. #include "Network.hpp"
  32. #include "SelfAwareness.hpp"
  33. #include "Packet.hpp"
  34. #include "Trace.hpp"
  35. #include "InetAddress.hpp"
  36. #include "RingBuffer.hpp"
  37. #include "Utils.hpp"
  38. #include "../include/ZeroTierDebug.h"
  39. namespace ZeroTier {
  40. static unsigned char s_freeRandomByteCounter = 0;
// Construct a peer record for "peerIdentity" as seen from our own "myIdentity".
// All receive/send timestamps and cutoff counters start at zero. _freeRandomByte
// mixes this object's address with a process-wide counter to seed cheap per-peer
// entropy used later by the multipath path-selection logic.
// Throws ZT_EXCEPTION_INVALID_ARGUMENT if key agreement between the two
// identities fails (the peer is unusable without a shared secret).
Peer::Peer(const RuntimeEnvironment *renv,const Identity &myIdentity,const Identity &peerIdentity) :
	RR(renv),
	_lastReceive(0),
	_lastNontrivialReceive(0),
	_lastTriedMemorizedPath(0),
	_lastDirectPathPushSent(0),
	_lastDirectPathPushReceive(0),
	_lastCredentialRequestSent(0),
	_lastWhoisRequestReceived(0),
	_lastEchoRequestReceived(0),
	_lastCredentialsReceived(0),
	_lastTrustEstablishedPacketReceived(0),
	_lastSentFullHello(0),
	_lastACKWindowReset(0),
	_lastQoSWindowReset(0),
	_lastMultipathCompatibilityCheck(0),
	_freeRandomByte((unsigned char)((uintptr_t)this >> 4) ^ ++s_freeRandomByteCounter),
	_uniqueAlivePathCount(0),
	_localMultipathSupported(false),
	_remoteMultipathSupported(false),
	_canUseMultipath(false),
	_vProto(0),
	_vMajor(0),
	_vMinor(0),
	_vRevision(0),
	_id(peerIdentity),
	_directPathPushCutoffCount(0),
	_credentialsCutoffCount(0),
	_linkIsBalanced(false),
	_linkIsRedundant(false),
	_remotePeerMultipathEnabled(false),
	_lastAggregateStatsReport(0),
	_lastAggregateAllocation(0),
	_virtualPathCount(0),
	_roundRobinPathAssignmentIdx(0),
	_pathAssignmentIdx(0)
{
	// Derive the shared symmetric key via identity agreement (C25519/whatever
	// the Identity implements); failure means no secure channel is possible.
	if (!myIdentity.agree(peerIdentity,_key,ZT_PEER_SECRET_KEY_LENGTH))
		throw ZT_EXCEPTION_INVALID_ARGUMENT;
}
// Called for every packet received from this peer.
//
// Responsibilities, in order:
//   1. Update receive timestamps (all packets; "nontrivial" only for data verbs).
//   2. Record per-path statistics and service multipath QoS bookkeeping.
//   3. For direct (hops == 0) packets, learn/replace/confirm physical paths.
//   4. Periodically push our own direct-path list to trusted peers.
//
// tPtr           - thread pointer passed through to callbacks
// path           - path the packet arrived on
// hops           - ZeroTier hop count (0 == direct)
// packetId       - packet ID
// payloadLength  - payload size in bytes
// verb           - packet verb
// inRePacketId   - packet ID this is in response to, if any (unused here)
// inReVerb       - verb this is in response to, if any (unused here)
// trustEstablished - true if this packet proved/used a trust relationship
// networkId      - network ID if this pertains to a network, or 0
void Peer::received(
	void *tPtr,
	const SharedPtr<Path> &path,
	const unsigned int hops,
	const uint64_t packetId,
	const unsigned int payloadLength,
	const Packet::Verb verb,
	const uint64_t inRePacketId,
	const Packet::Verb inReVerb,
	const bool trustEstablished,
	const uint64_t networkId)
{
	const int64_t now = RR->node->now();
	_lastReceive = now;
	// Only actual data/config traffic counts as "nontrivial"; control chatter
	// (HELLO, ECHO, etc.) does not refresh _lastNontrivialReceive.
	switch (verb) {
		case Packet::VERB_FRAME:
		case Packet::VERB_EXT_FRAME:
		case Packet::VERB_NETWORK_CONFIG_REQUEST:
		case Packet::VERB_NETWORK_CONFIG:
		case Packet::VERB_MULTICAST_FRAME:
			_lastNontrivialReceive = now;
			break;
		default:
			break;
	}
	if (trustEstablished) {
		_lastTrustEstablishedPacketReceived = now;
		path->trustedPacketReceived(now);
	}
	{
		Mutex::Lock _l(_paths_m);
		// Per-path statistics; when multipath is active also send a QoS
		// measurement if this path's QoS timer is due and refresh background
		// measurements on every known path.
		recordIncomingPacket(tPtr, path, packetId, payloadLength, verb, now);
		if (_canUseMultipath) {
			if (path->needsToSendQoS(now)) {
				sendQOS_MEASUREMENT(tPtr, path, path->localSocket(), path->address(), now);
			}
			for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
				if (_paths[i].p) {
					_paths[i].p->processBackgroundPathMeasurements(now);
				}
			}
		}
	}
	if (hops == 0) {
		// If this is a direct packet (no hops), update existing paths or learn new ones
		bool havePath = false;
		{
			Mutex::Lock _l(_paths_m);
			// _paths is packed: entries are contiguous from index 0, so the
			// first empty slot terminates the scan.
			for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
				if (_paths[i].p) {
					if (_paths[i].p == path) {
						_paths[i].lr = now;
						havePath = true;
						break;
					}
				} else break;
			}
		}
		bool attemptToContact = false;
		if ((!havePath)&&(RR->node->shouldUsePathForZeroTierTraffic(tPtr,_id.address(),path->localSocket(),path->address()))) {
			Mutex::Lock _l(_paths_m);
			// Paths are redundant if they duplicate an alive path to the same IP or
			// with the same local socket and address family.
			bool redundant = false;
			unsigned int replacePath = ZT_MAX_PEER_NETWORK_PATHS;
			for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
				if (_paths[i].p) {
					if ( (_paths[i].p->alive(now)) && ( ((_paths[i].p->localSocket() == path->localSocket())&&(_paths[i].p->address().ss_family == path->address().ss_family)) || (_paths[i].p->address().ipsEqual2(path->address())) ) ) {
						redundant = true;
						break;
					}
					// If the path is the same address and port, simply assume this is a replacement
					if ( (_paths[i].p->address().ipsEqual2(path->address()))) {
						replacePath = i;
						break;
					}
				} else break;
			}
			// If the path isn't a duplicate of the same localSocket AND we haven't already determined a replacePath,
			// then find the worst path and replace it.
			if (!redundant && replacePath == ZT_MAX_PEER_NETWORK_PATHS) {
				int replacePathQuality = 0;
				// Higher quality() value == worse path, so track the maximum.
				for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
					if (_paths[i].p) {
						const int q = _paths[i].p->quality(now);
						if (q > replacePathQuality) {
							replacePathQuality = q;
							replacePath = i;
						}
					} else {
						// Empty slot: use it directly instead of evicting.
						replacePath = i;
						break;
					}
				}
			}
			if (replacePath != ZT_MAX_PEER_NETWORK_PATHS) {
				// Only adopt the path outright if it was proven by an OK reply;
				// otherwise probe it first via attemptToContactAt() below.
				if (verb == Packet::VERB_OK) {
					RR->t->peerLearnedNewPath(tPtr,networkId,*this,path,packetId);
					_paths[replacePath].lr = now;
					_paths[replacePath].p = path;
					_paths[replacePath].priority = 1;
				} else {
					attemptToContact = true;
				}
				// Every time we learn of new path, rebuild set of virtual paths
				constructSetOfVirtualPaths();
			}
		}
		if (attemptToContact) {
			attemptToContactAt(tPtr,path->localSocket(),path->address(),now,true);
			path->sent(now);
			RR->t->peerConfirmingUnknownPath(tPtr,networkId,*this,path,packetId,verb);
		}
	}
	// If we have a trust relationship periodically push a message enumerating
	// all known external addresses for ourselves. If we already have a path this
	// is done less frequently.
	if (this->trustEstablished(now)) {
		const int64_t sinceLastPush = now - _lastDirectPathPushSent;
		if (sinceLastPush >= ((hops == 0) ? ZT_DIRECT_PATH_PUSH_INTERVAL_HAVEPATH : ZT_DIRECT_PATH_PUSH_INTERVAL)
			|| (_localMultipathSupported && (sinceLastPush >= (ZT_DIRECT_PATH_PUSH_INTERVAL_MULTIPATH)))) {
			_lastDirectPathPushSent = now;
			std::vector<InetAddress> pathsToPush(RR->node->directPaths());
			if (pathsToPush.size() > 0) {
				std::vector<InetAddress>::const_iterator p(pathsToPush.begin());
				// Emit as many PUSH_DIRECT_PATHS packets as needed, packing
				// addresses until a packet approaches ~1200 bytes.
				while (p != pathsToPush.end()) {
					Packet *const outp = new Packet(_id.address(),RR->identity.address(),Packet::VERB_PUSH_DIRECT_PATHS);
					outp->addSize(2); // leave room for count
					unsigned int count = 0;
					while ((p != pathsToPush.end())&&((outp->size() + 24) < 1200)) {
						uint8_t addressType = 4;
						switch(p->ss_family) {
							case AF_INET:
								break;
							case AF_INET6:
								addressType = 6;
								break;
							default: // we currently only push IP addresses
								++p;
								continue;
						}
						outp->append((uint8_t)0); // no flags
						outp->append((uint16_t)0); // no extensions
						outp->append(addressType);
						outp->append((uint8_t)((addressType == 4) ? 6 : 18)); // entry length: type+addr+port
						outp->append(p->rawIpData(),((addressType == 4) ? 4 : 16));
						outp->append((uint16_t)p->port());
						++count;
						++p;
					}
					if (count) {
						outp->setAt(ZT_PACKET_IDX_PAYLOAD,(uint16_t)count);
						outp->compress();
						outp->armor(_key,true);
						path->send(RR,tPtr,outp->data(),outp->size(),now);
					}
					delete outp; // heap-allocated because Packet is large; freed every iteration
				}
			}
		}
	}
}
  243. void Peer::constructSetOfVirtualPaths()
  244. {
  245. if (!_remoteMultipathSupported) {
  246. return;
  247. }
  248. Mutex::Lock _l(_virtual_paths_m);
  249. int64_t now = RR->node->now();
  250. _virtualPathCount = 0;
  251. for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  252. if (_paths[i].p && _paths[i].p->alive(now)) {
  253. for(unsigned int j=0;j<ZT_MAX_PEER_NETWORK_PATHS;++j) {
  254. if (_paths[j].p && _paths[j].p->alive(now)) {
  255. int64_t localSocket = _paths[j].p->localSocket();
  256. bool foundVirtualPath = false;
  257. for (int k=0; k<_virtualPaths.size(); k++) {
  258. if (_virtualPaths[k]->localSocket == localSocket && _virtualPaths[k]->p == _paths[i].p) {
  259. foundVirtualPath = true;
  260. }
  261. }
  262. if (!foundVirtualPath)
  263. {
  264. VirtualPath *np = new VirtualPath;
  265. np->p = _paths[i].p;
  266. np->localSocket = localSocket;
  267. _virtualPaths.push_back(np);
  268. }
  269. }
  270. }
  271. }
  272. }
  273. }
  274. void Peer::recordOutgoingPacket(const SharedPtr<Path> &path, const uint64_t packetId,
  275. uint16_t payloadLength, const Packet::Verb verb, int64_t now)
  276. {
  277. _freeRandomByte += (unsigned char)(packetId >> 8); // grab entropy to use in path selection logic for multipath
  278. if (_canUseMultipath) {
  279. path->recordOutgoingPacket(now, packetId, payloadLength, verb);
  280. }
  281. }
  282. void Peer::recordIncomingPacket(void *tPtr, const SharedPtr<Path> &path, const uint64_t packetId,
  283. uint16_t payloadLength, const Packet::Verb verb, int64_t now)
  284. {
  285. if (_canUseMultipath) {
  286. if (path->needsToSendAck(now)) {
  287. sendACK(tPtr, path, path->localSocket(), path->address(), now);
  288. }
  289. path->recordIncomingPacket(now, packetId, payloadLength, verb);
  290. }
  291. }
// Recompute each path's relative quality and, depending on the active
// multipath mode, its component allocation within the aggregate link.
// Quality is a weighted blend of stability, throughput and IP scope
// (ZT_PATH_CONTRIB_* weights), attenuated exponentially by ACK age.
void Peer::computeAggregateAllocation(int64_t now)
{
	float maxStability = 0;
	float totalRelativeQuality = 0;
	float maxThroughput = 1; // starts at 1 so the later divide never sees 0
	float maxScope = 0;
	float relStability[ZT_MAX_PEER_NETWORK_PATHS];
	float relThroughput[ZT_MAX_PEER_NETWORK_PATHS];
	memset(&relStability, 0, sizeof(relStability));
	memset(&relThroughput, 0, sizeof(relThroughput));
	// Survey all paths: collect raw stability/throughput and track maxima for normalization
	for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
		if (_paths[i].p) {
			relStability[i] = _paths[i].p->lastComputedStability();
			relThroughput[i] = (float)_paths[i].p->maxLifetimeThroughput();
			maxStability = relStability[i] > maxStability ? relStability[i] : maxStability;
			maxThroughput = relThroughput[i] > maxThroughput ? relThroughput[i] : maxThroughput;
			maxScope = _paths[i].p->ipScope() > maxScope ? _paths[i].p->ipScope() : maxScope;
		}
	}
	// Convert to relative values
	for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
		if (_paths[i].p) {
			relStability[i] /= maxStability ? maxStability : 1;
			relThroughput[i] /= maxThroughput ? maxThroughput : 1;
			// Map ACK age onto [0,10] then decay: fresh ACKs => contrib near 1,
			// stale ACKs => contrib near 0.
			float normalized_ma = Utils::normalize((float)_paths[i].p->ackAge(now), 0, ZT_PATH_MAX_AGE, 0, 10);
			float age_contrib = exp((-1)*normalized_ma);
			float relScope = ((float)(_paths[i].p->ipScope()+1) / (maxScope + 1));
			float relQuality =
				(relStability[i] * (float)ZT_PATH_CONTRIB_STABILITY)
				// NOTE(review): fmaxf(1.0f, x) pins this term at >= 1.0, so the
				// normalized throughput (always <= 1) never varies it; fminf may
				// have been intended — confirm against the design.
				+ (fmaxf(1.0f, relThroughput[i]) * (float)ZT_PATH_CONTRIB_THROUGHPUT)
				+ relScope * (float)ZT_PATH_CONTRIB_SCOPE;
			relQuality *= age_contrib;
			// Clamp values: squash below 1% to 0, above 99% to 1
			relQuality = relQuality > (1.00f / 100.0f) ? relQuality : 0.0f;
			relQuality = relQuality < (99.0f / 100.0f) ? relQuality : 1.0f;
			totalRelativeQuality += relQuality;
			_paths[i].p->updateRelativeQuality(relQuality);
		}
	}
	// Convert set of relative performances into an allocation set (0..255 per path)
	for(uint16_t i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
		if (_paths[i].p) {
			if (RR->node->getMultipathMode() == ZT_MULTIPATH_BALANCE_RANDOM) {
				// Random mode: allocation reflects the observed choice histogram
				_paths[i].p->updateComponentAllocationOfAggregateLink(((float)_pathChoiceHist.countValue(i) / (float)_pathChoiceHist.count()) * 255);
			}
			if (RR->node->getMultipathMode() == ZT_MULTIPATH_BALANCE_DYNAMIC_OPAQUE) {
				// Dynamic mode: allocation is the path's share of total quality
				_paths[i].p->updateComponentAllocationOfAggregateLink((unsigned char)((_paths[i].p->relativeQuality() / totalRelativeQuality) * 255));
			}
		}
	}
}
  344. int Peer::computeAggregateLinkPacketDelayVariance()
  345. {
  346. float pdv = 0.0;
  347. for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  348. if (_paths[i].p) {
  349. pdv += _paths[i].p->relativeQuality() * _paths[i].p->packetDelayVariance();
  350. }
  351. }
  352. return (int)pdv;
  353. }
  354. int Peer::computeAggregateLinkMeanLatency()
  355. {
  356. int ml = 0;
  357. int pathCount = 0;
  358. for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  359. if (_paths[i].p) {
  360. pathCount++;
  361. ml += (int)(_paths[i].p->relativeQuality() * _paths[i].p->meanLatency());
  362. }
  363. }
  364. return ml / pathCount;
  365. }
  366. int Peer::aggregateLinkPhysicalPathCount()
  367. {
  368. std::map<std::string, bool> ifnamemap;
  369. int pathCount = 0;
  370. int64_t now = RR->node->now();
  371. for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  372. if (_paths[i].p && _paths[i].p->alive(now)) {
  373. if (!ifnamemap[_paths[i].p->getName()]) {
  374. ifnamemap[_paths[i].p->getName()] = true;
  375. pathCount++;
  376. }
  377. }
  378. }
  379. return pathCount;
  380. }
  381. int Peer::aggregateLinkLogicalPathCount()
  382. {
  383. int pathCount = 0;
  384. int64_t now = RR->node->now();
  385. for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  386. if (_paths[i].p && _paths[i].p->alive(now)) {
  387. pathCount++;
  388. }
  389. }
  390. return pathCount;
  391. }
  392. std::vector<SharedPtr<Path> > Peer::getAllPaths(int64_t now)
  393. {
  394. Mutex::Lock _l(_virtual_paths_m); // FIXME: TX can now lock RX
  395. std::vector<SharedPtr<Path> > paths;
  396. for (int i=0; i<_virtualPaths.size(); i++) {
  397. if (_virtualPaths[i]->p) {
  398. paths.push_back(_virtualPaths[i]->p);
  399. }
  400. }
  401. return paths;
  402. }
// Select the path an outgoing packet (or flow) should use.
//
// Without multipath: the single best path by the legacy quality metric.
// With multipath: behavior depends on RR->node->getMultipathMode() —
// random, broadcast (handled by Switch), active-backup, round-robin,
// flow-hash, or dynamic-allocation. Returns a null SharedPtr if no usable
// path exists.
//
// now            - current time
// includeExpired - consider paths past ZT_PEER_PATH_EXPIRATION (non-multipath only)
// flowId         - flow identifier for flow-aware balancing modes
SharedPtr<Path> Peer::getAppropriatePath(int64_t now, bool includeExpired, int64_t flowId)
{
	Mutex::Lock _l(_paths_m);
	SharedPtr<Path> selectedPath;
	char curPathStr[128];
	char newPathStr[128];
	unsigned int bestPath = ZT_MAX_PEER_NETWORK_PATHS;
	/**
	 * Send traffic across the highest quality path only. This algorithm will still
	 * use the old path quality metric from protocol version 9.
	 */
	if (!_canUseMultipath) {
		long bestPathQuality = 2147483647;
		for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
			if (_paths[i].p) {
				if ((includeExpired)||((now - _paths[i].lr) < ZT_PEER_PATH_EXPIRATION)) {
					// Lower q is better; priority divides (higher priority wins ties)
					const long q = _paths[i].p->quality(now) / _paths[i].priority;
					if (q <= bestPathQuality) {
						bestPathQuality = q;
						bestPath = i;
					}
				}
			} else break; // _paths is packed; first empty slot ends the scan
		}
		if (bestPath != ZT_MAX_PEER_NETWORK_PATHS) {
			return _paths[bestPath].p;
		}
		return SharedPtr<Path>();
	}
	// Update path measurements
	for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
		if (_paths[i].p) {
			_paths[i].p->processBackgroundPathMeasurements(now);
		}
	}
	if (RR->sw->isFlowAware()) {
		// Detect new flows and update existing records
		if (_flows.count(flowId)) {
			_flows[flowId]->lastSend = now;
		}
		else {
			// NOTE(review): debug output on stderr; Flow objects are heap-allocated
			// and ownership/teardown is not visible in this file — confirm cleanup.
			fprintf(stderr, "new flow %llx detected between this node and %llx (%lu active flow(s))\n",
				flowId, this->_id.address().toInt(), (_flows.size()+1));
			struct Flow *newFlow = new Flow(flowId, now);
			_flows[flowId] = newFlow;
			newFlow->assignedPath = nullptr;
		}
	}
	// Construct set of virtual paths if needed
	if (!_virtualPaths.size()) {
		constructSetOfVirtualPaths();
	}
	if (!_virtualPaths.size()) {
		fprintf(stderr, "no paths to send packet out on\n");
		return SharedPtr<Path>();
	}
	/**
	 * Traffic is randomly distributed among all active paths.
	 */
	int numAlivePaths = 0;  // NOTE(review): never written below — dead locals
	int numStalePaths = 0;
	if (RR->node->getMultipathMode() == ZT_MULTIPATH_BALANCE_RANDOM) {
		int sz = _virtualPaths.size();
		if (sz) {
			int idx = _freeRandomByte % sz;
			_pathChoiceHist.push(idx);
			char pathStr[128];
			_virtualPaths[idx]->p->address().toString(pathStr);
			fprintf(stderr, "sending out: (%llx), idx=%d: path=%s, localSocket=%lld\n",
				this->_id.address().toInt(), idx, pathStr, _virtualPaths[idx]->localSocket);
			return _virtualPaths[idx]->p;
		}
		// This call is algorithmically inert but gives us a value to show in the status output
		// NOTE(review): only reachable when sz == 0 (the branch above returns),
		// and sz cannot be 0 here because of the empty check earlier — dead code.
		computeAggregateAllocation(now);
	}
	/**
	 * All traffic is sent on all paths.
	 */
	if (RR->node->getMultipathMode() == ZT_MULTIPATH_BROADCAST) {
		// Not handled here. Handled in Switch::_trySend()
	}
	/**
	 * Only one link is active. Fail-over is immediate.
	 */
	if (RR->node->getMultipathMode() == ZT_MULTIPATH_ACTIVE_BACKUP) {
		bool bFoundHotPath = false;
		if (!_activeBackupPath) {
			/* Select the fist path that appears to still be active.
			 * This will eventually be user-configurable */
			for (int i=0; i<ZT_MAX_PEER_NETWORK_PATHS; i++) {
				if (_paths[i].p) {
					if (_activeBackupPath.ptr() == _paths[i].p.ptr()) {
						continue;
					}
					// Tentatively adopt this path; keep looking for a "hot" one
					// (traffic seen within the rapid-failover window).
					_activeBackupPath = _paths[i].p;
					if ((now - _paths[i].p->lastIn()) < ZT_MULTIPATH_ACTIVE_BACKUP_RAPID_FAILOVER_PERIOD) {
						bFoundHotPath = true;
						_activeBackupPath = _paths[i].p;
						_pathAssignmentIdx = i;
						_activeBackupPath->address().toString(curPathStr);
						fprintf(stderr, "selected %s as the primary active-backup path to %llx (idx=%d)\n",
							curPathStr, this->_id.address().toInt(), _pathAssignmentIdx);
						break;
					}
				}
			}
		}
		else {
			char what[128];
			if ((now - _activeBackupPath->lastIn()) > ZT_MULTIPATH_ACTIVE_BACKUP_RAPID_FAILOVER_PERIOD) {
				_activeBackupPath->address().toString(curPathStr); // Record path string for later debug trace
				int16_t previousIdx = _pathAssignmentIdx; // NOTE(review): assigned but never read
				SharedPtr<Path> nextAlternativePath;
				// Search for a hot path, at the same time find the next path in
				// a RR sequence that seems viable to use as an alternative
				int searchCount = 0;
				while (searchCount < ZT_MAX_PEER_NETWORK_PATHS) {
					_pathAssignmentIdx++;
					if (_pathAssignmentIdx == ZT_MAX_PEER_NETWORK_PATHS) {
						_pathAssignmentIdx = 0;
					}
					searchCount++;
					if (_paths[_pathAssignmentIdx].p) {
						_paths[_pathAssignmentIdx].p->address().toString(what);
						if (_activeBackupPath.ptr() == _paths[_pathAssignmentIdx].p.ptr()) {
							continue;
						}
						if (!nextAlternativePath) { // Record the first viable alternative in the RR sequence
							nextAlternativePath = _paths[_pathAssignmentIdx].p;
						}
						if ((now - _paths[_pathAssignmentIdx].p->lastIn()) < ZT_MULTIPATH_ACTIVE_BACKUP_RAPID_FAILOVER_PERIOD) {
							bFoundHotPath = true;
							_activeBackupPath = _paths[_pathAssignmentIdx].p;
							_activeBackupPath->address().toString(newPathStr);
							fprintf(stderr, "primary active-backup path %s to %llx appears to be dead, switched to %s\n",
								curPathStr, this->_id.address().toInt(), newPathStr);
							break;
						}
					}
				}
				if (!bFoundHotPath) {
					if (nextAlternativePath) {
						// No hot path: fall back to the next path in the RR sequence
						_activeBackupPath = nextAlternativePath;
						_activeBackupPath->address().toString(curPathStr);
						//fprintf(stderr, "no hot paths found to use as active-backup primary to %llx, using next best: %s\n",
						//	this->_id.address().toInt(), curPathStr);
					}
					else {
						// No change
					}
				}
			}
		}
		if (!_activeBackupPath) {
			return SharedPtr<Path>();
		}
		return _activeBackupPath;
	}
	/**
	 * Packets are striped across all available paths.
	 */
	if (RR->node->getMultipathMode() == ZT_MULTIPATH_BALANCE_RR_OPAQUE) {
		// fprintf(stderr, "ZT_MULTIPATH_BALANCE_RR_OPAQUE\n");
		int16_t previousIdx = _roundRobinPathAssignmentIdx;
		if (_roundRobinPathAssignmentIdx < (_virtualPaths.size()-1)) {
			_roundRobinPathAssignmentIdx++;
		}
		else {
			_roundRobinPathAssignmentIdx = 0;
		}
		selectedPath = _virtualPaths[previousIdx]->p;
		char pathStr[128];
		selectedPath->address().toString(pathStr);
		fprintf(stderr, "sending packet out on path %s at index %d\n",
			pathStr, previousIdx);
		return selectedPath;
	}
	/**
	 * Flows are striped across all available paths.
	 */
	if (RR->node->getMultipathMode() == ZT_MULTIPATH_BALANCE_RR_FLOW) {
		// fprintf(stderr, "ZT_MULTIPATH_BALANCE_RR_FLOW\n");
	}
	/**
	 * Flows are hashed across all available paths.
	 */
	if (RR->node->getMultipathMode() == ZT_MULTIPATH_BALANCE_XOR_FLOW) {
		// fprintf(stderr, "ZT_MULTIPATH_BALANCE_XOR_FLOW (%llx) \n", flowId);
		struct Flow *currFlow = NULL;
		if (_flows.count(flowId)) {
			currFlow = _flows[flowId];
			if (!currFlow->assignedPath) {
				// NOTE(review): when _virtualPaths.size() == 1 this is a modulo
				// by zero (undefined behavior) — needs a guard; confirm upstream fix.
				int idx = abs((int)(currFlow->flowId % (_virtualPaths.size()-1)));
				currFlow->assignedPath = _virtualPaths[idx];
				_virtualPaths[idx]->p->address().toString(curPathStr);
				fprintf(stderr, "assigning flow %llx between this node and peer %llx to path %s at index %d\n",
					currFlow->flowId, this->_id.address().toInt(), curPathStr, idx);
			}
			else {
				if (!currFlow->assignedPath->p->alive(now)) {
					currFlow->assignedPath->p->address().toString(curPathStr);
					// Re-assign
					// NOTE(review): same modulo-by-zero hazard as above.
					int idx = abs((int)(currFlow->flowId % (_virtualPaths.size()-1)));
					currFlow->assignedPath = _virtualPaths[idx];
					_virtualPaths[idx]->p->address().toString(newPathStr);
					fprintf(stderr, "path %s assigned to flow %llx between this node and %llx appears to be dead, reassigning to path %s\n",
						curPathStr, currFlow->flowId, this->_id.address().toInt(), newPathStr);
				}
			}
			return currFlow->assignedPath->p;
		}
	}
	/**
	 * Proportionally allocate traffic according to dynamic path quality measurements.
	 */
	if (RR->node->getMultipathMode() == ZT_MULTIPATH_BALANCE_DYNAMIC_OPAQUE) {
		if ((now - _lastAggregateAllocation) >= ZT_PATH_QUALITY_COMPUTE_INTERVAL) {
			_lastAggregateAllocation = now;
			computeAggregateAllocation(now);
		}
		// Randomly choose path according to their allocations
		float rf = _freeRandomByte;
		for(int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
			if (_paths[i].p) {
				if (rf < _paths[i].p->allocation()) {
					bestPath = i;
					_pathChoiceHist.push(bestPath); // Record which path we chose
					break;
				}
				rf -= _paths[i].p->allocation();
			}
		}
		if (bestPath < ZT_MAX_PEER_NETWORK_PATHS) {
			return _paths[bestPath].p;
		}
	}
	/**
	 * Flows are dynamically allocated across paths in proportion to link strength and load.
	 */
	if (RR->node->getMultipathMode() == ZT_MULTIPATH_BALANCE_DYNAMIC_FLOW) {
		// Not implemented
	}
	return SharedPtr<Path>();
}
  646. char *Peer::interfaceListStr()
  647. {
  648. std::map<std::string, int> ifnamemap;
  649. char tmp[32];
  650. const int64_t now = RR->node->now();
  651. char *ptr = _interfaceListStr;
  652. bool imbalanced = false;
  653. memset(_interfaceListStr, 0, sizeof(_interfaceListStr));
  654. int alivePathCount = aggregateLinkLogicalPathCount();
  655. for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  656. if (_paths[i].p && _paths[i].p->alive(now)) {
  657. int ipv = _paths[i].p->address().isV4();
  658. // If this is acting as an aggregate link, check allocations
  659. float targetAllocation = 1.0f / (float)alivePathCount;
  660. float currentAllocation = 1.0f;
  661. if (alivePathCount > 1) {
  662. currentAllocation = (float)_pathChoiceHist.countValue(i) / (float)_pathChoiceHist.count();
  663. if (fabs(targetAllocation - currentAllocation) > ZT_PATH_IMBALANCE_THRESHOLD) {
  664. imbalanced = true;
  665. }
  666. }
  667. char *ipvStr = ipv ? (char*)"ipv4" : (char*)"ipv6";
  668. sprintf(tmp, "(%s, %s, %.3f)", _paths[i].p->getName(), ipvStr, currentAllocation);
  669. // Prevent duplicates
  670. if(ifnamemap[_paths[i].p->getName()] != ipv) {
  671. memcpy(ptr, tmp, strlen(tmp));
  672. ptr += strlen(tmp);
  673. *ptr = ' ';
  674. ptr++;
  675. ifnamemap[_paths[i].p->getName()] = ipv;
  676. }
  677. }
  678. }
  679. ptr--; // Overwrite trailing space
  680. if (imbalanced) {
  681. sprintf(tmp, ", is asymmetrical");
  682. memcpy(ptr, tmp, sizeof(tmp));
  683. } else {
  684. *ptr = '\0';
  685. }
  686. return _interfaceListStr;
  687. }
  688. void Peer::introduce(void *const tPtr,const int64_t now,const SharedPtr<Peer> &other) const
  689. {
  690. unsigned int myBestV4ByScope[ZT_INETADDRESS_MAX_SCOPE+1];
  691. unsigned int myBestV6ByScope[ZT_INETADDRESS_MAX_SCOPE+1];
  692. long myBestV4QualityByScope[ZT_INETADDRESS_MAX_SCOPE+1];
  693. long myBestV6QualityByScope[ZT_INETADDRESS_MAX_SCOPE+1];
  694. unsigned int theirBestV4ByScope[ZT_INETADDRESS_MAX_SCOPE+1];
  695. unsigned int theirBestV6ByScope[ZT_INETADDRESS_MAX_SCOPE+1];
  696. long theirBestV4QualityByScope[ZT_INETADDRESS_MAX_SCOPE+1];
  697. long theirBestV6QualityByScope[ZT_INETADDRESS_MAX_SCOPE+1];
  698. for(int i=0;i<=ZT_INETADDRESS_MAX_SCOPE;++i) {
  699. myBestV4ByScope[i] = ZT_MAX_PEER_NETWORK_PATHS;
  700. myBestV6ByScope[i] = ZT_MAX_PEER_NETWORK_PATHS;
  701. myBestV4QualityByScope[i] = 2147483647;
  702. myBestV6QualityByScope[i] = 2147483647;
  703. theirBestV4ByScope[i] = ZT_MAX_PEER_NETWORK_PATHS;
  704. theirBestV6ByScope[i] = ZT_MAX_PEER_NETWORK_PATHS;
  705. theirBestV4QualityByScope[i] = 2147483647;
  706. theirBestV6QualityByScope[i] = 2147483647;
  707. }
  708. Mutex::Lock _l1(_paths_m);
  709. for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  710. if (_paths[i].p) {
  711. const long q = _paths[i].p->quality(now) / _paths[i].priority;
  712. const unsigned int s = (unsigned int)_paths[i].p->ipScope();
  713. switch(_paths[i].p->address().ss_family) {
  714. case AF_INET:
  715. if (q <= myBestV4QualityByScope[s]) {
  716. myBestV4QualityByScope[s] = q;
  717. myBestV4ByScope[s] = i;
  718. }
  719. break;
  720. case AF_INET6:
  721. if (q <= myBestV6QualityByScope[s]) {
  722. myBestV6QualityByScope[s] = q;
  723. myBestV6ByScope[s] = i;
  724. }
  725. break;
  726. }
  727. } else break;
  728. }
  729. Mutex::Lock _l2(other->_paths_m);
  730. for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  731. if (other->_paths[i].p) {
  732. const long q = other->_paths[i].p->quality(now) / other->_paths[i].priority;
  733. const unsigned int s = (unsigned int)other->_paths[i].p->ipScope();
  734. switch(other->_paths[i].p->address().ss_family) {
  735. case AF_INET:
  736. if (q <= theirBestV4QualityByScope[s]) {
  737. theirBestV4QualityByScope[s] = q;
  738. theirBestV4ByScope[s] = i;
  739. }
  740. break;
  741. case AF_INET6:
  742. if (q <= theirBestV6QualityByScope[s]) {
  743. theirBestV6QualityByScope[s] = q;
  744. theirBestV6ByScope[s] = i;
  745. }
  746. break;
  747. }
  748. } else break;
  749. }
  750. unsigned int mine = ZT_MAX_PEER_NETWORK_PATHS;
  751. unsigned int theirs = ZT_MAX_PEER_NETWORK_PATHS;
  752. for(int s=ZT_INETADDRESS_MAX_SCOPE;s>=0;--s) {
  753. if ((myBestV6ByScope[s] != ZT_MAX_PEER_NETWORK_PATHS)&&(theirBestV6ByScope[s] != ZT_MAX_PEER_NETWORK_PATHS)) {
  754. mine = myBestV6ByScope[s];
  755. theirs = theirBestV6ByScope[s];
  756. break;
  757. }
  758. if ((myBestV4ByScope[s] != ZT_MAX_PEER_NETWORK_PATHS)&&(theirBestV4ByScope[s] != ZT_MAX_PEER_NETWORK_PATHS)) {
  759. mine = myBestV4ByScope[s];
  760. theirs = theirBestV4ByScope[s];
  761. break;
  762. }
  763. }
  764. if (mine != ZT_MAX_PEER_NETWORK_PATHS) {
  765. unsigned int alt = (unsigned int)RR->node->prng() & 1; // randomize which hint we send first for black magickal NAT-t reasons
  766. const unsigned int completed = alt + 2;
  767. while (alt != completed) {
  768. if ((alt & 1) == 0) {
  769. Packet outp(_id.address(),RR->identity.address(),Packet::VERB_RENDEZVOUS);
  770. outp.append((uint8_t)0);
  771. other->_id.address().appendTo(outp);
  772. outp.append((uint16_t)other->_paths[theirs].p->address().port());
  773. if (other->_paths[theirs].p->address().ss_family == AF_INET6) {
  774. outp.append((uint8_t)16);
  775. outp.append(other->_paths[theirs].p->address().rawIpData(),16);
  776. } else {
  777. outp.append((uint8_t)4);
  778. outp.append(other->_paths[theirs].p->address().rawIpData(),4);
  779. }
  780. outp.armor(_key,true);
  781. _paths[mine].p->send(RR,tPtr,outp.data(),outp.size(),now);
  782. } else {
  783. Packet outp(other->_id.address(),RR->identity.address(),Packet::VERB_RENDEZVOUS);
  784. outp.append((uint8_t)0);
  785. _id.address().appendTo(outp);
  786. outp.append((uint16_t)_paths[mine].p->address().port());
  787. if (_paths[mine].p->address().ss_family == AF_INET6) {
  788. outp.append((uint8_t)16);
  789. outp.append(_paths[mine].p->address().rawIpData(),16);
  790. } else {
  791. outp.append((uint8_t)4);
  792. outp.append(_paths[mine].p->address().rawIpData(),4);
  793. }
  794. outp.armor(other->_key,true);
  795. other->_paths[theirs].p->send(RR,tPtr,outp.data(),outp.size(),now);
  796. }
  797. ++alt;
  798. }
  799. }
  800. }
  801. inline void Peer::processBackgroundPeerTasks(const int64_t now)
  802. {
  803. // Determine current multipath compatibility with other peer
  804. if ((now - _lastMultipathCompatibilityCheck) >= ZT_PATH_QUALITY_COMPUTE_INTERVAL) {
  805. //
  806. // Cache number of available paths so that we can short-circuit multipath logic elsewhere
  807. //
  808. // We also take notice of duplicate paths (same IP only) because we may have
  809. // recently received a direct path push from a peer and our list might contain
  810. // a dead path which hasn't been fully recognized as such. In this case we
  811. // don't want the duplicate to trigger execution of multipath code prematurely.
  812. //
  813. // This is done to support the behavior of auto multipath enable/disable
  814. // without user intervention.
  815. //
  816. int currAlivePathCount = 0;
  817. int duplicatePathsFound = 0;
  818. for (unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  819. if (_paths[i].p) {
  820. currAlivePathCount++;
  821. for (unsigned int j=0;j<ZT_MAX_PEER_NETWORK_PATHS;++j) {
  822. if (_paths[i].p && _paths[j].p && _paths[i].p->address().ipsEqual2(_paths[j].p->address()) && i != j) {
  823. duplicatePathsFound+=1;
  824. break;
  825. }
  826. }
  827. }
  828. }
  829. _uniqueAlivePathCount = (currAlivePathCount - (duplicatePathsFound / 2));
  830. _lastMultipathCompatibilityCheck = now;
  831. _localMultipathSupported = ((RR->node->getMultipathMode() != ZT_MULTIPATH_NONE) && (ZT_PROTO_VERSION > 9));
  832. _remoteMultipathSupported = _vProto > 9;
  833. // If both peers support multipath and more than one path exist, we can use multipath logic
  834. _canUseMultipath = _localMultipathSupported && _remoteMultipathSupported && (_uniqueAlivePathCount > 1);
  835. }
  836. // Remove old flows
  837. if (RR->sw->isFlowAware()) {
  838. std::map<int64_t, struct Flow *>::iterator it = _flows.begin();
  839. while (it != _flows.end()) {
  840. if ((now - it->second->lastSend) > ZT_MULTIPATH_FLOW_EXPIRATION) {
  841. fprintf(stderr, "forgetting flow %llx between this node and %llx (%lu active flow(s))\n",
  842. it->first, this->_id.address().toInt(), _flows.size());
  843. it = _flows.erase(it);
  844. } else {
  845. it++;
  846. }
  847. }
  848. }
  849. }
  850. void Peer::sendACK(void *tPtr,const SharedPtr<Path> &path,const int64_t localSocket,const InetAddress &atAddress,int64_t now)
  851. {
  852. Packet outp(_id.address(),RR->identity.address(),Packet::VERB_ACK);
  853. uint32_t bytesToAck = path->bytesToAck();
  854. outp.append<uint32_t>(bytesToAck);
  855. if (atAddress) {
  856. outp.armor(_key,false);
  857. RR->node->putPacket(tPtr,localSocket,atAddress,outp.data(),outp.size());
  858. } else {
  859. RR->sw->send(tPtr,outp,false);
  860. }
  861. path->sentAck(now);
  862. }
  863. void Peer::sendQOS_MEASUREMENT(void *tPtr,const SharedPtr<Path> &path,const int64_t localSocket,const InetAddress &atAddress,int64_t now)
  864. {
  865. const int64_t _now = RR->node->now();
  866. Packet outp(_id.address(),RR->identity.address(),Packet::VERB_QOS_MEASUREMENT);
  867. char qosData[ZT_PATH_MAX_QOS_PACKET_SZ];
  868. int16_t len = path->generateQoSPacket(_now,qosData);
  869. outp.append(qosData,len);
  870. if (atAddress) {
  871. outp.armor(_key,false);
  872. RR->node->putPacket(tPtr,localSocket,atAddress,outp.data(),outp.size());
  873. } else {
  874. RR->sw->send(tPtr,outp,false);
  875. }
  876. path->sentQoS(now);
  877. }
void Peer::sendHELLO(void *tPtr,const int64_t localSocket,const InetAddress &atAddress,int64_t now)
{
	// Compose and send a full HELLO to this peer: protocol and software
	// versions, the current time, our identity, the physical address this
	// HELLO is addressed to, planet (world) id/timestamp, and the moon list.
	// NOTE: the append order below is the wire format — do not reorder.
	Packet outp(_id.address(),RR->identity.address(),Packet::VERB_HELLO);
	outp.append((unsigned char)ZT_PROTO_VERSION);
	outp.append((unsigned char)ZEROTIER_ONE_VERSION_MAJOR);
	outp.append((unsigned char)ZEROTIER_ONE_VERSION_MINOR);
	outp.append((uint16_t)ZEROTIER_ONE_VERSION_REVISION);
	outp.append(now); // current time as supplied by caller
	RR->identity.serialize(outp,false); // second arg false: presumably omits the private portion — confirm against Identity::serialize
	atAddress.serialize(outp); // the address we believe we are sending to (may be nil)
	outp.append((uint64_t)RR->topology->planetWorldId());
	outp.append((uint64_t)RR->topology->planetWorldTimestamp());
	// Everything appended from here on is scrambled by cryptField() below.
	const unsigned int startCryptedPortionAt = outp.size();
	std::vector<World> moons(RR->topology->moons());
	std::vector<uint64_t> moonsWanted(RR->topology->moonsWanted());
	outp.append((uint16_t)(moons.size() + moonsWanted.size()));
	for(std::vector<World>::const_iterator m(moons.begin());m!=moons.end();++m) {
		// Moons we already have: advertise type, id, and our revision timestamp.
		outp.append((uint8_t)m->type());
		outp.append((uint64_t)m->id());
		outp.append((uint64_t)m->timestamp());
	}
	for(std::vector<uint64_t>::const_iterator m(moonsWanted.begin());m!=moonsWanted.end();++m) {
		// Moons we want but don't yet have: timestamp 0 marks them as wanted.
		outp.append((uint8_t)World::TYPE_MOON);
		outp.append(*m);
		outp.append((uint64_t)0);
	}
	// Field-encrypt only the moon section computed above.
	outp.cryptField(_key,startCryptedPortionAt,outp.size() - startCryptedPortionAt);
	RR->node->expectReplyTo(outp.packetId());
	if (atAddress) {
		outp.armor(_key,false); // false == don't encrypt full payload, but add MAC
		RR->node->putPacket(tPtr,localSocket,atAddress,outp.data(),outp.size());
	} else {
		RR->sw->send(tPtr,outp,false); // false == don't encrypt full payload, but add MAC
	}
}
  913. void Peer::attemptToContactAt(void *tPtr,const int64_t localSocket,const InetAddress &atAddress,int64_t now,bool sendFullHello)
  914. {
  915. if ( (!sendFullHello) && (_vProto >= 5) && (!((_vMajor == 1)&&(_vMinor == 1)&&(_vRevision == 0))) ) {
  916. Packet outp(_id.address(),RR->identity.address(),Packet::VERB_ECHO);
  917. RR->node->expectReplyTo(outp.packetId());
  918. outp.armor(_key,true);
  919. RR->node->putPacket(tPtr,localSocket,atAddress,outp.data(),outp.size());
  920. } else {
  921. sendHELLO(tPtr,localSocket,atAddress,now);
  922. }
  923. }
  924. void Peer::tryMemorizedPath(void *tPtr,int64_t now)
  925. {
  926. if ((now - _lastTriedMemorizedPath) >= ZT_TRY_MEMORIZED_PATH_INTERVAL) {
  927. _lastTriedMemorizedPath = now;
  928. InetAddress mp;
  929. if (RR->node->externalPathLookup(tPtr,_id.address(),-1,mp))
  930. attemptToContactAt(tPtr,-1,mp,now,true);
  931. }
  932. }
unsigned int Peer::doPingAndKeepalive(void *tPtr,int64_t now)
{
	// Ping/keepalive pass over this peer's paths. Returns a bitmask of the
	// address families actually pinged: 0x1 = IPv4, 0x2 = IPv6.
	unsigned int sent = 0;
	Mutex::Lock _l(_paths_m);
	processBackgroundPeerTasks(now);
	// Emit traces regarding aggregate link status
	if (_canUseMultipath) {
		int alivePathCount = aggregateLinkPhysicalPathCount();
		if ((now - _lastAggregateStatsReport) > ZT_PATH_AGGREGATE_STATS_REPORT_INTERVAL) {
			// Periodic aggregate link statistics report
			_lastAggregateStatsReport = now;
			if (alivePathCount) {
				RR->t->peerLinkAggregateStatistics(NULL,*this);
			}
		} if (alivePathCount < 2 && _linkIsRedundant) {
			// Transition: link just lost redundancy (dropped to <2 live paths)
			_linkIsRedundant = !_linkIsRedundant;
			RR->t->peerLinkNoLongerAggregate(NULL,*this);
		} if (alivePathCount > 1 && !_linkIsRedundant) {
			// Transition: link just BECAME redundant (>1 live path)
			_linkIsRedundant = !_linkIsRedundant;
			// NOTE(review): this emits the same "no longer aggregate" trace as the
			// branch above even though the state change is the opposite — looks
			// like a copy-paste slip; check the Trace API for a "now aggregate"
			// counterpart event and confirm which call belongs here.
			RR->t->peerLinkNoLongerAggregate(NULL,*this);
		}
	}
	// Right now we only keep pinging links that have the maximum priority. The
	// priority is used to track cluster redirections, meaning that when a cluster
	// redirects us its redirect target links override all other links and we
	// let those old links expire.
	long maxPriority = 0;
	for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
		if (_paths[i].p)
			maxPriority = std::max(_paths[i].priority,maxPriority);
		else break; // path list is packed; first empty slot ends it
	}
	const bool sendFullHello = ((now - _lastSentFullHello) >= ZT_PEER_PING_PERIOD);
	// NOTE(review): _lastSentFullHello is advanced even when sendFullHello is
	// false; if this method runs more frequently than ZT_PEER_PING_PERIOD the
	// full-HELLO condition may never become true — confirm this is intended.
	_lastSentFullHello = now;
	unsigned int j = 0;
	for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
		if (_paths[i].p) {
			// Clean expired and reduced priority paths
			if ( ((now - _paths[i].lr) < ZT_PEER_PATH_EXPIRATION) && (_paths[i].priority == maxPriority) ) {
				if ((sendFullHello)||(_paths[i].p->needsHeartbeat(now))) {
					attemptToContactAt(tPtr,_paths[i].p->localSocket(),_paths[i].p->address(),now,sendFullHello);
					_paths[i].p->sent(now);
					// Record which address family we pinged
					sent |= (_paths[i].p->address().ss_family == AF_INET) ? 0x1 : 0x2;
				}
				// Compact surviving paths toward the front of the array
				if (i != j)
					_paths[j] = _paths[i];
				++j;
			}
		} else break;
	}
	if (canUseMultipath()) {
		// Zero out the tail slots left over after compaction so the packed-list
		// invariant (live paths first, then empties) holds.
		while(j < ZT_MAX_PEER_NETWORK_PATHS) {
			_paths[j].lr = 0;
			_paths[j].p.zero();
			_paths[j].priority = 1;
			++j;
		}
	}
	return sent;
}
void Peer::clusterRedirect(void *tPtr,const SharedPtr<Path> &originatingPath,const InetAddress &remoteAddress,const int64_t now)
{
	// Handle a cluster redirect: contact the new remote address immediately
	// and install it as a path whose priority exceeds the originating path's,
	// so it overrides older links (see priority handling in doPingAndKeepalive).
	SharedPtr<Path> np(RR->topology->getPath(originatingPath->localSocket(),remoteAddress));
	RR->t->peerRedirected(tPtr,0,*this,np);
	attemptToContactAt(tPtr,originatingPath->localSocket(),remoteAddress,now,true);
	{
		Mutex::Lock _l(_paths_m);
		// New priority is higher than the priority of the originating path (if known)
		long newPriority = 1;
		for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
			if (_paths[i].p) {
				if (_paths[i].p == originatingPath) {
					newPriority = _paths[i].priority;
					break;
				}
			} else break; // packed list: first empty slot ends it
		}
		newPriority += 2;
		// Erase any paths with lower priority than this one or that are duplicate
		// IPs and add this path.
		unsigned int j = 0;
		for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
			if (_paths[i].p) {
				// Keep a path only if it is at least as high priority AND does not
				// share the redirect target's IP; everything else is dropped.
				if ((_paths[i].priority >= newPriority)&&(!_paths[i].p->address().ipsEqual2(remoteAddress))) {
					if (i != j)
						_paths[j] = _paths[i];
					++j;
				}
			}
		}
		if (j < ZT_MAX_PEER_NETWORK_PATHS) {
			// Append the redirect target path with its boosted priority...
			_paths[j].lr = now;
			_paths[j].p = np;
			_paths[j].priority = newPriority;
			++j;
			// ...then zero out remaining slots to restore the packed-list invariant.
			while (j < ZT_MAX_PEER_NETWORK_PATHS) {
				_paths[j].lr = 0;
				_paths[j].p.zero();
				_paths[j].priority = 1;
				++j;
			}
		}
	}
}
  1036. void Peer::resetWithinScope(void *tPtr,InetAddress::IpScope scope,int inetAddressFamily,int64_t now)
  1037. {
  1038. Mutex::Lock _l(_paths_m);
  1039. for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  1040. if (_paths[i].p) {
  1041. if ((_paths[i].p->address().ss_family == inetAddressFamily)&&(_paths[i].p->ipScope() == scope)) {
  1042. attemptToContactAt(tPtr,_paths[i].p->localSocket(),_paths[i].p->address(),now,false);
  1043. _paths[i].p->sent(now);
  1044. _paths[i].lr = 0; // path will not be used unless it speaks again
  1045. }
  1046. } else break;
  1047. }
  1048. }
  1049. } // namespace ZeroTier