/*
 * Copyright (c)2013-2020 ZeroTier, Inc.
 *
 * Use of this software is governed by the Business Source License included
 * in the LICENSE.TXT file in the project's root directory.
 *
 * Change Date: 2024-01-01
 *
 * On the date above, in accordance with the Business Source License, use
 * of this software will be governed by version 2.0 of the Apache License.
 */
/****/

#include <cmath>

#include "Peer.hpp"
#include "Bond.hpp"
#include "Switch.hpp"
#include "Flow.hpp"
#include "Path.hpp"

namespace ZeroTier {
Bond::Bond(const RuntimeEnvironment *renv, int policy, const SharedPtr<Peer>& peer) :
    RR(renv),
    _peer(peer)
{
    setReasonableDefaults(policy);
    _policyAlias = BondController::getPolicyStrByCode(policy);
}

Bond::Bond(const RuntimeEnvironment *renv, std::string& basePolicy, std::string& policyAlias, const SharedPtr<Peer>& peer) :
    RR(renv),
    _policyAlias(policyAlias),
    _peer(peer)
{
    setReasonableDefaults(BondController::getPolicyCodeByStr(basePolicy));
}

Bond::Bond(const RuntimeEnvironment *renv, const Bond &originalBond, const SharedPtr<Peer>& peer) :
    RR(renv),
    _peer(peer)
{
    // First, set everything to sane defaults
    setReasonableDefaults(originalBond._bondingPolicy);
    _policyAlias = originalBond._policyAlias;
    // Second, apply user specified values (only if they make sense)
    _downDelay = originalBond._downDelay;
    _upDelay = originalBond._upDelay;
    if (originalBond._bondMonitorInterval > 0 && originalBond._bondMonitorInterval < 65535) {
        _bondMonitorInterval = originalBond._bondMonitorInterval;
    }
    else {
        fprintf(stderr, "warning: bondMonitorInterval (%d) is out of range, using default (%d)\n", originalBond._bondMonitorInterval, _bondMonitorInterval);
    }
    if (originalBond._slaveMonitorStrategy == ZT_MULTIPATH_SLAVE_MONITOR_STRATEGY_PASSIVE
        && originalBond._failoverInterval != 0) {
        fprintf(stderr, "warning: passive path monitoring was specified, this will prevent failovers from happening in a timely manner.\n");
    }
    _abSlaveSelectMethod = originalBond._abSlaveSelectMethod;
    memcpy(_qualityWeights, originalBond._qualityWeights, ZT_QOS_WEIGHT_SIZE * sizeof(float));
}
void Bond::nominatePath(const SharedPtr<Path>& path, int64_t now)
{
    char pathStr[128];
    path->address().toString(pathStr);
    fprintf(stderr, "nominatePath: %s %s\n", getSlave(path)->ifname().c_str(), pathStr);
    Mutex::Lock _l(_paths_m);
    if (!RR->bc->slaveAllowed(_policyAlias, getSlave(path))) {
        return;
    }
    bool alreadyPresent = false;
    for (int i=0; i<ZT_MAX_PEER_NETWORK_PATHS; ++i) {
        if (path.ptr() == _paths[i].ptr()) {
            fprintf(stderr, "previously encountered path, not notifying bond (%s)\n", pathStr);
            alreadyPresent = true;
            break;
        }
    }
    if (!alreadyPresent) {
        for (int i=0; i<ZT_MAX_PEER_NETWORK_PATHS; ++i) {
            if (!_paths[i]) {
                fprintf(stderr, "nominatePath(): Setting path %s to idx=%d\n", pathStr, i);
                _paths[i] = path;
                //_paths[i]->slave = RR->bc->getSlaveBySocket(_policyAlias, path->localSocket());
                _paths[i]->startTrial(now);
                break;
            }
        }
    }
    curateBond(now, true);
    estimatePathQuality(now);
}
SharedPtr<Path> Bond::getAppropriatePath(int64_t now, int32_t flowId)
{
    Mutex::Lock _l(_paths_m);
    /**
     * active-backup
     */
    if (_bondingPolicy == ZT_BONDING_POLICY_ACTIVE_BACKUP) {
        if (_abPath) {
            return _abPath;
        }
    }
    /**
     * broadcast
     */
    if (_bondingPolicy == ZT_BONDING_POLICY_BROADCAST) {
        return SharedPtr<Path>(); // Handled in Switch::_trySend()
    }
    if (!_numBondedPaths) {
        return SharedPtr<Path>(); // No paths assigned to bond yet, cannot balance traffic
    }
    /**
     * balance-rr
     */
    if (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_RR) {
        if (!_allowFlowHashing) {
            //fprintf(stderr, "_rrPacketsSentOnCurrSlave=%d, _numBondedPaths=%d, _rrIdx=%d\n", _rrPacketsSentOnCurrSlave, _numBondedPaths, _rrIdx);
            if (_packetsPerSlave == 0) {
                // Randomly select a path
                return _paths[_bondedIdx[_freeRandomByte % _numBondedPaths]]; // TODO: Optimize
            }
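            // Striping: keep sending on the current slave until _packetsPerSlave packets have gone
            // out on it, then advance _rrIdx to the next bonded path that is still eligible.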
            if (_rrPacketsSentOnCurrSlave < _packetsPerSlave) {
                // Continue to use this slave
                ++_rrPacketsSentOnCurrSlave;
                return _paths[_bondedIdx[_rrIdx]];
            }
            // Reset striping counter
            _rrPacketsSentOnCurrSlave = 0;
            if (_numBondedPaths == 1) {
                _rrIdx = 0;
            }
            else {
                int _tempIdx = _rrIdx;
                for (int searchCount = 0; searchCount < (_numBondedPaths-1); searchCount++) {
                    _tempIdx = (_tempIdx == (_numBondedPaths-1)) ? 0 : _tempIdx+1;
                    if (_bondedIdx[_tempIdx] != ZT_MAX_PEER_NETWORK_PATHS) {
                        if (_paths[_bondedIdx[_tempIdx]] && _paths[_bondedIdx[_tempIdx]]->eligible(now,_ackSendInterval)) {
                            _rrIdx = _tempIdx;
                            break;
                        }
                    }
                }
            }
            if (_paths[_bondedIdx[_rrIdx]]) {
                return _paths[_bondedIdx[_rrIdx]];
            }
        }
    }
    /**
     * balance-xor
     */
    if (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_XOR || _bondingPolicy == ZT_BONDING_POLICY_BALANCE_AWARE) {
        if (!_allowFlowHashing || flowId == -1) {
            // No specific path required for unclassified traffic, send on anything
            return _paths[_bondedIdx[_freeRandomByte % _numBondedPaths]]; // TODO: Optimize
        }
        else if (_allowFlowHashing) {
            // TODO: Optimize
            Mutex::Lock _l(_flows_m);
            SharedPtr<Flow> flow;
            if (_flows.count(flowId)) {
                flow = _flows[flowId];
                flow->updateActivity(now);
            }
            else {
                unsigned char entropy;
                Utils::getSecureRandom(&entropy, 1);
                flow = createFlow(SharedPtr<Path>(), flowId, entropy, now);
            }
            if (flow) {
                return flow->assignedPath();
            }
        }
    }
    return SharedPtr<Path>();
}
void Bond::recordIncomingInvalidPacket(const SharedPtr<Path>& path)
{
    //char pathStr[128];path->address().toString(pathStr);fprintf(stderr, "recordIncomingInvalidPacket() %s %s\n", getSlave(path)->ifname().c_str(), pathStr);
    Mutex::Lock _l(_paths_m);
    for (int i=0; i<ZT_MAX_PEER_NETWORK_PATHS; ++i) {
        if (_paths[i] == path) {
            _paths[i]->packetValiditySamples.push(false);
        }
    }
}
void Bond::recordOutgoingPacket(const SharedPtr<Path> &path, const uint64_t packetId,
    uint16_t payloadLength, const Packet::Verb verb, const int32_t flowId, int64_t now)
{
    //char pathStr[128];path->address().toString(pathStr);fprintf(stderr, "recordOutgoingPacket() %s %s, packetId=%llx, payloadLength=%d, verb=%x, flowId=%lx\n", getSlave(path)->ifname().c_str(), pathStr, packetId, payloadLength, verb, flowId);
    _freeRandomByte += (unsigned char)(packetId >> 8); // Grab entropy to use in path selection logic
    if (!_shouldCollectPathStatistics) {
        return;
    }
    bool isFrame = (verb == Packet::VERB_FRAME || verb == Packet::VERB_EXT_FRAME);
    bool shouldRecord = (packetId & (ZT_QOS_ACK_DIVISOR - 1)
        && (verb != Packet::VERB_ACK)
        && (verb != Packet::VERB_QOS_MEASUREMENT));
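    // Note (assuming ZT_QOS_ACK_DIVISOR is a power of two): the mask is zero only when the packet
    // ID is an exact multiple of ZT_QOS_ACK_DIVISOR, so QoS bookkeeping is skipped for roughly one
    // in every ZT_QOS_ACK_DIVISOR packets, and always skipped for ACK/QoS control packets themselves.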
    if (isFrame || shouldRecord) {
        Mutex::Lock _l(_paths_m);
        if (isFrame) {
            ++(path->_packetsOut);
            _lastFrame=now;
        }
        if (shouldRecord) {
            path->_unackedBytes += payloadLength;
            // Take note that we're expecting a VERB_ACK on this path as of a specific time
            if (path->qosStatsOut.size() < ZT_QOS_MAX_OUTSTANDING_RECORDS) {
                path->qosStatsOut[packetId] = now;
            }
        }
    }
    if (_allowFlowHashing) {
        if (_allowFlowHashing && (flowId != ZT_QOS_NO_FLOW)) {
            Mutex::Lock _l(_flows_m);
            if (_flows.count(flowId)) {
                _flows[flowId]->recordOutgoingBytes(payloadLength);
            }
        }
    }
}
void Bond::recordIncomingPacket(const SharedPtr<Path>& path, uint64_t packetId, uint16_t payloadLength,
    Packet::Verb verb, int32_t flowId, int64_t now)
{
    //char pathStr[128];path->address().toString(pathStr);fprintf(stderr, "recordIncomingPacket() %s %s, packetId=%llx, payloadLength=%d, verb=%x, flowId=%lx\n", getSlave(path)->ifname().c_str(), pathStr, packetId, payloadLength, verb, flowId);
    bool isFrame = (verb == Packet::VERB_FRAME || verb == Packet::VERB_EXT_FRAME);
    bool shouldRecord = (packetId & (ZT_QOS_ACK_DIVISOR - 1)
        && (verb != Packet::VERB_ACK)
        && (verb != Packet::VERB_QOS_MEASUREMENT));
    if (isFrame || shouldRecord) {
        Mutex::Lock _l(_paths_m);
        if (isFrame) {
            ++(path->_packetsIn);
            _lastFrame=now;
        }
        if (shouldRecord) {
            path->ackStatsIn[packetId] = payloadLength;
            ++(path->_packetsReceivedSinceLastAck);
            path->qosStatsIn[packetId] = now;
            ++(path->_packetsReceivedSinceLastQoS);
            path->packetValiditySamples.push(true);
        }
    }
    /**
     * Learn new flows and pro-actively create entries for them in the bond so
     * that the next time we send a packet out that is part of a flow we know
     * which path to use.
     */
    if ((flowId != ZT_QOS_NO_FLOW)
        && (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_RR
            || _bondingPolicy == ZT_BONDING_POLICY_BALANCE_XOR
            || _bondingPolicy == ZT_BONDING_POLICY_BALANCE_AWARE)) {
        Mutex::Lock _l(_flows_m);
        SharedPtr<Flow> flow;
        if (!_flows.count(flowId)) {
            flow = createFlow(path, flowId, 0, now);
        } else {
            flow = _flows[flowId];
        }
        if (flow) {
            flow->recordIncomingBytes(payloadLength);
        }
    }
}
void Bond::receivedQoS(const SharedPtr<Path>& path, int64_t now, int count, uint64_t *rx_id, uint16_t *rx_ts)
{
    //char pathStr[128];path->address().toString(pathStr);fprintf(stderr, "receivedQoS() %s %s\n", getSlave(path)->ifname().c_str(), pathStr);
    Mutex::Lock _l(_paths_m);
    // Look up egress times and compute latency values for each record
    std::map<uint64_t,uint64_t>::iterator it;
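    // For each record, (now - egress time) is the locally observed round trip for that probe and
    // rx_ts[j] is how long the remote peer held the packet before reporting it, so subtracting the
    // holding time and halving yields a rough one-way latency sample for this path.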
    for (int j=0; j<count; j++) {
        it = path->qosStatsOut.find(rx_id[j]);
        if (it != path->qosStatsOut.end()) {
            path->latencySamples.push(((uint16_t)(now - it->second) - rx_ts[j]) / 2);
            path->qosStatsOut.erase(it);
        }
    }
    path->qosRecordSize.push(count);
    //char pathStr[128];path->address().toString(pathStr);fprintf(stderr, "receivedQoS() on path %s %s, count=%d, successful=%d, qosStatsOut.size()=%d\n", getSlave(path)->ifname().c_str(), pathStr, count, path->aknowledgedQoSRecordCountSinceLastCheck, path->qosStatsOut.size());
}
void Bond::receivedAck(const SharedPtr<Path>& path, int64_t now, int32_t ackedBytes)
{
    Mutex::Lock _l(_paths_m);
    //char pathStr[128];path->address().toString(pathStr);fprintf(stderr, "receivedAck() %s %s, (ackedBytes=%d, lastAckReceived=%lld, ackAge=%lld)\n", getSlave(path)->ifname().c_str(), pathStr, ackedBytes, path->lastAckReceived, path->ackAge(now));
    path->_lastAckReceived = now;
    path->_unackedBytes = (ackedBytes > path->_unackedBytes) ? 0 : path->_unackedBytes - ackedBytes;
    int64_t timeSinceThroughputEstimate = (now - path->_lastThroughputEstimation);
    if (timeSinceThroughputEstimate >= throughputMeasurementInterval) {
        // TODO: See if this floating point math can be reduced
        uint64_t throughput = (uint64_t)((float)(path->_bytesAckedSinceLastThroughputEstimation) / ((float)timeSinceThroughputEstimate / (float)1000));
        throughput /= 1000;
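        // Bytes acked divided by elapsed seconds gives bytes/sec; the extra divide by 1000 appears
        // to scale this down (roughly to kilobytes per second) before it is pushed as a sample.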
        if (throughput > 0.0) {
            path->throughputSamples.push(throughput);
            path->_throughputMax = throughput > path->_throughputMax ? throughput : path->_throughputMax;
        }
        path->_lastThroughputEstimation = now;
        path->_bytesAckedSinceLastThroughputEstimation = 0;
    } else {
        path->_bytesAckedSinceLastThroughputEstimation += ackedBytes;
    }
}
int32_t Bond::generateQoSPacket(const SharedPtr<Path>& path, int64_t now, char *qosBuffer)
{
    //char pathStr[128];path->address().toString(pathStr);fprintf(stderr, "generateQoSPacket() %s %s\n", getSlave(path)->ifname().c_str(), pathStr);
    int32_t len = 0;
    std::map<uint64_t,uint64_t>::iterator it = path->qosStatsIn.begin();
    int i=0;
    int numRecords = std::min(path->_packetsReceivedSinceLastQoS,ZT_QOS_TABLE_SIZE);
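    // Each record is serialized as an 8-byte packet ID followed by a 2-byte holding time in
    // milliseconds (how long ago the packet was received), so len grows by 10 bytes per record.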
    while (i<numRecords && it != path->qosStatsIn.end()) {
        uint64_t id = it->first;
        memcpy(qosBuffer, &id, sizeof(uint64_t));
        qosBuffer+=sizeof(uint64_t);
        uint16_t holdingTime = (uint16_t)(now - it->second);
        memcpy(qosBuffer, &holdingTime, sizeof(uint16_t));
        qosBuffer+=sizeof(uint16_t);
        len+=sizeof(uint64_t)+sizeof(uint16_t);
        path->qosStatsIn.erase(it++);
        ++i;
    }
    return len;
}
bool Bond::assignFlowToBondedPath(SharedPtr<Flow> &flow, int64_t now)
{
    //fprintf(stderr, "assignFlowToBondedPath\n");
    char curPathStr[128];
    unsigned int idx = ZT_MAX_PEER_NETWORK_PATHS;
    if (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_XOR) {
        idx = abs((int)(flow->id() % (_numBondedPaths)));
        flow->assignPath(_paths[_bondedIdx[idx]],now);
    }
    if (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_AWARE) {
        unsigned char entropy;
        Utils::getSecureRandom(&entropy, 1);
        if (_totalBondUnderload) {
            entropy %= _totalBondUnderload;
        }
        if (!_numBondedPaths) {
            fprintf(stderr, "no bonded paths for flow assignment\n");
            return false;
        }
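        // Weighted selection: each bonded path contributes a probability segment (its affinity when
        // the bond is underloaded, otherwise its allocation) on a 0..255 scale; entropy is reduced
        // by each segment in turn until it falls inside one, which selects that path.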
        for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
            if (_paths[i] && _paths[i]->bonded()) {
                SharedPtr<Slave> slave = RR->bc->getSlaveBySocket(_policyAlias, _paths[i]->localSocket());
                _paths[i]->address().toString(curPathStr);
                uint8_t probabilitySegment = (_totalBondUnderload > 0) ? _paths[i]->_affinity : _paths[i]->_allocation;
                //fprintf(stderr, "i=%2d, entropy=%3d, alloc=%3d, byteload=%4d, segment=%3d, _totalBondUnderload=%3d, ifname=%s, path=%20s\n", i, entropy, _paths[i]->allocation, _paths[i]->relativeByteLoad, probabilitySegment, _totalBondUnderload, slave->ifname().c_str(), curPathStr);
                if (entropy <= probabilitySegment) {
                    idx = i;
                    //fprintf(stderr, "\t is best path\n");
                    break;
                }
                entropy -= probabilitySegment;
            }
        }
        if (idx < ZT_MAX_PEER_NETWORK_PATHS) {
            flow->assignPath(_paths[idx],now);
            ++(_paths[idx]->_assignedFlowCount);
        }
        else {
            fprintf(stderr, "could not assign flow?\n"); exit(0); // TODO: Remove
            return false;
        }
    }
    flow->assignedPath()->address().toString(curPathStr);
    SharedPtr<Slave> slave = RR->bc->getSlaveBySocket(_policyAlias, flow->assignedPath()->localSocket());
    fprintf(stderr, "assigned (tx) flow %x with peer %llx to path %s on %s (idx=%d)\n", flow->id(), _peer->_id.address().toInt(), curPathStr, slave->ifname().c_str(), idx);
    return true;
}
SharedPtr<Flow> Bond::createFlow(const SharedPtr<Path> &path, int32_t flowId, unsigned char entropy, int64_t now)
{
    //fprintf(stderr, "createFlow\n");
    char curPathStr[128];
    // ---
    if (!_numBondedPaths) {
        fprintf(stderr, "there are no bonded paths, cannot assign flow\n");
        return SharedPtr<Flow>();
    }
    if (_flows.size() >= ZT_FLOW_MAX_COUNT) {
        fprintf(stderr, "max number of flows reached (%d), forcibly forgetting oldest flow\n", ZT_FLOW_MAX_COUNT);
        forgetFlowsWhenNecessary(0,true,now);
    }
    SharedPtr<Flow> flow = new Flow(flowId, now);
    _flows[flowId] = flow;
    fprintf(stderr, "new flow %x detected with peer %llx, %lu active flow(s)\n", flowId, _peer->_id.address().toInt(), (_flows.size()));
    /**
     * Add a flow with a given Path already provided. This is the case when a packet
     * is received on a path but no flow exists, in this case we simply assign the path
     * that the remote peer chose for us.
     */
    if (path) {
        flow->assignPath(path,now);
        path->address().toString(curPathStr);
        SharedPtr<Slave> slave = RR->bc->getSlaveBySocket(_policyAlias, flow->assignedPath()->localSocket());
        fprintf(stderr, "assigned (rx) flow %x with peer %llx to path %s on %s\n", flow->id(), _peer->_id.address().toInt(), curPathStr, slave->ifname().c_str());
    }
    /**
     * Add a flow when no path was provided. This means that it is an outgoing packet
     * and that it is up to the local peer to decide how to load-balance its transmission.
     */
    else if (!path) {
        assignFlowToBondedPath(flow, now);
    }
    return flow;
}
void Bond::forgetFlowsWhenNecessary(uint64_t age, bool oldest, int64_t now)
{
    //fprintf(stderr, "forgetFlowsWhenNecessary\n");
    std::map<int32_t,SharedPtr<Flow> >::iterator it = _flows.begin();
    std::map<int32_t,SharedPtr<Flow> >::iterator oldestFlow = _flows.end();
    SharedPtr<Flow> expiredFlow;
    if (age) { // Remove by specific age
        while (it != _flows.end()) {
            if (it->second->age(now) > age) {
                fprintf(stderr, "forgetting flow %x between this node and %llx, %lu active flow(s)\n", it->first, _peer->_id.address().toInt(), (_flows.size()-1));
                it = _flows.erase(it);
            } else {
                ++it;
            }
        }
    }
    else if (oldest) { // Remove single oldest by natural expiration
        uint64_t maxAge = 0;
        while (it != _flows.end()) {
            if (it->second->age(now) > maxAge) {
                maxAge = it->second->age(now);
                oldestFlow = it;
            }
            ++it;
        }
        if (oldestFlow != _flows.end()) {
            fprintf(stderr, "forgetting oldest flow %x (of age %llu) between this node and %llx, %lu active flow(s)\n", oldestFlow->first, oldestFlow->second->age(now), _peer->_id.address().toInt(), (_flows.size()-1));
            _flows.erase(oldestFlow);
        }
    }
    fprintf(stderr, "000\n");
}
void Bond::processIncomingPathNegotiationRequest(uint64_t now, SharedPtr<Path> &path, int16_t remoteUtility)
{
    //fprintf(stderr, "processIncomingPathNegotiationRequest\n");
    if (_abSlaveSelectMethod != ZT_MULTIPATH_RESELECTION_POLICY_OPTIMIZE) {
        return;
    }
    Mutex::Lock _l(_paths_m);
    char pathStr[128];
    path->address().toString(pathStr);
    if (!_lastPathNegotiationCheck) {
        return;
    }
    SharedPtr<Slave> slave = RR->bc->getSlaveBySocket(_policyAlias, path->localSocket());
    if (remoteUtility > _localUtility) {
        fprintf(stderr, "peer suggests path, its utility (%d) is greater than ours (%d), we will switch to %s on %s (ls=%llx)\n", remoteUtility, _localUtility, pathStr, slave->ifname().c_str(), path->localSocket());
        negotiatedPath = path;
    }
    if (remoteUtility < _localUtility) {
        fprintf(stderr, "peer suggests path, its utility (%d) is less than ours (%d), we will NOT switch to %s on %s (ls=%llx)\n", remoteUtility, _localUtility, pathStr, slave->ifname().c_str(), path->localSocket());
    }
    if (remoteUtility == _localUtility) {
        fprintf(stderr, "peer suggests path, but utility is equal, picking the choice made by the peer with the greater identity.\n");
        if (_peer->_id.address().toInt() > RR->node->identity().address().toInt()) {
            fprintf(stderr, "peer identity was greater, going with their choice of %s on %s (ls=%llx)\n", pathStr, slave->ifname().c_str(), path->localSocket());
            negotiatedPath = path;
        } else {
            fprintf(stderr, "our identity was greater, no change\n");
        }
    }
}
void Bond::pathNegotiationCheck(void *tPtr, const int64_t now)
{
    //fprintf(stderr, "pathNegotiationCheck\n");
    char pathStr[128];
    int maxInPathIdx = ZT_MAX_PEER_NETWORK_PATHS;
    int maxOutPathIdx = ZT_MAX_PEER_NETWORK_PATHS;
    uint64_t maxInCount = 0;
    uint64_t maxOutCount = 0;
    for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
        if (!_paths[i]) {
            continue;
        }
        if (_paths[i]->_packetsIn > maxInCount) {
            maxInCount = _paths[i]->_packetsIn;
            maxInPathIdx = i;
        }
        if (_paths[i]->_packetsOut > maxOutCount) {
            maxOutCount = _paths[i]->_packetsOut;
            maxOutPathIdx = i;
        }
        _paths[i]->resetPacketCounts();
    }
    bool _peerLinksSynchronized = ((maxInPathIdx != ZT_MAX_PEER_NETWORK_PATHS)
        && (maxOutPathIdx != ZT_MAX_PEER_NETWORK_PATHS)
        && (maxInPathIdx != maxOutPathIdx)) ? false : true;
    /**
     * Determine utility and attempt to petition remote peer to switch to our chosen path
     */
    if (!_peerLinksSynchronized) {
        _localUtility = _paths[maxOutPathIdx]->_failoverScore - _paths[maxInPathIdx]->_failoverScore;
        if (_paths[maxOutPathIdx]->_negotiated) {
            _localUtility -= ZT_MULTIPATH_FAILOVER_HANDICAP_NEGOTIATED;
        }
        if ((now - _lastSentPathNegotiationRequest) > ZT_PATH_NEGOTIATION_CUTOFF_TIME) {
            fprintf(stderr, "BT: (sync) it's been long enough, sending more requests.\n");
            _numSentPathNegotiationRequests = 0;
        }
        if (_numSentPathNegotiationRequests < ZT_PATH_NEGOTIATION_TRY_COUNT) {
            if (_localUtility >= 0) {
                fprintf(stderr, "BT: (sync) paths appear to be out of sync (utility=%d)\n", _localUtility);
                sendPATH_NEGOTIATION_REQUEST(tPtr, _paths[maxOutPathIdx]);
                ++_numSentPathNegotiationRequests;
                _lastSentPathNegotiationRequest = now;
                _paths[maxOutPathIdx]->address().toString(pathStr);
                SharedPtr<Slave> slave = RR->bc->getSlaveBySocket(_policyAlias, _paths[maxOutPathIdx]->localSocket());
                fprintf(stderr, "sending request to use %s on %s, ls=%llx, utility=%d\n", pathStr, slave->ifname().c_str(), _paths[maxOutPathIdx]->localSocket(), _localUtility);
            }
        }
        /**
         * Give up negotiating and consider switching
         */
        else if ((now - _lastSentPathNegotiationRequest) > (2 * ZT_PATH_NEGOTIATION_CHECK_INTERVAL)) {
            if (_localUtility == 0) {
                // There's no loss to us, just switch without sending another request
                fprintf(stderr, "BT: (sync) giving up, switching to remote peer's path.\n");
                negotiatedPath = _paths[maxInPathIdx];
            }
        }
    }
}
void Bond::sendPATH_NEGOTIATION_REQUEST(void *tPtr, const SharedPtr<Path> &path)
{
    //char pathStr[128];path->address().toString(pathStr);fprintf(stderr, "sendPATH_NEGOTIATION_REQUEST() %s %s\n", getSlave(path)->ifname().c_str(), pathStr);
    if (_abSlaveSelectMethod != ZT_MULTIPATH_RESELECTION_POLICY_OPTIMIZE) {
        return;
    }
    Packet outp(_peer->_id.address(),RR->identity.address(),Packet::VERB_PATH_NEGOTIATION_REQUEST);
    outp.append<int16_t>(_localUtility);
    if (path->address()) {
        outp.armor(_peer->key(),false);
        RR->node->putPacket(tPtr,path->localSocket(),path->address(),outp.data(),outp.size());
    }
}
void Bond::sendACK(void *tPtr,const SharedPtr<Path> &path,const int64_t localSocket,
    const InetAddress &atAddress,int64_t now)
{
    //char pathStr[128];path->address().toString(pathStr);fprintf(stderr, "sendACK() %s %s\n", getSlave(path)->ifname().c_str(), pathStr);
    Packet outp(_peer->_id.address(),RR->identity.address(),Packet::VERB_ACK);
    int32_t bytesToAck = 0;
    std::map<uint64_t,uint16_t>::iterator it = path->ackStatsIn.begin();
    while (it != path->ackStatsIn.end()) {
        bytesToAck += it->second;
        ++it;
    }
    outp.append<uint32_t>(bytesToAck);
    if (atAddress) {
        outp.armor(_peer->key(),false);
        RR->node->putPacket(tPtr,localSocket,atAddress,outp.data(),outp.size());
    } else {
        RR->sw->send(tPtr,outp,false);
    }
    path->ackStatsIn.clear();
    path->_packetsReceivedSinceLastAck = 0;
    path->_lastAckSent = now;
}
void Bond::sendQOS_MEASUREMENT(void *tPtr,const SharedPtr<Path> &path,const int64_t localSocket,
    const InetAddress &atAddress,int64_t now)
{
    //char pathStr[128];path->address().toString(pathStr);fprintf(stderr, "sendQOS() %s %s\n", getSlave(path)->ifname().c_str(), pathStr);
    const int64_t _now = RR->node->now();
    Packet outp(_peer->_id.address(),RR->identity.address(),Packet::VERB_QOS_MEASUREMENT);
    char qosData[ZT_QOS_MAX_PACKET_SIZE];
    int16_t len = generateQoSPacket(path, _now,qosData);
    outp.append(qosData,len);
    if (atAddress) {
        outp.armor(_peer->key(),false);
        RR->node->putPacket(tPtr,localSocket,atAddress,outp.data(),outp.size());
    } else {
        RR->sw->send(tPtr,outp,false);
    }
    // Account for the fact that a VERB_QOS_MEASUREMENT was just sent. Reset timers.
    path->_packetsReceivedSinceLastQoS = 0;
    path->_lastQoSMeasurement = now;
}
void Bond::processBackgroundTasks(void *tPtr, const int64_t now)
{
    Mutex::Lock _l(_paths_m);
    if (!_peer->_canUseMultipath || (now - _lastBackgroundTaskCheck) < ZT_BOND_BACKGROUND_TASK_MIN_INTERVAL) {
        return;
    }
    _lastBackgroundTaskCheck = now;
    // Compute dynamic path monitor timer interval
    if (_slaveMonitorStrategy == ZT_MULTIPATH_SLAVE_MONITOR_STRATEGY_DYNAMIC) {
        int suggestedMonitorInterval = (now - _lastFrame) / 100;
        _dynamicPathMonitorInterval = std::min(ZT_PATH_HEARTBEAT_PERIOD, ((suggestedMonitorInterval > _bondMonitorInterval) ? suggestedMonitorInterval : _bondMonitorInterval));
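        // Heuristic: the longer it has been since the last user frame, the less aggressively paths
        // are probed; the result is clamped between the configured bond monitor interval and
        // ZT_PATH_HEARTBEAT_PERIOD.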
        //fprintf(stderr, "_lastFrame=%llu, suggestedMonitorInterval=%d, _dynamicPathMonitorInterval=%d\n",
        //	(now-_lastFrame), suggestedMonitorInterval, _dynamicPathMonitorInterval);
    }
    if (_slaveMonitorStrategy == ZT_MULTIPATH_SLAVE_MONITOR_STRATEGY_DYNAMIC) {
        _shouldCollectPathStatistics = true;
    }
    // Memoize oft-used properties in the packet ingress/egress logic path
    if (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_AWARE) {
        // Required for real-time balancing
        _shouldCollectPathStatistics = true;
    }
    if (_bondingPolicy == ZT_BONDING_POLICY_ACTIVE_BACKUP) {
        if (_abSlaveSelectMethod == ZT_MULTIPATH_RESELECTION_POLICY_BETTER) {
            // Required for judging suitability of primary slave after recovery
            _shouldCollectPathStatistics = true;
        }
        if (_abSlaveSelectMethod == ZT_MULTIPATH_RESELECTION_POLICY_OPTIMIZE) {
            // Required for judging suitability of new candidate primary
            _shouldCollectPathStatistics = true;
        }
    }
    if ((now - _lastCheckUserPreferences) > 1000) {
        _lastCheckUserPreferences = now;
        applyUserPrefs();
    }
    curateBond(now,false);
    if ((now - _lastQualityEstimation) > _qualityEstimationInterval) {
        _lastQualityEstimation = now;
        estimatePathQuality(now);
    }
    dumpInfo(now);
    // Send QOS/ACK packets as needed
    if (_shouldCollectPathStatistics) {
        for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
            if (_paths[i] && _paths[i]->allowed()) {
                if (_paths[i]->needsToSendQoS(now,_qosSendInterval)) {
                    sendQOS_MEASUREMENT(tPtr, _paths[i], _paths[i]->localSocket(), _paths[i]->address(), now);
                }
                if (_paths[i]->needsToSendAck(now,_ackSendInterval)) {
                    sendACK(tPtr, _paths[i], _paths[i]->localSocket(), _paths[i]->address(), now);
                }
            }
        }
    }
    // Perform periodic background tasks unique to each bonding policy
    switch (_bondingPolicy)
    {
        case ZT_BONDING_POLICY_ACTIVE_BACKUP:
            processActiveBackupTasks(now);
            break;
        case ZT_BONDING_POLICY_BROADCAST:
            break;
        case ZT_BONDING_POLICY_BALANCE_RR:
        case ZT_BONDING_POLICY_BALANCE_XOR:
        case ZT_BONDING_POLICY_BALANCE_AWARE:
            processBalanceTasks(now);
            break;
        default:
            break;
    }
    // Check whether or not a path negotiation needs to be performed
    if (((now - _lastPathNegotiationCheck) > ZT_PATH_NEGOTIATION_CHECK_INTERVAL) && _allowPathNegotiation) {
        _lastPathNegotiationCheck = now;
        pathNegotiationCheck(tPtr, now);
    }
}
void Bond::applyUserPrefs()
{
    fprintf(stderr, "applyUserPrefs, _minReqPathMonitorInterval=%d\n", RR->bc->minReqPathMonitorInterval());
    for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
        if (!_paths[i]) {
            continue;
        }
        SharedPtr<Slave> sl = getSlave(_paths[i]);
        if (sl) {
            if (sl->monitorInterval() == 0) { // If no interval was specified for this slave, use more generic bond-wide interval
                sl->setMonitorInterval(_bondMonitorInterval);
            }
            RR->bc->setMinReqPathMonitorInterval((sl->monitorInterval() < RR->bc->minReqPathMonitorInterval()) ? sl->monitorInterval() : RR->bc->minReqPathMonitorInterval());
            bool bFoundCommonSlave = false;
            SharedPtr<Slave> commonSlave = RR->bc->getSlaveBySocket(_policyAlias, _paths[i]->localSocket());
            for(unsigned int j=0;j<ZT_MAX_PEER_NETWORK_PATHS;++j) {
                if (_paths[j] && _paths[j].ptr() != _paths[i].ptr()) {
                    if (RR->bc->getSlaveBySocket(_policyAlias, _paths[j]->localSocket()) == commonSlave) {
                        bFoundCommonSlave = true;
                    }
                }
            }
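            // Record whether any other known path currently uses the same slave; if not, this path
            // is the only path on that slave.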
            _paths[i]->_monitorInterval = sl->monitorInterval();
            _paths[i]->_upDelay = sl->upDelay() ? sl->upDelay() : _upDelay;
            _paths[i]->_downDelay = sl->downDelay() ? sl->downDelay() : _downDelay;
            _paths[i]->_ipvPref = sl->ipvPref();
            _paths[i]->_mode = sl->mode();
            _paths[i]->_enabled = sl->enabled();
            _paths[i]->_onlyPathOnSlave = !bFoundCommonSlave;
        }
    }
    if (_peer) {
        _peer->_shouldCollectPathStatistics = _shouldCollectPathStatistics;
        _peer->_bondingPolicy = _bondingPolicy;
    }
}
void Bond::curateBond(const int64_t now, bool rebuildBond)
{
    //fprintf(stderr, "%lu curateBond (rebuildBond=%d)\n", ((now - RR->bc->getBondStartTime())), rebuildBond);
    char pathStr[128];
    /**
     * Update path states
     */
    for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
        if (!_paths[i]) {
            continue;
        }
        bool currEligibility = _paths[i]->eligible(now,_ackSendInterval);
        if (currEligibility != _paths[i]->_lastEligibilityState) {
            _paths[i]->address().toString(pathStr);
            //fprintf(stderr, "\n\n%ld path eligibility (for %s, %s) has changed (from %d to %d)\n", (RR->node->now() - RR->bc->getBondStartTime()), getSlave(_paths[i])->ifname().c_str(), pathStr, _paths[i]->lastCheckedEligibility, _paths[i]->eligible(now,_ackSendInterval));
            if (currEligibility) {
                rebuildBond = true;
            }
            if (!currEligibility) {
                _paths[i]->adjustRefractoryPeriod(now, _defaultPathRefractoryPeriod, !currEligibility);
                if (_paths[i]->bonded()) {
                    //fprintf(stderr, "the path was bonded, reallocation of its flows will occur soon\n");
                    rebuildBond = true;
                    _paths[i]->_shouldReallocateFlows = _paths[i]->bonded();
                    _paths[i]->setBonded(false);
                } else {
                    //fprintf(stderr, "the path was not bonded, no consequences\n");
                }
            }
        }
        if (currEligibility) {
            _paths[i]->adjustRefractoryPeriod(now, _defaultPathRefractoryPeriod, false);
        }
        _paths[i]->_lastEligibilityState = currEligibility;
    }
    /**
     * Curate the set of paths that are part of the bond proper. Selects a single path
     * per logical slave according to eligibility and user-specified constraints.
     */
    if ((_bondingPolicy == ZT_BONDING_POLICY_BALANCE_RR)
        || (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_XOR)
        || (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_AWARE)) {
        if (!_numBondedPaths) {
            rebuildBond = true;
        }
        // TODO: Optimize
        if (rebuildBond) {
            int updatedBondedPathCount = 0;
            std::map<SharedPtr<Slave>,int> slaveMap;
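            // slaveMap keeps a single representative path index per physical slave; a later candidate
            // replaces the current choice only if it is preferred while the current one is not, or if
            // both have the same preference status and the candidate has a higher preference rank.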
            for (int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
                if (_paths[i] && _paths[i]->allowed() && (_paths[i]->eligible(now,_ackSendInterval) || !_numBondedPaths)) {
                    SharedPtr<Slave> slave = RR->bc->getSlaveBySocket(_policyAlias, _paths[i]->localSocket());
                    if (!slaveMap.count(slave)) {
                        slaveMap[slave] = i;
                    }
                    else {
                        bool overriden = false;
                        _paths[i]->address().toString(pathStr);
                        //fprintf(stderr, " slave representative path already exists! (%s %s)\n", getSlave(_paths[i])->ifname().c_str(), pathStr);
                        if (_paths[i]->preferred() && !_paths[slaveMap[slave]]->preferred()) {
                            // Override previous choice if preferred
                            //fprintf(stderr, "overriding since its preferred!\n");
                            if (_paths[slaveMap[slave]]->_assignedFlowCount) {
                                _paths[slaveMap[slave]]->_deprecated = true;
                            }
                            else {
                                _paths[slaveMap[slave]]->_deprecated = true;
                                _paths[slaveMap[slave]]->setBonded(false);
                            }
                            slaveMap[slave] = i;
                            overriden = true;
                        }
                        if ((_paths[i]->preferred() && _paths[slaveMap[slave]]->preferred())
                            || (!_paths[i]->preferred() && !_paths[slaveMap[slave]]->preferred())) {
                            if (_paths[i]->preferenceRank() > _paths[slaveMap[slave]]->preferenceRank()) {
                                // Override if higher preference
                                //fprintf(stderr, "overriding according to preference preferenceRank!\n");
                                if (_paths[slaveMap[slave]]->_assignedFlowCount) {
                                    _paths[slaveMap[slave]]->_deprecated = true;
                                }
                                else {
                                    _paths[slaveMap[slave]]->_deprecated = true;
                                    _paths[slaveMap[slave]]->setBonded(false);
                                }
                                slaveMap[slave] = i;
                            }
                        }
                    }
                }
            }
            std::map<SharedPtr<Slave>,int>::iterator it = slaveMap.begin();
            for (int i=0; i<ZT_MAX_PEER_NETWORK_PATHS; ++i) {
                if (!_paths[i]) {
                    continue;
                }
                _bondedIdx[i] = ZT_MAX_PEER_NETWORK_PATHS;
                if (it != slaveMap.end()) {
                    _bondedIdx[i] = it->second;
                    _paths[_bondedIdx[i]]->setBonded(true);
                    ++it;
                    ++updatedBondedPathCount;
                    _paths[_bondedIdx[i]]->address().toString(pathStr);
                    fprintf(stderr, "setting i=%d, _bondedIdx[%d]=%d to bonded (%s %s)\n", i, i, _bondedIdx[i], getSlave(_paths[_bondedIdx[i]])->ifname().c_str(), pathStr);
                }
            }
            _numBondedPaths = updatedBondedPathCount;
            if (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_RR) {
                // Cause a RR reset since the currently used index might no longer be valid
                _rrPacketsSentOnCurrSlave = _packetsPerSlave;
            }
        }
    }
}
void Bond::estimatePathQuality(const int64_t now)
{
    char pathStr[128];
    //---
    uint32_t totUserSpecifiedSlaveSpeed = 0;
    if (_numBondedPaths) { // Compute relative user-specified speeds of slaves
        for(unsigned int i=0;i<_numBondedPaths;++i) {
            SharedPtr<Slave> slave = RR->bc->getSlaveBySocket(_policyAlias, _paths[i]->localSocket());
            if (_paths[i] && _paths[i]->allowed()) {
                totUserSpecifiedSlaveSpeed += slave->speed();
            }
        }
        for(unsigned int i=0;i<_numBondedPaths;++i) {
            SharedPtr<Slave> slave = RR->bc->getSlaveBySocket(_policyAlias, _paths[i]->localSocket());
            if (_paths[i] && _paths[i]->allowed()) {
                slave->setRelativeSpeed(round( ((float)slave->speed() / (float)totUserSpecifiedSlaveSpeed) * 255));
            }
        }
    }
    float lat[ZT_MAX_PEER_NETWORK_PATHS];
    float pdv[ZT_MAX_PEER_NETWORK_PATHS];
    float plr[ZT_MAX_PEER_NETWORK_PATHS];
    float per[ZT_MAX_PEER_NETWORK_PATHS];
    float thr[ZT_MAX_PEER_NETWORK_PATHS];
    float thm[ZT_MAX_PEER_NETWORK_PATHS];
    float thv[ZT_MAX_PEER_NETWORK_PATHS];
    float maxLAT = 0;
    float maxPDV = 0;
    float maxPLR = 0;
    float maxPER = 0;
    float maxTHR = 0;
    float maxTHM = 0;
    float maxTHV = 0;
    float quality[ZT_MAX_PEER_NETWORK_PATHS];
    uint8_t alloc[ZT_MAX_PEER_NETWORK_PATHS];
    float totQuality = 0.0f;
    memset(&lat, 0, sizeof(lat));
    memset(&pdv, 0, sizeof(pdv));
    memset(&plr, 0, sizeof(plr));
    memset(&per, 0, sizeof(per));
    memset(&thr, 0, sizeof(thr));
    memset(&thm, 0, sizeof(thm));
    memset(&thv, 0, sizeof(thv));
    memset(&quality, 0, sizeof(quality));
    memset(&alloc, 0, sizeof(alloc));
    // Compute initial summary statistics
    for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
        if (!_paths[i] || !_paths[i]->allowed()) {
            continue;
        }
        // Compute/Smooth average of real-world observations
        _paths[i]->_latencyMean = _paths[i]->latencySamples.mean();
        _paths[i]->_latencyVariance = _paths[i]->latencySamples.stddev();
        _paths[i]->_packetErrorRatio = 1.0 - (_paths[i]->packetValiditySamples.count() ? _paths[i]->packetValiditySamples.mean() : 1.0);
        if (userHasSpecifiedSlaveSpeeds()) {
            // Use user-reported metrics
            SharedPtr<Slave> slave = RR->bc->getSlaveBySocket(_policyAlias, _paths[i]->localSocket());
            if (slave) {
                _paths[i]->_throughputMean = slave->speed();
                _paths[i]->_throughputVariance = 0;
            }
        }
        /*
        else {
            // Use estimated metrics
            if (_paths[i]->throughputSamples.count()) {
                // If we have samples, use them
                _paths[i]->throughputMean = (uint64_t)_paths[i]->throughputSamples.mean();
                if (_paths[i]->throughputMean > 0) {
                    _paths[i]->throughputVarianceSamples.push((float)_paths[i]->throughputSamples.stddev() / (float)_paths[i]->throughputMean);
                    _paths[i]->throughputVariance = _paths[i]->throughputVarianceSamples.mean();
                }
            }
            else {
                // No samples have been collected yet, assume best case scenario
                _paths[i]->throughputMean = ZT_QOS_THR_NORM_MAX;
                _paths[i]->throughputVariance = 0;
            }
        }
        */
        // Drain unacknowledged QoS records
        std::map<uint64_t,uint64_t>::iterator it = _paths[i]->qosStatsOut.begin();
        uint64_t currentLostRecords = 0;
        while (it != _paths[i]->qosStatsOut.end()) {
            int qosRecordTimeout = 5000; //_paths[i]->monitorInterval() * ZT_MULTIPATH_QOS_ACK_INTERVAL_MULTIPLIER * 8;
            if ((now - it->second) >= qosRecordTimeout) {
                //fprintf(stderr, "packetId=%llx was lost\n", it->first);
                it = _paths[i]->qosStatsOut.erase(it);
                ++currentLostRecords;
            } else { ++it; }
        }
        quality[i]=0;
        totQuality=0;
        // Normalize raw observations according to sane limits and/or user specified values
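        // Each raw metric is first normalized into [0,1] against its configured "max acceptable"
        // value and then mapped through 1/e^(4x), so a perfect measurement scores ~1.0 while a
        // worst-case measurement decays toward ~0.018 before being weighted below.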
        lat[i] = 1.0 / expf(4*Utils::normalize(_paths[i]->_latencyMean, 0, _maxAcceptableLatency, 0, 1));
        pdv[i] = 1.0 / expf(4*Utils::normalize(_paths[i]->_latencyVariance, 0, _maxAcceptablePacketDelayVariance, 0, 1));
        plr[i] = 1.0 / expf(4*Utils::normalize(_paths[i]->_packetLossRatio, 0, _maxAcceptablePacketLossRatio, 0, 1));
        per[i] = 1.0 / expf(4*Utils::normalize(_paths[i]->_packetErrorRatio, 0, _maxAcceptablePacketErrorRatio, 0, 1));
        //thr[i] = 1.0; //Utils::normalize(_paths[i]->throughputMean, 0, ZT_QOS_THR_NORM_MAX, 0, 1);
        //thm[i] = 1.0; //Utils::normalize(_paths[i]->throughputMax, 0, ZT_QOS_THM_NORM_MAX, 0, 1);
        //thv[i] = 1.0; //1.0 / expf(4*Utils::normalize(_paths[i]->throughputVariance, 0, ZT_QOS_THV_NORM_MAX, 0, 1));
        //scp[i] = _paths[i]->ipvPref != 0 ? 1.0 : Utils::normalize(_paths[i]->ipScope(), InetAddress::IP_SCOPE_NONE, InetAddress::IP_SCOPE_PRIVATE, 0, 1);
        // Record bond-wide maximums to determine relative values
        maxLAT = lat[i] > maxLAT ? lat[i] : maxLAT;
        maxPDV = pdv[i] > maxPDV ? pdv[i] : maxPDV;
        maxPLR = plr[i] > maxPLR ? plr[i] : maxPLR;
        maxPER = per[i] > maxPER ? per[i] : maxPER;
        //maxTHR = thr[i] > maxTHR ? thr[i] : maxTHR;
        //maxTHM = thm[i] > maxTHM ? thm[i] : maxTHM;
        //maxTHV = thv[i] > maxTHV ? thv[i] : maxTHV;
        //fprintf(stdout, "EH %d: lat=%8.3f, ltm=%8.3f, pdv=%8.3f, plr=%5.3f, per=%5.3f, thr=%8f, thm=%5.3f, thv=%5.3f, avl=%5.3f, age=%8.2f, scp=%4d, q=%5.3f, qtot=%5.3f, ac=%d if=%s, path=%s\n",
        //	i, lat[i], ltm[i], pdv[i], plr[i], per[i], thr[i], thm[i], thv[i], avl[i], age[i], scp[i], quality[i], totQuality, alloc[i], getSlave(_paths[i])->ifname().c_str(), pathStr);
    }
    // Convert metrics to relative quantities and apply contribution weights
    for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
        if (_paths[i] && _paths[i]->bonded()) {
            quality[i] += ((maxLAT > 0.0f ? lat[i] / maxLAT : 0.0f) * _qualityWeights[ZT_QOS_LAT_IDX]);
            quality[i] += ((maxPDV > 0.0f ? pdv[i] / maxPDV : 0.0f) * _qualityWeights[ZT_QOS_PDV_IDX]);
            quality[i] += ((maxPLR > 0.0f ? plr[i] / maxPLR : 0.0f) * _qualityWeights[ZT_QOS_PLR_IDX]);
            quality[i] += ((maxPER > 0.0f ? per[i] / maxPER : 0.0f) * _qualityWeights[ZT_QOS_PER_IDX]);
            //quality[i] += ((maxTHR > 0.0f ? thr[i] / maxTHR : 0.0f) * _qualityWeights[ZT_QOS_THR_IDX]);
            //quality[i] += ((maxTHM > 0.0f ? thm[i] / maxTHM : 0.0f) * _qualityWeights[ZT_QOS_THM_IDX]);
            //quality[i] += ((maxTHV > 0.0f ? thv[i] / maxTHV : 0.0f) * _qualityWeights[ZT_QOS_THV_IDX]);
            //quality[i] += (scp[i] * _qualityWeights[ZT_QOS_SCP_IDX]);
            totQuality += quality[i];
        }
    }
    //
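    // Convert each path's composite quality into an allocation on a 0..255 scale, i.e. its share
    // of the bond's traffic in proportion to total measured quality.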
    for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
        if (_paths[i] && _paths[i]->bonded()) {
            alloc[i] = std::ceil((quality[i] / totQuality) * (float)255);
            _paths[i]->_allocation = alloc[i];
        }
    }
    /*
    if ((now - _lastLogTS) > 500) {
        if (!relevant()) {return;}
        //fprintf(stderr, "\n");
        _lastPrintTS = now;
        _lastLogTS = now;
        int numPlottablePaths=0;
        for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
            if (_paths[i]) {
                ++numPlottablePaths;
                _paths[i]->address().toString(pathStr);
                //fprintf(stderr, "%lu FIN [%d/%d]: pmi=%5d, lat=%4.3f, ltm=%4.3f, pdv=%4.3f, plr=%4.3f, per=%4.3f, thr=%4.3f, thm=%4.3f, thv=%4.3f, age=%4.3f, scp=%4d, q=%4.3f, qtot=%4.3f, ac=%4d, asf=%3d, if=%s, path=%20s, bond=%d, qosout=%d, plrraw=%d\n",
                //	((now - RR->bc->getBondStartTime())), i, _numBondedPaths, _paths[i]->monitorInterval,
                //	lat[i], ltm[i], pdv[i], plr[i], per[i], thr[i], thm[i], thv[i], age[i], scp[i],
                //	quality[i], totQuality, alloc[i], _paths[i]->assignedFlowCount, getSlave(_paths[i])->ifname().c_str(), pathStr, _paths[i]->bonded(), _paths[i]->qosStatsOut.size(), _paths[i]->packetLossRatio);
            }
        }
        if (numPlottablePaths < 2) {
            return;
        }
        if (!_header) {
            fprintf(stdout, "now, bonded, relativeUnderload, flows, ");
            for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
                if (_paths[i]) {
                    _paths[i]->address().toString(pathStr);
                    std::string label = std::string((pathStr)) + " " + getSlave(_paths[i])->ifname();
                    for (int i=0; i<19; ++i) {
                        fprintf(stdout, "%s, ", label.c_str());
                    }
                }
            }
            _header=true;
        }
        fprintf(stdout, "%ld, %d, %d, %d, ",((now - RR->bc->getBondStartTime())),_numBondedPaths,_totalBondUnderload, _flows.size());
        for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
            if (_paths[i]) {
                _paths[i]->address().toString(pathStr);
                fprintf(stdout, "%s, %s, %8.3f, %8.3f, %8.3f, %5.3f, %5.3f, %5.3f, %8f, %5.3f, %5.3f, %d, %5.3f, %d, %d, %d, %d, %d, %d, ",
                    getSlave(_paths[i])->ifname().c_str(), pathStr, _paths[i]->latencyMean, lat[i],pdv[i], _paths[i]->packetLossRatio, plr[i],per[i],thr[i],thm[i],thv[i],(now - _paths[i]->lastIn()),quality[i],alloc[i],
                    _paths[i]->relativeByteLoad, _paths[i]->assignedFlowCount, _paths[i]->alive(now, true), _paths[i]->eligible(now,_ackSendInterval), _paths[i]->qosStatsOut.size());
            }
        }
        fprintf(stdout, "\n");
    }
    */
}
void Bond::processBalanceTasks(const int64_t now)
{
    //fprintf(stderr, "processBalanceTasks\n");
    char curPathStr[128];
    if (_allowFlowHashing) {
        /**
         * Clean up and reset flows if necessary
         */
        if ((now - _lastFlowExpirationCheck) > ZT_MULTIPATH_FLOW_CHECK_INTERVAL) {
            Mutex::Lock _l(_flows_m);
            forgetFlowsWhenNecessary(ZT_MULTIPATH_FLOW_EXPIRATION_INTERVAL,false,now);
            _lastFlowExpirationCheck = now;
        }
        if ((now - _lastFlowStatReset) > ZT_FLOW_STATS_RESET_INTERVAL) {
            Mutex::Lock _l(_flows_m);
            _lastFlowStatReset = now;
            std::map<int32_t,SharedPtr<Flow> >::iterator it = _flows.begin();
            while (it != _flows.end()) {
                it->second->resetByteCounts();
                ++it;
            }
        }
        /**
         * Re-allocate flows from dead paths
         */
        if (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_XOR || _bondingPolicy == ZT_BONDING_POLICY_BALANCE_AWARE) {
            Mutex::Lock _l(_flows_m);
            for (int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
                if (!_paths[i]) {
                    continue;
                }
                if (!_paths[i]->eligible(now,_ackSendInterval) && _paths[i]->_shouldReallocateFlows) {
                    _paths[i]->address().toString(curPathStr);
                    fprintf(stderr, "%d reallocating flows from dead path %s on %s\n", (RR->node->now() - RR->bc->getBondStartTime()), curPathStr, getSlave(_paths[i])->ifname().c_str());
                    std::map<int32_t,SharedPtr<Flow> >::iterator flow_it = _flows.begin();
                    while (flow_it != _flows.end()) {
                        if (flow_it->second->assignedPath() == _paths[i]) {
                            if(assignFlowToBondedPath(flow_it->second, now)) {
                                _paths[i]->_assignedFlowCount--;
                            }
                        }
                        ++flow_it;
                    }
                    _paths[i]->_shouldReallocateFlows = false;
                }
            }
        }
    }
    /**
     * Tasks specific to (Balance Round Robin)
     */
    if (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_RR) {
        if (_allowFlowHashing) {
            // TODO: Should ideally failover from (idx) to a random slave, this is so that (idx+1) isn't overloaded
        }
        else if (!_allowFlowHashing) {
            // Nothing
        }
    }
    /**
     * Tasks specific to (Balance XOR)
     */
    if (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_XOR) {
        // Nothing specific for XOR
    }
    /**
     * Tasks specific to (Balance Aware)
     */
    if ((_bondingPolicy == ZT_BONDING_POLICY_BALANCE_AWARE)) {
        if (_allowFlowHashing) {
            Mutex::Lock _l(_flows_m);
            /**
             * Re-balance flows in proportion to slave capacity (or when eligibility changes)
             */
            if ((now - _lastFlowRebalance) > ZT_FLOW_REBALANCE_INTERVAL) {
                /**
                 * Determine "load" for bonded paths
                 */
                uint64_t totalBytes = 0;
                for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) { // first pass: compute absolute byte load and total
                    if (_paths[i] && _paths[i]->bonded()) {
                        _paths[i]->_byteLoad = 0;
                        std::map<int32_t,SharedPtr<Flow> >::iterator flow_it = _flows.begin();
                        while (flow_it != _flows.end()) {
                            if (flow_it->second->assignedPath() == _paths[i]) {
                                _paths[i]->_byteLoad += flow_it->second->totalBytes();
                            }
                            ++flow_it;
                        }
                        totalBytes += _paths[i]->_byteLoad;
                    }
                }
                /**
                 * Determine "affinity" for bonded path
                 */
                //fprintf(stderr, "\n\n");
                _totalBondUnderload = 0;
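                // A path's "underload" is how far its observed relative byte load falls below its
                // target allocation; the sum across paths (_totalBondUnderload) is consulted later
                // during flow assignment.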
                for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) { // second pass: compute relative byte loads and total imbalance
                    if (_paths[i] && _paths[i]->bonded()) {
                        if (totalBytes) {
                            uint8_t relativeByteLoad = std::ceil(((float)_paths[i]->_byteLoad / (float)totalBytes) * (float)255);
                            //fprintf(stderr, "lastComputedAllocation = %d\n", _paths[i]->allocation);
                            //fprintf(stderr, "  relativeByteLoad = %d\n", relativeByteLoad);
                            _paths[i]->_relativeByteLoad = relativeByteLoad;
                            uint8_t relativeUnderload = std::max(0, (int)_paths[i]->_allocation - (int)relativeByteLoad);
                            //fprintf(stderr, "  relativeUnderload = %d\n", relativeUnderload);
                            _totalBondUnderload += relativeUnderload;
                            //fprintf(stderr, "  _totalBondUnderload = %d\n\n", _totalBondUnderload);
                            //_paths[i]->affinity = (relativeUnderload > 0 ? relativeUnderload : _paths[i]->_allocation);
                        }
                        else { // set everything to base values
                            _totalBondUnderload = 0;
                            //_paths[i]->affinity = 0;
                        }
                    }
                }
                //fprintf(stderr, "_totalBondUnderload=%d (end)\n\n", _totalBondUnderload);
                /**
                 *
                 */
                //fprintf(stderr, "_lastFlowRebalance\n");
                std::map<int32_t, SharedPtr<Flow> >::iterator it = _flows.begin();
                while (it != _flows.end()) {
                    int32_t flowId = it->first;
                    SharedPtr<Flow> flow = it->second;
                    if ((now - flow->_lastPathReassignment) > ZT_FLOW_MIN_REBALANCE_INTERVAL) {
                        //fprintf(stdout, "  could move : %x\n", flowId);
                    }
                    ++it;
                }
                _lastFlowRebalance = now;
            }
        }
        else if (!_allowFlowHashing) {
            // Nothing
        }
    }
}
  1119. void Bond::dequeueNextActiveBackupPath(const uint64_t now)
  1120. {
  1121. //fprintf(stderr, "dequeueNextActiveBackupPath\n");
  1122. if (_abFailoverQueue.empty()) {
  1123. return;
  1124. }
  1125. _abPath = _abFailoverQueue.front();
  1126. _abFailoverQueue.pop_front();
  1127. _lastActiveBackupPathChange = now;
  1128. for (int i=0; i<ZT_MAX_PEER_NETWORK_PATHS; ++i) {
  1129. if (_paths[i]) {
  1130. _paths[i]->resetPacketCounts();
  1131. }
  1132. }
  1133. }
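/**
 * Illustrative sketch only -- a hypothetical, self-contained model of the failover queue
 * discipline used by the active-backup logic: drop entries that are no longer eligible,
 * re-sort so the best candidate sits at the front, and on failure promote that front entry
 * (the role dequeueNextActiveBackupPath() plays above). The entry and comparator types are
 * placeholders standing in for SharedPtr<Path> and PathQualityComparator.
 */
struct ExampleQueueEntry { int failoverScore; bool eligible; };
struct ExampleQueueComparator {
	inline bool operator()(const ExampleQueueEntry &a, const ExampleQueueEntry &b) const {
		return a.failoverScore > b.failoverScore; // higher score sorts toward the front
	}
};
static inline bool examplePromoteNextPath(std::list<ExampleQueueEntry> &failoverQueue, ExampleQueueEntry &active)
{
	// Remove ineligible entries, mirroring the maintenance pass in processActiveBackupTasks()
	for (std::list<ExampleQueueEntry>::iterator it(failoverQueue.begin()); it != failoverQueue.end();) {
		if (!it->eligible) {
			it = failoverQueue.erase(it);
		} else {
			++it;
		}
	}
	failoverQueue.sort(ExampleQueueComparator());
	if (failoverQueue.empty()) {
		return false; // no candidates: the bond is no longer fault-tolerant
	}
	active = failoverQueue.front();
	failoverQueue.pop_front();
	return true;
}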
  1134. void Bond::processActiveBackupTasks(const int64_t now)
  1135. {
  1136. //fprintf(stderr, "%llu processActiveBackupTasks\n", (now - RR->bc->getBondStartTime()));
  1137. char pathStr[128]; char prevPathStr[128]; char curPathStr[128];
  1138. SharedPtr<Path> prevActiveBackupPath = _abPath;
  1139. SharedPtr<Path> nonPreferredPath;
  1140. bool bFoundPrimarySlave = false;
  1141. /**
  1142. * Select initial "active" active-backup slave
  1143. */
  1144. if (!_abPath) {
  1145. fprintf(stderr, "%llu no active backup path yet...\n", ((now - RR->bc->getBondStartTime())));
  1146. /**
  1147. * [Automatic mode]
1148. * The user has not explicitly specified slaves or their failover schedule,
1149. * so the bonding policy selects the first eligible path and sets it as
1150. * the active backup path. If a substantially better path is detected, the bonding
1151. * policy will assign it as the new active backup path. If the path fails, it will
  1152. * simply find the next eligible path.
  1153. */
  1154. if (!userHasSpecifiedSlaves()) {
  1155. fprintf(stderr, "%llu AB: (auto) user did not specify any slaves. waiting until we know more\n", ((now - RR->bc->getBondStartTime())));
  1156. for (int i=0; i<ZT_MAX_PEER_NETWORK_PATHS; ++i) {
  1157. if (_paths[i] && _paths[i]->eligible(now,_ackSendInterval)) {
  1158. _paths[i]->address().toString(curPathStr);
  1159. SharedPtr<Slave> slave =RR->bc->getSlaveBySocket(_policyAlias, _paths[i]->localSocket());
  1160. if (slave) {
  1161. fprintf(stderr, "%llu AB: (initial) [%d] found eligible path %s on: %s\n", ((now - RR->bc->getBondStartTime())), i, curPathStr, slave->ifname().c_str());
  1162. }
  1163. _abPath = _paths[i];
  1164. break;
  1165. }
  1166. }
  1167. }
  1168. /**
  1169. * [Manual mode]
  1170. * The user has specified slaves or failover rules that the bonding policy should adhere to.
  1171. */
  1172. else if (userHasSpecifiedSlaves()) {
  1173. fprintf(stderr, "%llu AB: (manual) no active backup slave, checking local.conf\n", ((now - RR->bc->getBondStartTime())));
  1174. if (userHasSpecifiedPrimarySlave()) {
  1175. fprintf(stderr, "%llu AB: (manual) user has specified primary slave, looking for it.\n", ((now - RR->bc->getBondStartTime())));
  1176. for (int i=0; i<ZT_MAX_PEER_NETWORK_PATHS; ++i) {
  1177. if (!_paths[i]) {
  1178. continue;
  1179. }
1180. SharedPtr<Slave> slave = RR->bc->getSlaveBySocket(_policyAlias, _paths[i]->localSocket());
1181. if (slave && _paths[i]->eligible(now,_ackSendInterval) && slave->primary()) {
  1182. if (!_paths[i]->preferred()) {
  1183. _paths[i]->address().toString(curPathStr);
  1184. fprintf(stderr, "%llu AB: (initial) [%d] found path on primary slave, taking note in case we don't find a preferred path\n", ((now - RR->bc->getBondStartTime())), i);
  1185. nonPreferredPath = _paths[i];
  1186. bFoundPrimarySlave = true;
  1187. }
  1188. if (_paths[i]->preferred()) {
  1189. _abPath = _paths[i];
  1190. _abPath->address().toString(curPathStr);
  1191. SharedPtr<Slave> slave =RR->bc->getSlaveBySocket(_policyAlias, _paths[i]->localSocket());
  1192. if (slave) {
  1193. fprintf(stderr, "%llu AB: (initial) [%d] found preferred path %s on primary slave: %s\n", ((now - RR->bc->getBondStartTime())), i, curPathStr, slave->ifname().c_str());
  1194. }
  1195. bFoundPrimarySlave = true;
  1196. break;
  1197. }
  1198. }
  1199. }
  1200. if (_abPath) {
  1201. _abPath->address().toString(curPathStr);
  1202. SharedPtr<Slave> slave =RR->bc->getSlaveBySocket(_policyAlias, _abPath->localSocket());
  1203. if (slave) {
  1204. fprintf(stderr, "%llu AB: (initial) found preferred primary path: %s on %s\n", ((now - RR->bc->getBondStartTime())), curPathStr, slave->ifname().c_str());
  1205. }
  1206. }
  1207. else {
  1208. if (bFoundPrimarySlave && nonPreferredPath) {
  1209. fprintf(stderr, "%llu AB: (initial) found a non-preferred primary path\n", ((now - RR->bc->getBondStartTime())));
  1210. _abPath = nonPreferredPath;
  1211. }
  1212. }
  1213. if (!_abPath) {
  1214. fprintf(stderr, "%llu AB: (initial) designated primary slave is not yet ready\n", ((now - RR->bc->getBondStartTime())));
  1215. // TODO: Should fail-over to specified backup or just wait?
  1216. }
  1217. }
  1218. else if (!userHasSpecifiedPrimarySlave()) {
  1219. int _abIdx = ZT_MAX_PEER_NETWORK_PATHS;
  1220. fprintf(stderr, "%llu AB: (initial) user did not specify primary slave, just picking something\n", ((now - RR->bc->getBondStartTime())));
  1221. for (int i=0; i<ZT_MAX_PEER_NETWORK_PATHS; ++i) {
  1222. if (_paths[i] && _paths[i]->eligible(now,_ackSendInterval)) {
  1223. _abIdx = i;
  1224. break;
  1225. }
  1226. }
  1227. if (_abIdx == ZT_MAX_PEER_NETWORK_PATHS) {
  1228. fprintf(stderr, "%llu AB: (initial) unable to find a candidate next-best, no change\n", ((now - RR->bc->getBondStartTime())));
  1229. }
  1230. else {
1231. _abPath = _paths[_abIdx]; _abPath->address().toString(curPathStr);
1232. SharedPtr<Slave> slave = RR->bc->getSlaveBySocket(_policyAlias, _abPath->localSocket());
1233. if (slave) {
1234. fprintf(stderr, "%llu AB: (initial) selected non-primary slave idx=%d, %s on %s\n", ((now - RR->bc->getBondStartTime())), _abIdx, curPathStr, slave->ifname().c_str());
  1235. }
  1236. }
  1237. }
  1238. }
  1239. }
  1240. /**
  1241. * Update and maintain the active-backup failover queue
  1242. */
  1243. if (_abPath) {
  1244. // Don't worry about the failover queue until we have an active slave
  1245. // Remove ineligible paths from the failover slave queue
  1246. for (std::list<SharedPtr<Path> >::iterator it(_abFailoverQueue.begin()); it!=_abFailoverQueue.end();) {
  1247. if ((*it) && !(*it)->eligible(now,_ackSendInterval)) {
  1248. (*it)->address().toString(curPathStr);
  1249. SharedPtr<Slave> slave =RR->bc->getSlaveBySocket(_policyAlias, (*it)->localSocket());
  1250. if (slave) {
  1251. fprintf(stderr, "%llu AB: (fq) %s on %s is now ineligible, removing from failover queue\n", ((now - RR->bc->getBondStartTime())), curPathStr, slave->ifname().c_str());
  1252. }
  1253. it = _abFailoverQueue.erase(it);
  1254. } else {
  1255. ++it;
  1256. }
  1257. }
  1258. /**
1259. * Failover instructions were provided by the user; build the queue according to those as well as IPv
  1260. * preference, disregarding performance.
  1261. */
  1262. if (userHasSpecifiedFailoverInstructions()) {
  1263. /**
  1264. * Clear failover scores
  1265. */
  1266. for (int i=0; i<ZT_MAX_PEER_NETWORK_PATHS; ++i) {
  1267. if (_paths[i]) {
  1268. _paths[i]->_failoverScore = 0;
  1269. }
  1270. }
  1271. //fprintf(stderr, "AB: (fq) user has specified specific failover instructions, will follow them.\n");
  1272. for (int i=0; i<ZT_MAX_PEER_NETWORK_PATHS; ++i) {
  1273. if (!_paths[i] || !_paths[i]->allowed() || !_paths[i]->eligible(now,_ackSendInterval)) {
  1274. continue;
  1275. }
1276. SharedPtr<Slave> slave = RR->bc->getSlaveBySocket(_policyAlias, _paths[i]->localSocket()); if (!slave) { continue; } // defensively skip paths with no matching slave
  1277. _paths[i]->address().toString(pathStr);
  1278. int failoverScoreHandicap = _paths[i]->_failoverScore;
  1279. if (_paths[i]->preferred())
  1280. {
  1281. failoverScoreHandicap += ZT_MULTIPATH_FAILOVER_HANDICAP_PREFERRED;
  1282. //fprintf(stderr, "%s on %s ----> %d for preferred\n", pathStr, _paths[i]->ifname().c_str(), failoverScoreHandicap);
  1283. }
  1284. if (slave->primary()) {
1285. // Boost the failover score of paths on the user-designated primary slave
  1286. failoverScoreHandicap += ZT_MULTIPATH_FAILOVER_HANDICAP_PRIMARY;
  1287. //fprintf(stderr, "%s on %s ----> %d for primary\n", pathStr, _paths[i]->ifname().c_str(), failoverScoreHandicap);
  1288. }
  1289. if (!_paths[i]->_failoverScore) {
1290. // If we didn't inherit a failover score from a "parent" that wants to use this path as a failover, fall back to the handicap or the path's allocation
  1291. int newHandicap = failoverScoreHandicap ? failoverScoreHandicap : _paths[i]->_allocation;
  1292. _paths[i]->_failoverScore = newHandicap;
  1293. //fprintf(stderr, "%s on %s ----> %d for allocation\n", pathStr, _paths[i]->ifname().c_str(), newHandicap);
  1294. }
  1295. SharedPtr<Slave> failoverSlave;
  1296. if (slave->failoverToSlave().length()) {
  1297. failoverSlave = RR->bc->getSlaveByName(_policyAlias, slave->failoverToSlave());
  1298. }
  1299. if (failoverSlave) {
  1300. for (int j=0; j<ZT_MAX_PEER_NETWORK_PATHS; j++) {
  1301. if (_paths[j] && getSlave(_paths[j]) == failoverSlave.ptr()) {
  1302. _paths[j]->address().toString(pathStr);
  1303. int inheritedHandicap = failoverScoreHandicap - 10;
  1304. int newHandicap = _paths[j]->_failoverScore > inheritedHandicap ? _paths[j]->_failoverScore : inheritedHandicap;
  1305. //fprintf(stderr, "\thanding down %s on %s ----> %d\n", pathStr, getSlave(_paths[j])->ifname().c_str(), newHandicap);
  1306. if (!_paths[j]->preferred()) {
  1307. newHandicap--;
  1308. }
  1309. _paths[j]->_failoverScore = newHandicap;
  1310. }
  1311. }
  1312. }
  1313. if (_paths[i].ptr() != _abPath.ptr()) {
  1314. bool bFoundPathInQueue = false;
  1315. for (std::list<SharedPtr<Path> >::iterator it(_abFailoverQueue.begin()); it!=_abFailoverQueue.end();++it) {
  1316. if (_paths[i].ptr() == (*it).ptr()) {
  1317. bFoundPathInQueue = true;
  1318. }
  1319. }
  1320. if (!bFoundPathInQueue) {
  1321. _paths[i]->address().toString(curPathStr);
  1322. fprintf(stderr, "%llu AB: (fq) [%d] added %s on %s to queue\n", ((now - RR->bc->getBondStartTime())), i, curPathStr, getSlave(_paths[i])->ifname().c_str());
  1323. _abFailoverQueue.push_front(_paths[i]);
  1324. }
  1325. }
  1326. }
  1327. }
  1328. /**
  1329. * No failover instructions provided by user, build queue according to performance
  1330. * and IPv preference.
  1331. */
  1332. else if (!userHasSpecifiedFailoverInstructions()) {
  1333. for (int i=0; i<ZT_MAX_PEER_NETWORK_PATHS; ++i) {
  1334. if (!_paths[i]
  1335. || !_paths[i]->allowed()
  1336. || !_paths[i]->eligible(now,_ackSendInterval)) {
  1337. continue;
  1338. }
  1339. int failoverScoreHandicap = 0;
  1340. if (_paths[i]->preferred()) {
  1341. failoverScoreHandicap = ZT_MULTIPATH_FAILOVER_HANDICAP_PREFERRED;
  1342. }
  1343. bool includeRefractoryPeriod = true;
  1344. if (!_paths[i]->eligible(now,includeRefractoryPeriod)) {
  1345. failoverScoreHandicap = -10000;
  1346. }
1347. if (getSlave(_paths[i]) && getSlave(_paths[i])->primary() && _abSlaveSelectMethod != ZT_MULTIPATH_RESELECTION_POLICY_OPTIMIZE) {
  1348. // If using "optimize" primary reselect mode, ignore user slave designations
  1349. failoverScoreHandicap = ZT_MULTIPATH_FAILOVER_HANDICAP_PRIMARY;
  1350. }
  1351. if (_paths[i].ptr() == negotiatedPath.ptr()) {
  1352. _paths[i]->_negotiated = true;
  1353. failoverScoreHandicap = ZT_MULTIPATH_FAILOVER_HANDICAP_NEGOTIATED;
  1354. } else {
  1355. _paths[i]->_negotiated = false;
  1356. }
  1357. _paths[i]->_failoverScore = _paths[i]->_allocation + failoverScoreHandicap;
  1358. if (_paths[i].ptr() != _abPath.ptr()) {
  1359. bool bFoundPathInQueue = false;
  1360. for (std::list<SharedPtr<Path> >::iterator it(_abFailoverQueue.begin()); it!=_abFailoverQueue.end();++it) {
  1361. if (_paths[i].ptr() == (*it).ptr()) {
  1362. bFoundPathInQueue = true;
  1363. }
  1364. }
  1365. if (!bFoundPathInQueue) {
  1366. _paths[i]->address().toString(curPathStr);
  1367. fprintf(stderr, "%llu AB: (fq) [%d] added %s on %s to queue\n", ((now - RR->bc->getBondStartTime())), i, curPathStr, getSlave(_paths[i])->ifname().c_str());
  1368. _abFailoverQueue.push_front(_paths[i]);
  1369. }
  1370. }
  1371. }
  1372. }
  1373. _abFailoverQueue.sort(PathQualityComparator());
  1374. if (_abFailoverQueue.empty()) {
  1375. fprintf(stderr, "%llu AB: (fq) the failover queue is empty, the active-backup bond is no longer fault-tolerant\n", ((now - RR->bc->getBondStartTime())));
  1376. }
  1377. }
  1378. /**
  1379. * Short-circuit if we have no queued paths
  1380. */
  1381. if (_abFailoverQueue.empty()) {
  1382. return;
  1383. }
  1384. /**
  1385. * Fulfill primary reselect obligations
  1386. */
  1387. if (_abPath && !_abPath->eligible(now,_ackSendInterval)) { // Implicit ZT_MULTIPATH_RESELECTION_POLICY_FAILURE
1388. _abPath->address().toString(curPathStr); fprintf(stderr, "%llu AB: (failure) failover event! active backup path (%s) is no longer eligible\n", ((now - RR->bc->getBondStartTime())), curPathStr);
  1389. if (!_abFailoverQueue.empty()) {
  1390. fprintf(stderr, "%llu AB: (failure) there are (%lu) slaves in queue to choose from...\n", ((now - RR->bc->getBondStartTime())), _abFailoverQueue.size());
  1391. dequeueNextActiveBackupPath(now);
1392. _abPath->address().toString(curPathStr); fprintf(stderr, "%llu AB: (failure) switched to %s on %s\n", ((now - RR->bc->getBondStartTime())), curPathStr, getSlave(_abPath)->ifname().c_str());
  1393. } else {
  1394. fprintf(stderr, "%llu AB: (failure) nothing available in the slave queue, doing nothing.\n", ((now - RR->bc->getBondStartTime())));
  1395. }
  1396. }
  1397. /**
  1398. * Detect change to prevent flopping during later optimization step.
  1399. */
  1400. if (prevActiveBackupPath != _abPath) {
  1401. _lastActiveBackupPathChange = now;
  1402. }
  1403. if (_abSlaveSelectMethod == ZT_MULTIPATH_RESELECTION_POLICY_ALWAYS) {
  1404. if (_abPath && !getSlave(_abPath)->primary()
  1405. && getSlave(_abFailoverQueue.front())->primary()) {
  1406. fprintf(stderr, "%llu AB: (always) switching to available primary\n", ((now - RR->bc->getBondStartTime())));
  1407. dequeueNextActiveBackupPath(now);
  1408. }
  1409. }
  1410. if (_abSlaveSelectMethod == ZT_MULTIPATH_RESELECTION_POLICY_BETTER) {
  1411. if (_abPath && !getSlave(_abPath)->primary()) {
1412. fprintf(stderr, "%llu AB: (better) active backup is not on the primary slave, checking for a \"better\" primary per the re-select policy.\n", ((now - RR->bc->getBondStartTime())));
  1413. if (getSlave(_abFailoverQueue.front())->primary()
  1414. && (_abFailoverQueue.front()->_failoverScore > _abPath->_failoverScore)) {
  1415. dequeueNextActiveBackupPath(now);
  1416. fprintf(stderr, "%llu AB: (better) switched back to user-defined primary\n", ((now - RR->bc->getBondStartTime())));
  1417. }
  1418. }
  1419. }
  1420. if (_abSlaveSelectMethod == ZT_MULTIPATH_RESELECTION_POLICY_OPTIMIZE && !_abFailoverQueue.empty()) {
  1421. /**
  1422. * Implement link negotiation that was previously-decided
  1423. */
  1424. if (_abFailoverQueue.front()->_negotiated) {
  1425. dequeueNextActiveBackupPath(now);
  1426. _abPath->address().toString(prevPathStr);
  1427. fprintf(stderr, "%llu AB: (optimize) switched to negotiated path %s on %s\n", ((now - RR->bc->getBondStartTime())), prevPathStr, getSlave(_abPath)->ifname().c_str());
  1428. _lastPathNegotiationCheck = now;
  1429. }
  1430. else {
  1431. // Try to find a better path and automatically switch to it -- not too often, though.
  1432. if ((now - _lastActiveBackupPathChange) > ZT_MULTIPATH_MIN_ACTIVE_BACKUP_AUTOFLOP_INTERVAL) {
  1433. if (!_abFailoverQueue.empty()) {
  1434. //fprintf(stderr, "AB: (optimize) there are (%d) slaves in queue to choose from...\n", _abFailoverQueue.size());
  1435. int newFScore = _abFailoverQueue.front()->_failoverScore;
  1436. int prevFScore = _abPath->_failoverScore;
1437. // Establish a minimum switch threshold to prevent flapping (see the illustrative sketch after this function)
1438. int failoverScoreDifference = _abFailoverQueue.front()->_failoverScore - _abPath->_failoverScore;
1439. int thresholdQuantity = (int)(ZT_MULTIPATH_ACTIVE_BACKUP_OPTIMIZE_MIN_THRESHOLD * (float)_abPath->_allocation);
  1440. if ((failoverScoreDifference > 0) && (failoverScoreDifference > thresholdQuantity)) {
  1441. SharedPtr<Path> oldPath = _abPath;
  1442. _abPath->address().toString(prevPathStr);
  1443. dequeueNextActiveBackupPath(now);
  1444. _abPath->address().toString(curPathStr);
  1445. fprintf(stderr, "%llu AB: (optimize) switched from %s on %s (fs=%d) to %s on %s (fs=%d)\n", ((now - RR->bc->getBondStartTime())), prevPathStr, getSlave(oldPath)->ifname().c_str(), prevFScore, curPathStr, getSlave(_abPath)->ifname().c_str(), newFScore);
  1446. }
  1447. }
  1448. }
  1449. }
  1450. }
  1451. }
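/**
 * Illustrative sketch only -- a hypothetical helper restating the anti-flap rule used by the
 * "optimize" reselection branch above: the bond only switches away from the current active
 * path when the best queued candidate beats its failover score by more than a margin
 * proportional to the current path's allocation (ZT_MULTIPATH_ACTIVE_BACKUP_OPTIMIZE_MIN_THRESHOLD).
 */
static inline bool exampleShouldSwitchActivePath(int candidateScore, int currentScore, uint8_t currentAllocation)
{
	int failoverScoreDifference = candidateScore - currentScore;
	// Minimum improvement required before a switch is considered worthwhile
	int thresholdQuantity = (int)(ZT_MULTIPATH_ACTIVE_BACKUP_OPTIMIZE_MIN_THRESHOLD * (float)currentAllocation);
	return (failoverScoreDifference > 0) && (failoverScoreDifference > thresholdQuantity);
}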
  1452. void Bond::setReasonableDefaults(int policy)
  1453. {
  1454. // If invalid bonding policy, try default
  1455. int _defaultBondingPolicy = BondController::defaultBondingPolicy();
  1456. if (policy <= ZT_BONDING_POLICY_NONE || policy > ZT_BONDING_POLICY_BALANCE_AWARE) {
1457. // If no valid default is set either, use NONE (effectively disabling this bond)
1458. if (_defaultBondingPolicy < ZT_BONDING_POLICY_NONE || _defaultBondingPolicy > ZT_BONDING_POLICY_BALANCE_AWARE) {
1459. _defaultBondingPolicy = ZT_BONDING_POLICY_NONE;
1460. }
1461. _bondingPolicy = _defaultBondingPolicy;
  1462. } else {
  1463. _bondingPolicy= policy;
  1464. }
  1465. _downDelay = 0;
  1466. _upDelay = 0;
  1467. _allowFlowHashing=false;
  1468. _bondMonitorInterval=0;
  1469. _shouldCollectPathStatistics=false;
  1470. _lastBackgroundTaskCheck=0;
  1471. // Path negotiation
  1472. _allowPathNegotiation=false;
  1473. _lastPathNegotiationReceived=0;
  1474. _lastPathNegotiationCheck=0;
  1475. _pathNegotiationCutoffCount=0;
  1476. _localUtility=0;
  1477. _lastFlowStatReset=0;
  1478. _lastFlowExpirationCheck=0;
  1479. _numBondedPaths=0;
  1480. _rrPacketsSentOnCurrSlave=0;
  1481. _rrIdx=0;
  1482. _lastFlowRebalance=0;
  1483. _totalBondUnderload = 0;
  1484. //_maxAcceptableLatency
  1485. _maxAcceptablePacketDelayVariance = 50;
  1486. _maxAcceptablePacketLossRatio = 0.10;
  1487. _maxAcceptablePacketErrorRatio = 0.10;
  1488. _userHasSpecifiedSlaveSpeeds=0;
  1489. _lastFrame=0;
  1490. /**
  1491. * Paths are actively monitored to provide a real-time quality/preference-ordered rapid failover queue.
  1492. */
  1493. switch (policy) {
  1494. case ZT_BONDING_POLICY_ACTIVE_BACKUP:
  1495. _failoverInterval = 5000;
  1496. _abSlaveSelectMethod = ZT_MULTIPATH_RESELECTION_POLICY_OPTIMIZE;
  1497. _slaveMonitorStrategy = ZT_MULTIPATH_SLAVE_MONITOR_STRATEGY_DYNAMIC;
  1498. _qualityWeights[ZT_QOS_LAT_IDX] = 0.2f;
  1499. _qualityWeights[ZT_QOS_LTM_IDX] = 0.0f;
  1500. _qualityWeights[ZT_QOS_PDV_IDX] = 0.2f;
  1501. _qualityWeights[ZT_QOS_PLR_IDX] = 0.2f;
  1502. _qualityWeights[ZT_QOS_PER_IDX] = 0.2f;
  1503. _qualityWeights[ZT_QOS_THR_IDX] = 0.2f;
  1504. _qualityWeights[ZT_QOS_THM_IDX] = 0.0f;
  1505. _qualityWeights[ZT_QOS_THV_IDX] = 0.0f;
  1506. _qualityWeights[ZT_QOS_SCP_IDX] = 0.0f;
  1507. break;
  1508. /**
  1509. * All seemingly-alive paths are used. Paths are not actively monitored.
  1510. */
  1511. case ZT_BONDING_POLICY_BROADCAST:
  1512. _downDelay = 30000;
  1513. _upDelay = 0;
  1514. break;
  1515. /**
  1516. * Paths are monitored to determine when/if one needs to be added or removed from the rotation
  1517. */
  1518. case ZT_BONDING_POLICY_BALANCE_RR:
  1519. _failoverInterval = 5000;
  1520. _allowFlowHashing = false;
  1521. _packetsPerSlave = 1024;
  1522. _slaveMonitorStrategy = ZT_MULTIPATH_SLAVE_MONITOR_STRATEGY_DYNAMIC;
  1523. _qualityWeights[ZT_QOS_LAT_IDX] = 0.4f;
  1524. _qualityWeights[ZT_QOS_LTM_IDX] = 0.0f;
  1525. _qualityWeights[ZT_QOS_PDV_IDX] = 0.2f;
  1526. _qualityWeights[ZT_QOS_PLR_IDX] = 0.1f;
  1527. _qualityWeights[ZT_QOS_PER_IDX] = 0.1f;
  1528. _qualityWeights[ZT_QOS_THR_IDX] = 0.1f;
  1529. _qualityWeights[ZT_QOS_THM_IDX] = 0.0f;
  1530. _qualityWeights[ZT_QOS_THV_IDX] = 0.0f;
  1531. _qualityWeights[ZT_QOS_SCP_IDX] = 0.0f;
  1532. break;
  1533. /**
  1534. * Path monitoring is used to determine the capacity of each
  1535. * path and where to place the next flow.
  1536. */
  1537. case ZT_BONDING_POLICY_BALANCE_XOR:
1538. _failoverInterval = 5000;
  1539. _upDelay=_bondMonitorInterval*2;
  1540. _allowFlowHashing = true;
  1541. _slaveMonitorStrategy = ZT_MULTIPATH_SLAVE_MONITOR_STRATEGY_DYNAMIC;
  1542. _qualityWeights[ZT_QOS_LAT_IDX] = 0.4f;
  1543. _qualityWeights[ZT_QOS_LTM_IDX] = 0.0f;
  1544. _qualityWeights[ZT_QOS_PDV_IDX] = 0.2f;
  1545. _qualityWeights[ZT_QOS_PLR_IDX] = 0.1f;
  1546. _qualityWeights[ZT_QOS_PER_IDX] = 0.1f;
  1547. _qualityWeights[ZT_QOS_THR_IDX] = 0.1f;
  1548. _qualityWeights[ZT_QOS_THM_IDX] = 0.0f;
  1549. _qualityWeights[ZT_QOS_THV_IDX] = 0.0f;
  1550. _qualityWeights[ZT_QOS_SCP_IDX] = 0.0f;
  1551. break;
  1552. /**
  1553. * Path monitoring is used to determine the capacity of each
  1554. * path and where to place the next flow. Additionally, re-shuffling
  1555. * of flows may take place.
  1556. */
  1557. case ZT_BONDING_POLICY_BALANCE_AWARE:
  1558. _failoverInterval = 3000;
  1559. _allowFlowHashing = true;
  1560. _slaveMonitorStrategy = ZT_MULTIPATH_SLAVE_MONITOR_STRATEGY_DYNAMIC;
  1561. _qualityWeights[ZT_QOS_LAT_IDX] = 0.3f;
  1562. _qualityWeights[ZT_QOS_LTM_IDX] = 0.0f;
  1563. _qualityWeights[ZT_QOS_PDV_IDX] = 0.1f;
  1564. _qualityWeights[ZT_QOS_PLR_IDX] = 0.1f;
  1565. _qualityWeights[ZT_QOS_PER_IDX] = 0.1f;
  1566. _qualityWeights[ZT_QOS_THR_IDX] = 0.0f;
  1567. _qualityWeights[ZT_QOS_THM_IDX] = 0.4f;
  1568. _qualityWeights[ZT_QOS_THV_IDX] = 0.0f;
  1569. _qualityWeights[ZT_QOS_SCP_IDX] = 0.0f;
  1570. break;
  1571. default:
  1572. break;
  1573. }
  1574. /**
1575. * Timer geometries and counters (the derivation is summarized in an illustrative sketch after this function)
  1576. */
  1577. _bondMonitorInterval = _failoverInterval / 3;
  1578. _ackSendInterval = _failoverInterval;
  1579. _qualityEstimationInterval = _failoverInterval * 2;
  1580. _dynamicPathMonitorInterval = 0;
  1581. _downDelay=0;
  1582. _upDelay=0;
  1583. _ackCutoffCount = 0;
  1584. _lastAckRateCheck = 0;
  1585. _qosSendInterval = _bondMonitorInterval * 4;
  1586. _qosCutoffCount = 0;
  1587. _lastQoSRateCheck = 0;
  1588. _lastQualityEstimation=0;
  1589. throughputMeasurementInterval = _ackSendInterval * 2;
  1590. BondController::setMinReqPathMonitorInterval(_bondMonitorInterval);
  1591. _defaultPathRefractoryPeriod = 8000;
  1592. // TODO: Remove
  1593. _header=false;
  1594. _lastLogTS = 0;
  1595. _lastPrintTS = 0;
  1596. fprintf(stderr, "TIMERS: strat=%d, fi= %d, bmi= %d, qos= %d, ack= %d, estimateInt= %d, refractory= %d, ud= %d, dd= %d\n",
  1597. _slaveMonitorStrategy,
  1598. _failoverInterval,
  1599. _bondMonitorInterval,
  1600. _qosSendInterval,
  1601. _ackSendInterval,
  1602. _qualityEstimationInterval,
  1603. _defaultPathRefractoryPeriod,
  1604. _upDelay,
  1605. _downDelay);
  1606. }
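/**
 * Illustrative sketch only -- a hypothetical helper making the timer geometry above explicit:
 * everything is derived from the policy's failover interval (monitoring at a third of it, QoS
 * probes every four monitor intervals, ACK probes every failover interval, and quality
 * re-estimation at twice the failover interval). The struct and field types are placeholders.
 */
struct ExampleTimerGeometry { uint64_t monitorInterval, qosSendInterval, ackSendInterval, qualityEstimationInterval; };
static inline ExampleTimerGeometry exampleDeriveTimers(uint64_t failoverInterval)
{
	ExampleTimerGeometry t;
	t.monitorInterval = failoverInterval / 3;
	t.qosSendInterval = t.monitorInterval * 4;
	t.ackSendInterval = failoverInterval;
	t.qualityEstimationInterval = failoverInterval * 2;
	return t;
}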
  1607. void Bond::setUserQualityWeights(float weights[], int len)
  1608. {
  1609. if (len == ZT_QOS_WEIGHT_SIZE) {
  1610. float weightTotal = 0.0;
  1611. for (unsigned int i=0; i<ZT_QOS_WEIGHT_SIZE; ++i) {
  1612. weightTotal += weights[i];
  1613. }
  1614. if (weightTotal > 0.99 && weightTotal < 1.01) {
  1615. memcpy(_qualityWeights, weights, len * sizeof(float));
  1616. }
  1617. }
  1618. }
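/**
 * Illustrative usage sketch only -- a hypothetical caller. setUserQualityWeights() silently
 * ignores a weight vector whose length is not ZT_QOS_WEIGHT_SIZE or whose components do not
 * sum to roughly 1.0, so callers are expected to pass a normalized distribution such as the
 * one below (weighting only latency, delay variance, and packet loss).
 */
static inline void exampleApplyUserWeights(Bond &bond)
{
	float weights[ZT_QOS_WEIGHT_SIZE] = { 0.0f }; // unspecified components weigh zero
	weights[ZT_QOS_LAT_IDX] = 0.5f;               // latency
	weights[ZT_QOS_PDV_IDX] = 0.3f;               // packet delay variance
	weights[ZT_QOS_PLR_IDX] = 0.2f;               // packet loss ratio
	bond.setUserQualityWeights(weights, ZT_QOS_WEIGHT_SIZE);
}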
  1619. bool Bond::relevant() {
  1620. return _peer->identity().address().toInt() == 0x16a03a3d03
  1621. || _peer->identity().address().toInt() == 0x4410300d03
  1622. || _peer->identity().address().toInt() == 0x795cbf86fa;
  1623. }
  1624. SharedPtr<Slave> Bond::getSlave(const SharedPtr<Path>& path)
  1625. {
  1626. return RR->bc->getSlaveBySocket(_policyAlias, path->localSocket());
  1627. }
  1628. void Bond::dumpInfo(const int64_t now)
  1629. {
  1630. char pathStr[128];
  1631. //char oldPathStr[128];
  1632. char currPathStr[128];
  1633. if (!relevant()) {
  1634. return;
  1635. }
  1636. /*
  1637. fprintf(stderr, "---[ bp=%d, id=%llx, dd=%d, up=%d, pmi=%d, specifiedSlaves=%d, _specifiedPrimarySlave=%d, _specifiedFailInst=%d ]\n",
  1638. _policy, _peer->identity().address().toInt(), _downDelay, _upDelay, _monitorInterval, _userHasSpecifiedSlaves, _userHasSpecifiedPrimarySlave, _userHasSpecifiedFailoverInstructions);
  1639. if (_bondingPolicy== ZT_BONDING_POLICY_ACTIVE_BACKUP) {
  1640. fprintf(stderr, "Paths (bp=%d, stats=%d, primaryReselect=%d) :\n",
  1641. _policy, _shouldCollectPathStatistics, _abSlaveSelectMethod);
  1642. }
  1643. if (_bondingPolicy== ZT_BONDING_POLICY_BALANCE_RR
  1644. || _bondingPolicy== ZT_BONDING_POLICY_BALANCE_XOR
  1645. || _bondingPolicy== ZT_BONDING_POLICY_BALANCE_AWARE) {
  1646. fprintf(stderr, "Paths (bp=%d, stats=%d, fh=%d) :\n",
  1647. _policy, _shouldCollectPathStatistics, _allowFlowHashing);
  1648. }*/
  1649. if ((now - _lastLogTS) < 1000) {
  1650. return;
  1651. }
  1652. _lastPrintTS = now;
  1653. _lastLogTS = now;
  1654. fprintf(stderr, "\n\n");
  1655. for(int i=0; i<ZT_MAX_PEER_NETWORK_PATHS; ++i) {
  1656. if (_paths[i]) {
  1657. SharedPtr<Slave> slave =RR->bc->getSlaveBySocket(_policyAlias, _paths[i]->localSocket());
  1658. _paths[i]->address().toString(pathStr);
  1659. fprintf(stderr, " %2d: lat=%8.3f, ac=%3d, fail%5s, fscore=%6d, in=%7d, out=%7d, age=%7ld, ack=%7ld, ref=%6d, ls=%llx",
  1660. i,
  1661. _paths[i]->_latencyMean,
  1662. _paths[i]->_allocation,
  1663. slave->failoverToSlave().c_str(),
  1664. _paths[i]->_failoverScore,
  1665. _paths[i]->_packetsIn,
  1666. _paths[i]->_packetsOut,
  1667. (long)_paths[i]->age(now),
  1668. (long)_paths[i]->ackAge(now),
  1669. _paths[i]->_refractoryPeriod,
  1670. _paths[i]->localSocket()
  1671. );
  1672. if (slave->spare()) {
  1673. fprintf(stderr, " SPR.");
  1674. } else {
  1675. fprintf(stderr, " ");
  1676. }
  1677. if (slave->primary()) {
  1678. fprintf(stderr, " PRIM.");
  1679. } else {
  1680. fprintf(stderr, " ");
  1681. }
  1682. if (_paths[i]->allowed()) {
  1683. fprintf(stderr, " ALL.");
  1684. } else {
  1685. fprintf(stderr, " ");
  1686. }
  1687. if (_paths[i]->eligible(now,_ackSendInterval)) {
  1688. fprintf(stderr, " ELI.");
  1689. } else {
  1690. fprintf(stderr, " ");
  1691. }
  1692. if (_paths[i]->preferred()) {
  1693. fprintf(stderr, " PREF.");
  1694. } else {
  1695. fprintf(stderr, " ");
  1696. }
  1697. if (_paths[i]->_negotiated) {
  1698. fprintf(stderr, " NEG.");
  1699. } else {
  1700. fprintf(stderr, " ");
  1701. }
  1702. if (_paths[i]->bonded()) {
  1703. fprintf(stderr, " BOND ");
  1704. } else {
  1705. fprintf(stderr, " ");
  1706. }
  1707. if (_bondingPolicy== ZT_BONDING_POLICY_ACTIVE_BACKUP && _abPath && (_abPath == _paths[i].ptr())) {
  1708. fprintf(stderr, " ACTIVE ");
  1709. } else if (_bondingPolicy== ZT_BONDING_POLICY_ACTIVE_BACKUP) {
  1710. fprintf(stderr, " ");
  1711. }
  1712. if (_bondingPolicy== ZT_BONDING_POLICY_ACTIVE_BACKUP && _abFailoverQueue.size() && (_abFailoverQueue.front().ptr() == _paths[i].ptr())) {
  1713. fprintf(stderr, " NEXT ");
  1714. } else if (_bondingPolicy== ZT_BONDING_POLICY_ACTIVE_BACKUP) {
  1715. fprintf(stderr, " ");
  1716. }
  1717. fprintf(stderr, "%5s %s\n", slave->ifname().c_str(), pathStr);
  1718. }
  1719. }
  1720. if (_bondingPolicy== ZT_BONDING_POLICY_ACTIVE_BACKUP) {
  1721. if (!_abFailoverQueue.empty()) {
  1722. fprintf(stderr, "\nFailover Queue:\n");
  1723. for (std::list<SharedPtr<Path> >::iterator it(_abFailoverQueue.begin()); it!=_abFailoverQueue.end();++it) {
  1724. (*it)->address().toString(currPathStr);
  1725. SharedPtr<Slave> slave =RR->bc->getSlaveBySocket(_policyAlias, (*it)->localSocket());
  1726. fprintf(stderr, "\t%8s\tspeed=%7d\trelSpeed=%3d\tipvPref=%3d\tfscore=%9d\t\t%s\n",
  1727. slave->ifname().c_str(),
  1728. slave->speed(),
  1729. slave->relativeSpeed(),
  1730. slave->ipvPref(),
  1731. (*it)->_failoverScore,
  1732. currPathStr);
  1733. }
  1734. }
  1735. else
  1736. {
  1737. fprintf(stderr, "\nFailover Queue size = %lu\n", _abFailoverQueue.size());
  1738. }
  1739. }
  1740. if (_bondingPolicy== ZT_BONDING_POLICY_BALANCE_RR
  1741. || _bondingPolicy== ZT_BONDING_POLICY_BALANCE_XOR
  1742. || _bondingPolicy== ZT_BONDING_POLICY_BALANCE_AWARE) {
  1743. /*
  1744. if (_numBondedPaths) {
  1745. fprintf(stderr, "\nBonded Paths:\n");
  1746. for (int i=0; i<_numBondedPaths; ++i) {
  1747. _paths[_bondedIdx[i]].p->address().toString(currPathStr);
  1748. SharedPtr<Slave> slave =RR->bc->getSlaveBySocket(_policyAlias, _paths[_bondedIdx[i]].p->localSocket());
  1749. fprintf(stderr, " [%d]\t%8s\tflows=%3d\tspeed=%7d\trelSpeed=%3d\tipvPref=%3d\tfscore=%9d\t\t%s\n", i,
  1750. //fprintf(stderr, " [%d]\t%8s\tspeed=%7d\trelSpeed=%3d\tflowCount=%2d\tipvPref=%3d\tfscore=%9d\t\t%s\n", i,
  1751. slave->ifname().c_str(),
  1752. numberOfAssignedFlows(_paths[_bondedIdx[i]].p),
  1753. slave->speed(),
  1754. slave->relativeSpeed(),
  1755. //_paths[_bondedIdx[i]].p->assignedFlows.size(),
  1756. slave->ipvPref(),
  1757. _paths[_bondedIdx[i]].p->failoverScore(),
  1758. currPathStr);
  1759. }
  1760. }
  1761. */
  1762. /*
  1763. if (_allowFlowHashing) {
  1764. //Mutex::Lock _l(_flows_m);
  1765. if (_flows.size()) {
  1766. fprintf(stderr, "\nFlows:\n");
  1767. std::map<int32_t,SharedPtr<Flow> >::iterator it = _flows.begin();
  1768. while (it != _flows.end()) {
  1769. it->second->assignedPath()->address().toString(currPathStr);
  1770. SharedPtr<Slave> slave =RR->bc->getSlaveBySocket(_policyAlias, it->second->assignedPath()->localSocket());
  1771. fprintf(stderr, " [%4x] in=%16llu, out=%16llu, bytes=%16llu, last=%16llu, if=%8s\t\t%s\n",
  1772. it->second->id(),
  1773. it->second->bytesInPerUnitTime(),
  1774. it->second->bytesOutPerUnitTime(),
  1775. it->second->totalBytes(),
  1776. it->second->age(now),
  1777. slave->ifname().c_str(),
  1778. currPathStr);
  1779. ++it;
  1780. }
  1781. }
  1782. }
  1783. */
  1784. }
  1785. //fprintf(stderr, "\n\n\n\n\n");
  1786. }
  1787. } // namespace ZeroTier