Bond.cpp

  1. /*
  2. * Copyright (c)2013-2020 ZeroTier, Inc.
  3. *
  4. * Use of this software is governed by the Business Source License included
  5. * in the LICENSE.TXT file in the project's root directory.
  6. *
  7. * Change Date: 2024-01-01
  8. *
  9. * On the date above, in accordance with the Business Source License, use
  10. * of this software will be governed by version 2.0 of the Apache License.
  11. */
  12. /****/
  13. #include <cmath>
  14. #include "Peer.hpp"
  15. #include "Bond.hpp"
  16. #include "Switch.hpp"
  17. #include "Flow.hpp"
  18. #include "Path.hpp"
  19. namespace ZeroTier {
  20. Bond::Bond(const RuntimeEnvironment *renv, int policy, const SharedPtr<Peer>& peer) :
  21. RR(renv),
  22. _peer(peer)
  23. {
  24. setReasonableDefaults(policy, SharedPtr<Bond>(), false);
  25. _policyAlias = BondController::getPolicyStrByCode(policy);
  26. }
  27. Bond::Bond(const RuntimeEnvironment *renv, std::string& basePolicy, std::string& policyAlias, const SharedPtr<Peer>& peer) :
  28. RR(renv),
  29. _policyAlias(policyAlias),
  30. _peer(peer)
  31. {
  32. setReasonableDefaults(BondController::getPolicyCodeByStr(basePolicy), SharedPtr<Bond>(), false);
  33. }
  34. Bond::Bond(const RuntimeEnvironment *renv, SharedPtr<Bond> originalBond, const SharedPtr<Peer>& peer) :
  35. RR(renv),
  36. _peer(peer)
  37. {
  38. setReasonableDefaults(originalBond->_bondingPolicy, originalBond, true);
  39. }
  40. void Bond::nominatePath(const SharedPtr<Path>& path, int64_t now)
  41. {
  42. char pathStr[128];path->address().toString(pathStr);fprintf(stderr, "nominatePath: %s %s\n", getSlave(path)->ifname().c_str(), pathStr);
  43. Mutex::Lock _l(_paths_m);
  44. if (!RR->bc->slaveAllowed(_policyAlias, getSlave(path))) {
  45. return;
  46. }
  47. bool alreadyPresent = false;
  48. for (int i=0; i<ZT_MAX_PEER_NETWORK_PATHS; ++i) {
  49. if (path.ptr() == _paths[i].ptr()) {
  50. fprintf(stderr, "previously encountered path, not notifying bond (%s)\n", pathStr);
  51. alreadyPresent = true;
  52. break;
  53. }
  54. }
  55. if (!alreadyPresent) {
  56. for (int i=0; i<ZT_MAX_PEER_NETWORK_PATHS; ++i) {
  57. if (!_paths[i]) {
  58. fprintf(stderr, "notifyOfNewPath(): Setting path %s to idx=%d\n", pathStr, i);
  59. _paths[i] = path;
  60. //_paths[i]->slave = RR->bc->getSlaveBySocket(_policyAlias, path->localSocket());
  61. _paths[i]->startTrial(now);
  62. break;
  63. }
  64. }
  65. }
  66. curateBond(now, true);
  67. estimatePathQuality(now);
  68. }
  69. SharedPtr<Path> Bond::getAppropriatePath(int64_t now, int32_t flowId)
  70. {
  71. Mutex::Lock _l(_paths_m);
  72. /**
  73. * active-backup
  74. */
  75. if (_bondingPolicy == ZT_BONDING_POLICY_ACTIVE_BACKUP) {
  76. if (_abPath) {
  77. return _abPath;
  78. }
  79. }
  80. /**
  81. * broadcast
  82. */
  83. if (_bondingPolicy == ZT_BONDING_POLICY_BROADCAST) {
  84. return SharedPtr<Path>(); // Handled in Switch::_trySend()
  85. }
  86. if (!_numBondedPaths) {
  87. return SharedPtr<Path>(); // No paths assigned to bond yet, cannot balance traffic
  88. }
  89. /**
  90. * balance-rr
  91. */
  92. if (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_RR) {
  93. if (!_allowFlowHashing) {
  94. //fprintf(stderr, "_rrPacketsSentOnCurrSlave=%d, _numBondedPaths=%d, _rrIdx=%d\n", _rrPacketsSentOnCurrSlave, _numBondedPaths, _rrIdx);
  95. if (_packetsPerSlave == 0) {
  96. // Randomly select a path
  97. return _paths[_bondedIdx[_freeRandomByte % _numBondedPaths]]; // TODO: Optimize
  98. }
  99. if (_rrPacketsSentOnCurrSlave < _packetsPerSlave) {
  100. // Continue to use this slave
  101. ++_rrPacketsSentOnCurrSlave;
  102. return _paths[_bondedIdx[_rrIdx]];
  103. }
  104. // Reset striping counter
  105. _rrPacketsSentOnCurrSlave = 0;
  106. if (_numBondedPaths == 1) {
  107. _rrIdx = 0;
  108. }
  109. else {
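// Find the next usable slave: walk the bonded index ring for at most (_numBondedPaths - 1) steps and stop at the first entry whose path exists and is currently eligible.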
  110. int _tempIdx = _rrIdx;
  111. for (int searchCount = 0; searchCount < (_numBondedPaths-1); searchCount++) {
  112. _tempIdx = (_tempIdx == (_numBondedPaths-1)) ? 0 : _tempIdx+1;
  113. if (_bondedIdx[_tempIdx] != ZT_MAX_PEER_NETWORK_PATHS) {
  114. if (_paths[_bondedIdx[_tempIdx]] && _paths[_bondedIdx[_tempIdx]]->eligible(now,_ackSendInterval)) {
  115. _rrIdx = _tempIdx;
  116. break;
  117. }
  118. }
  119. }
  120. }
  121. if (_paths[_bondedIdx[_rrIdx]]) {
  122. return _paths[_bondedIdx[_rrIdx]];
  123. }
  124. }
  125. }
  126. /**
  127. * balance-xor
  128. */
  129. if (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_XOR || _bondingPolicy == ZT_BONDING_POLICY_BALANCE_AWARE) {
  130. if (!_allowFlowHashing || flowId == -1) {
  131. // No specific path required for unclassified traffic, send on anything
  132. return _paths[_bondedIdx[_freeRandomByte % _numBondedPaths]]; // TODO: Optimize
  133. }
  134. else if (_allowFlowHashing) {
  135. // TODO: Optimize
  136. Mutex::Lock _l(_flows_m);
  137. SharedPtr<Flow> flow;
  138. if (_flows.count(flowId)) {
  139. flow = _flows[flowId];
  140. flow->updateActivity(now);
  141. }
  142. else {
  143. unsigned char entropy;
  144. Utils::getSecureRandom(&entropy, 1);
  145. flow = createFlow(SharedPtr<Path>(), flowId, entropy, now);
  146. }
  147. if (flow) {
  148. return flow->assignedPath();
  149. }
  150. }
  151. }
  152. return SharedPtr<Path>();
  153. }
  154. void Bond::recordIncomingInvalidPacket(const SharedPtr<Path>& path)
  155. {
  156. //char pathStr[128];path->address().toString(pathStr);fprintf(stderr, "recordIncomingInvalidPacket() %s %s\n", getSlave(path)->ifname().c_str(), pathStr);
  157. Mutex::Lock _l(_paths_m);
  158. for (int i=0; i<ZT_MAX_PEER_NETWORK_PATHS; ++i) {
  159. if (_paths[i] == path) {
  160. _paths[i]->packetValiditySamples.push(false);
  161. }
  162. }
  163. }
  164. void Bond::recordOutgoingPacket(const SharedPtr<Path> &path, const uint64_t packetId,
  165. uint16_t payloadLength, const Packet::Verb verb, const int32_t flowId, int64_t now)
  166. {
  167. //char pathStr[128];path->address().toString(pathStr);fprintf(stderr, "recordOutgoingPacket() %s %s, packetId=%llx, payloadLength=%d, verb=%x, flowId=%lx\n", getSlave(path)->ifname().c_str(), pathStr, packetId, payloadLength, verb, flowId);
  168. _freeRandomByte += (unsigned char)(packetId >> 8); // Grab entropy to use in path selection logic
  169. if (!_shouldCollectPathStatistics) {
  170. return;
  171. }
  172. bool isFrame = (verb == Packet::VERB_FRAME || verb == Packet::VERB_EXT_FRAME);
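// QoS/ACK bookkeeping is sampled: ACK and QOS_MEASUREMENT packets are excluded, as is any packet whose ID has all of the low-order (ZT_QOS_ACK_DIVISOR - 1) bits clear.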
  173. bool shouldRecord = (packetId & (ZT_QOS_ACK_DIVISOR - 1)
  174. && (verb != Packet::VERB_ACK)
  175. && (verb != Packet::VERB_QOS_MEASUREMENT));
  176. if (isFrame || shouldRecord) {
  177. Mutex::Lock _l(_paths_m);
  178. if (isFrame) {
  179. ++(path->_packetsOut);
  180. _lastFrame=now;
  181. }
  182. if (shouldRecord) {
  183. path->_unackedBytes += payloadLength;
  184. // Take note that we're expecting a VERB_ACK on this path as of a specific time
  185. if (path->qosStatsOut.size() < ZT_QOS_MAX_OUTSTANDING_RECORDS) {
  186. path->qosStatsOut[packetId] = now;
  187. }
  188. }
  189. }
  190. if (_allowFlowHashing) {
  191. if (_allowFlowHashing && (flowId != ZT_QOS_NO_FLOW)) {
  192. Mutex::Lock _l(_flows_m);
  193. if (_flows.count(flowId)) {
  194. _flows[flowId]->recordOutgoingBytes(payloadLength);
  195. }
  196. }
  197. }
  198. }
  199. void Bond::recordIncomingPacket(const SharedPtr<Path>& path, uint64_t packetId, uint16_t payloadLength,
  200. Packet::Verb verb, int32_t flowId, int64_t now)
  201. {
  202. //char pathStr[128];path->address().toString(pathStr);fprintf(stderr, "recordIncomingPacket() %s %s, packetId=%llx, payloadLength=%d, verb=%x, flowId=%lx\n", getSlave(path)->ifname().c_str(), pathStr, packetId, payloadLength, verb, flowId);
  203. bool isFrame = (verb == Packet::VERB_FRAME || verb == Packet::VERB_EXT_FRAME);
  204. bool shouldRecord = (packetId & (ZT_QOS_ACK_DIVISOR - 1)
  205. && (verb != Packet::VERB_ACK)
  206. && (verb != Packet::VERB_QOS_MEASUREMENT));
  207. if (isFrame || shouldRecord) {
  208. Mutex::Lock _l(_paths_m);
  209. if (isFrame) {
  210. ++(path->_packetsIn);
  211. _lastFrame=now;
  212. }
  213. if (shouldRecord) {
  214. path->ackStatsIn[packetId] = payloadLength;
  215. ++(path->_packetsReceivedSinceLastAck);
  216. path->qosStatsIn[packetId] = now;
  217. ++(path->_packetsReceivedSinceLastQoS);
  218. path->packetValiditySamples.push(true);
  219. }
  220. }
  221. /**
  222. * Learn new flows and pro-actively create entries for them in the bond so
  223. * that the next time we send a packet out that is part of a flow we know
  224. * which path to use.
  225. */
  226. if ((flowId != ZT_QOS_NO_FLOW)
  227. && (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_RR
  228. || _bondingPolicy == ZT_BONDING_POLICY_BALANCE_XOR
  229. || _bondingPolicy == ZT_BONDING_POLICY_BALANCE_AWARE)) {
  230. Mutex::Lock _l(_flows_m);
  231. SharedPtr<Flow> flow;
  232. if (!_flows.count(flowId)) {
  233. flow = createFlow(path, flowId, 0, now);
  234. } else {
  235. flow = _flows[flowId];
  236. }
  237. if (flow) {
  238. flow->recordIncomingBytes(payloadLength);
  239. }
  240. }
  241. }
  242. void Bond::receivedQoS(const SharedPtr<Path>& path, int64_t now, int count, uint64_t *rx_id, uint16_t *rx_ts)
  243. {
  244. //char pathStr[128];path->address().toString(pathStr);fprintf(stderr, "receivedQoS() %s %s\n", getSlave(path)->ifname().c_str(), pathStr);
  245. Mutex::Lock _l(_paths_m);
  246. // Look up egress times and compute latency values for each record
  247. std::map<uint64_t,uint64_t>::iterator it;
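// Each record pairs a packet ID with the remote's reported holding time; subtracting that hold time from the total time since the packet was sent leaves the network round trip, which is halved for a one-way latency sample.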
  248. for (int j=0; j<count; j++) {
  249. it = path->qosStatsOut.find(rx_id[j]);
  250. if (it != path->qosStatsOut.end()) {
  251. path->latencySamples.push(((uint16_t)(now - it->second) - rx_ts[j]) / 2);
  252. path->qosStatsOut.erase(it);
  253. }
  254. }
  255. path->qosRecordSize.push(count);
  256. //char pathStr[128];path->address().toString(pathStr);fprintf(stderr, "receivedQoS() on path %s %s, count=%d, successful=%d, qosStatsOut.size()=%d\n", getSlave(path)->ifname().c_str(), pathStr, count, path->aknowledgedQoSRecordCountSinceLastCheck, path->qosStatsOut.size());
  257. }
  258. void Bond::receivedAck(const SharedPtr<Path>& path, int64_t now, int32_t ackedBytes)
  259. {
  260. Mutex::Lock _l(_paths_m);
  261. //char pathStr[128];path->address().toString(pathStr);fprintf(stderr, "receivedAck() %s %s, (ackedBytes=%d, lastAckReceived=%lld, ackAge=%lld)\n", getSlave(path)->ifname().c_str(), pathStr, ackedBytes, path->lastAckReceived, path->ackAge(now));
  262. path->_lastAckReceived = now;
  263. path->_unackedBytes = (ackedBytes > path->_unackedBytes) ? 0 : path->_unackedBytes - ackedBytes;
  264. int64_t timeSinceThroughputEstimate = (now - path->_lastThroughputEstimation);
  265. if (timeSinceThroughputEstimate >= throughputMeasurementInterval) {
  266. // TODO: See if this floating point math can be reduced
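// Bytes acked over the elapsed interval (converted to seconds) gives bytes per second; the additional /1000 stores the sample in thousands of bytes per second.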
  267. uint64_t throughput = (uint64_t)((float)(path->_bytesAckedSinceLastThroughputEstimation) / ((float)timeSinceThroughputEstimate / (float)1000));
  268. throughput /= 1000;
  269. if (throughput > 0) {
  270. path->throughputSamples.push(throughput);
  271. path->_throughputMax = throughput > path->_throughputMax ? throughput : path->_throughputMax;
  272. }
  273. path->_lastThroughputEstimation = now;
  274. path->_bytesAckedSinceLastThroughputEstimation = 0;
  275. } else {
  276. path->_bytesAckedSinceLastThroughputEstimation += ackedBytes;
  277. }
  278. }
  279. int32_t Bond::generateQoSPacket(const SharedPtr<Path>& path, int64_t now, char *qosBuffer)
  280. {
  281. //char pathStr[128];path->address().toString(pathStr);fprintf(stderr, "generateQoSPacket() %s %s\n", getSlave(path)->ifname().c_str(), pathStr);
  282. int32_t len = 0;
  283. std::map<uint64_t,uint64_t>::iterator it = path->qosStatsIn.begin();
  284. int i=0;
  285. int numRecords = std::min(path->_packetsReceivedSinceLastQoS,ZT_QOS_TABLE_SIZE);
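// Each serialized record is a 64-bit packet ID followed by a 16-bit holding time (ms since that packet arrived); consumed entries are erased from qosStatsIn.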
  286. while (i<numRecords && it != path->qosStatsIn.end()) {
  287. uint64_t id = it->first;
  288. memcpy(qosBuffer, &id, sizeof(uint64_t));
  289. qosBuffer+=sizeof(uint64_t);
  290. uint16_t holdingTime = (uint16_t)(now - it->second);
  291. memcpy(qosBuffer, &holdingTime, sizeof(uint16_t));
  292. qosBuffer+=sizeof(uint16_t);
  293. len+=sizeof(uint64_t)+sizeof(uint16_t);
  294. path->qosStatsIn.erase(it++);
  295. ++i;
  296. }
  297. return len;
  298. }
  299. bool Bond::assignFlowToBondedPath(SharedPtr<Flow> &flow, int64_t now)
  300. {
  301. //fprintf(stderr, "assignFlowToBondedPath\n");
  302. char curPathStr[128];
  303. unsigned int idx = ZT_MAX_PEER_NETWORK_PATHS;
  304. if (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_XOR) {
  305. idx = abs((int)(flow->id() % (_numBondedPaths)));
  306. //fprintf(stderr, "flow->id()=%d, %x, _numBondedPaths=%d, idx=%d\n", flow->id(), flow->id(), _numBondedPaths, idx);
  307. flow->assignPath(_paths[_bondedIdx[idx]],now);
  308. }
  309. if (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_AWARE) {
  310. unsigned char entropy;
  311. Utils::getSecureRandom(&entropy, 1);
  312. if (_totalBondUnderload) {
  313. entropy %= _totalBondUnderload;
  314. }
  315. if (!_numBondedPaths) {
  316. fprintf(stderr, "no bonded paths for flow assignment\n");
  317. return false;
  318. }
  319. /* Since there may be scenarios where a path is removed before we can re-estimate
  320. relative qualities (and thus allocations) we need to down-modulate the entropy
  321. value that we use to randomly assign among the surviving paths, otherwise we risk
  322. not being able to find a path to assign this flow to. */
  323. int totalIncompleteAllocation = 0;
  324. for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  325. if (_paths[i] && _paths[i]->bonded()) {
  326. totalIncompleteAllocation += _paths[i]->_allocation;
  327. }
  328. }
  329. fprintf(stderr, "entropy = %d, totalIncompleteAllocation=%d\n", entropy, totalIncompleteAllocation);
  330. entropy %= totalIncompleteAllocation;
  331. fprintf(stderr, "new entropy = %d\n", entropy);
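// Weighted random selection: each bonded path owns a probability segment proportional to its allocation (or its affinity while the bond is underloaded); the first segment large enough to absorb the remaining entropy wins.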
  332. for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  333. if (_paths[i] && _paths[i]->bonded()) {
  334. SharedPtr<Slave> slave = RR->bc->getSlaveBySocket(_policyAlias, _paths[i]->localSocket());
  335. _paths[i]->address().toString(curPathStr);
  336. uint8_t probabilitySegment = (_totalBondUnderload > 0) ? _paths[i]->_affinity : _paths[i]->_allocation;
  337. fprintf(stderr, "i=%2d, entropy=%3d, alloc=%3d, byteload=%4d, segment=%3d, _totalBondUnderload=%3d, ifname=%s, path=%20s\n", i, entropy, _paths[i]->_allocation, _paths[i]->_relativeByteLoad, probabilitySegment, _totalBondUnderload, slave->ifname().c_str(), curPathStr);
  338. if (entropy <= probabilitySegment) {
  339. idx = i;
  340. fprintf(stderr, "\t is best path\n");
  341. break;
  342. }
  343. entropy -= probabilitySegment;
  344. }
  345. }
  346. if (idx < ZT_MAX_PEER_NETWORK_PATHS) {
  347. flow->assignPath(_paths[idx],now);
  348. ++(_paths[idx]->_assignedFlowCount);
  349. }
  350. else {
  351. fprintf(stderr, "could not assign flow?\n"); exit(0); // TODO: Remove
  352. return false;
  353. }
  354. }
  355. flow->assignedPath()->address().toString(curPathStr);
  356. SharedPtr<Slave> slave = RR->bc->getSlaveBySocket(_policyAlias, flow->assignedPath()->localSocket());
  357. fprintf(stderr, "assigned (tx) flow %x with peer %llx to path %s on %s (idx=%d)\n", flow->id(), _peer->_id.address().toInt(), curPathStr, slave->ifname().c_str(), idx);
  358. return true;
  359. }
  360. SharedPtr<Flow> Bond::createFlow(const SharedPtr<Path> &path, int32_t flowId, unsigned char entropy, int64_t now)
  361. {
  362. //fprintf(stderr, "createFlow\n");
  363. char curPathStr[128];
  364. // ---
  365. if (!_numBondedPaths) {
  366. fprintf(stderr, "there are no bonded paths, cannot assign flow\n");
  367. return SharedPtr<Flow>();
  368. }
  369. if (_flows.size() >= ZT_FLOW_MAX_COUNT) {
  370. fprintf(stderr, "max number of flows reached (%d), forcibly forgetting oldest flow\n", ZT_FLOW_MAX_COUNT);
  371. forgetFlowsWhenNecessary(0,true,now);
  372. }
  373. SharedPtr<Flow> flow = new Flow(flowId, now);
  374. _flows[flowId] = flow;
  375. fprintf(stderr, "new flow %x detected with peer %llx, %lu active flow(s)\n", flowId, _peer->_id.address().toInt(), (_flows.size()));
  376. /**
  377. * Add a flow with a given Path already provided. This is the case when a packet
  378. * is received on a path but no flow exists, in this case we simply assign the path
  379. * that the remote peer chose for us.
  380. */
  381. if (path) {
  382. flow->assignPath(path,now);
  383. path->address().toString(curPathStr);
  384. SharedPtr<Slave> slave = RR->bc->getSlaveBySocket(_policyAlias, flow->assignedPath()->localSocket());
  385. fprintf(stderr, "assigned (rx) flow %x with peer %llx to path %s on %s\n", flow->id(), _peer->_id.address().toInt(), curPathStr, slave->ifname().c_str());
  386. }
  387. /**
  388. * Add a flow when no path was provided. This means that it is an outgoing packet
  389. * and that it is up to the local peer to decide how to load-balance its transmission.
  390. */
  391. else if (!path) {
  392. assignFlowToBondedPath(flow, now);
  393. }
  394. return flow;
  395. }
  396. void Bond::forgetFlowsWhenNecessary(uint64_t age, bool oldest, int64_t now)
  397. {
  398. //fprintf(stderr, "forgetFlowsWhenNecessary\n");
  399. std::map<int32_t,SharedPtr<Flow> >::iterator it = _flows.begin();
  400. std::map<int32_t,SharedPtr<Flow> >::iterator oldestFlow = _flows.end();
  401. SharedPtr<Flow> expiredFlow;
  402. if (age) { // Remove by specific age
  403. while (it != _flows.end()) {
  404. if (it->second->age(now) > age) {
  405. fprintf(stderr, "forgetting flow %x between this node and %llx, %lu active flow(s)\n", it->first, _peer->_id.address().toInt(), (_flows.size()-1));
  406. it->second->assignedPath()->_assignedFlowCount--;
  407. it = _flows.erase(it);
  408. } else {
  409. ++it;
  410. }
  411. }
  412. }
  413. else if (oldest) { // Remove single oldest by natural expiration
  414. uint64_t maxAge = 0;
  415. while (it != _flows.end()) {
  416. if (it->second->age(now) > maxAge) {
  417. maxAge = it->second->age(now); // track the greatest age seen so far
  418. oldestFlow = it;
  419. }
  420. ++it;
  421. }
  422. if (oldestFlow != _flows.end()) {
  423. fprintf(stderr, "forgetting oldest flow %x (of age %llu) between this node and %llx, %lu active flow(s)\n", oldestFlow->first, oldestFlow->second->age(now), _peer->_id.address().toInt(), (_flows.size()-1));
  424. oldestFlow->second->assignedPath()->_assignedFlowCount--;
  425. _flows.erase(oldestFlow);
  426. }
  427. }
  428. }
  429. void Bond::processIncomingPathNegotiationRequest(uint64_t now, SharedPtr<Path> &path, int16_t remoteUtility)
  430. {
  431. //fprintf(stderr, "processIncomingPathNegotiationRequest\n");
  432. if (_abSlaveSelectMethod != ZT_MULTIPATH_RESELECTION_POLICY_OPTIMIZE) {
  433. return;
  434. }
  435. Mutex::Lock _l(_paths_m);
  436. char pathStr[128];
  437. path->address().toString(pathStr);
  438. if (!_lastPathNegotiationCheck) {
  439. return;
  440. }
  441. SharedPtr<Slave> slave = RR->bc->getSlaveBySocket(_policyAlias, path->localSocket());
  442. if (remoteUtility > _localUtility) {
  443. fprintf(stderr, "peer suggests path, its utility (%d) is greater than ours (%d), we will switch to %s on %s (ls=%llx)\n", remoteUtility, _localUtility, pathStr, slave->ifname().c_str(), path->localSocket());
  444. negotiatedPath = path;
  445. }
  446. if (remoteUtility < _localUtility) {
  447. fprintf(stderr, "peer suggests path, its utility (%d) is less than ours (%d), we will NOT switch to %s on %s (ls=%llx)\n", remoteUtility, _localUtility, pathStr, slave->ifname().c_str(), path->localSocket());
  448. }
  449. if (remoteUtility == _localUtility) {
  450. fprintf(stderr, "peer suggest path, but utility is equal, picking choice made by peer with greater identity.\n");
  451. if (_peer->_id.address().toInt() > RR->node->identity().address().toInt()) {
  452. fprintf(stderr, "peer identity was greater, going with their choice of %s on %s (ls=%llx)\n", pathStr, slave->ifname().c_str(), path->localSocket());
  453. negotiatedPath = path;
  454. } else {
  455. fprintf(stderr, "our identity was greater, no change\n");
  456. }
  457. }
  458. }
  459. void Bond::pathNegotiationCheck(void *tPtr, const int64_t now)
  460. {
  461. //fprintf(stderr, "pathNegotiationCheck\n");
  462. char pathStr[128];
  463. int maxInPathIdx = ZT_MAX_PEER_NETWORK_PATHS;
  464. int maxOutPathIdx = ZT_MAX_PEER_NETWORK_PATHS;
  465. uint64_t maxInCount = 0;
  466. uint64_t maxOutCount = 0;
  467. for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  468. if (!_paths[i]) {
  469. continue;
  470. }
  471. if (_paths[i]->_packetsIn > maxInCount) {
  472. maxInCount = _paths[i]->_packetsIn;
  473. maxInPathIdx = i;
  474. }
  475. if (_paths[i]->_packetsOut > maxOutCount) {
  476. maxOutCount = _paths[i]->_packetsOut;
  477. maxOutPathIdx = i;
  478. }
  479. _paths[i]->resetPacketCounts();
  480. }
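// Links count as synchronized unless the path receiving the most packets differs from the path sending the most packets.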
  481. bool _peerLinksSynchronized = ((maxInPathIdx != ZT_MAX_PEER_NETWORK_PATHS)
  482. && (maxOutPathIdx != ZT_MAX_PEER_NETWORK_PATHS)
  483. && (maxInPathIdx != maxOutPathIdx)) ? false : true;
  484. /**
  485. * Determine utility and attempt to petition remote peer to switch to our chosen path
  486. */
  487. if (!_peerLinksSynchronized) {
  488. _localUtility = _paths[maxOutPathIdx]->_failoverScore - _paths[maxInPathIdx]->_failoverScore;
  489. if (_paths[maxOutPathIdx]->_negotiated) {
  490. _localUtility -= ZT_MULTIPATH_FAILOVER_HANDICAP_NEGOTIATED;
  491. }
  492. if ((now - _lastSentPathNegotiationRequest) > ZT_PATH_NEGOTIATION_CUTOFF_TIME) {
  493. fprintf(stderr, "BT: (sync) it's been long enough, sending more requests.\n");
  494. _numSentPathNegotiationRequests = 0;
  495. }
  496. if (_numSentPathNegotiationRequests < ZT_PATH_NEGOTIATION_TRY_COUNT) {
  497. if (_localUtility >= 0) {
  498. fprintf(stderr, "BT: (sync) paths appear to be out of sync (utility=%d)\n", _localUtility);
  499. sendPATH_NEGOTIATION_REQUEST(tPtr, _paths[maxOutPathIdx]);
  500. ++_numSentPathNegotiationRequests;
  501. _lastSentPathNegotiationRequest = now;
  502. _paths[maxOutPathIdx]->address().toString(pathStr);
  503. SharedPtr<Slave> slave =RR->bc->getSlaveBySocket(_policyAlias, _paths[maxOutPathIdx]->localSocket());
  504. fprintf(stderr, "sending request to use %s on %s, ls=%llx, utility=%d\n", pathStr, slave->ifname().c_str(), _paths[maxOutPathIdx]->localSocket(), _localUtility);
  505. }
  506. }
  507. /**
  508. * Give up negotiating and consider switching
  509. */
  510. else if ((now - _lastSentPathNegotiationRequest) > (2 * ZT_PATH_NEGOTIATION_CHECK_INTERVAL)) {
  511. if (_localUtility == 0) {
  512. // There's no loss to us, just switch without sending another request
  513. fprintf(stderr, "BT: (sync) giving up, switching to remote peer's path.\n");
  514. negotiatedPath = _paths[maxInPathIdx];
  515. }
  516. }
  517. }
  518. }
  519. void Bond::sendPATH_NEGOTIATION_REQUEST(void *tPtr, const SharedPtr<Path> &path)
  520. {
  521. //char pathStr[128];path->address().toString(pathStr);fprintf(stderr, "sendPATH_NEGOTIATION_REQUEST() %s %s\n", getSlave(path)->ifname().c_str(), pathStr);
  522. if (_abSlaveSelectMethod != ZT_MULTIPATH_RESELECTION_POLICY_OPTIMIZE) {
  523. return;
  524. }
  525. Packet outp(_peer->_id.address(),RR->identity.address(),Packet::VERB_PATH_NEGOTIATION_REQUEST);
  526. outp.append<int16_t>(_localUtility);
  527. if (path->address()) {
  528. outp.armor(_peer->key(),false);
  529. RR->node->putPacket(tPtr,path->localSocket(),path->address(),outp.data(),outp.size());
  530. }
  531. }
  532. void Bond::sendACK(void *tPtr,const SharedPtr<Path> &path,const int64_t localSocket,
  533. const InetAddress &atAddress,int64_t now)
  534. {
  535. //char pathStr[128];path->address().toString(pathStr);fprintf(stderr, "sendACK() %s %s\n", getSlave(path)->ifname().c_str(), pathStr);
  536. Packet outp(_peer->_id.address(),RR->identity.address(),Packet::VERB_ACK);
  537. int32_t bytesToAck = 0;
  538. std::map<uint64_t,uint16_t>::iterator it = path->ackStatsIn.begin();
  539. while (it != path->ackStatsIn.end()) {
  540. bytesToAck += it->second;
  541. ++it;
  542. }
  543. outp.append<uint32_t>(bytesToAck);
  544. if (atAddress) {
  545. outp.armor(_peer->key(),false);
  546. RR->node->putPacket(tPtr,localSocket,atAddress,outp.data(),outp.size());
  547. } else {
  548. RR->sw->send(tPtr,outp,false);
  549. }
  550. path->ackStatsIn.clear();
  551. path->_packetsReceivedSinceLastAck = 0;
  552. path->_lastAckSent = now;
  553. }
  554. void Bond::sendQOS_MEASUREMENT(void *tPtr,const SharedPtr<Path> &path,const int64_t localSocket,
  555. const InetAddress &atAddress,int64_t now)
  556. {
  557. //char pathStr[128];path->address().toString(pathStr);fprintf(stderr, "sendQOS() %s %s\n", getSlave(path)->ifname().c_str(), pathStr);
  558. const int64_t _now = RR->node->now();
  559. Packet outp(_peer->_id.address(),RR->identity.address(),Packet::VERB_QOS_MEASUREMENT);
  560. char qosData[ZT_QOS_MAX_PACKET_SIZE];
  561. int16_t len = generateQoSPacket(path, _now,qosData);
  562. outp.append(qosData,len);
  563. if (atAddress) {
  564. outp.armor(_peer->key(),false);
  565. RR->node->putPacket(tPtr,localSocket,atAddress,outp.data(),outp.size());
  566. } else {
  567. RR->sw->send(tPtr,outp,false);
  568. }
  569. // Account for the fact that a VERB_QOS_MEASUREMENT was just sent. Reset timers.
  570. path->_packetsReceivedSinceLastQoS = 0;
  571. path->_lastQoSMeasurement = now;
  572. }
  573. void Bond::processBackgroundTasks(void *tPtr, const int64_t now)
  574. {
  575. Mutex::Lock _l(_paths_m);
  576. if (!_peer->_canUseMultipath || (now - _lastBackgroundTaskCheck) < ZT_BOND_BACKGROUND_TASK_MIN_INTERVAL) {
  577. return;
  578. }
  579. _lastBackgroundTaskCheck = now;
  580. // Compute dynamic path monitor timer interval
  581. if (_slaveMonitorStrategy == ZT_MULTIPATH_SLAVE_MONITOR_STRATEGY_DYNAMIC) {
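// The suggested interval grows with time since the last frame (1/100th of the idle time), floored at the bond-wide monitor interval and capped at ZT_PATH_HEARTBEAT_PERIOD.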
  582. int suggestedMonitorInterval = (now - _lastFrame) / 100;
  583. _dynamicPathMonitorInterval = std::min(ZT_PATH_HEARTBEAT_PERIOD, ((suggestedMonitorInterval > _bondMonitorInterval) ? suggestedMonitorInterval : _bondMonitorInterval));
  584. //fprintf(stderr, "_lastFrame=%llu, suggestedMonitorInterval=%d, _dynamicPathMonitorInterval=%d\n",
  585. // (now-_lastFrame), suggestedMonitorInterval, _dynamicPathMonitorInterval);
  586. }
  587. // TODO: Clarify and generalize this logic
  588. if (_slaveMonitorStrategy == ZT_MULTIPATH_SLAVE_MONITOR_STRATEGY_DYNAMIC) {
  589. _shouldCollectPathStatistics = true;
  590. }
  591. // Memoize oft-used properties in the packet ingress/egress logic path
  592. if (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_AWARE) {
  593. // Required for real-time balancing
  594. _shouldCollectPathStatistics = true;
  595. }
  596. if (_bondingPolicy == ZT_BONDING_POLICY_ACTIVE_BACKUP) {
  597. if (_abSlaveSelectMethod == ZT_MULTIPATH_RESELECTION_POLICY_BETTER) {
  598. // Required for judging suitability of primary slave after recovery
  599. _shouldCollectPathStatistics = true;
  600. }
  601. if (_abSlaveSelectMethod == ZT_MULTIPATH_RESELECTION_POLICY_OPTIMIZE) {
  602. // Required for judging suitability of new candidate primary
  603. _shouldCollectPathStatistics = true;
  604. }
  605. }
  606. if ((now - _lastCheckUserPreferences) > 1000) {
  607. _lastCheckUserPreferences = now;
  608. applyUserPrefs();
  609. }
  610. curateBond(now,false);
  611. if ((now - _lastQualityEstimation) > _qualityEstimationInterval) {
  612. _lastQualityEstimation = now;
  613. estimatePathQuality(now);
  614. }
  615. dumpInfo(now);
  616. // Send QOS/ACK packets as needed
  617. if (_shouldCollectPathStatistics) {
  618. for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  619. if (_paths[i] && _paths[i]->allowed()) {
  620. if (_paths[i]->needsToSendQoS(now,_qosSendInterval)) {
  621. sendQOS_MEASUREMENT(tPtr, _paths[i], _paths[i]->localSocket(), _paths[i]->address(), now);
  622. }
  623. if (_paths[i]->needsToSendAck(now,_ackSendInterval)) {
  624. sendACK(tPtr, _paths[i], _paths[i]->localSocket(), _paths[i]->address(), now);
  625. }
  626. }
  627. }
  628. }
  629. // Perform periodic background tasks unique to each bonding policy
  630. switch (_bondingPolicy)
  631. {
  632. case ZT_BONDING_POLICY_ACTIVE_BACKUP:
  633. processActiveBackupTasks(now);
  634. break;
  635. case ZT_BONDING_POLICY_BROADCAST:
  636. break;
  637. case ZT_BONDING_POLICY_BALANCE_RR:
  638. case ZT_BONDING_POLICY_BALANCE_XOR:
  639. case ZT_BONDING_POLICY_BALANCE_AWARE:
  640. processBalanceTasks(now);
  641. break;
  642. default:
  643. break;
  644. }
  645. // Check whether or not a path negotiation needs to be performed
  646. if (((now - _lastPathNegotiationCheck) > ZT_PATH_NEGOTIATION_CHECK_INTERVAL) && _allowPathNegotiation) {
  647. _lastPathNegotiationCheck = now;
  648. pathNegotiationCheck(tPtr, now);
  649. }
  650. }
  651. void Bond::applyUserPrefs()
  652. {
  653. //fprintf(stderr, "applyUserPrefs, _minReqPathMonitorInterval=%d\n", RR->bc->minReqPathMonitorInterval());
  654. for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  655. if (!_paths[i]) {
  656. continue;
  657. }
  658. SharedPtr<Slave> sl = getSlave(_paths[i]);
  659. if (sl) {
  660. if (sl->monitorInterval() == 0) { // If no interval was specified for this slave, use more generic bond-wide interval
  661. sl->setMonitorInterval(_bondMonitorInterval);
  662. }
  663. RR->bc->setMinReqPathMonitorInterval((sl->monitorInterval() < RR->bc->minReqPathMonitorInterval()) ? sl->monitorInterval() : RR->bc->minReqPathMonitorInterval());
  664. bool bFoundCommonSlave = false;
  665. SharedPtr<Slave> commonSlave =RR->bc->getSlaveBySocket(_policyAlias, _paths[i]->localSocket());
  666. for(unsigned int j=0;j<ZT_MAX_PEER_NETWORK_PATHS;++j) {
  667. if (_paths[j] && _paths[j].ptr() != _paths[i].ptr()) {
  668. if (RR->bc->getSlaveBySocket(_policyAlias, _paths[j]->localSocket()) == commonSlave) {
  669. bFoundCommonSlave = true;
  670. }
  671. }
  672. }
  673. _paths[i]->_monitorInterval = sl->monitorInterval();
  674. _paths[i]->_upDelay = sl->upDelay() ? sl->upDelay() : _upDelay;
  675. _paths[i]->_downDelay = sl->downDelay() ? sl->downDelay() : _downDelay;
  676. _paths[i]->_ipvPref = sl->ipvPref();
  677. _paths[i]->_mode = sl->mode();
  678. _paths[i]->_enabled = sl->enabled();
  679. _paths[i]->_onlyPathOnSlave = !bFoundCommonSlave;
  680. }
  681. }
  682. if (_peer) {
  683. _peer->_shouldCollectPathStatistics = _shouldCollectPathStatistics;
  684. _peer->_bondingPolicy = _bondingPolicy;
  685. }
  686. }
  687. void Bond::curateBond(const int64_t now, bool rebuildBond)
  688. {
  689. //fprintf(stderr, "%lu curateBond (rebuildBond=%d), _numBondedPaths=%d\n", ((now - RR->bc->getBondStartTime())), rebuildBond, _numBondedPaths);
  690. char pathStr[128];
  691. /**
  692. * Update path states
  693. */
  694. for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  695. if (!_paths[i]) {
  696. continue;
  697. }
  698. bool currEligibility = _paths[i]->eligible(now,_ackSendInterval);
  699. //_paths[i]->address().toString(pathStr);
  700. //fprintf(stderr, "\n\n%ld path eligibility (for %s, %s):\n", (RR->node->now() - RR->bc->getBondStartTime()), getSlave(_paths[i])->ifname().c_str(), pathStr);
  701. //_paths[i]->printEligible(now,_ackSendInterval);
  702. if (currEligibility != _paths[i]->_lastEligibilityState) {
  703. _paths[i]->address().toString(pathStr);
  704. //fprintf(stderr, "\n\n%ld path eligibility (for %s, %s) has changed (from %d to %d)\n", (RR->node->now() - RR->bc->getBondStartTime()), getSlave(_paths[i])->ifname().c_str(), pathStr, _paths[i]->lastCheckedEligibility, _paths[i]->eligible(now,_ackSendInterval));
  705. if (currEligibility) {
  706. rebuildBond = true;
  707. }
  708. if (!currEligibility) {
  709. _paths[i]->adjustRefractoryPeriod(now, _defaultPathRefractoryPeriod, !currEligibility);
  710. if (_paths[i]->bonded()) {
  711. //fprintf(stderr, "the path was bonded, reallocation of its flows will occur soon\n");
  712. rebuildBond = true;
  713. _paths[i]->_shouldReallocateFlows = _paths[i]->bonded();
  714. _paths[i]->setBonded(false);
  715. } else {
  716. //fprintf(stderr, "the path was not bonded, no consequences\n");
  717. }
  718. }
  719. }
  720. if (currEligibility) {
  721. _paths[i]->adjustRefractoryPeriod(now, _defaultPathRefractoryPeriod, false);
  722. }
  723. _paths[i]->_lastEligibilityState = currEligibility;
  724. }
  725. /**
  726. * Curate the set of paths that are part of the bond proper. Selects a single path
  727. * per logical slave according to eligibility and user-specified constraints.
  728. */
  729. if ((_bondingPolicy == ZT_BONDING_POLICY_BALANCE_RR)
  730. || (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_XOR)
  731. || (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_AWARE)) {
  732. if (!_numBondedPaths) {
  733. rebuildBond = true;
  734. }
  735. // TODO: Optimize
  736. if (rebuildBond) {
  737. int updatedBondedPathCount = 0;
  738. std::map<SharedPtr<Slave>,int> slaveMap;
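// slaveMap keeps one representative path index per physical slave; a later path replaces the incumbent only if it is preferred while the incumbent is not, or if it carries a higher preference rank at equal preference.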
  739. for (int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  740. if (_paths[i] && _paths[i]->allowed() && (_paths[i]->eligible(now,_ackSendInterval) || !_numBondedPaths)) {
  741. SharedPtr<Slave> slave =RR->bc->getSlaveBySocket(_policyAlias, _paths[i]->localSocket());
  742. if (!slaveMap.count(slave)) {
  743. slaveMap[slave] = i;
  744. }
  745. else {
  746. bool overridden = false;
  747. _paths[i]->address().toString(pathStr);
  748. //fprintf(stderr, " slave representative path already exists! (%s %s)\n", getSlave(_paths[i])->ifname().c_str(), pathStr);
  749. if (_paths[i]->preferred() && !_paths[slaveMap[slave]]->preferred()) {
  750. // Override previous choice if preferred
  751. //fprintf(stderr, "overriding since its preferred!\n");
  752. if (_paths[slaveMap[slave]]->_assignedFlowCount) {
  753. _paths[slaveMap[slave]]->_deprecated = true;
  754. }
  755. else {
  756. _paths[slaveMap[slave]]->_deprecated = true;
  757. _paths[slaveMap[slave]]->setBonded(false);
  758. }
  759. slaveMap[slave] = i;
  760. overridden = true;
  761. }
  762. if ((_paths[i]->preferred() && _paths[slaveMap[slave]]->preferred())
  763. || (!_paths[i]->preferred() && !_paths[slaveMap[slave]]->preferred())) {
  764. if (_paths[i]->preferenceRank() > _paths[slaveMap[slave]]->preferenceRank()) {
  765. // Override if higher preference
  766. //fprintf(stderr, "overriding according to preference preferenceRank!\n");
  767. if (_paths[slaveMap[slave]]->_assignedFlowCount) {
  768. _paths[slaveMap[slave]]->_deprecated = true;
  769. }
  770. else {
  771. _paths[slaveMap[slave]]->_deprecated = true;
  772. _paths[slaveMap[slave]]->setBonded(false);
  773. }
  774. slaveMap[slave] = i;
  775. }
  776. }
  777. }
  778. }
  779. }
  780. std::map<SharedPtr<Slave>,int>::iterator it = slaveMap.begin();
  781. for (int i=0; i<ZT_MAX_PEER_NETWORK_PATHS; ++i) {
  782. if (!_paths[i]) {
  783. continue;
  784. }
  785. _bondedIdx[i] = ZT_MAX_PEER_NETWORK_PATHS;
  786. if (it != slaveMap.end()) {
  787. _bondedIdx[i] = it->second;
  788. _paths[_bondedIdx[i]]->setBonded(true);
  789. ++it;
  790. ++updatedBondedPathCount;
  791. _paths[_bondedIdx[i]]->address().toString(pathStr);
  792. fprintf(stderr, "setting i=%d, _bondedIdx[%d]=%d to bonded (%s %s)\n", i, i, _bondedIdx[i], getSlave(_paths[_bondedIdx[i]])->ifname().c_str(), pathStr);
  793. }
  794. }
  795. _numBondedPaths = updatedBondedPathCount;
  796. if (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_RR) {
  797. // Cause a RR reset since the currently used index might no longer be valid
  798. _rrPacketsSentOnCurrSlave = _packetsPerSlave;
  799. }
  800. }
  801. }
  802. }
  803. void Bond::estimatePathQuality(const int64_t now)
  804. {
  805. char pathStr[128];
  806. //---
  807. uint32_t totUserSpecifiedSlaveSpeed = 0;
  808. if (_numBondedPaths) { // Compute relative user-specified speeds of slaves
  809. for(unsigned int i=0;i<_numBondedPaths;++i) {
  810. SharedPtr<Slave> slave =RR->bc->getSlaveBySocket(_policyAlias, _paths[i]->localSocket());
  811. if (_paths[i] && _paths[i]->allowed()) {
  812. totUserSpecifiedSlaveSpeed += slave->speed();
  813. }
  814. }
  815. for(unsigned int i=0;i<_numBondedPaths;++i) {
  816. SharedPtr<Slave> slave =RR->bc->getSlaveBySocket(_policyAlias, _paths[i]->localSocket());
  817. if (_paths[i] && _paths[i]->allowed()) {
  818. slave->setRelativeSpeed(round( ((float)slave->speed() / (float)totUserSpecifiedSlaveSpeed) * 255));
  819. }
  820. }
  821. }
  822. float lat[ZT_MAX_PEER_NETWORK_PATHS];
  823. float pdv[ZT_MAX_PEER_NETWORK_PATHS];
  824. float plr[ZT_MAX_PEER_NETWORK_PATHS];
  825. float per[ZT_MAX_PEER_NETWORK_PATHS];
  826. float thr[ZT_MAX_PEER_NETWORK_PATHS];
  827. float thm[ZT_MAX_PEER_NETWORK_PATHS];
  828. float thv[ZT_MAX_PEER_NETWORK_PATHS];
  829. float maxLAT = 0;
  830. float maxPDV = 0;
  831. float maxPLR = 0;
  832. float maxPER = 0;
  833. float maxTHR = 0;
  834. float maxTHM = 0;
  835. float maxTHV = 0;
  836. float quality[ZT_MAX_PEER_NETWORK_PATHS];
  837. uint8_t alloc[ZT_MAX_PEER_NETWORK_PATHS];
  838. float totQuality = 0.0f;
  839. memset(&lat, 0, sizeof(lat));
  840. memset(&pdv, 0, sizeof(pdv));
  841. memset(&plr, 0, sizeof(plr));
  842. memset(&per, 0, sizeof(per));
  843. memset(&thr, 0, sizeof(thr));
  844. memset(&thm, 0, sizeof(thm));
  845. memset(&thv, 0, sizeof(thv));
  846. memset(&quality, 0, sizeof(quality));
  847. memset(&alloc, 0, sizeof(alloc));
  848. // Compute initial summary statistics
  849. for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  850. if (!_paths[i] || !_paths[i]->allowed()) {
  851. continue;
  852. }
  853. // Compute/Smooth average of real-world observations
  854. _paths[i]->_latencyMean = _paths[i]->latencySamples.mean();
  855. _paths[i]->_latencyVariance = _paths[i]->latencySamples.stddev();
  856. _paths[i]->_packetErrorRatio = 1.0 - (_paths[i]->packetValiditySamples.count() ? _paths[i]->packetValiditySamples.mean() : 1.0);
  857. if (userHasSpecifiedSlaveSpeeds()) {
  858. // Use user-reported metrics
  859. SharedPtr<Slave> slave =RR->bc->getSlaveBySocket(_policyAlias, _paths[i]->localSocket());
  860. if (slave) {
  861. _paths[i]->_throughputMean = slave->speed();
  862. _paths[i]->_throughputVariance = 0;
  863. }
  864. }
  865. /*
  866. else {
  867. // Use estimated metrics
  868. if (_paths[i]->throughputSamples.count()) {
  869. // If we have samples, use them
  870. _paths[i]->throughputMean = (uint64_t)_paths[i]->throughputSamples.mean();
  871. if (_paths[i]->throughputMean > 0) {
  872. _paths[i]->throughputVarianceSamples.push((float)_paths[i]->throughputSamples.stddev() / (float)_paths[i]->throughputMean);
  873. _paths[i]->throughputVariance = _paths[i]->throughputVarianceSamples.mean();
  874. }
  875. }
  876. else {
  877. // No samples have been collected yet, assume best case scenario
  878. _paths[i]->throughputMean = ZT_QOS_THR_NORM_MAX;
  879. _paths[i]->throughputVariance = 0;
  880. }
  881. }
  882. */
  883. // Drain unacknowledged QoS records
  884. std::map<uint64_t,uint64_t>::iterator it = _paths[i]->qosStatsOut.begin();
  885. uint64_t currentLostRecords = 0;
  886. while (it != _paths[i]->qosStatsOut.end()) {
  887. int qosRecordTimeout = 5000; //_paths[i]->monitorInterval() * ZT_MULTIPATH_QOS_ACK_INTERVAL_MULTIPLIER * 8;
  888. if ((now - it->second) >= qosRecordTimeout) {
  889. //fprintf(stderr, "packetId=%llx was lost\n", it->first);
  890. it = _paths[i]->qosStatsOut.erase(it);
  891. ++currentLostRecords;
  892. } else { ++it; }
  893. }
  894. quality[i]=0;
  895. totQuality=0;
  896. // Normalize raw observations according to sane limits and/or user specified values
  897. lat[i] = 1.0 / expf(4*Utils::normalize(_paths[i]->_latencyMean, 0, _maxAcceptableLatency, 0, 1));
  898. pdv[i] = 1.0 / expf(4*Utils::normalize(_paths[i]->_latencyVariance, 0, _maxAcceptablePacketDelayVariance, 0, 1));
  899. plr[i] = 1.0 / expf(4*Utils::normalize(_paths[i]->_packetLossRatio, 0, _maxAcceptablePacketLossRatio, 0, 1));
  900. per[i] = 1.0 / expf(4*Utils::normalize(_paths[i]->_packetErrorRatio, 0, _maxAcceptablePacketErrorRatio, 0, 1));
  901. //thr[i] = 1.0; //Utils::normalize(_paths[i]->throughputMean, 0, ZT_QOS_THR_NORM_MAX, 0, 1);
  902. //thm[i] = 1.0; //Utils::normalize(_paths[i]->throughputMax, 0, ZT_QOS_THM_NORM_MAX, 0, 1);
  903. //thv[i] = 1.0; //1.0 / expf(4*Utils::normalize(_paths[i]->throughputVariance, 0, ZT_QOS_THV_NORM_MAX, 0, 1));
  904. //scp[i] = _paths[i]->ipvPref != 0 ? 1.0 : Utils::normalize(_paths[i]->ipScope(), InetAddress::IP_SCOPE_NONE, InetAddress::IP_SCOPE_PRIVATE, 0, 1);
  905. // Record bond-wide maximums to determine relative values
  906. maxLAT = lat[i] > maxLAT ? lat[i] : maxLAT;
  907. maxPDV = pdv[i] > maxPDV ? pdv[i] : maxPDV;
  908. maxPLR = plr[i] > maxPLR ? plr[i] : maxPLR;
  909. maxPER = per[i] > maxPER ? per[i] : maxPER;
  910. //maxTHR = thr[i] > maxTHR ? thr[i] : maxTHR;
  911. //maxTHM = thm[i] > maxTHM ? thm[i] : maxTHM;
  912. //maxTHV = thv[i] > maxTHV ? thv[i] : maxTHV;
  913. //fprintf(stdout, "EH %d: lat=%8.3f, ltm=%8.3f, pdv=%8.3f, plr=%5.3f, per=%5.3f, thr=%8f, thm=%5.3f, thv=%5.3f, avl=%5.3f, age=%8.2f, scp=%4d, q=%5.3f, qtot=%5.3f, ac=%d if=%s, path=%s\n",
  914. // i, lat[i], ltm[i], pdv[i], plr[i], per[i], thr[i], thm[i], thv[i], avl[i], age[i], scp[i], quality[i], totQuality, alloc[i], getSlave(_paths[i])->ifname().c_str(), pathStr);
  915. }
  916. // Convert metrics to relative quantities and apply contribution weights
  917. for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  918. if (_paths[i] && _paths[i]->bonded()) {
  919. quality[i] += ((maxLAT > 0.0f ? lat[i] / maxLAT : 0.0f) * _qualityWeights[ZT_QOS_LAT_IDX]);
  920. quality[i] += ((maxPDV > 0.0f ? pdv[i] / maxPDV : 0.0f) * _qualityWeights[ZT_QOS_PDV_IDX]);
  921. quality[i] += ((maxPLR > 0.0f ? plr[i] / maxPLR : 0.0f) * _qualityWeights[ZT_QOS_PLR_IDX]);
  922. quality[i] += ((maxPER > 0.0f ? per[i] / maxPER : 0.0f) * _qualityWeights[ZT_QOS_PER_IDX]);
  923. //quality[i] += ((maxTHR > 0.0f ? thr[i] / maxTHR : 0.0f) * _qualityWeights[ZT_QOS_THR_IDX]);
  924. //quality[i] += ((maxTHM > 0.0f ? thm[i] / maxTHM : 0.0f) * _qualityWeights[ZT_QOS_THM_IDX]);
  925. //quality[i] += ((maxTHV > 0.0f ? thv[i] / maxTHV : 0.0f) * _qualityWeights[ZT_QOS_THV_IDX]);
  926. //quality[i] += (scp[i] * _qualityWeights[ZT_QOS_SCP_IDX]);
  927. totQuality += quality[i];
  928. }
  929. }
  930. //
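// Convert each bonded path's share of total quality into an 8-bit allocation value (out of 255) used when assigning flows.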
  931. for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  932. if (_paths[i] && _paths[i]->bonded()) {
  933. alloc[i] = std::ceil((quality[i] / totQuality) * (float)255);
  934. _paths[i]->_allocation = alloc[i];
  935. }
  936. }
  937. if ((now - _lastLogTS) > 500) {
  938. if (!relevant()) {return;}
  939. //fprintf(stderr, "\n");
  940. _lastLogTS = now;
  941. int numPlottablePaths=0;
  942. for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  943. if (_paths[i]) {
  944. ++numPlottablePaths;
  945. _paths[i]->address().toString(pathStr);
  946. //fprintf(stderr, "%lu FIN [%d/%d]: pmi=%5d, lat=%4.3f, ltm=%4.3f, pdv=%4.3f, plr=%4.3f, per=%4.3f, thr=%4.3f, thm=%4.3f, thv=%4.3f, age=%4.3f, scp=%4d, q=%4.3f, qtot=%4.3f, ac=%4d, asf=%3d, if=%s, path=%20s, bond=%d, qosout=%d, plrraw=%d\n",
  947. // ((now - RR->bc->getBondStartTime())), i, _numBondedPaths, _paths[i]->monitorInterval,
  948. // lat[i], ltm[i], pdv[i], plr[i], per[i], thr[i], thm[i], thv[i], age[i], scp[i],
  949. // quality[i], totQuality, alloc[i], _paths[i]->assignedFlowCount, getSlave(_paths[i])->ifname().c_str(), pathStr, _paths[i]->bonded(), _paths[i]->qosStatsOut.size(), _paths[i]->packetLossRatio);
  950. }
  951. }
  952. if (numPlottablePaths < 2) {
  953. return;
  954. }
  955. if (!_header) {
  956. fprintf(stdout, "now, bonded, relativeUnderload, flows, ");
  957. for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  958. if (_paths[i]) {
  959. _paths[i]->address().toString(pathStr);
  960. std::string label = std::string((pathStr)) + " " + getSlave(_paths[i])->ifname();
  961. for (int j=0; j<19; ++j) {
  962. fprintf(stdout, "%s, ", label.c_str());
  963. }
  964. }
  965. }
  966. _header=true;
  967. }
  968. fprintf(stdout, "%ld, %d, %d, %d, ",((now - RR->bc->getBondStartTime())),_numBondedPaths,_totalBondUnderload, _flows.size());
  969. for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  970. if (_paths[i]) {
  971. _paths[i]->address().toString(pathStr);
  972. fprintf(stdout, "%s, %s, %8.3f, %8.3f, %8.3f, %5.3f, %5.3f, %5.3f, %8f, %5.3f, %5.3f, %d, %5.3f, %d, %d, %d, %d, %d, %d, ",
  973. getSlave(_paths[i])->ifname().c_str(), pathStr, _paths[i]->_latencyMean, lat[i],pdv[i], _paths[i]->_packetLossRatio, plr[i],per[i],thr[i],thm[i],thv[i],(now - _paths[i]->lastIn()),quality[i],alloc[i],
  974. _paths[i]->_relativeByteLoad, _paths[i]->_assignedFlowCount, _paths[i]->alive(now, true), _paths[i]->eligible(now,_ackSendInterval), _paths[i]->qosStatsOut.size());
  975. }
  976. }
  977. fprintf(stdout, "\n");
  978. }
  979. }
  980. void Bond::processBalanceTasks(const int64_t now)
  981. {
  982. //fprintf(stderr, "processBalanceTasks\n");
  983. char curPathStr[128];
  984. if (_allowFlowHashing) {
  985. /**
  986. * Clean up and reset flows if necessary
  987. */
  988. if ((now - _lastFlowExpirationCheck) > ZT_MULTIPATH_FLOW_CHECK_INTERVAL) {
  989. Mutex::Lock _l(_flows_m);
  990. forgetFlowsWhenNecessary(ZT_MULTIPATH_FLOW_EXPIRATION_INTERVAL,false,now);
  991. _lastFlowExpirationCheck = now;
  992. }
  993. if ((now - _lastFlowStatReset) > ZT_FLOW_STATS_RESET_INTERVAL) {
  994. Mutex::Lock _l(_flows_m);
  995. _lastFlowStatReset = now;
  996. std::map<int32_t,SharedPtr<Flow> >::iterator it = _flows.begin();
  997. while (it != _flows.end()) {
  998. it->second->resetByteCounts();
  999. ++it;
  1000. }
  1001. }
  1002. /**
  1003. * Re-allocate flows from dead paths
  1004. */
  1005. if (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_XOR || _bondingPolicy == ZT_BONDING_POLICY_BALANCE_AWARE) {
  1006. Mutex::Lock _l(_flows_m);
  1007. for (int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
  1008. if (!_paths[i]) {
  1009. continue;
  1010. }
  1011. if (!_paths[i]->eligible(now,_ackSendInterval) && _paths[i]->_shouldReallocateFlows) {
  1012. _paths[i]->address().toString(curPathStr);
  1013. fprintf(stderr, "%d reallocating flows from dead path %s on %s\n", (RR->node->now() - RR->bc->getBondStartTime()), curPathStr, getSlave(_paths[i])->ifname().c_str());
  1014. std::map<int32_t,SharedPtr<Flow> >::iterator flow_it = _flows.begin();
  1015. while (flow_it != _flows.end()) {
  1016. if (flow_it->second->assignedPath() == _paths[i]) {
  1017. if(assignFlowToBondedPath(flow_it->second, now)) {
  1018. _paths[i]->_assignedFlowCount--;
  1019. }
  1020. }
  1021. ++flow_it;
  1022. }
  1023. _paths[i]->_shouldReallocateFlows = false;
  1024. }
  1025. }
  1026. }
  1027. }
  1028. /**
  1029. * Tasks specific to (Balance Round Robin)
  1030. */
  1031. if (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_RR) {
  1032. if (_allowFlowHashing) {
  1033. // TODO: Should ideally failover from (idx) to a random slave, this is so that (idx+1) isn't overloaded
  1034. }
  1035. else if (!_allowFlowHashing) {
  1036. // Nothing
  1037. }
  1038. }
  1039. /**
  1040. * Tasks specific to (Balance XOR)
  1041. */
  1042. if (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_XOR) {
  1043. // Nothing specific for XOR
  1044. }
  1045. /**
  1046. * Tasks specific to (Balance Aware)
  1047. */
  1048. if (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_AWARE) {
  1049. if (_allowFlowHashing) {
  1050. Mutex::Lock _l(_flows_m);
  1051. /**
  1052. * Re-balance flows in proportion to slave capacity (or when eligibility changes)
  1053. */
  1054. if ((now - _lastFlowRebalance) > ZT_FLOW_REBALANCE_INTERVAL) {
  1055. /**
  1056. * Determine "load" for bonded paths
  1057. */
  1058. uint64_t totalBytes = 0;
  1059. for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) { // first pass: compute absolute byte load and total
  1060. if (_paths[i] && _paths[i]->bonded()) {
  1061. _paths[i]->_byteLoad = 0;
  1062. std::map<int32_t,SharedPtr<Flow> >::iterator flow_it = _flows.begin();
  1063. while (flow_it != _flows.end()) {
  1064. if (flow_it->second->assignedPath() == _paths[i]) {
  1065. _paths[i]->_byteLoad += flow_it->second->totalBytes();
  1066. }
  1067. ++flow_it;
  1068. }
  1069. totalBytes += _paths[i]->_byteLoad;
  1070. }
  1071. }
  1072. /**
  1073. * Determine "affinity" for bonded path
  1074. */
  1075. //fprintf(stderr, "\n\n");
  1076. _totalBondUnderload = 0;
  1077. /*
  1078. for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) { // second pass: compute relative byte loads and total imbalance
  1079. if (_paths[i] && _paths[i]->bonded()) {
  1080. if (totalBytes) {
  1081. uint8_t relativeByteLoad = std::ceil(((float)_paths[i]->_byteLoad / (float)totalBytes) * (float)255);
  1082. //fprintf(stderr, "lastComputedAllocation = %d\n", _paths[i]->allocation);
  1083. //fprintf(stderr, " relativeByteLoad = %d\n", relativeByteLoad);
  1084. _paths[i]->_relativeByteLoad = relativeByteLoad;
  1085. uint8_t relativeUnderload = std::max(0, (int)_paths[i]->_allocation - (int)relativeByteLoad);
  1086. //fprintf(stderr, " relativeUnderload = %d\n", relativeUnderload);
  1087. _totalBondUnderload += relativeUnderload;
  1088. //fprintf(stderr, " _totalBondUnderload = %d\n\n", _totalBondUnderload);
  1089. //_paths[i]->affinity = (relativeUnderload > 0 ? relativeUnderload : _paths[i]->_allocation);
  1090. }
  1091. else { // set everything to base values
  1092. _totalBondUnderload = 0;
  1093. //_paths[i]->affinity = 0;
  1094. }
  1095. }
  1096. }
  1097. */
  1098. //fprintf(stderr, "_totalBondUnderload=%d (end)\n\n", _totalBondUnderload);
  1099. /**
  1100. *
  1101. */
  1102. //fprintf(stderr, "_lastFlowRebalance\n");
  1103. std::map<int32_t, SharedPtr<Flow> >::iterator it = _flows.begin();
  1104. while (it != _flows.end()) {
  1105. int32_t flowId = it->first;
  1106. SharedPtr<Flow> flow = it->second;
  1107. if ((now - flow->_lastPathReassignment) > ZT_FLOW_MIN_REBALANCE_INTERVAL) {
  1108. //fprintf(stdout, " could move : %x\n", flowId);
  1109. }
  1110. ++it;
  1111. }
  1112. _lastFlowRebalance = now;
  1113. }
  1114. }
  1115. else if (!_allowFlowHashing) {
  1116. // Nothing
  1117. }
  1118. }
  1119. }
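/**
 * Pop the next path off of the active-backup failover queue and make it the new
 * active path, recording the time of the change and resetting per-path packet
 * counters so subsequent comparisons start from a clean slate.
 */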
  1120. void Bond::dequeueNextActiveBackupPath(const uint64_t now)
  1121. {
  1122. //fprintf(stderr, "dequeueNextActiveBackupPath\n");
  1123. if (_abFailoverQueue.empty()) {
  1124. return;
  1125. }
  1126. _abPath = _abFailoverQueue.front();
  1127. _abFailoverQueue.pop_front();
  1128. _lastActiveBackupPathChange = now;
  1129. for (int i=0; i<ZT_MAX_PEER_NETWORK_PATHS; ++i) {
  1130. if (_paths[i]) {
  1131. _paths[i]->resetPacketCounts();
  1132. }
  1133. }
  1134. }
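/**
 * Main periodic task for the active-backup policy: selects an initial active
 * path, curates the failover queue (per user instructions or measured
 * performance), and switches the active path according to the configured
 * slave re-selection policy (always / better / failure / optimize).
 */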
  1135. void Bond::processActiveBackupTasks(const int64_t now)
  1136. {
  1137. //fprintf(stderr, "%llu processActiveBackupTasks\n", (now - RR->bc->getBondStartTime()));
  1138. char pathStr[128]; char prevPathStr[128]; char curPathStr[128];
  1139. SharedPtr<Path> prevActiveBackupPath = _abPath;
  1140. SharedPtr<Path> nonPreferredPath;
  1141. bool bFoundPrimarySlave = false;
  1142. /**
  1143. * Select initial "active" active-backup slave
  1144. */
  1145. if (!_abPath) {
  1146. fprintf(stderr, "%llu no active backup path yet...\n", ((now - RR->bc->getBondStartTime())));
1147. /**
1148. * [Automatic mode]
1149. * The user has not explicitly specified slaves or a failover schedule, so the
1150. * bonding policy will select the first eligible path and set it as the active
1151. * backup path. If a substantially better path is detected, the bonding policy
1152. * will assign it as the new active backup path. If the path fails, it will
1153. * simply find the next eligible path.
1154. */
  1155. if (!userHasSpecifiedSlaves()) {
  1156. fprintf(stderr, "%llu AB: (auto) user did not specify any slaves. waiting until we know more\n", ((now - RR->bc->getBondStartTime())));
  1157. for (int i=0; i<ZT_MAX_PEER_NETWORK_PATHS; ++i) {
  1158. if (_paths[i] && _paths[i]->eligible(now,_ackSendInterval)) {
  1159. _paths[i]->address().toString(curPathStr);
1160. SharedPtr<Slave> slave = RR->bc->getSlaveBySocket(_policyAlias, _paths[i]->localSocket());
  1161. if (slave) {
  1162. fprintf(stderr, "%llu AB: (initial) [%d] found eligible path %s on: %s\n", ((now - RR->bc->getBondStartTime())), i, curPathStr, slave->ifname().c_str());
  1163. }
  1164. _abPath = _paths[i];
  1165. break;
  1166. }
  1167. }
  1168. }
  1169. /**
  1170. * [Manual mode]
  1171. * The user has specified slaves or failover rules that the bonding policy should adhere to.
  1172. */
  1173. else if (userHasSpecifiedSlaves()) {
  1174. fprintf(stderr, "%llu AB: (manual) no active backup slave, checking local.conf\n", ((now - RR->bc->getBondStartTime())));
  1175. if (userHasSpecifiedPrimarySlave()) {
  1176. fprintf(stderr, "%llu AB: (manual) user has specified primary slave, looking for it.\n", ((now - RR->bc->getBondStartTime())));
  1177. for (int i=0; i<ZT_MAX_PEER_NETWORK_PATHS; ++i) {
  1178. if (!_paths[i]) {
  1179. continue;
  1180. }
1181. SharedPtr<Slave> slave = RR->bc->getSlaveBySocket(_policyAlias, _paths[i]->localSocket());
1182. if (slave && _paths[i]->eligible(now,_ackSendInterval) && slave->primary()) {
  1183. if (!_paths[i]->preferred()) {
  1184. _paths[i]->address().toString(curPathStr);
  1185. fprintf(stderr, "%llu AB: (initial) [%d] found path on primary slave, taking note in case we don't find a preferred path\n", ((now - RR->bc->getBondStartTime())), i);
  1186. nonPreferredPath = _paths[i];
  1187. bFoundPrimarySlave = true;
  1188. }
  1189. if (_paths[i]->preferred()) {
  1190. _abPath = _paths[i];
  1191. _abPath->address().toString(curPathStr);
1192. SharedPtr<Slave> slave = RR->bc->getSlaveBySocket(_policyAlias, _paths[i]->localSocket());
  1193. if (slave) {
  1194. fprintf(stderr, "%llu AB: (initial) [%d] found preferred path %s on primary slave: %s\n", ((now - RR->bc->getBondStartTime())), i, curPathStr, slave->ifname().c_str());
  1195. }
  1196. bFoundPrimarySlave = true;
  1197. break;
  1198. }
  1199. }
  1200. }
  1201. if (_abPath) {
  1202. _abPath->address().toString(curPathStr);
1203. SharedPtr<Slave> slave = RR->bc->getSlaveBySocket(_policyAlias, _abPath->localSocket());
  1204. if (slave) {
  1205. fprintf(stderr, "%llu AB: (initial) found preferred primary path: %s on %s\n", ((now - RR->bc->getBondStartTime())), curPathStr, slave->ifname().c_str());
  1206. }
  1207. }
  1208. else {
  1209. if (bFoundPrimarySlave && nonPreferredPath) {
  1210. fprintf(stderr, "%llu AB: (initial) found a non-preferred primary path\n", ((now - RR->bc->getBondStartTime())));
  1211. _abPath = nonPreferredPath;
  1212. }
  1213. }
  1214. if (!_abPath) {
  1215. fprintf(stderr, "%llu AB: (initial) designated primary slave is not yet ready\n", ((now - RR->bc->getBondStartTime())));
  1216. // TODO: Should fail-over to specified backup or just wait?
  1217. }
  1218. }
  1219. else if (!userHasSpecifiedPrimarySlave()) {
  1220. int _abIdx = ZT_MAX_PEER_NETWORK_PATHS;
  1221. fprintf(stderr, "%llu AB: (initial) user did not specify primary slave, just picking something\n", ((now - RR->bc->getBondStartTime())));
  1222. for (int i=0; i<ZT_MAX_PEER_NETWORK_PATHS; ++i) {
  1223. if (_paths[i] && _paths[i]->eligible(now,_ackSendInterval)) {
  1224. _abIdx = i;
  1225. break;
  1226. }
  1227. }
  1228. if (_abIdx == ZT_MAX_PEER_NETWORK_PATHS) {
  1229. fprintf(stderr, "%llu AB: (initial) unable to find a candidate next-best, no change\n", ((now - RR->bc->getBondStartTime())));
  1230. }
  1231. else {
1232. _abPath = _paths[_abIdx]; _abPath->address().toString(curPathStr);
1233. SharedPtr<Slave> slave = RR->bc->getSlaveBySocket(_policyAlias, _abPath->localSocket());
1234. if (slave) {
1235. fprintf(stderr, "%llu AB: (initial) selected non-primary slave idx=%d, %s on %s\n", ((now - RR->bc->getBondStartTime())), _abIdx, curPathStr, slave->ifname().c_str());
  1236. }
  1237. }
  1238. }
  1239. }
  1240. }
  1241. /**
  1242. * Update and maintain the active-backup failover queue
  1243. */
  1244. if (_abPath) {
  1245. // Don't worry about the failover queue until we have an active slave
  1246. // Remove ineligible paths from the failover slave queue
  1247. for (std::list<SharedPtr<Path> >::iterator it(_abFailoverQueue.begin()); it!=_abFailoverQueue.end();) {
  1248. if ((*it) && !(*it)->eligible(now,_ackSendInterval)) {
  1249. (*it)->address().toString(curPathStr);
1250. SharedPtr<Slave> slave = RR->bc->getSlaveBySocket(_policyAlias, (*it)->localSocket());
  1251. if (slave) {
  1252. fprintf(stderr, "%llu AB: (fq) %s on %s is now ineligible, removing from failover queue\n", ((now - RR->bc->getBondStartTime())), curPathStr, slave->ifname().c_str());
  1253. }
  1254. it = _abFailoverQueue.erase(it);
  1255. } else {
  1256. ++it;
  1257. }
  1258. }
1259. /**
1260. * Failover instructions were provided by the user; build the queue according to those
1261. * as well as IPv preference, disregarding performance.
1262. */
  1263. if (userHasSpecifiedFailoverInstructions()) {
  1264. /**
  1265. * Clear failover scores
  1266. */
  1267. for (int i=0; i<ZT_MAX_PEER_NETWORK_PATHS; ++i) {
  1268. if (_paths[i]) {
  1269. _paths[i]->_failoverScore = 0;
  1270. }
  1271. }
  1272. //fprintf(stderr, "AB: (fq) user has specified specific failover instructions, will follow them.\n");
  1273. for (int i=0; i<ZT_MAX_PEER_NETWORK_PATHS; ++i) {
  1274. if (!_paths[i] || !_paths[i]->allowed() || !_paths[i]->eligible(now,_ackSendInterval)) {
  1275. continue;
  1276. }
1277. SharedPtr<Slave> slave = RR->bc->getSlaveBySocket(_policyAlias, _paths[i]->localSocket());
  1278. _paths[i]->address().toString(pathStr);
  1279. int failoverScoreHandicap = _paths[i]->_failoverScore;
  1280. if (_paths[i]->preferred())
  1281. {
  1282. failoverScoreHandicap += ZT_MULTIPATH_FAILOVER_HANDICAP_PREFERRED;
  1283. //fprintf(stderr, "%s on %s ----> %d for preferred\n", pathStr, _paths[i]->ifname().c_str(), failoverScoreHandicap);
  1284. }
1285. if (slave->primary()) {
1286. // Boost the failover score of paths residing on the slave the user designated as primary
  1287. failoverScoreHandicap += ZT_MULTIPATH_FAILOVER_HANDICAP_PRIMARY;
  1288. //fprintf(stderr, "%s on %s ----> %d for primary\n", pathStr, _paths[i]->ifname().c_str(), failoverScoreHandicap);
  1289. }
  1290. if (!_paths[i]->_failoverScore) {
  1291. // If we didn't inherit a failover score from a "parent" that wants to use this path as a failover
  1292. int newHandicap = failoverScoreHandicap ? failoverScoreHandicap : _paths[i]->_allocation;
  1293. _paths[i]->_failoverScore = newHandicap;
  1294. //fprintf(stderr, "%s on %s ----> %d for allocation\n", pathStr, _paths[i]->ifname().c_str(), newHandicap);
  1295. }
  1296. SharedPtr<Slave> failoverSlave;
  1297. if (slave->failoverToSlave().length()) {
  1298. failoverSlave = RR->bc->getSlaveByName(_policyAlias, slave->failoverToSlave());
  1299. }
  1300. if (failoverSlave) {
  1301. for (int j=0; j<ZT_MAX_PEER_NETWORK_PATHS; j++) {
  1302. if (_paths[j] && getSlave(_paths[j]) == failoverSlave.ptr()) {
  1303. _paths[j]->address().toString(pathStr);
  1304. int inheritedHandicap = failoverScoreHandicap - 10;
  1305. int newHandicap = _paths[j]->_failoverScore > inheritedHandicap ? _paths[j]->_failoverScore : inheritedHandicap;
  1306. //fprintf(stderr, "\thanding down %s on %s ----> %d\n", pathStr, getSlave(_paths[j])->ifname().c_str(), newHandicap);
  1307. if (!_paths[j]->preferred()) {
  1308. newHandicap--;
  1309. }
  1310. _paths[j]->_failoverScore = newHandicap;
  1311. }
  1312. }
  1313. }
  1314. if (_paths[i].ptr() != _abPath.ptr()) {
  1315. bool bFoundPathInQueue = false;
  1316. for (std::list<SharedPtr<Path> >::iterator it(_abFailoverQueue.begin()); it!=_abFailoverQueue.end();++it) {
  1317. if (_paths[i].ptr() == (*it).ptr()) {
  1318. bFoundPathInQueue = true;
  1319. }
  1320. }
  1321. if (!bFoundPathInQueue) {
  1322. _paths[i]->address().toString(curPathStr);
  1323. fprintf(stderr, "%llu AB: (fq) [%d] added %s on %s to queue\n", ((now - RR->bc->getBondStartTime())), i, curPathStr, getSlave(_paths[i])->ifname().c_str());
  1324. _abFailoverQueue.push_front(_paths[i]);
  1325. }
  1326. }
  1327. }
  1328. }
  1329. /**
  1330. * No failover instructions provided by user, build queue according to performance
  1331. * and IPv preference.
  1332. */
  1333. else if (!userHasSpecifiedFailoverInstructions()) {
  1334. for (int i=0; i<ZT_MAX_PEER_NETWORK_PATHS; ++i) {
  1335. if (!_paths[i]
  1336. || !_paths[i]->allowed()
  1337. || !_paths[i]->eligible(now,_ackSendInterval)) {
  1338. continue;
  1339. }
  1340. int failoverScoreHandicap = 0;
  1341. if (_paths[i]->preferred()) {
  1342. failoverScoreHandicap = ZT_MULTIPATH_FAILOVER_HANDICAP_PREFERRED;
  1343. }
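// Paths that are ineligible once the refractory period is considered receive a large negative handicap so they sort toward the back of the failover queue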
  1344. bool includeRefractoryPeriod = true;
  1345. if (!_paths[i]->eligible(now,includeRefractoryPeriod)) {
  1346. failoverScoreHandicap = -10000;
  1347. }
  1348. if (getSlave(_paths[i])->primary() && _abSlaveSelectMethod != ZT_MULTIPATH_RESELECTION_POLICY_OPTIMIZE) {
  1349. // If using "optimize" primary reselect mode, ignore user slave designations
  1350. failoverScoreHandicap = ZT_MULTIPATH_FAILOVER_HANDICAP_PRIMARY;
  1351. }
  1352. if (_paths[i].ptr() == negotiatedPath.ptr()) {
  1353. _paths[i]->_negotiated = true;
  1354. failoverScoreHandicap = ZT_MULTIPATH_FAILOVER_HANDICAP_NEGOTIATED;
  1355. } else {
  1356. _paths[i]->_negotiated = false;
  1357. }
  1358. _paths[i]->_failoverScore = _paths[i]->_allocation + failoverScoreHandicap;
  1359. if (_paths[i].ptr() != _abPath.ptr()) {
  1360. bool bFoundPathInQueue = false;
  1361. for (std::list<SharedPtr<Path> >::iterator it(_abFailoverQueue.begin()); it!=_abFailoverQueue.end();++it) {
  1362. if (_paths[i].ptr() == (*it).ptr()) {
  1363. bFoundPathInQueue = true;
  1364. }
  1365. }
  1366. if (!bFoundPathInQueue) {
  1367. _paths[i]->address().toString(curPathStr);
  1368. fprintf(stderr, "%llu AB: (fq) [%d] added %s on %s to queue\n", ((now - RR->bc->getBondStartTime())), i, curPathStr, getSlave(_paths[i])->ifname().c_str());
  1369. _abFailoverQueue.push_front(_paths[i]);
  1370. }
  1371. }
  1372. }
  1373. }
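// Sort the failover queue by path quality so that the preferred fallback candidate sits at the front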
  1374. _abFailoverQueue.sort(PathQualityComparator());
  1375. if (_abFailoverQueue.empty()) {
  1376. fprintf(stderr, "%llu AB: (fq) the failover queue is empty, the active-backup bond is no longer fault-tolerant\n", ((now - RR->bc->getBondStartTime())));
  1377. }
  1378. }
  1379. /**
  1380. * Short-circuit if we have no queued paths
  1381. */
  1382. if (_abFailoverQueue.empty()) {
  1383. return;
  1384. }
  1385. /**
  1386. * Fulfill primary reselect obligations
  1387. */
  1388. if (_abPath && !_abPath->eligible(now,_ackSendInterval)) { // Implicit ZT_MULTIPATH_RESELECTION_POLICY_FAILURE
1389. _abPath->address().toString(curPathStr); fprintf(stderr, "%llu AB: (failure) failover event! active backup path (%s) is no longer eligible\n", ((now - RR->bc->getBondStartTime())), curPathStr);
  1390. if (!_abFailoverQueue.empty()) {
  1391. fprintf(stderr, "%llu AB: (failure) there are (%lu) slaves in queue to choose from...\n", ((now - RR->bc->getBondStartTime())), _abFailoverQueue.size());
  1392. dequeueNextActiveBackupPath(now);
1393. _abPath->address().toString(curPathStr); fprintf(stderr, "%llu AB: (failure) switched to %s on %s\n", ((now - RR->bc->getBondStartTime())), curPathStr, getSlave(_abPath)->ifname().c_str());
  1394. } else {
  1395. fprintf(stderr, "%llu AB: (failure) nothing available in the slave queue, doing nothing.\n", ((now - RR->bc->getBondStartTime())));
  1396. }
  1397. }
  1398. /**
  1399. * Detect change to prevent flopping during later optimization step.
  1400. */
  1401. if (prevActiveBackupPath != _abPath) {
  1402. _lastActiveBackupPathChange = now;
  1403. }
  1404. if (_abSlaveSelectMethod == ZT_MULTIPATH_RESELECTION_POLICY_ALWAYS) {
  1405. if (_abPath && !getSlave(_abPath)->primary()
  1406. && getSlave(_abFailoverQueue.front())->primary()) {
  1407. fprintf(stderr, "%llu AB: (always) switching to available primary\n", ((now - RR->bc->getBondStartTime())));
  1408. dequeueNextActiveBackupPath(now);
  1409. }
  1410. }
  1411. if (_abSlaveSelectMethod == ZT_MULTIPATH_RESELECTION_POLICY_BETTER) {
  1412. if (_abPath && !getSlave(_abPath)->primary()) {
1413. fprintf(stderr, "%llu AB: (better) active backup path is not on the primary slave, checking for a \"better\" primary according to the re-select policy.\n", ((now - RR->bc->getBondStartTime())));
  1414. if (getSlave(_abFailoverQueue.front())->primary()
  1415. && (_abFailoverQueue.front()->_failoverScore > _abPath->_failoverScore)) {
  1416. dequeueNextActiveBackupPath(now);
  1417. fprintf(stderr, "%llu AB: (better) switched back to user-defined primary\n", ((now - RR->bc->getBondStartTime())));
  1418. }
  1419. }
  1420. }
  1421. if (_abSlaveSelectMethod == ZT_MULTIPATH_RESELECTION_POLICY_OPTIMIZE && !_abFailoverQueue.empty()) {
1422. /**
1423. * Implement link negotiation that was previously decided
1424. */
1425. if (_abFailoverQueue.front()->_negotiated) {
1426. dequeueNextActiveBackupPath(now);
1427. _abPath->address().toString(curPathStr);
1428. fprintf(stderr, "%llu AB: (optimize) switched to negotiated path %s on %s\n", ((now - RR->bc->getBondStartTime())), curPathStr, getSlave(_abPath)->ifname().c_str());
  1429. _lastPathNegotiationCheck = now;
  1430. }
  1431. else {
  1432. // Try to find a better path and automatically switch to it -- not too often, though.
  1433. if ((now - _lastActiveBackupPathChange) > ZT_MULTIPATH_MIN_ACTIVE_BACKUP_AUTOFLOP_INTERVAL) {
  1434. if (!_abFailoverQueue.empty()) {
  1435. //fprintf(stderr, "AB: (optimize) there are (%d) slaves in queue to choose from...\n", _abFailoverQueue.size());
  1436. int newFScore = _abFailoverQueue.front()->_failoverScore;
  1437. int prevFScore = _abPath->_failoverScore;
  1438. // Establish a minimum switch threshold to prevent flapping
  1439. int failoverScoreDifference = _abFailoverQueue.front()->_failoverScore - _abPath->_failoverScore;
1440. int thresholdQuantity = (int)(ZT_MULTIPATH_ACTIVE_BACKUP_OPTIMIZE_MIN_THRESHOLD * (float)_abPath->_allocation);
  1441. if ((failoverScoreDifference > 0) && (failoverScoreDifference > thresholdQuantity)) {
  1442. SharedPtr<Path> oldPath = _abPath;
  1443. _abPath->address().toString(prevPathStr);
  1444. dequeueNextActiveBackupPath(now);
  1445. _abPath->address().toString(curPathStr);
  1446. fprintf(stderr, "%llu AB: (optimize) switched from %s on %s (fs=%d) to %s on %s (fs=%d)\n", ((now - RR->bc->getBondStartTime())), prevPathStr, getSlave(oldPath)->ifname().c_str(), prevFScore, curPathStr, getSlave(_abPath)->ifname().c_str(), newFScore);
  1447. }
  1448. }
  1449. }
  1450. }
  1451. }
  1452. }
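/**
 * Establish sane defaults for the chosen bonding policy (timers, flow hashing,
 * quality weights), optionally overlaying values inherited from a user-defined
 * template bond, then derive the remaining monitor/QoS/ACK intervals from the
 * failover interval.
 */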
  1453. void Bond::setReasonableDefaults(int policy, SharedPtr<Bond> templateBond, bool useTemplate)
  1454. {
  1455. // If invalid bonding policy, try default
  1456. int _defaultBondingPolicy = BondController::defaultBondingPolicy();
1457. if (policy <= ZT_BONDING_POLICY_NONE || policy > ZT_BONDING_POLICY_BALANCE_AWARE) {
1458. // If no valid default is set either, use NONE (effectively disabling this bond)
1459. if (_defaultBondingPolicy < ZT_BONDING_POLICY_NONE || _defaultBondingPolicy > ZT_BONDING_POLICY_BALANCE_AWARE) {
1460. _defaultBondingPolicy = ZT_BONDING_POLICY_NONE;
1461. }
1462. _bondingPolicy = _defaultBondingPolicy;
1463. } else {
1464. _bondingPolicy = policy;
1465. }
  1466. _downDelay = 0;
  1467. _upDelay = 0;
  1468. _allowFlowHashing=false;
  1469. _bondMonitorInterval=0;
  1470. _shouldCollectPathStatistics=false;
  1471. _lastBackgroundTaskCheck=0;
  1472. // Path negotiation
  1473. _allowPathNegotiation=false;
  1474. _lastPathNegotiationReceived=0;
  1475. _lastPathNegotiationCheck=0;
  1476. _pathNegotiationCutoffCount=0;
  1477. _localUtility=0;
  1478. _lastFlowStatReset=0;
  1479. _lastFlowExpirationCheck=0;
  1480. _numBondedPaths=0;
  1481. _rrPacketsSentOnCurrSlave=0;
  1482. _rrIdx=0;
  1483. _lastFlowRebalance=0;
  1484. _totalBondUnderload = 0;
  1485. //_maxAcceptableLatency
  1486. _maxAcceptablePacketDelayVariance = 50;
  1487. _maxAcceptablePacketLossRatio = 0.10;
  1488. _maxAcceptablePacketErrorRatio = 0.10;
  1489. _userHasSpecifiedSlaveSpeeds=0;
  1490. _lastFrame=0;
  1491. // TODO: Remove
  1492. _header=false;
  1493. _lastLogTS = RR->node->now();
  1494. _lastPrintTS = RR->node->now();
  1495. /**
  1496. * Paths are actively monitored to provide a real-time quality/preference-ordered rapid failover queue.
  1497. */
  1498. switch (policy) {
  1499. case ZT_BONDING_POLICY_ACTIVE_BACKUP:
  1500. _failoverInterval = 5000;
  1501. _abSlaveSelectMethod = ZT_MULTIPATH_RESELECTION_POLICY_OPTIMIZE;
  1502. _slaveMonitorStrategy = ZT_MULTIPATH_SLAVE_MONITOR_STRATEGY_DYNAMIC;
  1503. _qualityWeights[ZT_QOS_LAT_IDX] = 0.2f;
  1504. _qualityWeights[ZT_QOS_LTM_IDX] = 0.0f;
  1505. _qualityWeights[ZT_QOS_PDV_IDX] = 0.2f;
  1506. _qualityWeights[ZT_QOS_PLR_IDX] = 0.2f;
  1507. _qualityWeights[ZT_QOS_PER_IDX] = 0.2f;
  1508. _qualityWeights[ZT_QOS_THR_IDX] = 0.2f;
  1509. _qualityWeights[ZT_QOS_THM_IDX] = 0.0f;
  1510. _qualityWeights[ZT_QOS_THV_IDX] = 0.0f;
  1511. _qualityWeights[ZT_QOS_SCP_IDX] = 0.0f;
  1512. break;
  1513. /**
  1514. * All seemingly-alive paths are used. Paths are not actively monitored.
  1515. */
  1516. case ZT_BONDING_POLICY_BROADCAST:
  1517. _downDelay = 30000;
  1518. _upDelay = 0;
  1519. break;
  1520. /**
  1521. * Paths are monitored to determine when/if one needs to be added or removed from the rotation
  1522. */
  1523. case ZT_BONDING_POLICY_BALANCE_RR:
  1524. _failoverInterval = 5000;
  1525. _allowFlowHashing = false;
  1526. _packetsPerSlave = 1024;
  1527. _slaveMonitorStrategy = ZT_MULTIPATH_SLAVE_MONITOR_STRATEGY_DYNAMIC;
  1528. _qualityWeights[ZT_QOS_LAT_IDX] = 0.4f;
  1529. _qualityWeights[ZT_QOS_LTM_IDX] = 0.0f;
  1530. _qualityWeights[ZT_QOS_PDV_IDX] = 0.2f;
  1531. _qualityWeights[ZT_QOS_PLR_IDX] = 0.1f;
  1532. _qualityWeights[ZT_QOS_PER_IDX] = 0.1f;
  1533. _qualityWeights[ZT_QOS_THR_IDX] = 0.1f;
  1534. _qualityWeights[ZT_QOS_THM_IDX] = 0.0f;
  1535. _qualityWeights[ZT_QOS_THV_IDX] = 0.0f;
  1536. _qualityWeights[ZT_QOS_SCP_IDX] = 0.0f;
  1537. break;
  1538. /**
  1539. * Path monitoring is used to determine the capacity of each
  1540. * path and where to place the next flow.
  1541. */
  1542. case ZT_BONDING_POLICY_BALANCE_XOR:
1543. _failoverInterval = 5000;
1544. _upDelay = _bondMonitorInterval * 2;
  1545. _allowFlowHashing = true;
  1546. _slaveMonitorStrategy = ZT_MULTIPATH_SLAVE_MONITOR_STRATEGY_DYNAMIC;
  1547. _qualityWeights[ZT_QOS_LAT_IDX] = 0.4f;
  1548. _qualityWeights[ZT_QOS_LTM_IDX] = 0.0f;
  1549. _qualityWeights[ZT_QOS_PDV_IDX] = 0.2f;
  1550. _qualityWeights[ZT_QOS_PLR_IDX] = 0.1f;
  1551. _qualityWeights[ZT_QOS_PER_IDX] = 0.1f;
  1552. _qualityWeights[ZT_QOS_THR_IDX] = 0.1f;
  1553. _qualityWeights[ZT_QOS_THM_IDX] = 0.0f;
  1554. _qualityWeights[ZT_QOS_THV_IDX] = 0.0f;
  1555. _qualityWeights[ZT_QOS_SCP_IDX] = 0.0f;
  1556. break;
  1557. /**
  1558. * Path monitoring is used to determine the capacity of each
  1559. * path and where to place the next flow. Additionally, re-shuffling
  1560. * of flows may take place.
  1561. */
  1562. case ZT_BONDING_POLICY_BALANCE_AWARE:
  1563. _failoverInterval = 3000;
  1564. _allowFlowHashing = true;
  1565. _slaveMonitorStrategy = ZT_MULTIPATH_SLAVE_MONITOR_STRATEGY_DYNAMIC;
  1566. _qualityWeights[ZT_QOS_LAT_IDX] = 0.3f;
  1567. _qualityWeights[ZT_QOS_LTM_IDX] = 0.0f;
  1568. _qualityWeights[ZT_QOS_PDV_IDX] = 0.1f;
  1569. _qualityWeights[ZT_QOS_PLR_IDX] = 0.1f;
  1570. _qualityWeights[ZT_QOS_PER_IDX] = 0.1f;
  1571. _qualityWeights[ZT_QOS_THR_IDX] = 0.0f;
  1572. _qualityWeights[ZT_QOS_THM_IDX] = 0.4f;
  1573. _qualityWeights[ZT_QOS_THV_IDX] = 0.0f;
  1574. _qualityWeights[ZT_QOS_SCP_IDX] = 0.0f;
  1575. break;
  1576. default:
  1577. break;
  1578. }
  1579. if (useTemplate) {
  1580. _policyAlias = templateBond->_policyAlias;
  1581. _failoverInterval = templateBond->_failoverInterval;
  1582. _downDelay = templateBond->_downDelay;
  1583. _upDelay = templateBond->_upDelay;
  1584. fprintf(stderr, "TIMERS: strat=%d, fi= %d, bmi= %d, qos= %d, ack= %d, estimateInt= %d, refractory= %d, ud= %d, dd= %d\n",
  1585. _slaveMonitorStrategy,
  1586. _failoverInterval,
  1587. _bondMonitorInterval,
  1588. _qosSendInterval,
  1589. _ackSendInterval,
  1590. _qualityEstimationInterval,
  1591. _defaultPathRefractoryPeriod,
  1592. _upDelay,
  1593. _downDelay);
  1594. if (templateBond->_slaveMonitorStrategy == ZT_MULTIPATH_SLAVE_MONITOR_STRATEGY_PASSIVE
  1595. && templateBond->_failoverInterval != 0) {
1596. fprintf(stderr, "warning: passive path monitoring was specified; this will prevent failovers from happening in a timely manner.\n");
  1597. }
  1598. _abSlaveSelectMethod = templateBond->_abSlaveSelectMethod;
  1599. memcpy(_qualityWeights, templateBond->_qualityWeights, ZT_QOS_WEIGHT_SIZE * sizeof(float));
  1600. }
1601. //
1602. // Second, apply user-specified values (only if they make sense)
  1603. /**
  1604. * Timer geometries and counters
  1605. */
  1606. // TODO: Think more about the maximum
  1607. /*
  1608. if (originalBond._failoverInterval > 250 && originalBond._failoverInterval < 65535) {
  1609. _failoverInterval = originalBond._failoverInterval;
  1610. }
  1611. else {
  1612. fprintf(stderr, "warning: _failoverInterval (%d) is out of range, using default (%d)\n", originalBond._failoverInterval, _failoverInterval);
  1613. }
  1614. */
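// Derive monitoring, ACK, and QoS intervals from the failover interval chosen above and reset the related counters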
  1615. _bondMonitorInterval = _failoverInterval / 3;
  1616. BondController::setMinReqPathMonitorInterval(_bondMonitorInterval);
  1617. _ackSendInterval = _failoverInterval;
  1618. _qualityEstimationInterval = _failoverInterval * 2;
  1619. _dynamicPathMonitorInterval = 0;
  1620. _ackCutoffCount = 0;
  1621. _lastAckRateCheck = 0;
  1622. _qosSendInterval = _bondMonitorInterval * 4;
  1623. _qosCutoffCount = 0;
  1624. _lastQoSRateCheck = 0;
  1625. _lastQualityEstimation=0;
  1626. throughputMeasurementInterval = _ackSendInterval * 2;
  1627. _defaultPathRefractoryPeriod = 8000;
  1628. }
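/**
 * Overwrite this bond's quality weights with user-supplied values. The weights
 * are only accepted if exactly ZT_QOS_WEIGHT_SIZE values are provided and they
 * sum to approximately 1.0. Illustrative (hypothetical) usage, assuming the
 * nine ZT_QOS_*_IDX slots used above:
 *
 *   float w[ZT_QOS_WEIGHT_SIZE] = { 0.3f, 0.0f, 0.1f, 0.1f, 0.1f, 0.0f, 0.4f, 0.0f, 0.0f };
 *   bond->setUserQualityWeights(w, ZT_QOS_WEIGHT_SIZE);
 */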
  1629. void Bond::setUserQualityWeights(float weights[], int len)
  1630. {
  1631. if (len == ZT_QOS_WEIGHT_SIZE) {
  1632. float weightTotal = 0.0;
  1633. for (unsigned int i=0; i<ZT_QOS_WEIGHT_SIZE; ++i) {
  1634. weightTotal += weights[i];
  1635. }
  1636. if (weightTotal > 0.99 && weightTotal < 1.01) {
  1637. memcpy(_qualityWeights, weights, len * sizeof(float));
  1638. }
  1639. }
  1640. }
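/**
 * Debug helper: restricts verbose logging/diagnostic output (e.g. dumpInfo())
 * to a hard-coded set of peer addresses.
 */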
  1641. bool Bond::relevant() {
  1642. return _peer->identity().address().toInt() == 0x16a03a3d03
  1643. || _peer->identity().address().toInt() == 0x4410300d03
  1644. || _peer->identity().address().toInt() == 0x795cbf86fa;
  1645. }
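/**
 * Convenience wrapper: look up the Slave associated with a path's local socket
 * under this bond's policy alias.
 */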
  1646. SharedPtr<Slave> Bond::getSlave(const SharedPtr<Path>& path)
  1647. {
  1648. return RR->bc->getSlaveBySocket(_policyAlias, path->localSocket());
  1649. }
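/**
 * Print (at most once per second) a diagnostic table of all paths with their
 * latency, allocation, failover score, traffic counters, and status flags,
 * followed by the failover queue or bonded-path list depending on the policy.
 */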
  1650. void Bond::dumpInfo(const int64_t now)
  1651. {
  1652. char pathStr[128];
  1653. //char oldPathStr[128];
  1654. char currPathStr[128];
  1655. if (!relevant()) {
  1656. return;
  1657. }
  1658. /*
  1659. fprintf(stderr, "---[ bp=%d, id=%llx, dd=%d, up=%d, pmi=%d, specifiedSlaves=%d, _specifiedPrimarySlave=%d, _specifiedFailInst=%d ]\n",
  1660. _policy, _peer->identity().address().toInt(), _downDelay, _upDelay, _monitorInterval, _userHasSpecifiedSlaves, _userHasSpecifiedPrimarySlave, _userHasSpecifiedFailoverInstructions);
  1661. if (_bondingPolicy == ZT_BONDING_POLICY_ACTIVE_BACKUP) {
  1662. fprintf(stderr, "Paths (bp=%d, stats=%d, primaryReselect=%d) :\n",
  1663. _policy, _shouldCollectPathStatistics, _abSlaveSelectMethod);
  1664. }
  1665. if (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_RR
  1666. || _bondingPolicy == ZT_BONDING_POLICY_BALANCE_XOR
  1667. || _bondingPolicy == ZT_BONDING_POLICY_BALANCE_AWARE) {
  1668. fprintf(stderr, "Paths (bp=%d, stats=%d, fh=%d) :\n",
  1669. _policy, _shouldCollectPathStatistics, _allowFlowHashing);
  1670. }*/
  1671. if ((now - _lastPrintTS) < 1000) {
  1672. return;
  1673. }
  1674. _lastPrintTS = now;
  1675. fprintf(stderr, "\n\n");
  1676. for(int i=0; i<ZT_MAX_PEER_NETWORK_PATHS; ++i) {
  1677. if (_paths[i]) {
1678. SharedPtr<Slave> slave = RR->bc->getSlaveBySocket(_policyAlias, _paths[i]->localSocket());
  1679. _paths[i]->address().toString(pathStr);
  1680. fprintf(stderr, " %2d: lat=%8.3f, ac=%3d, fail%5s, fscore=%6d, in=%7d, out=%7d, age=%7ld, ack=%7ld, ref=%6d, ls=%llx",
  1681. i,
  1682. _paths[i]->_latencyMean,
  1683. _paths[i]->_allocation,
  1684. slave->failoverToSlave().c_str(),
  1685. _paths[i]->_failoverScore,
  1686. _paths[i]->_packetsIn,
  1687. _paths[i]->_packetsOut,
  1688. (long)_paths[i]->age(now),
  1689. (long)_paths[i]->ackAge(now),
  1690. _paths[i]->_refractoryPeriod,
  1691. _paths[i]->localSocket()
  1692. );
  1693. if (slave->spare()) {
  1694. fprintf(stderr, " SPR.");
  1695. } else {
  1696. fprintf(stderr, " ");
  1697. }
  1698. if (slave->primary()) {
  1699. fprintf(stderr, " PRIM.");
  1700. } else {
  1701. fprintf(stderr, " ");
  1702. }
  1703. if (_paths[i]->allowed()) {
  1704. fprintf(stderr, " ALL.");
  1705. } else {
  1706. fprintf(stderr, " ");
  1707. }
  1708. if (_paths[i]->eligible(now,_ackSendInterval)) {
  1709. fprintf(stderr, " ELI.");
  1710. } else {
  1711. fprintf(stderr, " ");
  1712. }
  1713. if (_paths[i]->preferred()) {
  1714. fprintf(stderr, " PREF.");
  1715. } else {
  1716. fprintf(stderr, " ");
  1717. }
  1718. if (_paths[i]->_negotiated) {
  1719. fprintf(stderr, " NEG.");
  1720. } else {
  1721. fprintf(stderr, " ");
  1722. }
  1723. if (_paths[i]->bonded()) {
  1724. fprintf(stderr, " BOND ");
  1725. } else {
  1726. fprintf(stderr, " ");
  1727. }
  1728. if (_bondingPolicy == ZT_BONDING_POLICY_ACTIVE_BACKUP && _abPath && (_abPath == _paths[i].ptr())) {
  1729. fprintf(stderr, " ACTIVE ");
  1730. } else if (_bondingPolicy == ZT_BONDING_POLICY_ACTIVE_BACKUP) {
  1731. fprintf(stderr, " ");
  1732. }
  1733. if (_bondingPolicy == ZT_BONDING_POLICY_ACTIVE_BACKUP && _abFailoverQueue.size() && (_abFailoverQueue.front().ptr() == _paths[i].ptr())) {
  1734. fprintf(stderr, " NEXT ");
  1735. } else if (_bondingPolicy == ZT_BONDING_POLICY_ACTIVE_BACKUP) {
  1736. fprintf(stderr, " ");
  1737. }
  1738. fprintf(stderr, "%5s %s\n", slave->ifname().c_str(), pathStr);
  1739. }
  1740. }
  1741. if (_bondingPolicy == ZT_BONDING_POLICY_ACTIVE_BACKUP) {
  1742. if (!_abFailoverQueue.empty()) {
  1743. fprintf(stderr, "\nFailover Queue:\n");
  1744. for (std::list<SharedPtr<Path> >::iterator it(_abFailoverQueue.begin()); it!=_abFailoverQueue.end();++it) {
  1745. (*it)->address().toString(currPathStr);
1746. SharedPtr<Slave> slave = RR->bc->getSlaveBySocket(_policyAlias, (*it)->localSocket());
  1747. fprintf(stderr, "\t%8s\tspeed=%7d\trelSpeed=%3d\tipvPref=%3d\tfscore=%9d\t\t%s\n",
  1748. slave->ifname().c_str(),
  1749. slave->speed(),
  1750. slave->relativeSpeed(),
  1751. slave->ipvPref(),
  1752. (*it)->_failoverScore,
  1753. currPathStr);
  1754. }
  1755. }
1756. else
1757. {
1758. fprintf(stderr, "\nFailover Queue is empty\n");
1759. }
  1760. }
  1761. if (_bondingPolicy == ZT_BONDING_POLICY_BALANCE_RR
  1762. || _bondingPolicy == ZT_BONDING_POLICY_BALANCE_XOR
  1763. || _bondingPolicy == ZT_BONDING_POLICY_BALANCE_AWARE) {
  1764. if (_numBondedPaths) {
  1765. fprintf(stderr, "\nBonded Paths:\n");
  1766. for (int i=0; i<_numBondedPaths; ++i) {
  1767. _paths[_bondedIdx[i]]->address().toString(currPathStr);
1768. SharedPtr<Slave> slave = RR->bc->getSlaveBySocket(_policyAlias, _paths[_bondedIdx[i]]->localSocket());
  1769. fprintf(stderr, " [%d]\t%8s\tflows=%3d\tspeed=%7d\trelSpeed=%3d\tipvPref=%3d\tfscore=%9d\t\t%s\n", i,
  1770. //fprintf(stderr, " [%d]\t%8s\tspeed=%7d\trelSpeed=%3d\tflowCount=%2d\tipvPref=%3d\tfscore=%9d\t\t%s\n", i,
  1771. slave->ifname().c_str(),
  1772. _paths[_bondedIdx[i]]->_assignedFlowCount,
  1773. slave->speed(),
  1774. slave->relativeSpeed(),
  1775. //_paths[_bondedIdx[i]].p->assignedFlows.size(),
  1776. slave->ipvPref(),
  1777. _paths[_bondedIdx[i]]->_failoverScore,
  1778. currPathStr);
  1779. }
  1780. }
  1781. /*
  1782. if (_allowFlowHashing) {
  1783. //Mutex::Lock _l(_flows_m);
  1784. if (_flows.size()) {
  1785. fprintf(stderr, "\nFlows:\n");
  1786. std::map<int32_t,SharedPtr<Flow> >::iterator it = _flows.begin();
  1787. while (it != _flows.end()) {
  1788. it->second->assignedPath()->address().toString(currPathStr);
  1789. SharedPtr<Slave> slave =RR->bc->getSlaveBySocket(_policyAlias, it->second->assignedPath()->localSocket());
  1790. fprintf(stderr, " [%4x] in=%16llu, out=%16llu, bytes=%16llu, last=%16llu, if=%8s\t\t%s\n",
  1791. it->second->id(),
  1792. it->second->bytesInPerUnitTime(),
  1793. it->second->bytesOutPerUnitTime(),
  1794. it->second->totalBytes(),
  1795. it->second->age(now),
  1796. slave->ifname().c_str(),
  1797. currPathStr);
  1798. ++it;
  1799. }
  1800. }
  1801. }
  1802. */
  1803. }
  1804. //fprintf(stderr, "\n\n\n\n\n");
  1805. }
  1806. } // namespace ZeroTier