// ZeroTier One: Switch.cpp
/*
 * ZeroTier One - Network Virtualization Everywhere
 * Copyright (C) 2011-2015 ZeroTier, Inc.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * --
 *
 * ZeroTier may be used and distributed under the terms of the GPLv3, which
 * are available at: http://www.gnu.org/licenses/gpl-3.0.html
 *
 * If you would like to embed ZeroTier into a commercial application or
 * redistribute it in a modified binary form, please contact ZeroTier Networks
 * LLC. Start here: http://www.zerotier.com/
 */
#include <stdio.h>
#include <stdlib.h>

#include <algorithm>
#include <utility>
#include <stdexcept>

#include "../version.h"
#include "../include/ZeroTierOne.h"

#include "Constants.hpp"
#include "RuntimeEnvironment.hpp"
#include "Switch.hpp"
#include "Node.hpp"
#include "InetAddress.hpp"
#include "Topology.hpp"
#include "Peer.hpp"
#include "CMWC4096.hpp"
#include "AntiRecursion.hpp"
#include "Packet.hpp"

namespace ZeroTier {
// Construct the packet switching core for a given runtime environment.
// RR is stored as a borrowed (non-owning) pointer; _lastBeacon starts at
// zero so the very first beacon received can be answered immediately
// (see _handleBeacon()'s rate limit check).
Switch::Switch(const RuntimeEnvironment *renv) :
	RR(renv),
	_lastBeacon(0)
{
}
// Trivial destructor -- the switch holds no resources needing explicit
// cleanup here; member containers clean themselves up.
Switch::~Switch()
{
}
  53. void Switch::onRemotePacket(const InetAddress &fromAddr,int linkDesperation,const void *data,unsigned int len)
  54. {
  55. try {
  56. if (len == ZT_PROTO_BEACON_LENGTH) {
  57. _handleBeacon(fromAddr,linkDesperation,Buffer<ZT_PROTO_BEACON_LENGTH>(data,len));
  58. } else if (len > ZT_PROTO_MIN_FRAGMENT_LENGTH) {
  59. if (((const unsigned char *)data)[ZT_PACKET_FRAGMENT_IDX_FRAGMENT_INDICATOR] == ZT_PACKET_FRAGMENT_INDICATOR) {
  60. _handleRemotePacketFragment(fromAddr,linkDesperation,data,len);
  61. } else if (len >= ZT_PROTO_MIN_PACKET_LENGTH) {
  62. _handleRemotePacketHead(fromAddr,linkDesperation,data,len);
  63. }
  64. }
  65. } catch (std::exception &ex) {
  66. TRACE("dropped packet from %s: unexpected exception: %s",fromAddr.toString().c_str(),ex.what());
  67. } catch ( ... ) {
  68. TRACE("dropped packet from %s: unexpected exception: (unknown)",fromAddr.toString().c_str());
  69. }
  70. }
/**
 * Handle an Ethernet frame arriving from the local tap device for a network.
 *
 * Classifies the frame into one of three paths: multicast/broadcast (handed
 * to the multicaster), direct unicast to another ZeroTier peer on this
 * network, or unicast to a MAC bridged behind some remote peer.
 *
 * @param network network the frame arrived on
 * @param from source MAC (may differ from our network MAC if bridged in)
 * @param to destination MAC
 * @param etherType Ethernet frame type
 * @param vlanId VLAN ID (currently unused in this function -- TODO confirm intended)
 * @param data frame payload
 * @param len payload length in bytes
 */
void Switch::onLocalEthernet(const SharedPtr<Network> &network,const MAC &from,const MAC &to,unsigned int etherType,unsigned int vlanId,const void *data,unsigned int len)
{
	// No network config yet means we can't evaluate any rules; drop.
	SharedPtr<NetworkConfig> nconf(network->config2());
	if (!nconf)
		return;

	// Sanity check -- bridge loop? OS problem?
	if (to == network->mac())
		return;

	/* Check anti-recursion module to ensure that this is not ZeroTier talking over its own links.
	 * Note: even when we introduce a more purposeful binding of the main UDP port, this can
	 * still happen because Windows likes to send broadcasts over interfaces that have little
	 * to do with their intended target audience. :P */
	if (!RR->antiRec->checkEthernetFrame(data,len)) {
		TRACE("%.16llx: rejected recursively addressed ZeroTier packet by tail match (type %s, length: %u)",network->id(),etherTypeName(etherType),len);
		return;
	}

	// Check to make sure this protocol is allowed on this network
	if (!nconf->permitsEtherType(etherType)) {
		TRACE("%.16llx: ignored tap: %s -> %s: ethertype %s not allowed on network %.16llx",network->id(),from.toString().c_str(),to.toString().c_str(),etherTypeName(etherType),(unsigned long long)network->id());
		return;
	}

	// Check if this packet is from someone other than the tap -- i.e. bridged in
	bool fromBridged = false;
	if (from != network->mac()) {
		if (!network->permitsBridging(RR->identity.address())) {
			TRACE("%.16llx: %s -> %s %s not forwarded, bridging disabled or this peer not a bridge",network->id(),from.toString().c_str(),to.toString().c_str(),etherTypeName(etherType));
			return;
		}
		fromBridged = true;
	}

	if (to.isMulticast()) {
		// Destination is a multicast address (including broadcast)
		const uint64_t now = RR->node->now();
		MulticastGroup mg(to,0);

		if (to.isBroadcast()) {
			// These byte offsets match the header of an Ethernet/IPv4 ARP
			// request (protocol type 0x0800, hw addr len 6, proto addr len 4,
			// opcode 1) -- presumably an ARP request; confirm against RFC 826.
			if (
				(etherType == ZT_ETHERTYPE_ARP)&&
				(len >= 28)&&
				(
					(((const unsigned char *)data)[2] == 0x08)&&
					(((const unsigned char *)data)[3] == 0x00)&&
					(((const unsigned char *)data)[4] == 6)&&
					(((const unsigned char *)data)[5] == 4)&&
					(((const unsigned char *)data)[7] == 0x01)
				)
			) {
				// Cram IPv4 IP into ADI field to make IPv4 ARP broadcast channel specific and scalable
				// Also: enableBroadcast() does not apply to ARP since it's required for IPv4
				mg = MulticastGroup::deriveMulticastGroupForAddressResolution(InetAddress(((const unsigned char *)data) + 24,4,0));
			} else if (!nconf->enableBroadcast()) {
				// Don't transmit broadcasts if this network doesn't want them
				TRACE("%.16llx: dropped broadcast since ff:ff:ff:ff:ff:ff is not enabled",network->id());
				return;
			}
		}

		/* Learn multicast groups for bridged-in hosts.
		 * Note that some OSes, most notably Linux, do this for you by learning
		 * multicast addresses on bridge interfaces and subscribing each slave.
		 * But in that case this does no harm, as the sets are just merged. */
		if (fromBridged)
			network->learnBridgedMulticastGroup(mg,now);

		// Check multicast/broadcast bandwidth quotas and reject if quota exceeded
		if (!network->updateAndCheckMulticastBalance(mg,len)) {
			TRACE("%.16llx: didn't multicast %u bytes, quota exceeded for multicast group %s",network->id(),len,mg.toString().c_str());
			return;
		}

		//TRACE("%.16llx: MULTICAST %s -> %s %s %u",network->id(),from.toString().c_str(),mg.toString().c_str(),etherTypeName(etherType),len);

		// Attach our membership certificate only on private networks that have one.
		RR->mc->send(
			((!nconf->isPublic())&&(nconf->com())) ? &(nconf->com()) : (const CertificateOfMembership *)0,
			nconf->multicastLimit(),
			now,
			network->id(),
			nconf->activeBridges(),
			mg,
			(fromBridged) ? from : MAC(),
			etherType,
			data,
			len);

		return;
	}

	if (to[0] == MAC::firstOctetForNetwork(network->id())) {
		// Destination is another ZeroTier peer
		Address toZT(to.toAddress(network->id()));
		if (network->isAllowed(toZT)) {
			if (network->peerNeedsOurMembershipCertificate(toZT,RR->node->now())) {
				// TODO: once there are no more <1.0.0 nodes around, we can
				// bundle this with EXT_FRAME instead of sending two packets.
				Packet outp(toZT,RR->identity.address(),Packet::VERB_NETWORK_MEMBERSHIP_CERTIFICATE);
				nconf->com().serialize(outp);
				send(outp,true);
			}
			if (fromBridged) {
				// EXT_FRAME is used for bridging or if we want to include a COM
				Packet outp(toZT,RR->identity.address(),Packet::VERB_EXT_FRAME);
				outp.append(network->id());
				outp.append((unsigned char)0);
				to.appendTo(outp);
				from.appendTo(outp);
				outp.append((uint16_t)etherType);
				outp.append(data,len);
				outp.compress();
				send(outp,true);
			} else {
				// FRAME is a shorter version that can be used when there's no bridging and no COM
				Packet outp(toZT,RR->identity.address(),Packet::VERB_FRAME);
				outp.append(network->id());
				outp.append((uint16_t)etherType);
				outp.append(data,len);
				outp.compress();
				send(outp,true);
			}
		} else {
			TRACE("%.16llx: UNICAST: %s -> %s %s dropped, destination not a member of private network",network->id(),from.toString().c_str(),to.toString().c_str(),etherTypeName(etherType));
		}
		return;
	}

	{
		// Destination is bridged behind a remote peer
		Address bridges[ZT_MAX_BRIDGE_SPAM];
		unsigned int numBridges = 0;

		bridges[0] = network->findBridgeTo(to);
		if ((bridges[0])&&(bridges[0] != RR->identity.address())&&(network->isAllowed(bridges[0]))&&(network->permitsBridging(bridges[0]))) {
			// We have a known bridge route for this MAC.
			++numBridges;
		} else if (!nconf->activeBridges().empty()) {
			/* If there is no known route, spam to up to ZT_MAX_BRIDGE_SPAM active
			 * bridges. This is similar to what many switches do -- if they do not
			 * know which port corresponds to a MAC, they send it to all ports. If
			 * there aren't any active bridges, numBridges will stay 0 and packet
			 * is dropped. */
			std::vector<Address>::const_iterator ab(nconf->activeBridges().begin());
			if (nconf->activeBridges().size() <= ZT_MAX_BRIDGE_SPAM) {
				// If there are <= ZT_MAX_BRIDGE_SPAM active bridges, spam them all
				while (ab != nconf->activeBridges().end()) {
					if (network->isAllowed(*ab)) // config sanity check
						bridges[numBridges++] = *ab;
					++ab;
				}
			} else {
				// Otherwise pick a random set of them (cycling through the list
				// until ZT_MAX_BRIDGE_SPAM are chosen)
				while (numBridges < ZT_MAX_BRIDGE_SPAM) {
					if (ab == nconf->activeBridges().end())
						ab = nconf->activeBridges().begin();
					if (((unsigned long)RR->prng->next32() % (unsigned long)nconf->activeBridges().size()) == 0) {
						if (network->isAllowed(*ab)) // config sanity check
							bridges[numBridges++] = *ab;
						++ab;
					} else ++ab;
				}
			}
		}

		// Send an EXT_FRAME to each selected bridge (0 bridges == drop).
		for(unsigned int b=0;b<numBridges;++b) {
			Packet outp(bridges[b],RR->identity.address(),Packet::VERB_EXT_FRAME);
			outp.append(network->id());
			outp.append((unsigned char)0);
			to.appendTo(outp);
			from.appendTo(outp);
			outp.append((uint16_t)etherType);
			outp.append(data,len);
			outp.compress();
			send(outp,true);
		}
	}
}
  235. void Switch::send(const Packet &packet,bool encrypt)
  236. {
  237. if (packet.destination() == RR->identity.address()) {
  238. TRACE("BUG: caught attempt to send() to self, ignored");
  239. return;
  240. }
  241. if (!_trySend(packet,encrypt)) {
  242. Mutex::Lock _l(_txQueue_m);
  243. _txQueue.insert(std::pair< Address,TXQueueEntry >(packet.destination(),TXQueueEntry(RR->node->now(),packet,encrypt)));
  244. }
  245. }
/**
 * Attempt to introduce two peers to each other for NAT traversal.
 *
 * Sends a RENDEZVOUS to each peer telling it the other's physical address,
 * so the two can attempt a direct connection. Rate limited per peer pair
 * unless 'force' is true.
 *
 * @param p1 first peer's ZeroTier address
 * @param p2 second peer's ZeroTier address
 * @param force if true, ignore the minimum unite interval
 * @return true if RENDEZVOUS packets were sent
 */
bool Switch::unite(const Address &p1,const Address &p2,bool force)
{
	// Never attempt to unite ourselves with someone.
	if ((p1 == RR->identity.address())||(p2 == RR->identity.address()))
		return false;

	// Both peers' identities must already be known.
	SharedPtr<Peer> p1p = RR->topology->getPeer(p1);
	if (!p1p)
		return false;
	SharedPtr<Peer> p2p = RR->topology->getPeer(p2);
	if (!p2p)
		return false;

	const uint64_t now = RR->node->now();

	// Right now we only unite desperation == 0 links, which will be direct
	std::pair<InetAddress,InetAddress> cg(Peer::findCommonGround(*p1p,*p2p,now,0));
	if (!(cg.first))
		return false;

	// Addresses are sorted in key for last unite attempt map for order
	// invariant lookup: (p1,p2) == (p2,p1)
	Array<Address,2> uniteKey;
	if (p1 >= p2) {
		uniteKey[0] = p2;
		uniteKey[1] = p1;
	} else {
		uniteKey[0] = p1;
		uniteKey[1] = p2;
	}
	{
		// Enforce ZT_MIN_UNITE_INTERVAL per pair unless forced; record this attempt.
		Mutex::Lock _l(_lastUniteAttempt_m);
		std::map< Array< Address,2 >,uint64_t >::const_iterator e(_lastUniteAttempt.find(uniteKey));
		if ((!force)&&(e != _lastUniteAttempt.end())&&((now - e->second) < ZT_MIN_UNITE_INTERVAL))
			return false;
		else _lastUniteAttempt[uniteKey] = now;
	}

	TRACE("unite: %s(%s) <> %s(%s)",p1.toString().c_str(),cg.second.toString().c_str(),p2.toString().c_str(),cg.first.toString().c_str());

	/* Tell P1 where to find P2 and vice versa, sending the packets to P1 and
	 * P2 in randomized order in terms of which gets sent first. This is done
	 * since in a few cases NAT-t can be sensitive to slight timing differences
	 * in terms of when the two peers initiate. Normally this is accounted for
	 * by the nearly-simultaneous RENDEZVOUS kickoff from the supernode, but
	 * given that supernodes are hosted on cloud providers this can in some
	 * cases have a few ms of latency between packet departures. By randomizing
	 * the order we make each attempted NAT-t favor one or the other going
	 * first, meaning if it doesn't succeed the first time it might the second
	 * and so forth. */
	unsigned int alt = RR->prng->next32() & 1; // random starting parity picks who goes first
	unsigned int completed = alt + 2;          // loop runs exactly twice
	while (alt != completed) {
		if ((alt & 1) == 0) {
			// Tell p1 where to find p2.
			Packet outp(p1,RR->identity.address(),Packet::VERB_RENDEZVOUS);
			outp.append((unsigned char)0);
			p2.appendTo(outp);
			outp.append((uint16_t)cg.first.port());
			if (cg.first.isV6()) {
				outp.append((unsigned char)16);
				outp.append(cg.first.rawIpData(),16);
			} else {
				outp.append((unsigned char)4);
				outp.append(cg.first.rawIpData(),4);
			}
			outp.armor(p1p->key(),true);
			p1p->send(RR,outp.data(),outp.size(),now);
		} else {
			// Tell p2 where to find p1.
			Packet outp(p2,RR->identity.address(),Packet::VERB_RENDEZVOUS);
			outp.append((unsigned char)0);
			p1.appendTo(outp);
			outp.append((uint16_t)cg.second.port());
			if (cg.second.isV6()) {
				outp.append((unsigned char)16);
				outp.append(cg.second.rawIpData(),16);
			} else {
				outp.append((unsigned char)4);
				outp.append(cg.second.rawIpData(),4);
			}
			outp.armor(p2p->key(),true);
			p2p->send(RR,outp.data(),outp.size(),now);
		}
		++alt; // counts up and also flips LSB
	}

	return true;
}
  327. void Switch::contact(const SharedPtr<Peer> &peer,const InetAddress &atAddr,unsigned int maxDesperation)
  328. {
  329. TRACE("sending NAT-t message to %s(%s)",peer->address().toString().c_str(),atAddr.toString().c_str());
  330. const uint64_t now = RR->node->now();
  331. // Attempt to contact at zero desperation first
  332. peer->attemptToContactAt(RR,atAddr,0,now);
  333. // If we have not punched through after this timeout, open refreshing can of whupass
  334. {
  335. Mutex::Lock _l(_contactQueue_m);
  336. _contactQueue.push_back(ContactQueueEntry(peer,now + ZT_NAT_T_TACTICAL_ESCALATION_DELAY,atAddr,maxDesperation));
  337. }
  338. }
  339. void Switch::requestWhois(const Address &addr)
  340. {
  341. bool inserted = false;
  342. {
  343. Mutex::Lock _l(_outstandingWhoisRequests_m);
  344. std::pair< std::map< Address,WhoisRequest >::iterator,bool > entry(_outstandingWhoisRequests.insert(std::pair<Address,WhoisRequest>(addr,WhoisRequest())));
  345. if ((inserted = entry.second))
  346. entry.first->second.lastSent = RR->node->now();
  347. entry.first->second.retries = 0; // reset retry count if entry already existed
  348. }
  349. if (inserted)
  350. _sendWhoisRequest(addr,(const Address *)0,0);
  351. }
  352. void Switch::cancelWhoisRequest(const Address &addr)
  353. {
  354. Mutex::Lock _l(_outstandingWhoisRequests_m);
  355. _outstandingWhoisRequests.erase(addr);
  356. }
/**
 * Flush all work that was blocked on knowing a peer's identity.
 *
 * Called when a peer's identity becomes available: cancels its pending
 * WHOIS, retries decoding queued received packets, and retries sending
 * queued outbound packets addressed to it.
 *
 * @param peer the now-known peer
 */
void Switch::doAnythingWaitingForPeer(const SharedPtr<Peer> &peer)
{
	{	// cancel pending WHOIS since we now know this peer
		Mutex::Lock _l(_outstandingWhoisRequests_m);
		_outstandingWhoisRequests.erase(peer->address());
	}

	{	// finish processing any packets waiting on peer's public key / identity
		Mutex::Lock _l(_rxQueue_m);
		// Note erase-while-iterating pattern: rxi++ advances before erase invalidates it.
		for(std::list< SharedPtr<IncomingPacket> >::iterator rxi(_rxQueue.begin());rxi!=_rxQueue.end();) {
			if ((*rxi)->tryDecode(RR))
				_rxQueue.erase(rxi++);
			else ++rxi;
		}
	}

	{	// finish sending any packets waiting on peer's public key / identity
		Mutex::Lock _l(_txQueue_m);
		std::pair< std::multimap< Address,TXQueueEntry >::iterator,std::multimap< Address,TXQueueEntry >::iterator > waitingTxQueueItems(_txQueue.equal_range(peer->address()));
		for(std::multimap< Address,TXQueueEntry >::iterator txi(waitingTxQueueItems.first);txi!=waitingTxQueueItems.second;) {
			if (_trySend(txi->second.packet,txi->second.encrypt))
				_txQueue.erase(txi++);
			else ++txi;
		}
	}
}
/**
 * Periodic housekeeping: NAT-t escalation, WHOIS retries, and expiration
 * of the TX, RX, and defragmentation queues.
 *
 * @param now current time (ms since epoch per RR->node->now() convention)
 * @return maximum milliseconds until this should be called again
 *         (caller applies its own floor/cap)
 */
unsigned long Switch::doTimerTasks(uint64_t now)
{
	unsigned long nextDelay = 0xffffffff; // ceiling delay, caller will cap to minimum

	{	// Aggressive NAT traversal time!
		Mutex::Lock _l(_contactQueue_m);
		// Erase-while-iterating: qi++ is advanced before erase; otherwise ++qi at loop end.
		for(std::list<ContactQueueEntry>::iterator qi(_contactQueue.begin());qi!=_contactQueue.end();) {
			if (now >= qi->fireAtTime) {
				if (qi->peer->hasActiveDirectPath(now)) {
					// We've successfully NAT-t'd, so cancel attempt
					_contactQueue.erase(qi++);
					continue;
				} else {
					// Nope, nothing yet. Time to kill some kittens.
					// Three strategies are tried per desperation level, then
					// desperation escalates and the cycle restarts.
					switch(qi->strategyIteration++) {
						case 0: {
							// First strategy: rifle method: direct packet to known port
							qi->peer->attemptToContactAt(RR,qi->inaddr,qi->currentDesperation,now);
						}	break;
						case 1: {
							// Second strategy: shotgun method up: try a few ports above
							InetAddress tmpaddr(qi->inaddr);
							int p = (int)qi->inaddr.port();
							for(int i=0;i<9;++i) {
								if (++p > 0xffff) break; // don't run past max port
								tmpaddr.setPort((unsigned int)p);
								qi->peer->attemptToContactAt(RR,tmpaddr,qi->currentDesperation,now);
							}
						}	break;
						case 2: {
							// Third strategy: shotgun method down: try a few ports below
							InetAddress tmpaddr(qi->inaddr);
							int p = (int)qi->inaddr.port();
							for(int i=0;i<3;++i) {
								if (--p < 1024) break; // stay out of privileged port range
								tmpaddr.setPort((unsigned int)p);
								qi->peer->attemptToContactAt(RR,tmpaddr,qi->currentDesperation,now);
							}

							// Escalate link desperation after all strategies attempted
							++qi->currentDesperation;
							if (qi->currentDesperation > qi->maxDesperation) {
								// We've tried all strategies at all levels of desperation, give up.
								_contactQueue.erase(qi++);
								continue;
							} else {
								// Otherwise restart at new link desperation level (e.g. try a tougher transport)
								qi->strategyIteration = 0;
							}
						}	break;
					}

					qi->fireAtTime = now + ZT_NAT_T_TACTICAL_ESCALATION_DELAY;
					nextDelay = std::min(nextDelay,(unsigned long)ZT_NAT_T_TACTICAL_ESCALATION_DELAY);
				}
			} else {
				nextDelay = std::min(nextDelay,(unsigned long)(qi->fireAtTime - now));
			}
			++qi; // if qi was erased, loop will have continued before here
		}
	}

	{	// Retry outstanding WHOIS requests
		Mutex::Lock _l(_outstandingWhoisRequests_m);
		for(std::map< Address,WhoisRequest >::iterator i(_outstandingWhoisRequests.begin());i!=_outstandingWhoisRequests.end();) {
			unsigned long since = (unsigned long)(now - i->second.lastSent);
			if (since >= ZT_WHOIS_RETRY_DELAY) {
				if (i->second.retries >= ZT_MAX_WHOIS_RETRIES) {
					// Exhausted retries -- abandon this lookup.
					TRACE("WHOIS %s timed out",i->first.toString().c_str());
					_outstandingWhoisRequests.erase(i++);
					continue;
				} else {
					// Retry, remembering which supernode we consulted so the
					// next retry can pick a different one.
					i->second.lastSent = now;
					i->second.peersConsulted[i->second.retries] = _sendWhoisRequest(i->first,i->second.peersConsulted,i->second.retries);
					++i->second.retries;
					TRACE("WHOIS %s (retry %u)",i->first.toString().c_str(),i->second.retries);
					nextDelay = std::min(nextDelay,(unsigned long)ZT_WHOIS_RETRY_DELAY);
				}
			} else {
				nextDelay = std::min(nextDelay,ZT_WHOIS_RETRY_DELAY - since);
			}
			++i;
		}
	}

	{	// Time out TX queue packets that never got WHOIS lookups or other info.
		Mutex::Lock _l(_txQueue_m);
		for(std::multimap< Address,TXQueueEntry >::iterator i(_txQueue.begin());i!=_txQueue.end();) {
			if (_trySend(i->second.packet,i->second.encrypt))
				_txQueue.erase(i++);
			else if ((now - i->second.creationTime) > ZT_TRANSMIT_QUEUE_TIMEOUT) {
				TRACE("TX %s -> %s timed out",i->second.packet.source().toString().c_str(),i->second.packet.destination().toString().c_str());
				_txQueue.erase(i++);
			} else ++i;
		}
	}

	{	// Time out RX queue packets that never got WHOIS lookups or other info.
		Mutex::Lock _l(_rxQueue_m);
		for(std::list< SharedPtr<IncomingPacket> >::iterator i(_rxQueue.begin());i!=_rxQueue.end();) {
			if ((now - (*i)->receiveTime()) > ZT_RECEIVE_QUEUE_TIMEOUT) {
				TRACE("RX %s -> %s timed out",(*i)->source().toString().c_str(),(*i)->destination().toString().c_str());
				_rxQueue.erase(i++);
			} else ++i;
		}
	}

	{	// Time out packets that didn't get all their fragments.
		Mutex::Lock _l(_defragQueue_m);
		for(std::map< uint64_t,DefragQueueEntry >::iterator i(_defragQueue.begin());i!=_defragQueue.end();) {
			if ((now - i->second.creationTime) > ZT_FRAGMENTED_PACKET_RECEIVE_TIMEOUT) {
				TRACE("incomplete fragmented packet %.16llx timed out, fragments discarded",i->first);
				_defragQueue.erase(i++);
			} else ++i;
		}
	}

	return nextDelay;
}
  492. const char *Switch::etherTypeName(const unsigned int etherType)
  493. throw()
  494. {
  495. switch(etherType) {
  496. case ZT_ETHERTYPE_IPV4: return "IPV4";
  497. case ZT_ETHERTYPE_ARP: return "ARP";
  498. case ZT_ETHERTYPE_RARP: return "RARP";
  499. case ZT_ETHERTYPE_ATALK: return "ATALK";
  500. case ZT_ETHERTYPE_AARP: return "AARP";
  501. case ZT_ETHERTYPE_IPX_A: return "IPX_A";
  502. case ZT_ETHERTYPE_IPX_B: return "IPX_B";
  503. case ZT_ETHERTYPE_IPV6: return "IPV6";
  504. }
  505. return "UNKNOWN";
  506. }
/**
 * Handle an inbound packet fragment (not the head).
 *
 * Fragments addressed to someone else are relayed (directly if we have a
 * path, otherwise via a supernode). Fragments addressed to us are stashed
 * in the defragmentation queue; when all fragments plus the head are
 * present the full packet is assembled and processed.
 *
 * @param fromAddr origin physical address
 * @param linkDesperation link desperation of the receiving link
 * @param data raw fragment bytes
 * @param len fragment length
 */
void Switch::_handleRemotePacketFragment(const InetAddress &fromAddr,int linkDesperation,const void *data,unsigned int len)
{
	Packet::Fragment fragment(data,len);
	Address destination(fragment.destination());

	if (destination != RR->identity.address()) {
		// Fragment is not for us, so try to relay it
		if (fragment.hops() < ZT_RELAY_MAX_HOPS) {
			fragment.incrementHops();

			// Note: we don't bother initiating NAT-t for fragments, since heads will set that off.
			// It wouldn't hurt anything, just redundant and unnecessary.
			SharedPtr<Peer> relayTo = RR->topology->getPeer(destination);
			if ((!relayTo)||(!relayTo->send(RR,fragment.data(),fragment.size(),RR->node->now()))) {
				// Don't know peer or no direct path -- so relay via supernode
				relayTo = RR->topology->getBestSupernode();
				if (relayTo)
					relayTo->send(RR,fragment.data(),fragment.size(),RR->node->now());
			}
		} else {
			TRACE("dropped relay [fragment](%s) -> %s, max hops exceeded",fromAddr.toString().c_str(),destination.toString().c_str());
		}
	} else {
		// Fragment looks like ours
		uint64_t pid = fragment.packetId();
		unsigned int fno = fragment.fragmentNumber();
		unsigned int tf = fragment.totalFragments();

		if ((tf <= ZT_MAX_PACKET_FRAGMENTS)&&(fno < ZT_MAX_PACKET_FRAGMENTS)&&(fno > 0)&&(tf > 1)) {
			// Fragment appears basically sane. Its fragment number must be
			// 1 or more, since a Packet with fragmented bit set is fragment 0.
			// Total fragments must be more than 1, otherwise why are we
			// seeing a Packet::Fragment?
			Mutex::Lock _l(_defragQueue_m);
			std::map< uint64_t,DefragQueueEntry >::iterator dqe(_defragQueue.find(pid));

			if (dqe == _defragQueue.end()) {
				// We received a Packet::Fragment without its head, so queue it and wait
				DefragQueueEntry &dq = _defragQueue[pid];
				dq.creationTime = RR->node->now();
				dq.frags[fno - 1] = fragment;
				dq.totalFragments = tf; // total fragment count is known
				dq.haveFragments = 1 << fno; // we have only this fragment
				//TRACE("fragment (%u/%u) of %.16llx from %s",fno + 1,tf,pid,fromAddr.toString().c_str());
			} else if (!(dqe->second.haveFragments & (1 << fno))) {
				// We have other fragments and maybe the head, so add this one and check
				// (haveFragments is a bitmask: bit 0 = head, bit N = fragment N)
				dqe->second.frags[fno - 1] = fragment;
				dqe->second.totalFragments = tf;
				//TRACE("fragment (%u/%u) of %.16llx from %s",fno + 1,tf,pid,fromAddr.toString().c_str());

				if (Utils::countBits(dqe->second.haveFragments |= (1 << fno)) == tf) {
					// We have all fragments -- assemble and process full Packet
					//TRACE("packet %.16llx is complete, assembling and processing...",pid);
					SharedPtr<IncomingPacket> packet(dqe->second.frag0);
					for(unsigned int f=1;f<tf;++f)
						packet->append(dqe->second.frags[f - 1].payload(),dqe->second.frags[f - 1].payloadLength());
					_defragQueue.erase(dqe);

					// If decode fails (e.g. sender identity unknown), park the
					// packet in the RX queue to retry after WHOIS completes.
					if (!packet->tryDecode(RR)) {
						Mutex::Lock _l(_rxQueue_m);
						_rxQueue.push_back(packet);
					}
				}
			} // else this is a duplicate fragment, ignore
		}
	}
}
/**
 * Handle an inbound packet head (fragment 0 or a whole unfragmented packet).
 *
 * Heads addressed to someone else are relayed (with a unite() attempt if a
 * direct relay succeeds). Heads of fragmented packets addressed to us join
 * the defragmentation queue; unfragmented packets are decoded immediately.
 *
 * @param fromAddr origin physical address
 * @param linkDesperation link desperation of the receiving link
 * @param data raw packet bytes
 * @param len packet length
 */
void Switch::_handleRemotePacketHead(const InetAddress &fromAddr,int linkDesperation,const void *data,unsigned int len)
{
	SharedPtr<IncomingPacket> packet(new IncomingPacket(data,len,fromAddr,linkDesperation,RR->node->now()));

	Address source(packet->source());
	Address destination(packet->destination());

	//TRACE("<< %.16llx %s -> %s (size: %u)",(unsigned long long)packet->packetId(),source.toString().c_str(),destination.toString().c_str(),packet->size());

	if (destination != RR->identity.address()) {
		// Packet is not for us, so try to relay it
		if (packet->hops() < ZT_RELAY_MAX_HOPS) {
			packet->incrementHops();

			SharedPtr<Peer> relayTo = RR->topology->getPeer(destination);
			if ((relayTo)&&((relayTo->send(RR,packet->data(),packet->size(),RR->node->now())))) {
				// Direct relay worked -- also try to introduce the two peers
				// to each other so future traffic can go direct.
				unite(source,destination,false);
			} else {
				// Don't know peer or no direct path -- so relay via supernode
				relayTo = RR->topology->getBestSupernode(&source,1,true);
				if (relayTo)
					relayTo->send(RR,packet->data(),packet->size(),RR->node->now());
			}
		} else {
			TRACE("dropped relay %s(%s) -> %s, max hops exceeded",packet->source().toString().c_str(),fromAddr.toString().c_str(),destination.toString().c_str());
		}
	} else if (packet->fragmented()) {
		// Packet is the head of a fragmented packet series
		uint64_t pid = packet->packetId();
		Mutex::Lock _l(_defragQueue_m);
		std::map< uint64_t,DefragQueueEntry >::iterator dqe(_defragQueue.find(pid));

		if (dqe == _defragQueue.end()) {
			// If we have no other fragments yet, create an entry and save the head
			DefragQueueEntry &dq = _defragQueue[pid];
			dq.creationTime = RR->node->now();
			dq.frag0 = packet;
			dq.totalFragments = 0; // 0 == unknown, waiting for Packet::Fragment
			dq.haveFragments = 1; // head is first bit (left to right)
			//TRACE("fragment (0/?) of %.16llx from %s",pid,fromAddr.toString().c_str());
		} else if (!(dqe->second.haveFragments & 1)) {
			// If we have other fragments but no head, see if we are complete with the head
			if ((dqe->second.totalFragments)&&(Utils::countBits(dqe->second.haveFragments |= 1) == dqe->second.totalFragments)) {
				// We have all fragments -- assemble and process full Packet
				//TRACE("packet %.16llx is complete, assembling and processing...",pid);

				// packet already contains head, so append fragments
				for(unsigned int f=1;f<dqe->second.totalFragments;++f)
					packet->append(dqe->second.frags[f - 1].payload(),dqe->second.frags[f - 1].payloadLength());
				_defragQueue.erase(dqe);

				// Decode failure means we need the sender's identity; queue for retry.
				if (!packet->tryDecode(RR)) {
					Mutex::Lock _l(_rxQueue_m);
					_rxQueue.push_back(packet);
				}
			} else {
				// Still waiting on more fragments, so queue the head
				dqe->second.frag0 = packet;
			}
		} // else this is a duplicate head, ignore
	} else {
		// Packet is unfragmented, so just process it
		if (!packet->tryDecode(RR)) {
			Mutex::Lock _l(_rxQueue_m);
			_rxQueue.push_back(packet);
		}
	}
}
  629. void Switch::_handleBeacon(const InetAddress &fromAddr,int linkDesperation,const Buffer<ZT_PROTO_BEACON_LENGTH> &data)
  630. {
  631. Address beaconAddr(data.field(ZT_PROTO_BEACON_IDX_ADDRESS,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH);
  632. if (beaconAddr == RR->identity.address())
  633. return;
  634. SharedPtr<Peer> peer(RR->topology->getPeer(beaconAddr));
  635. if (peer) {
  636. const uint64_t now = RR->node->now();
  637. if ((now - _lastBeacon) >= ZT_MIN_BEACON_RESPONSE_INTERVAL) {
  638. _lastBeacon = now;
  639. Packet outp(peer->address(),RR->identity.address(),Packet::VERB_NOP);
  640. outp.armor(peer->key(),false);
  641. RR->node->putPacket(fromAddr,outp.data(),outp.size(),linkDesperation);
  642. }
  643. }
  644. }
  645. Address Switch::_sendWhoisRequest(const Address &addr,const Address *peersAlreadyConsulted,unsigned int numPeersAlreadyConsulted)
  646. {
  647. SharedPtr<Peer> supernode(RR->topology->getBestSupernode(peersAlreadyConsulted,numPeersAlreadyConsulted,false));
  648. if (supernode) {
  649. Packet outp(supernode->address(),RR->identity.address(),Packet::VERB_WHOIS);
  650. addr.appendTo(outp);
  651. outp.armor(supernode->key(),true);
  652. if (supernode->send(RR,outp.data(),outp.size(),RR->node->now()))
  653. return supernode->address();
  654. }
  655. return Address();
  656. }
/**
 * Attempt to send a packet immediately, fragmenting if over the UDP MTU.
 *
 * The packet is armored end-to-end with the destination peer's key even
 * when it travels via a supernode's path. If the destination peer is not
 * yet known, a WHOIS is kicked off and false is returned so the caller
 * can queue the packet for retry.
 *
 * @param packet packet to send
 * @param encrypt whether payload encryption is desired
 * @return true if the head chunk was handed to a path for sending
 */
bool Switch::_trySend(const Packet &packet,bool encrypt)
{
	SharedPtr<Peer> peer(RR->topology->getPeer(packet.destination()));
	if (peer) {
		const uint64_t now = RR->node->now();

		// Prefer a direct path to the peer; otherwise fall back to the
		// best supernode's path (relayed delivery).
		Path *viaPath = peer->getBestPath(now);
		if (!viaPath) {
			SharedPtr<Peer> sn(RR->topology->getBestSupernode());
			if (!(sn)||(!(viaPath = sn->getBestPath(now))))
				return false;
		}

		Packet tmp(packet);

		// First chunk carries up to one UDP payload; if the packet is larger,
		// mark it fragmented and emit the remainder as Packet::Fragments.
		unsigned int chunkSize = std::min(tmp.size(),(unsigned int)ZT_UDP_DEFAULT_PAYLOAD_MTU);
		tmp.setFragmented(chunkSize < tmp.size());

		tmp.armor(peer->key(),encrypt); // always the destination peer's key, even via supernode

		if (viaPath->send(RR,tmp.data(),chunkSize,now)) {
			if (chunkSize < tmp.size()) {
				// Too big for one bite, fragment the rest
				unsigned int fragStart = chunkSize;
				unsigned int remaining = tmp.size() - chunkSize;
				// Ceiling division: number of additional fragments needed after the head.
				unsigned int fragsRemaining = (remaining / (ZT_UDP_DEFAULT_PAYLOAD_MTU - ZT_PROTO_MIN_FRAGMENT_LENGTH));
				if ((fragsRemaining * (ZT_UDP_DEFAULT_PAYLOAD_MTU - ZT_PROTO_MIN_FRAGMENT_LENGTH)) < remaining)
					++fragsRemaining;
				unsigned int totalFragments = fragsRemaining + 1; // +1 for the head (fragment 0)

				for(unsigned int fno=1;fno<totalFragments;++fno) {
					chunkSize = std::min(remaining,(unsigned int)(ZT_UDP_DEFAULT_PAYLOAD_MTU - ZT_PROTO_MIN_FRAGMENT_LENGTH));
					Packet::Fragment frag(tmp,fragStart,chunkSize,fno,totalFragments);
					viaPath->send(RR,frag.data(),frag.size(),now);
					fragStart += chunkSize;
					remaining -= chunkSize;
				}
			}

			return true;
		}
	} else {
		// We don't know this peer's identity yet; ask the network and let
		// the caller queue the packet for retry.
		requestWhois(packet.destination());
	}
	return false;
}

} // namespace ZeroTier