/*
 * ZeroTier One - Network Virtualization Everywhere
 * Copyright (C) 2011-2016 ZeroTier, Inc. https://www.zerotier.com/
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <stdio.h>
#include <stdlib.h>

#include <algorithm>
#include <utility>
#include <stdexcept>

#include "../version.h"
#include "../include/ZeroTierOne.h"

#include "Constants.hpp"
#include "RuntimeEnvironment.hpp"
#include "Switch.hpp"
#include "Node.hpp"
#include "InetAddress.hpp"
#include "Topology.hpp"
#include "Peer.hpp"
#include "SelfAwareness.hpp"
#include "Packet.hpp"
#include "Filter.hpp"
#include "Cluster.hpp"

namespace ZeroTier {

#ifdef ZT_TRACE
static const char *etherTypeName(const unsigned int etherType)
{
	switch(etherType) {
		case ZT_ETHERTYPE_IPV4: return "IPV4";
		case ZT_ETHERTYPE_ARP: return "ARP";
		case ZT_ETHERTYPE_RARP: return "RARP";
		case ZT_ETHERTYPE_ATALK: return "ATALK";
		case ZT_ETHERTYPE_AARP: return "AARP";
		case ZT_ETHERTYPE_IPX_A: return "IPX_A";
		case ZT_ETHERTYPE_IPX_B: return "IPX_B";
		case ZT_ETHERTYPE_IPV6: return "IPV6";
	}
	return "UNKNOWN";
}
#endif // ZT_TRACE

Switch::Switch(const RuntimeEnvironment *renv) :
	RR(renv),
	_lastBeaconResponse(0),
	_outstandingWhoisRequests(32),
	_lastUniteAttempt(8) // only really used on root servers and upstreams, and it'll grow there just fine
{
}

Switch::~Switch()
{
}

void Switch::onRemotePacket(const InetAddress &localAddr,const InetAddress &fromAddr,const void *data,unsigned int len)
{
	try {
		const uint64_t now = RR->node->now();

		if (len == 13) {
			/* LEGACY: before VERB_PUSH_DIRECT_PATHS, peers used broadcast
			 * announcements on the LAN to solve the 'same network problem.' We
			 * no longer send these, but we'll listen for them for a while to
			 * locate peers with versions <1.0.4. */
			Address beaconAddr(reinterpret_cast<const char *>(data) + 8,5);
			if (beaconAddr == RR->identity.address())
				return;
			if (!RR->node->shouldUsePathForZeroTierTraffic(localAddr,fromAddr))
				return;
			SharedPtr<Peer> peer(RR->topology->getPeer(beaconAddr));
			if (peer) { // we'll only respond to beacons from known peers
				if ((now - _lastBeaconResponse) >= 2500) { // limit rate of responses
					_lastBeaconResponse = now;
					Packet outp(peer->address(),RR->identity.address(),Packet::VERB_NOP);
					outp.armor(peer->key(),true);
					RR->node->putPacket(localAddr,fromAddr,outp.data(),outp.size());
				}
			}
		} else if (len > ZT_PROTO_MIN_FRAGMENT_LENGTH) { // min length check is important!
			if (reinterpret_cast<const uint8_t *>(data)[ZT_PACKET_FRAGMENT_IDX_FRAGMENT_INDICATOR] == ZT_PACKET_FRAGMENT_INDICATOR) {
				// Handle fragment ----------------------------------------------------

				Packet::Fragment fragment(data,len);
				const Address destination(fragment.destination());

				if (destination != RR->identity.address()) {
					// Fragment is not for us, so try to relay it
					if (fragment.hops() < ZT_RELAY_MAX_HOPS) {
						fragment.incrementHops();

						// Note: we don't bother initiating NAT-t for fragments, since heads will set that off.
						// It wouldn't hurt anything, just redundant and unnecessary.
						SharedPtr<Peer> relayTo = RR->topology->getPeer(destination);
						if ((!relayTo)||(!relayTo->send(fragment.data(),fragment.size(),now))) {
#ifdef ZT_ENABLE_CLUSTER
							if (RR->cluster) {
								RR->cluster->sendViaCluster(Address(),destination,fragment.data(),fragment.size(),false);
								return;
							}
#endif
							// Don't know peer or no direct path -- so relay via root server
							relayTo = RR->topology->getBestRoot();
							if (relayTo)
								relayTo->send(fragment.data(),fragment.size(),now);
						}
					} else {
						TRACE("dropped relay [fragment](%s) -> %s, max hops exceeded",fromAddr.toString().c_str(),destination.toString().c_str());
					}
				} else {
					// Fragment looks like ours
					const uint64_t fragmentPacketId = fragment.packetId();
					const unsigned int fragmentNumber = fragment.fragmentNumber();
					const unsigned int totalFragments = fragment.totalFragments();

					if ((totalFragments <= ZT_MAX_PACKET_FRAGMENTS)&&(fragmentNumber < ZT_MAX_PACKET_FRAGMENTS)&&(fragmentNumber > 0)&&(totalFragments > 1)) {
						// Fragment appears basically sane. Its fragment number must be
						// 1 or more, since a Packet with fragmented bit set is fragment 0.
						// Total fragments must be more than 1, otherwise why are we
						// seeing a Packet::Fragment?

						Mutex::Lock _l(_rxQueue_m);
						RXQueueEntry *const rq = _findRXQueueEntry(now,fragmentPacketId);

						if ((!rq->timestamp)||(rq->packetId != fragmentPacketId)) {
							// No packet found, so we received a fragment without its head.
							//TRACE("fragment (%u/%u) of %.16llx from %s",fragmentNumber + 1,totalFragments,fragmentPacketId,fromAddr.toString().c_str());
							rq->timestamp = now;
							rq->packetId = fragmentPacketId;
							rq->frags[fragmentNumber - 1] = fragment;
							rq->totalFragments = totalFragments; // total fragment count is known
							rq->haveFragments = 1 << fragmentNumber; // we have only this fragment
							rq->complete = false;
						} else if (!(rq->haveFragments & (1 << fragmentNumber))) {
							// We have other fragments and maybe the head, so add this one and check
							//TRACE("fragment (%u/%u) of %.16llx from %s",fragmentNumber + 1,totalFragments,fragmentPacketId,fromAddr.toString().c_str());
							rq->frags[fragmentNumber - 1] = fragment;
							rq->totalFragments = totalFragments;

							if (Utils::countBits(rq->haveFragments |= (1 << fragmentNumber)) == totalFragments) {
								// We have all fragments -- assemble and process full Packet
								//TRACE("packet %.16llx is complete, assembling and processing...",fragmentPacketId);
								for(unsigned int f=1;f<totalFragments;++f)
									rq->frag0.append(rq->frags[f - 1].payload(),rq->frags[f - 1].payloadLength());

								if (rq->frag0.tryDecode(RR,false)) {
									rq->timestamp = 0; // packet decoded, free entry
								} else {
									rq->complete = true; // set complete flag but leave entry since it probably needs WHOIS or something
								}
							}
						} // else this is a duplicate fragment, ignore
					}
				}

				// --------------------------------------------------------------------
			} else if (len >= ZT_PROTO_MIN_PACKET_LENGTH) { // min length check is important!
				// Handle packet head -------------------------------------------------

				// See packet format in Packet.hpp to understand this
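				// The 64-bit packet ID is the first 8 bytes of the packet, reassembled here in big-endian (network) byte order.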
				const uint64_t packetId = (
					(((uint64_t)reinterpret_cast<const uint8_t *>(data)[0]) << 56) |
					(((uint64_t)reinterpret_cast<const uint8_t *>(data)[1]) << 48) |
					(((uint64_t)reinterpret_cast<const uint8_t *>(data)[2]) << 40) |
					(((uint64_t)reinterpret_cast<const uint8_t *>(data)[3]) << 32) |
					(((uint64_t)reinterpret_cast<const uint8_t *>(data)[4]) << 24) |
					(((uint64_t)reinterpret_cast<const uint8_t *>(data)[5]) << 16) |
					(((uint64_t)reinterpret_cast<const uint8_t *>(data)[6]) << 8) |
					((uint64_t)reinterpret_cast<const uint8_t *>(data)[7])
				);
				const Address destination(reinterpret_cast<const uint8_t *>(data) + 8,ZT_ADDRESS_LENGTH);
				const Address source(reinterpret_cast<const uint8_t *>(data) + 13,ZT_ADDRESS_LENGTH);

				// Catch this and toss it -- it would never work, but it could happen if we somehow
				// mistakenly guessed an address we're bound to as a destination for another peer.
				if (source == RR->identity.address())
					return;

				//TRACE("<< %.16llx %s -> %s (size: %u)",(unsigned long long)packet->packetId(),source.toString().c_str(),destination.toString().c_str(),packet->size());

				if (destination != RR->identity.address()) {
					Packet packet(data,len);

					// Packet is not for us, so try to relay it
					if (packet.hops() < ZT_RELAY_MAX_HOPS) {
						packet.incrementHops();

						SharedPtr<Peer> relayTo = RR->topology->getPeer(destination);
						if ((relayTo)&&((relayTo->send(packet.data(),packet.size(),now)))) {
							Mutex::Lock _l(_lastUniteAttempt_m);
							uint64_t &luts = _lastUniteAttempt[_LastUniteKey(source,destination)];
							if ((now - luts) >= ZT_MIN_UNITE_INTERVAL) {
								luts = now;
								unite(source,destination);
							}
						} else {
#ifdef ZT_ENABLE_CLUSTER
							if (RR->cluster) {
								bool shouldUnite;
								{
									Mutex::Lock _l(_lastUniteAttempt_m);
									uint64_t &luts = _lastUniteAttempt[_LastUniteKey(source,destination)];
									shouldUnite = ((now - luts) >= ZT_MIN_UNITE_INTERVAL);
									if (shouldUnite)
										luts = now;
								}
								RR->cluster->sendViaCluster(source,destination,packet.data(),packet.size(),shouldUnite);
								return;
							}
#endif
							relayTo = RR->topology->getBestRoot(&source,1,true);
							if (relayTo)
								relayTo->send(packet.data(),packet.size(),now);
						}
					} else {
						TRACE("dropped relay %s(%s) -> %s, max hops exceeded",packet.source().toString().c_str(),fromAddr.toString().c_str(),destination.toString().c_str());
					}
				} else if ((reinterpret_cast<const uint8_t *>(data)[ZT_PACKET_IDX_FLAGS] & ZT_PROTO_FLAG_FRAGMENTED) != 0) {
					// Packet is the head of a fragmented packet series

					Mutex::Lock _l(_rxQueue_m);
					RXQueueEntry *const rq = _findRXQueueEntry(now,packetId);

					if ((!rq->timestamp)||(rq->packetId != packetId)) {
						// If we have no other fragments yet, create an entry and save the head
						//TRACE("fragment (0/?) of %.16llx from %s",pid,fromAddr.toString().c_str());
						rq->timestamp = now;
						rq->packetId = packetId;
						rq->frag0.init(data,len,localAddr,fromAddr,now);
						rq->totalFragments = 0;
						rq->haveFragments = 1;
						rq->complete = false;
					} else if (!(rq->haveFragments & 1)) {
						// If we have other fragments but no head, see if we are complete with the head
						if ((rq->totalFragments > 1)&&(Utils::countBits(rq->haveFragments |= 1) == rq->totalFragments)) {
							// We have all fragments -- assemble and process full Packet
							//TRACE("packet %.16llx is complete, assembling and processing...",pid);
							rq->frag0.init(data,len,localAddr,fromAddr,now);
							for(unsigned int f=1;f<rq->totalFragments;++f)
								rq->frag0.append(rq->frags[f - 1].payload(),rq->frags[f - 1].payloadLength());

							if (rq->frag0.tryDecode(RR,false)) {
								rq->timestamp = 0; // packet decoded, free entry
							} else {
								rq->complete = true; // set complete flag but leave entry since it probably needs WHOIS or something
							}
						} else {
							// Still waiting on more fragments, but keep the head
							rq->frag0.init(data,len,localAddr,fromAddr,now);
						}
					} // else this is a duplicate head, ignore
				} else {
					// Packet is unfragmented, so just process it
					IncomingPacket packet(data,len,localAddr,fromAddr,now);
					if (!packet.tryDecode(RR,false)) {
						Mutex::Lock _l(_rxQueue_m);
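						// Park the packet in the least recently used RX queue slot; it probably needs a
						// WHOIS to complete, and doAnythingWaitingForPeer() will retry decoding it later.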
						RXQueueEntry *rq = &(_rxQueue[ZT_RX_QUEUE_SIZE - 1]);
						unsigned long i = ZT_RX_QUEUE_SIZE - 1;
						while ((i)&&(rq->timestamp)) {
							RXQueueEntry *tmp = &(_rxQueue[--i]);
							if (tmp->timestamp < rq->timestamp)
								rq = tmp;
						}
						rq->timestamp = now;
						rq->packetId = packetId;
						rq->frag0 = packet;
						rq->totalFragments = 1;
						rq->haveFragments = 1;
						rq->complete = true;
					}
				}

				// --------------------------------------------------------------------
			}
		}
	} catch (std::exception &ex) {
		TRACE("dropped packet from %s: unexpected exception: %s",fromAddr.toString().c_str(),ex.what());
	} catch ( ... ) {
		TRACE("dropped packet from %s: unexpected exception: (unknown)",fromAddr.toString().c_str());
	}
}

void Switch::onLocalEthernet(const SharedPtr<Network> &network,const MAC &from,const MAC &to,unsigned int etherType,unsigned int vlanId,const void *data,unsigned int len)
{
	if (!network->hasConfig())
		return;

	// Sanity check -- bridge loop? OS problem?
	if (to == network->mac())
		return;

	// Check if this packet is from someone other than the tap -- i.e. bridged in
	bool fromBridged = false;
	if (from != network->mac()) {
		if (!network->config().permitsBridging(RR->identity.address())) {
			TRACE("%.16llx: %s -> %s %s not forwarded, bridging disabled or this peer not a bridge",network->id(),from.toString().c_str(),to.toString().c_str(),etherTypeName(etherType));
			return;
		}
		fromBridged = true;
	}

	if (to.isMulticast()) {
		// Destination is a multicast address (including broadcast)
		MulticastGroup mg(to,0);

		if (to.isBroadcast()) {
			if ( (etherType == ZT_ETHERTYPE_ARP) && (len >= 28) && ((((const uint8_t *)data)[2] == 0x08)&&(((const uint8_t *)data)[3] == 0x00)&&(((const uint8_t *)data)[4] == 6)&&(((const uint8_t *)data)[5] == 4)&&(((const uint8_t *)data)[7] == 0x01)) ) {
				/* IPv4 ARP is one of the few special cases that we impose upon what is
				 * otherwise a straightforward Ethernet switch emulation. Vanilla ARP
				 * is dumb old broadcast and simply doesn't scale. ZeroTier multicast
				 * groups have an additional field called ADI (additional distinguishing
				 * information) which was added specifically for ARP though it could
				 * be used for other things too. We then take ARP broadcasts and turn
				 * them into multicasts by stuffing the IP address being queried into
				 * the 32-bit ADI field. In practice this uses our multicast pub/sub
				 * system to implement a kind of extended/distributed ARP table. */
				mg = MulticastGroup::deriveMulticastGroupForAddressResolution(InetAddress(((const unsigned char *)data) + 24,4,0));
			} else if (!network->config().enableBroadcast()) {
				// Don't transmit broadcasts if this network doesn't want them
				TRACE("%.16llx: dropped broadcast since ff:ff:ff:ff:ff:ff is not enabled",network->id());
				return;
			}
		} else if ((etherType == ZT_ETHERTYPE_IPV6)&&(len >= (40 + 8 + 16))) {
			// IPv6 NDP emulation for certain very special patterns of private IPv6 addresses -- if enabled
			if ((network->config().ndpEmulation())&&(reinterpret_cast<const uint8_t *>(data)[6] == 0x3a)&&(reinterpret_cast<const uint8_t *>(data)[40] == 0x87)) { // ICMPv6 neighbor solicitation
				Address v6EmbeddedAddress;
				const uint8_t *const pkt6 = reinterpret_cast<const uint8_t *>(data) + 40 + 8;
				const uint8_t *my6 = (const uint8_t *)0;

				// ZT-RFC4193 address: fdNN:NNNN:NNNN:NNNN:NN99:93DD:DDDD:DDDD / 88 (one /128 per actual host)
				// ZT-6PLANE address: fcXX:XXXX:XXDD:DDDD:DDDD:####:####:#### / 40 (one /80 per actual host)
				// (XX - lower 32 bits of network ID XORed with higher 32 bits)
				// For these to work, we must have a ZT-managed address assigned in one of the
				// above formats, and the query must match its prefix.
				for(unsigned int sipk=0;sipk<network->config().staticIpCount;++sipk) {
					const InetAddress *const sip = &(network->config().staticIps[sipk]);
					if (sip->ss_family == AF_INET6) {
						my6 = reinterpret_cast<const uint8_t *>(reinterpret_cast<const struct sockaddr_in6 *>(&(*sip))->sin6_addr.s6_addr);
						const unsigned int sipNetmaskBits = Utils::ntoh((uint16_t)reinterpret_cast<const struct sockaddr_in6 *>(&(*sip))->sin6_port);
						if ((sipNetmaskBits == 88)&&(my6[0] == 0xfd)&&(my6[9] == 0x99)&&(my6[10] == 0x93)) { // ZT-RFC4193 /88 ???
							unsigned int ptr = 0;
							while (ptr != 11) {
								if (pkt6[ptr] != my6[ptr])
									break;
								++ptr;
							}
							if (ptr == 11) { // prefix match!
								v6EmbeddedAddress.setTo(pkt6 + ptr,5);
								break;
							}
						} else if (sipNetmaskBits == 40) { // ZT-6PLANE /40 ???
							const uint32_t nwid32 = (uint32_t)((network->id() ^ (network->id() >> 32)) & 0xffffffff);
							if ( (my6[0] == 0xfc) && (my6[1] == (uint8_t)((nwid32 >> 24) & 0xff)) && (my6[2] == (uint8_t)((nwid32 >> 16) & 0xff)) && (my6[3] == (uint8_t)((nwid32 >> 8) & 0xff)) && (my6[4] == (uint8_t)(nwid32 & 0xff))) {
								unsigned int ptr = 0;
								while (ptr != 5) {
									if (pkt6[ptr] != my6[ptr])
										break;
									++ptr;
								}
								if (ptr == 5) { // prefix match!
									v6EmbeddedAddress.setTo(pkt6 + ptr,5);
									break;
								}
							}
						}
					}
				}

				if ((v6EmbeddedAddress)&&(v6EmbeddedAddress != RR->identity.address())) {
					const MAC peerMac(v6EmbeddedAddress,network->id());
					TRACE("IPv6 NDP emulation: %.16llx: forging response for %s/%s",network->id(),v6EmbeddedAddress.toString().c_str(),peerMac.toString().c_str());

					uint8_t adv[72];
					adv[0] = 0x60; adv[1] = 0x00; adv[2] = 0x00; adv[3] = 0x00;
					adv[4] = 0x00; adv[5] = 0x20;
					adv[6] = 0x3a; adv[7] = 0xff;
					for(int i=0;i<16;++i) adv[8 + i] = pkt6[i];
					for(int i=0;i<16;++i) adv[24 + i] = my6[i];
					adv[40] = 0x88; adv[41] = 0x00;
					adv[42] = 0x00; adv[43] = 0x00; // future home of checksum
					adv[44] = 0x60; adv[45] = 0x00; adv[46] = 0x00; adv[47] = 0x00;
					for(int i=0;i<16;++i) adv[48 + i] = pkt6[i];
					adv[64] = 0x02; adv[65] = 0x01;
					adv[66] = peerMac[0]; adv[67] = peerMac[1]; adv[68] = peerMac[2]; adv[69] = peerMac[3]; adv[70] = peerMac[4]; adv[71] = peerMac[5];
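					// Compute the ICMPv6 checksum over the IPv6 pseudo-header (source, destination,
					// upper-layer length 0x20, next header 0x3a) followed by the forged neighbor advertisement.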
					uint16_t pseudo_[36];
					uint8_t *const pseudo = reinterpret_cast<uint8_t *>(pseudo_);
					for(int i=0;i<32;++i) pseudo[i] = adv[8 + i];
					pseudo[32] = 0x00; pseudo[33] = 0x00; pseudo[34] = 0x00; pseudo[35] = 0x20;
					pseudo[36] = 0x00; pseudo[37] = 0x00; pseudo[38] = 0x00; pseudo[39] = 0x3a;
					for(int i=0;i<32;++i) pseudo[40 + i] = adv[40 + i];
					uint32_t checksum = 0;
					for(int i=0;i<36;++i) checksum += Utils::hton(pseudo_[i]);
					while ((checksum >> 16)) checksum = (checksum & 0xffff) + (checksum >> 16);
					checksum = ~checksum;
					adv[42] = (checksum >> 8) & 0xff;
					adv[43] = checksum & 0xff;

					RR->node->putFrame(network->id(),network->userPtr(),peerMac,from,ZT_ETHERTYPE_IPV6,0,adv,72);
					return; // NDP emulation done. We have forged a "fake" reply, so no need to send actual NDP query.
				} // else no NDP emulation
			} // else no NDP emulation
		}

		/* Learn multicast groups for bridged-in hosts.
		 * Note that some OSes, most notably Linux, do this for you by learning
		 * multicast addresses on bridge interfaces and subscribing each slave.
		 * But in that case this does no harm, as the sets are just merged. */
		if (fromBridged)
			network->learnBridgedMulticastGroup(mg,RR->node->now());

		//TRACE("%.16llx: MULTICAST %s -> %s %s %u",network->id(),from.toString().c_str(),mg.toString().c_str(),etherTypeName(etherType),len);

		if (!Filter::run(
			RR,
			network->id(),
			RR->identity.address(),
			Address(), // 0 destination ZT address for multicasts since this is unknown at time of send
			from,
			to,
			(const uint8_t *)data,
			len,
			etherType,
			vlanId,
			network->config().rules,
			network->config().ruleCount))
		{
			TRACE("%.16llx: %s -> %s %s packet not sent: Filter::run() == false (multicast)",network->id(),from.toString().c_str(),to.toString().c_str(),etherTypeName(etherType));
			return;
		}

		RR->mc->send(
			((!network->config().isPublic())&&(network->config().com)) ? &(network->config().com) : (const CertificateOfMembership *)0,
			network->config().multicastLimit,
			RR->node->now(),
			network->id(),
			network->config().activeBridges(),
			mg,
			(fromBridged) ? from : MAC(),
			etherType,
			data,
			len);

		return;
	}

	if (to[0] == MAC::firstOctetForNetwork(network->id())) {
		// Destination is another ZeroTier peer on the same network

		Address toZT(to.toAddress(network->id())); // since in-network MACs are derived from addresses and network IDs, we can reverse this
		SharedPtr<Peer> toPeer(RR->topology->getPeer(toZT));

		if (!Filter::run(
			RR,
			network->id(),
			RR->identity.address(),
			toZT,
			from,
			to,
			(const uint8_t *)data,
			len,
			etherType,
			vlanId,
			network->config().rules,
			network->config().ruleCount))
		{
			TRACE("%.16llx: %s -> %s %s packet not sent: Filter::run() == false",network->id(),from.toString().c_str(),to.toString().c_str(),etherTypeName(etherType));
			return;
		}

		const bool includeCom = ( (network->config().isPrivate()) && (network->config().com) && ((!toPeer)||(toPeer->needsOurNetworkMembershipCertificate(network->id(),RR->node->now(),true))) );
		if ((fromBridged)||(includeCom)) {
			Packet outp(toZT,RR->identity.address(),Packet::VERB_EXT_FRAME);
			outp.append(network->id());
			if (includeCom) {
				outp.append((unsigned char)0x01); // 0x01 -- COM included
				network->config().com.serialize(outp);
			} else {
				outp.append((unsigned char)0x00);
			}
			to.appendTo(outp);
			from.appendTo(outp);
			outp.append((uint16_t)etherType);
			outp.append(data,len);
			outp.compress();
			send(outp,true,network->id());
		} else {
			Packet outp(toZT,RR->identity.address(),Packet::VERB_FRAME);
			outp.append(network->id());
			outp.append((uint16_t)etherType);
			outp.append(data,len);
			outp.compress();
			send(outp,true,network->id());
		}

		//TRACE("%.16llx: UNICAST: %s -> %s etherType==%s(%.4x) vlanId==%u len==%u fromBridged==%d includeCom==%d",network->id(),from.toString().c_str(),to.toString().c_str(),etherTypeName(etherType),etherType,vlanId,len,(int)fromBridged,(int)includeCom);

		return;
	}

	{
		// Destination is bridged behind a remote peer

		Address bridges[ZT_MAX_BRIDGE_SPAM];
		unsigned int numBridges = 0;

		/* Create an array of up to ZT_MAX_BRIDGE_SPAM recipients for this bridged frame. */
		bridges[0] = network->findBridgeTo(to);
		std::vector<Address> activeBridges(network->config().activeBridges());
		if ((bridges[0])&&(bridges[0] != RR->identity.address())&&(network->config().permitsBridging(bridges[0]))) {
			/* We have a known bridge route for this MAC, send it there. */
			++numBridges;
		} else if (!activeBridges.empty()) {
			/* If there is no known route, spam to up to ZT_MAX_BRIDGE_SPAM active
			 * bridges. If someone responds, we'll learn the route. */
			std::vector<Address>::const_iterator ab(activeBridges.begin());
			if (activeBridges.size() <= ZT_MAX_BRIDGE_SPAM) {
				// If there are <= ZT_MAX_BRIDGE_SPAM active bridges, spam them all
				while (ab != activeBridges.end()) {
					bridges[numBridges++] = *ab;
					++ab;
				}
			} else {
				// Otherwise pick a random set of them
				while (numBridges < ZT_MAX_BRIDGE_SPAM) {
					if (ab == activeBridges.end())
						ab = activeBridges.begin();
					if (((unsigned long)RR->node->prng() % (unsigned long)activeBridges.size()) == 0) {
						bridges[numBridges++] = *ab;
						++ab;
					} else ++ab;
				}
			}
		}

		for(unsigned int b=0;b<numBridges;++b) {
			SharedPtr<Peer> bridgePeer(RR->topology->getPeer(bridges[b]));
			Packet outp(bridges[b],RR->identity.address(),Packet::VERB_EXT_FRAME);
			outp.append(network->id());
			if ( (network->config().isPrivate()) && (network->config().com) && ((!bridgePeer)||(bridgePeer->needsOurNetworkMembershipCertificate(network->id(),RR->node->now(),true))) ) {
				outp.append((unsigned char)0x01); // 0x01 -- COM included
				network->config().com.serialize(outp);
			} else {
				outp.append((unsigned char)0);
			}
			to.appendTo(outp);
			from.appendTo(outp);
			outp.append((uint16_t)etherType);
			outp.append(data,len);
			outp.compress();
			send(outp,true,network->id());
		}
	}
}

void Switch::send(const Packet &packet,bool encrypt,uint64_t nwid)
{
	if (packet.destination() == RR->identity.address()) {
		TRACE("BUG: caught attempt to send() to self, ignored");
		return;
	}

	//TRACE(">> %s to %s (%u bytes, encrypt==%d, nwid==%.16llx)",Packet::verbString(packet.verb()),packet.destination().toString().c_str(),packet.size(),(int)encrypt,nwid);

	if (!_trySend(packet,encrypt,nwid)) {
		Mutex::Lock _l(_txQueue_m);
		_txQueue.push_back(TXQueueEntry(packet.destination(),RR->node->now(),packet,encrypt,nwid));
	}
}

bool Switch::unite(const Address &p1,const Address &p2)
{
	if ((p1 == RR->identity.address())||(p2 == RR->identity.address()))
		return false;
	SharedPtr<Peer> p1p = RR->topology->getPeer(p1);
	if (!p1p)
		return false;
	SharedPtr<Peer> p2p = RR->topology->getPeer(p2);
	if (!p2p)
		return false;

	const uint64_t now = RR->node->now();

	std::pair<InetAddress,InetAddress> cg(Peer::findCommonGround(*p1p,*p2p,now));
	if ((!(cg.first))||(cg.first.ipScope() != cg.second.ipScope()))
		return false;

	TRACE("unite: %s(%s) <> %s(%s)",p1.toString().c_str(),cg.second.toString().c_str(),p2.toString().c_str(),cg.first.toString().c_str());

	/* Tell P1 where to find P2 and vice versa, sending the packets to P1 and
	 * P2 in randomized order in terms of which gets sent first. This is done
	 * since in a few cases NAT-t can be sensitive to slight timing differences
	 * in terms of when the two peers initiate. Normally this is accounted for
	 * by the nearly-simultaneous RENDEZVOUS kickoff from the relay, but
	 * given that relays are hosted on cloud providers this can in some
	 * cases have a few ms of latency between packet departures. By randomizing
	 * the order we make each attempted NAT-t favor one or the other going
	 * first, meaning if it doesn't succeed the first time it might the second
	 * and so forth. */
	unsigned int alt = (unsigned int)RR->node->prng() & 1;
	unsigned int completed = alt + 2;
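	// The loop below runs exactly twice; the random initial LSB of alt decides which peer's RENDEZVOUS is sent first.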
	while (alt != completed) {
		if ((alt & 1) == 0) {
			// Tell p1 where to find p2.
			Packet outp(p1,RR->identity.address(),Packet::VERB_RENDEZVOUS);
			outp.append((unsigned char)0);
			p2.appendTo(outp);
			outp.append((uint16_t)cg.first.port());
			if (cg.first.isV6()) {
				outp.append((unsigned char)16);
				outp.append(cg.first.rawIpData(),16);
			} else {
				outp.append((unsigned char)4);
				outp.append(cg.first.rawIpData(),4);
			}
			outp.armor(p1p->key(),true);
			p1p->send(outp.data(),outp.size(),now);
		} else {
			// Tell p2 where to find p1.
			Packet outp(p2,RR->identity.address(),Packet::VERB_RENDEZVOUS);
			outp.append((unsigned char)0);
			p1.appendTo(outp);
			outp.append((uint16_t)cg.second.port());
			if (cg.second.isV6()) {
				outp.append((unsigned char)16);
				outp.append(cg.second.rawIpData(),16);
			} else {
				outp.append((unsigned char)4);
				outp.append(cg.second.rawIpData(),4);
			}
			outp.armor(p2p->key(),true);
			p2p->send(outp.data(),outp.size(),now);
		}
		++alt; // counts up and also flips LSB
	}

	return true;
}

void Switch::rendezvous(const SharedPtr<Peer> &peer,const InetAddress &localAddr,const InetAddress &atAddr)
{
	TRACE("sending NAT-t message to %s(%s)",peer->address().toString().c_str(),atAddr.toString().c_str());
	const uint64_t now = RR->node->now();
	peer->sendHELLO(localAddr,atAddr,now,2); // first attempt: send low-TTL packet to 'open' local NAT
	{
		Mutex::Lock _l(_contactQueue_m);
		_contactQueue.push_back(ContactQueueEntry(peer,now + ZT_NAT_T_TACTICAL_ESCALATION_DELAY,localAddr,atAddr));
	}
}

void Switch::requestWhois(const Address &addr)
{
	bool inserted = false;
	{
		Mutex::Lock _l(_outstandingWhoisRequests_m);
		WhoisRequest &r = _outstandingWhoisRequests[addr];
		if (r.lastSent) {
			r.retries = 0; // reset retry count if entry already existed, but keep waiting and retry again after normal timeout
		} else {
			r.lastSent = RR->node->now();
			inserted = true;
		}
	}
	if (inserted)
		_sendWhoisRequest(addr,(const Address *)0,0);
}

void Switch::doAnythingWaitingForPeer(const SharedPtr<Peer> &peer)
{
	{ // cancel pending WHOIS since we now know this peer
		Mutex::Lock _l(_outstandingWhoisRequests_m);
		_outstandingWhoisRequests.erase(peer->address());
	}

	{ // finish processing any packets waiting on peer's public key / identity
		Mutex::Lock _l(_rxQueue_m);
		unsigned long i = ZT_RX_QUEUE_SIZE;
		while (i) {
			RXQueueEntry *rq = &(_rxQueue[--i]);
			if ((rq->timestamp)&&(rq->complete)) {
				if (rq->frag0.tryDecode(RR,false))
					rq->timestamp = 0;
			}
		}
	}

	{ // finish sending any packets waiting on peer's public key / identity
		Mutex::Lock _l(_txQueue_m);
		for(std::list< TXQueueEntry >::iterator txi(_txQueue.begin());txi!=_txQueue.end();) {
			if (txi->dest == peer->address()) {
				if (_trySend(txi->packet,txi->encrypt,txi->nwid))
					_txQueue.erase(txi++);
				else ++txi;
			} else ++txi;
		}
	}
}

unsigned long Switch::doTimerTasks(uint64_t now)
{
	unsigned long nextDelay = 0xffffffff; // ceiling delay, caller will cap to minimum

	{ // Iterate through NAT traversal strategies for entries in contact queue
		Mutex::Lock _l(_contactQueue_m);
		for(std::list<ContactQueueEntry>::iterator qi(_contactQueue.begin());qi!=_contactQueue.end();) {
			if (now >= qi->fireAtTime) {
				if (!qi->peer->pushDirectPaths(qi->localAddr,qi->inaddr,now,true,false))
					qi->peer->sendHELLO(qi->localAddr,qi->inaddr,now);
				_contactQueue.erase(qi++);
				continue;

				/* Old symmetric NAT buster code, obsoleted by port prediction alg in SelfAwareness but left around for now in case we revert
				if (qi->strategyIteration == 0) {
					// First strategy: send packet directly to destination
					qi->peer->sendHELLO(qi->localAddr,qi->inaddr,now);
				} else if (qi->strategyIteration <= 3) {
					// Strategies 1-3: try escalating ports for symmetric NATs that remap sequentially
					InetAddress tmpaddr(qi->inaddr);
					int p = (int)qi->inaddr.port() + qi->strategyIteration;
					if (p > 65535)
						p -= 64511;
					tmpaddr.setPort((unsigned int)p);
					qi->peer->sendHELLO(qi->localAddr,tmpaddr,now);
				} else {
					// All strategies tried, expire entry
					_contactQueue.erase(qi++);
					continue;
				}
				++qi->strategyIteration;
				qi->fireAtTime = now + ZT_NAT_T_TACTICAL_ESCALATION_DELAY;
				nextDelay = std::min(nextDelay,(unsigned long)ZT_NAT_T_TACTICAL_ESCALATION_DELAY);
				*/
			} else {
				nextDelay = std::min(nextDelay,(unsigned long)(qi->fireAtTime - now));
			}
			++qi; // if qi was erased, loop will have continued before here
		}
	}

	{ // Retry outstanding WHOIS requests
		Mutex::Lock _l(_outstandingWhoisRequests_m);
		Hashtable< Address,WhoisRequest >::Iterator i(_outstandingWhoisRequests);
		Address *a = (Address *)0;
		WhoisRequest *r = (WhoisRequest *)0;
		while (i.next(a,r)) {
			const unsigned long since = (unsigned long)(now - r->lastSent);
			if (since >= ZT_WHOIS_RETRY_DELAY) {
				if (r->retries >= ZT_MAX_WHOIS_RETRIES) {
					TRACE("WHOIS %s timed out",a->toString().c_str());
					_outstandingWhoisRequests.erase(*a);
				} else {
					r->lastSent = now;
					r->peersConsulted[r->retries] = _sendWhoisRequest(*a,r->peersConsulted,r->retries);
					++r->retries;
					TRACE("WHOIS %s (retry %u)",a->toString().c_str(),r->retries);
					nextDelay = std::min(nextDelay,(unsigned long)ZT_WHOIS_RETRY_DELAY);
				}
			} else {
				nextDelay = std::min(nextDelay,ZT_WHOIS_RETRY_DELAY - since);
			}
		}
	}

	{ // Time out TX queue packets that never got WHOIS lookups or other info.
		Mutex::Lock _l(_txQueue_m);
		for(std::list< TXQueueEntry >::iterator txi(_txQueue.begin());txi!=_txQueue.end();) {
			if (_trySend(txi->packet,txi->encrypt,txi->nwid))
				_txQueue.erase(txi++);
			else if ((now - txi->creationTime) > ZT_TRANSMIT_QUEUE_TIMEOUT) {
				TRACE("TX %s -> %s timed out",txi->packet.source().toString().c_str(),txi->packet.destination().toString().c_str());
				_txQueue.erase(txi++);
			} else ++txi;
		}
	}

	{ // Remove really old last unite attempt entries to keep table size controlled
		Mutex::Lock _l(_lastUniteAttempt_m);
		Hashtable< _LastUniteKey,uint64_t >::Iterator i(_lastUniteAttempt);
		_LastUniteKey *k = (_LastUniteKey *)0;
		uint64_t *v = (uint64_t *)0;
		while (i.next(k,v)) {
			if ((now - *v) >= (ZT_MIN_UNITE_INTERVAL * 8))
				_lastUniteAttempt.erase(*k);
		}
	}

	return nextDelay;
}

Address Switch::_sendWhoisRequest(const Address &addr,const Address *peersAlreadyConsulted,unsigned int numPeersAlreadyConsulted)
{
	SharedPtr<Peer> root(RR->topology->getBestRoot(peersAlreadyConsulted,numPeersAlreadyConsulted,false));
	if (root) {
		Packet outp(root->address(),RR->identity.address(),Packet::VERB_WHOIS);
		addr.appendTo(outp);
		outp.armor(root->key(),true);
		if (root->send(outp.data(),outp.size(),RR->node->now()))
			return root->address();
	}
	return Address();
}

bool Switch::_trySend(const Packet &packet,bool encrypt,uint64_t nwid)
{
	SharedPtr<Peer> peer(RR->topology->getPeer(packet.destination()));

	if (peer) {
		const uint64_t now = RR->node->now();

		SharedPtr<Network> network;
		if (nwid) {
			network = RR->node->network(nwid);
			if ((!network)||(!network->hasConfig()))
				return false; // we probably just left this network, let its packets die
		}

		Path *viaPath = peer->getBestPath(now);
		SharedPtr<Peer> relay;
		if (!viaPath) {
			relay = RR->topology->getBestRoot();
			if ( (!relay) || (!(viaPath = relay->getBestPath(now))) )
				return false;
		}
		if (relay) {
			peer->pushDirectPaths(viaPath->localAddress(),viaPath->address(),now,false,( (network)&&(network->isAllowed(peer)) ));
			viaPath->sent(now);
		}

		Packet tmp(packet);
		unsigned int chunkSize = std::min(tmp.size(),(unsigned int)ZT_UDP_DEFAULT_PAYLOAD_MTU);
		tmp.setFragmented(chunkSize < tmp.size());

		const uint64_t trustedPathId = RR->topology->getOutboundPathTrust(viaPath->address());
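		// On a preconfigured trusted physical path the packet carries the trusted path ID and
		// skips armor(); otherwise it is encrypted/authenticated with the peer's key.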
		if (trustedPathId) {
			tmp.setTrusted(trustedPathId);
		} else {
			tmp.armor(peer->key(),encrypt);
		}

		if (viaPath->send(RR,tmp.data(),chunkSize,now)) {
			if (chunkSize < tmp.size()) {
				// Too big for one packet, fragment the rest
				unsigned int fragStart = chunkSize;
				unsigned int remaining = tmp.size() - chunkSize;
				unsigned int fragsRemaining = (remaining / (ZT_UDP_DEFAULT_PAYLOAD_MTU - ZT_PROTO_MIN_FRAGMENT_LENGTH));
				if ((fragsRemaining * (ZT_UDP_DEFAULT_PAYLOAD_MTU - ZT_PROTO_MIN_FRAGMENT_LENGTH)) < remaining)
					++fragsRemaining;
				unsigned int totalFragments = fragsRemaining + 1;

				for(unsigned int fno=1;fno<totalFragments;++fno) {
					chunkSize = std::min(remaining,(unsigned int)(ZT_UDP_DEFAULT_PAYLOAD_MTU - ZT_PROTO_MIN_FRAGMENT_LENGTH));
					Packet::Fragment frag(tmp,fragStart,chunkSize,fno,totalFragments);
					viaPath->send(RR,frag.data(),frag.size(),now);
					fragStart += chunkSize;
					remaining -= chunkSize;
				}
			}

			return true;
		}
	} else {
		requestWhois(packet.destination());
	}
	return false;
}

} // namespace ZeroTier