// Cluster.cpp
  1. /*
  2. * ZeroTier One - Network Virtualization Everywhere
  3. * Copyright (C) 2011-2015 ZeroTier, Inc.
  4. *
  5. * This program is free software: you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License as published by
  7. * the Free Software Foundation, either version 3 of the License, or
  8. * (at your option) any later version.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  17. *
  18. * --
  19. *
  20. * ZeroTier may be used and distributed under the terms of the GPLv3, which
  21. * are available at: http://www.gnu.org/licenses/gpl-3.0.html
  22. *
  23. * If you would like to embed ZeroTier into a commercial application or
  24. * redistribute it in a modified binary form, please contact ZeroTier Networks
  25. * LLC. Start here: http://www.zerotier.com/
  26. */
  27. #ifdef ZT_ENABLE_CLUSTER
  28. #include <stdint.h>
  29. #include <stdio.h>
  30. #include <stdlib.h>
  31. #include <string.h>
  32. #include <math.h>
  33. #include <algorithm>
  34. #include <utility>
  35. #include "../version.h"
  36. #include "Cluster.hpp"
  37. #include "RuntimeEnvironment.hpp"
  38. #include "MulticastGroup.hpp"
  39. #include "CertificateOfMembership.hpp"
  40. #include "Salsa20.hpp"
  41. #include "Poly1305.hpp"
  42. #include "Identity.hpp"
  43. #include "Topology.hpp"
  44. #include "Packet.hpp"
  45. #include "Switch.hpp"
  46. #include "Node.hpp"
  47. namespace ZeroTier {
  48. static inline double _dist3d(int x1,int y1,int z1,int x2,int y2,int z2)
  49. throw()
  50. {
  51. double dx = ((double)x2 - (double)x1);
  52. double dy = ((double)y2 - (double)y1);
  53. double dz = ((double)z2 - (double)z1);
  54. return sqrt((dx * dx) + (dy * dy) + (dz * dz));
  55. }
// Construct the cluster service for this member node.
//
// renv: runtime environment
// id: this member's cluster member ID
// zeroTierPhysicalEndpoints: physical endpoints at which this member can be reached
// x,y,z: this member's spatial coordinates (only meaningful if geo-location is enabled)
// sendFunction: callback used to physically deliver encrypted state messages to another member
// sendFunctionArg: opaque first argument to sendFunction
// addressToLocationFunction: optional callback resolving a physical address to x,y,z (may be NULL)
// addressToLocationFunctionArg: opaque first argument to addressToLocationFunction
Cluster::Cluster(
	const RuntimeEnvironment *renv,
	uint16_t id,
	const std::vector<InetAddress> &zeroTierPhysicalEndpoints,
	int32_t x,
	int32_t y,
	int32_t z,
	void (*sendFunction)(void *,unsigned int,const void *,unsigned int),
	void *sendFunctionArg,
	int (*addressToLocationFunction)(void *,const struct sockaddr_storage *,int *,int *,int *),
	void *addressToLocationFunctionArg) :
	RR(renv),
	_sendFunction(sendFunction),
	_sendFunctionArg(sendFunctionArg),
	_addressToLocationFunction(addressToLocationFunction),
	_addressToLocationFunctionArg(addressToLocationFunctionArg),
	_x(x),
	_y(y),
	_z(z),
	_id(id),
	_zeroTierPhysicalEndpoints(zeroTierPhysicalEndpoints),
	_members(new _Member[ZT_CLUSTER_MAX_MEMBERS]),
	_lastFlushed(0),
	_lastCleanedRemotePeers(0)
{
	uint16_t stmp[ZT_SHA512_DIGEST_LEN / sizeof(uint16_t)];

	// Generate master secret by hashing the secret from our Identity key pair
	RR->identity.sha512PrivateKey(_masterSecret);

	// Generate our inbound message key, which is the master secret XORed with our ID and hashed twice
	// (addMember() derives each outbound key the same way from the recipient's ID)
	memcpy(stmp,_masterSecret,sizeof(stmp));
	stmp[0] ^= Utils::hton(id);
	SHA512::hash(stmp,stmp,sizeof(stmp));
	SHA512::hash(stmp,stmp,sizeof(stmp));
	memcpy(_key,stmp,sizeof(_key));
	Utils::burn(stmp,sizeof(stmp)); // don't leave key material on the stack
}
  92. Cluster::~Cluster()
  93. {
  94. Utils::burn(_masterSecret,sizeof(_masterSecret));
  95. Utils::burn(_key,sizeof(_key));
  96. delete [] _members;
  97. }
// Decrypt, authenticate, and dispatch one inbound cluster state message.
//
// Wire format: <[16] IV><[8] MAC><[2] from member ID><[2] to member ID><... submessages>
// Each submessage: <[2] length><[1] type><... payload>
//
// msg: raw message bytes as received
// len: length of msg in bytes
//
// Malformed, unauthenticated, or misaddressed messages are silently dropped.
void Cluster::handleIncomingStateMessage(const void *msg,unsigned int len)
{
	Buffer<ZT_CLUSTER_MAX_MESSAGE_LENGTH> dmsg;
	{
		// FORMAT: <[16] iv><[8] MAC><... data>
		if ((len < 24)||(len > ZT_CLUSTER_MAX_MESSAGE_LENGTH))
			return;

		// 16-byte IV: first 8 bytes XORed with key, last 8 bytes used as Salsa20 64-bit IV
		char keytmp[32];
		memcpy(keytmp,_key,32);
		for(int i=0;i<8;++i)
			keytmp[i] ^= reinterpret_cast<const char *>(msg)[i];
		Salsa20 s20(keytmp,256,reinterpret_cast<const char *>(msg) + 8);
		Utils::burn(keytmp,sizeof(keytmp)); // wipe derived key from the stack

		// One-time-use Poly1305 key from first 32 bytes of Salsa20 keystream (as per DJB/NaCl "standard")
		char polykey[ZT_POLY1305_KEY_LEN];
		memset(polykey,0,sizeof(polykey));
		s20.encrypt12(polykey,polykey,sizeof(polykey));

		// Compute 16-byte MAC
		char mac[ZT_POLY1305_MAC_LEN];
		Poly1305::compute(mac,reinterpret_cast<const char *>(msg) + 24,len - 24,polykey);

		// Check first 8 bytes of MAC against 64-bit MAC in stream; drop silently on mismatch
		if (!Utils::secureEq(mac,reinterpret_cast<const char *>(msg) + 16,8))
			return;

		// Decrypt!
		dmsg.setSize(len - 24);
		s20.decrypt12(reinterpret_cast<const char *>(msg) + 24,const_cast<void *>(dmsg.data()),dmsg.size());
	}

	// Must at least contain the from/to member ID header
	if (dmsg.size() < 4)
		return;
	const uint16_t fromMemberId = dmsg.at<uint16_t>(0);
	unsigned int ptr = 2;
	if (fromMemberId == _id) // sanity check: we don't talk to ourselves
		return;
	const uint16_t toMemberId = dmsg.at<uint16_t>(ptr);
	ptr += 2;
	if (toMemberId != _id) // sanity check: message not for us?
		return;

	{ // make sure sender is actually considered a member
		Mutex::Lock _l3(_memberIds_m);
		if (std::find(_memberIds.begin(),_memberIds.end(),fromMemberId) == _memberIds.end())
			return;
	}

	try {
		// Messages are batched: walk each length-prefixed submessage in turn
		while (ptr < dmsg.size()) {
			const unsigned int mlen = dmsg.at<uint16_t>(ptr); ptr += 2;
			const unsigned int nextPtr = ptr + mlen;
			if (nextPtr > dmsg.size()) // truncated submessage: stop parsing
				break;

			int mtype = -1;
			try {
				switch((StateMessageType)(mtype = (int)dmsg[ptr++])) {
					default: // ignore unknown message types
						break;

					// Heartbeat carrying the sender's version, location, load, and physical endpoints
					case CLUSTER_MESSAGE_ALIVE: {
						_Member &m = _members[fromMemberId];
						Mutex::Lock mlck(m.lock);
						ptr += 7; // skip version stuff, not used yet
						m.x = dmsg.at<int32_t>(ptr); ptr += 4;
						m.y = dmsg.at<int32_t>(ptr); ptr += 4;
						m.z = dmsg.at<int32_t>(ptr); ptr += 4;
						ptr += 8; // skip local clock, not used
						m.load = dmsg.at<uint64_t>(ptr); ptr += 8;
						m.peers = dmsg.at<uint64_t>(ptr); ptr += 8;
						ptr += 8; // skip flags, unused
#ifdef ZT_TRACE
						std::string addrs;
#endif
						// Replace the member's endpoint list with the freshly announced one,
						// skipping any address that fails to deserialize to a valid InetAddress
						unsigned int physicalAddressCount = dmsg[ptr++];
						m.zeroTierPhysicalEndpoints.clear();
						for(unsigned int i=0;i<physicalAddressCount;++i) {
							m.zeroTierPhysicalEndpoints.push_back(InetAddress());
							ptr += m.zeroTierPhysicalEndpoints.back().deserialize(dmsg,ptr);
							if (!(m.zeroTierPhysicalEndpoints.back())) {
								m.zeroTierPhysicalEndpoints.pop_back();
							}
#ifdef ZT_TRACE
							else {
								if (addrs.length() > 0)
									addrs.push_back(',');
								addrs.append(m.zeroTierPhysicalEndpoints.back().toString());
							}
#endif
						}
#ifdef ZT_TRACE
						// Only trace when the member transitions from dead/unknown to alive
						if ((RR->node->now() - m.lastReceivedAliveAnnouncement) >= ZT_CLUSTER_TIMEOUT) {
							TRACE("[%u] I'm alive! peers close to %d,%d,%d can be redirected to: %s",(unsigned int)fromMemberId,m.x,m.y,m.z,addrs.c_str());
						}
#endif
						m.lastReceivedAliveAnnouncement = RR->node->now();
					} break;

					// Sender announces it has a direct link to the peer whose identity follows
					case CLUSTER_MESSAGE_HAVE_PEER: {
						Identity id;
						ptr += id.deserialize(dmsg,ptr);
						if (id) {
							RR->topology->saveIdentity(id);

							{
								// Record (peer address, member) -> last heard time for relay routing
								Mutex::Lock _l(_remotePeers_m);
								_remotePeers[std::pair<Address,unsigned int>(id.address(),(unsigned int)fromMemberId)] = RR->node->now();
							}

							// Retry any sends that were queued awaiting knowledge of this peer
							std::list<_SQE> q;
							{
								Mutex::Lock _l(_sendViaClusterQueue_m);
								std::map< Address,std::list<_SQE> >::iterator qe(_sendViaClusterQueue.find(id.address()));
								if (qe != _sendViaClusterQueue.end()) {
									q.swap(qe->second); // just swap ptr instead of copying
									_sendViaClusterQueue.erase(qe);
								}
							}
							for(std::list<_SQE>::iterator qi(q.begin());qi!=q.end();++qi)
								this->sendViaCluster(id.address(),qi->toPeerAddress,qi->data,qi->len,qi->unite);

							TRACE("[%u] has %s (retried %u queued sends)",(unsigned int)fromMemberId,id.address().toString().c_str(),(unsigned int)q.size());
						}
					} break;

					// Sender asks whether we have a direct link to the given ZeroTier address
					case CLUSTER_MESSAGE_WANT_PEER: {
						const Address zeroTierAddress(dmsg.field(ptr,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH); ptr += ZT_ADDRESS_LENGTH;
						SharedPtr<Peer> peer(RR->topology->getPeerNoCache(zeroTierAddress));
						if ( (peer) && (peer->hasActiveDirectPath(RR->node->now())) ) {
							// We do: answer immediately with HAVE_PEER carrying the peer's identity
							Buffer<1024> buf;
							peer->identity().serialize(buf);
							Mutex::Lock _l2(_members[fromMemberId].lock);
							_send(fromMemberId,CLUSTER_MESSAGE_HAVE_PEER,buf.data(),buf.size());
							_flush(fromMemberId);
						}
					} break;

					// A query packet (WHOIS / MULTICAST_GATHER) forwarded from another member
					case CLUSTER_MESSAGE_REMOTE_PACKET: {
						const unsigned int plen = dmsg.at<uint16_t>(ptr); ptr += 2;
						if (plen) {
							Packet remotep(dmsg.field(ptr,plen),plen); ptr += plen;
							//TRACE("remote %s from %s via %u (%u bytes)",Packet::verbString(remotep.verb()),remotep.source().toString().c_str(),fromMemberId,plen);
							switch(remotep.verb()) {
								case Packet::VERB_WHOIS: _doREMOTE_WHOIS(fromMemberId,remotep); break;
								case Packet::VERB_MULTICAST_GATHER: _doREMOTE_MULTICAST_GATHER(fromMemberId,remotep); break;
								default: break; // ignore things we don't care about across cluster
							}
						}
					} break;

					// Sender asks us to attempt NAT traversal between one of our local peers
					// and a remote peer connected to the sender, by sending RENDEZVOUS both ways
					case CLUSTER_MESSAGE_PROXY_UNITE: {
						const Address localPeerAddress(dmsg.field(ptr,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH); ptr += ZT_ADDRESS_LENGTH;
						const Address remotePeerAddress(dmsg.field(ptr,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH); ptr += ZT_ADDRESS_LENGTH;
						const unsigned int numRemotePeerPaths = dmsg[ptr++];
						InetAddress remotePeerPaths[256]; // size is 8-bit, so 256 is max
						for(unsigned int i=0;i<numRemotePeerPaths;++i)
							ptr += remotePeerPaths[i].deserialize(dmsg,ptr);

						TRACE("[%u] requested that we unite local %s with remote %s",(unsigned int)fromMemberId,localPeerAddress.toString().c_str(),remotePeerAddress.toString().c_str());

						const uint64_t now = RR->node->now();
						SharedPtr<Peer> localPeer(RR->topology->getPeerNoCache(localPeerAddress));
						if ((localPeer)&&(numRemotePeerPaths > 0)) {
							InetAddress bestLocalV4,bestLocalV6;
							localPeer->getBestActiveAddresses(now,bestLocalV4,bestLocalV6);

							// Pick the first advertised remote path of each address family
							InetAddress bestRemoteV4,bestRemoteV6;
							for(unsigned int i=0;i<numRemotePeerPaths;++i) {
								if ((bestRemoteV4)&&(bestRemoteV6))
									break;
								switch(remotePeerPaths[i].ss_family) {
									case AF_INET:
										if (!bestRemoteV4)
											bestRemoteV4 = remotePeerPaths[i];
										break;
									case AF_INET6:
										if (!bestRemoteV6)
											bestRemoteV6 = remotePeerPaths[i];
										break;
								}
							}

							// RENDEZVOUS for our local peer is sent directly; the one for the
							// remote peer is wrapped in PROXY_SEND back to the requesting member
							Packet rendezvousForLocal(localPeerAddress,RR->identity.address(),Packet::VERB_RENDEZVOUS);
							rendezvousForLocal.append((uint8_t)0);
							remotePeerAddress.appendTo(rendezvousForLocal);

							Buffer<2048> rendezvousForRemote;
							remotePeerAddress.appendTo(rendezvousForRemote);
							rendezvousForRemote.append((uint8_t)Packet::VERB_RENDEZVOUS);
							rendezvousForRemote.addSize(2); // space for actual packet payload length
							rendezvousForRemote.append((uint8_t)0); // flags == 0
							localPeerAddress.appendTo(rendezvousForRemote);

							// Prefer IPv6 if both sides have it, else fall back to IPv4
							bool haveMatch = false;
							if ((bestLocalV6)&&(bestRemoteV6)) {
								haveMatch = true;

								rendezvousForLocal.append((uint16_t)bestRemoteV6.port());
								rendezvousForLocal.append((uint8_t)16);
								rendezvousForLocal.append(bestRemoteV6.rawIpData(),16);

								rendezvousForRemote.append((uint16_t)bestLocalV6.port());
								rendezvousForRemote.append((uint8_t)16);
								rendezvousForRemote.append(bestLocalV6.rawIpData(),16);
								rendezvousForRemote.setAt<uint16_t>(ZT_ADDRESS_LENGTH + 1,(uint16_t)(9 + 16));
							} else if ((bestLocalV4)&&(bestRemoteV4)) {
								haveMatch = true;

								rendezvousForLocal.append((uint16_t)bestRemoteV4.port());
								rendezvousForLocal.append((uint8_t)4);
								rendezvousForLocal.append(bestRemoteV4.rawIpData(),4);

								rendezvousForRemote.append((uint16_t)bestLocalV4.port());
								rendezvousForRemote.append((uint8_t)4);
								rendezvousForRemote.append(bestLocalV4.rawIpData(),4);
								rendezvousForRemote.setAt<uint16_t>(ZT_ADDRESS_LENGTH + 1,(uint16_t)(9 + 4));
							}

							if (haveMatch) {
								{
									Mutex::Lock _l2(_members[fromMemberId].lock);
									_send(fromMemberId,CLUSTER_MESSAGE_PROXY_SEND,rendezvousForRemote.data(),rendezvousForRemote.size());
									_flush(fromMemberId);
								}
								RR->sw->send(rendezvousForLocal,true,0);
							}
						}
					} break;

					// Sender asks us to originate a packet to one of our local peers on its behalf
					case CLUSTER_MESSAGE_PROXY_SEND: {
						const Address rcpt(dmsg.field(ptr,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH); ptr += ZT_ADDRESS_LENGTH;
						const Packet::Verb verb = (Packet::Verb)dmsg[ptr++];
						const unsigned int len = dmsg.at<uint16_t>(ptr); ptr += 2;
						Packet outp(rcpt,RR->identity.address(),verb);
						outp.append(dmsg.field(ptr,len),len); ptr += len;
						RR->sw->send(outp,true,0);
						//TRACE("[%u] proxy send %s to %s length %u",(unsigned int)fromMemberId,Packet::verbString(verb),rcpt.toString().c_str(),len);
					} break;
				}
			} catch ( ... ) {
				TRACE("invalid message of size %u type %d (inner decode), discarding",mlen,mtype);
				// drop invalids
			}

			// Advance via the length prefix regardless of how much the case consumed
			ptr = nextPtr;
		}
	} catch ( ... ) {
		TRACE("invalid message (outer loop), discarding");
		// drop invalids
	}
}
  323. void Cluster::sendViaCluster(const Address &fromPeerAddress,const Address &toPeerAddress,const void *data,unsigned int len,bool unite)
  324. {
  325. if (len > ZT_PROTO_MAX_PACKET_LENGTH) // sanity check
  326. return;
  327. _sendViaClusterQueue_m.lock();
  328. unsigned long queueCount;
  329. {
  330. std::map< Address,std::list<_SQE> >::const_iterator qe(_sendViaClusterQueue.find(fromPeerAddress));
  331. queueCount = (qe == _sendViaClusterQueue.end()) ? 0 : (unsigned long)qe->second.size();
  332. }
  333. _sendViaClusterQueue_m.unlock();
  334. if (queueCount > ZT_CLUSTER_MAX_QUEUE_PER_SENDER) {
  335. TRACE("dropping sendViaCluster for %s -> %s since queue for sender is full",fromPeerAddress.toString().c_str(),toPeerAddress.toString().c_str());
  336. return;
  337. }
  338. const uint64_t now = RR->node->now();
  339. uint64_t mostRecentTs = 0;
  340. unsigned int mostRecentMemberId = 0xffffffff;
  341. {
  342. Mutex::Lock _l2(_remotePeers_m);
  343. std::map< std::pair<Address,unsigned int>,uint64_t >::const_iterator rpe(_remotePeers.lower_bound(std::pair<Address,unsigned int>(fromPeerAddress,0)));
  344. for(;;) {
  345. if ((rpe == _remotePeers.end())||(rpe->first.first != fromPeerAddress))
  346. break;
  347. else if (rpe->second > mostRecentTs) {
  348. mostRecentTs = rpe->second;
  349. mostRecentMemberId = rpe->first.second;
  350. }
  351. }
  352. }
  353. const uint64_t age = now - mostRecentTs;
  354. if (age >= (ZT_PEER_ACTIVITY_TIMEOUT / 3)) {
  355. const bool enqueueAndWait = ((age >= ZT_PEER_ACTIVITY_TIMEOUT)||(mostRecentMemberId > 0xffff));
  356. // Poll everyone with WANT_PEER if the age of our most recent entry is
  357. // approaching expiration (or has expired, or does not exist).
  358. char tmp[ZT_ADDRESS_LENGTH];
  359. toPeerAddress.copyTo(tmp,ZT_ADDRESS_LENGTH);
  360. {
  361. Mutex::Lock _l(_memberIds_m);
  362. for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
  363. Mutex::Lock _l2(_members[*mid].lock);
  364. _send(*mid,CLUSTER_MESSAGE_WANT_PEER,tmp,ZT_ADDRESS_LENGTH);
  365. if ((enqueueAndWait)&&(queueCount == 0))
  366. _flush(*mid);
  367. }
  368. }
  369. // If there isn't a good place to send via, then enqueue this for retrying
  370. // later and return after having broadcasted a WANT_PEER.
  371. if (enqueueAndWait) {
  372. TRACE("sendViaCluster %s -> %s enqueueing to wait for HAVE_PEER",fromPeerAddress.toString().c_str(),toPeerAddress.toString().c_str());
  373. Mutex::Lock _l(_sendViaClusterQueue_m);
  374. _sendViaClusterQueue[fromPeerAddress].push_back(_SQE(now,toPeerAddress,data,len,unite));
  375. return;
  376. }
  377. }
  378. Buffer<1024> buf;
  379. if (unite) {
  380. InetAddress v4,v6;
  381. if (fromPeerAddress) {
  382. SharedPtr<Peer> fromPeer(RR->topology->getPeerNoCache(fromPeerAddress));
  383. if (fromPeer)
  384. fromPeer->getBestActiveAddresses(now,v4,v6);
  385. }
  386. uint8_t addrCount = 0;
  387. if (v4)
  388. ++addrCount;
  389. if (v6)
  390. ++addrCount;
  391. if (addrCount) {
  392. toPeerAddress.appendTo(buf);
  393. fromPeerAddress.appendTo(buf);
  394. buf.append(addrCount);
  395. if (v4)
  396. v4.serialize(buf);
  397. if (v6)
  398. v6.serialize(buf);
  399. }
  400. }
  401. {
  402. Mutex::Lock _l2(_members[mostRecentMemberId].lock);
  403. if (buf.size() > 0) {
  404. _send(mostRecentMemberId,CLUSTER_MESSAGE_PROXY_UNITE,buf.data(),buf.size());
  405. _flush(mostRecentMemberId);
  406. }
  407. if (_members[mostRecentMemberId].zeroTierPhysicalEndpoints.size() > 0) {
  408. TRACE("sendViaCluster relaying %u bytes from %s to %s by way of %u",len,fromPeerAddress.toString().c_str(),toPeerAddress.toString().c_str(),(unsigned int)mostRecentMemberId);
  409. RR->node->putPacket(InetAddress(),_members[mostRecentMemberId].zeroTierPhysicalEndpoints.front(),data,len);
  410. }
  411. }
  412. }
  413. void Cluster::sendDistributedQuery(const Packet &pkt)
  414. {
  415. Buffer<4096> buf;
  416. buf.append((uint16_t)pkt.size());
  417. buf.append(pkt.data(),pkt.size());
  418. Mutex::Lock _l(_memberIds_m);
  419. for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
  420. Mutex::Lock _l2(_members[*mid].lock);
  421. _send(*mid,CLUSTER_MESSAGE_REMOTE_PACKET,buf.data(),buf.size());
  422. _flush(*mid);
  423. }
  424. }
// Periodic housekeeping, called regularly from the service's main loop:
//  - expires queued sendViaCluster packets that never got a HAVE_PEER
//  - announces our liveness (ALIVE) to all members and flushes batched messages
//  - prunes expired remote peer affinity records
void Cluster::doPeriodicTasks()
{
	const uint64_t now = RR->node->now();

	if ((now - _lastFlushed) >= ZT_CLUSTER_FLUSH_PERIOD) {
		_lastFlushed = now;

		{
			// Drop queued relays older than ZT_CLUSTER_QUEUE_EXPIRATION, removing
			// per-sender queues that become empty
			Mutex::Lock _l2(_sendViaClusterQueue_m);
			for(std::map< Address,std::list<_SQE> >::iterator qi(_sendViaClusterQueue.begin());qi!=_sendViaClusterQueue.end();) {
				for(std::list<_SQE>::iterator qii(qi->second.begin());qii!=qi->second.end();) {
					if ((now - qii->timestamp) >= ZT_CLUSTER_QUEUE_EXPIRATION)
						qi->second.erase(qii++); // post-increment keeps iterator valid across erase
					else ++qii;
				}
				if (qi->second.empty())
					_sendViaClusterQueue.erase(qi++);
				else ++qi;
			}
		}

		Mutex::Lock _l(_memberIds_m);
		for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
			Mutex::Lock _l2(_members[*mid].lock);

			// Re-announce ALIVE slightly more often than twice per timeout window
			// so healthy members are never seen as expired
			if ((now - _members[*mid].lastAnnouncedAliveTo) >= ((ZT_CLUSTER_TIMEOUT / 2) - 1000)) {
				_members[*mid].lastAnnouncedAliveTo = now;

				// ALIVE payload: version (3x u16 + proto u8), x/y/z, local clock,
				// load, active peer count, flags, then our physical endpoints
				Buffer<2048> alive;
				alive.append((uint16_t)ZEROTIER_ONE_VERSION_MAJOR);
				alive.append((uint16_t)ZEROTIER_ONE_VERSION_MINOR);
				alive.append((uint16_t)ZEROTIER_ONE_VERSION_REVISION);
				alive.append((uint8_t)ZT_PROTO_VERSION);
				if (_addressToLocationFunction) {
					alive.append((int32_t)_x);
					alive.append((int32_t)_y);
					alive.append((int32_t)_z);
				} else {
					// No geo capability: 0,0,0 is treated as "no location" by readers
					alive.append((int32_t)0);
					alive.append((int32_t)0);
					alive.append((int32_t)0);
				}
				alive.append((uint64_t)now);
				alive.append((uint64_t)0); // TODO: compute and send load average
				alive.append((uint64_t)RR->topology->countActive());
				alive.append((uint64_t)0); // unused/reserved flags
				alive.append((uint8_t)_zeroTierPhysicalEndpoints.size());
				for(std::vector<InetAddress>::const_iterator pe(_zeroTierPhysicalEndpoints.begin());pe!=_zeroTierPhysicalEndpoints.end();++pe)
					pe->serialize(alive);
				_send(*mid,CLUSTER_MESSAGE_ALIVE,alive.data(),alive.size());
			}

			_flush(*mid); // push out anything still batched for this member
		}
	}

	if ((now - _lastCleanedRemotePeers) >= (ZT_PEER_ACTIVITY_TIMEOUT * 2)) {
		_lastCleanedRemotePeers = now;

		// Forget which members had which peers once those records go stale
		Mutex::Lock _l(_remotePeers_m);
		for(std::map< std::pair<Address,unsigned int>,uint64_t >::iterator rp(_remotePeers.begin());rp!=_remotePeers.end();) {
			if ((now - rp->second) >= ZT_PEER_ACTIVITY_TIMEOUT)
				_remotePeers.erase(rp++);
			else ++rp;
		}
	}
}
// Register another member of this cluster and derive the outbound message key
// used to encrypt state messages we send to it.
// memberId: the other member's cluster ID; ignored if out of range, equal to
// our own ID, or already registered (idempotent).
void Cluster::addMember(uint16_t memberId)
{
	if ((memberId >= ZT_CLUSTER_MAX_MEMBERS)||(memberId == _id))
		return;

	// Member lock is taken before _memberIds_m (same order as other callers)
	Mutex::Lock _l2(_members[memberId].lock);

	{
		Mutex::Lock _l(_memberIds_m);
		if (std::find(_memberIds.begin(),_memberIds.end(),memberId) != _memberIds.end())
			return;
		_memberIds.push_back(memberId);
		std::sort(_memberIds.begin(),_memberIds.end()); // keep ID list sorted
	}

	_members[memberId].clear();

	// Generate this member's message key from the master and its ID
	// (mirrors the inbound key derivation in the constructor: XOR the ID into
	// the master secret, then SHA512 twice)
	uint16_t stmp[ZT_SHA512_DIGEST_LEN / sizeof(uint16_t)];
	memcpy(stmp,_masterSecret,sizeof(stmp));
	stmp[0] ^= Utils::hton(memberId);
	SHA512::hash(stmp,stmp,sizeof(stmp));
	SHA512::hash(stmp,stmp,sizeof(stmp));
	memcpy(_members[memberId].key,stmp,sizeof(_members[memberId].key));
	Utils::burn(stmp,sizeof(stmp)); // wipe key material from the stack

	// Prepare q: <[16] random IV><[8] room for MAC><[2] from member ID><[2] to member ID>
	_members[memberId].q.clear();
	char iv[16];
	Utils::getSecureRandom(iv,16);
	_members[memberId].q.append(iv,16);
	_members[memberId].q.addSize(8); // room for MAC
	_members[memberId].q.append((uint16_t)_id);
	_members[memberId].q.append((uint16_t)memberId);
}
  514. void Cluster::removeMember(uint16_t memberId)
  515. {
  516. Mutex::Lock _l(_memberIds_m);
  517. std::vector<uint16_t> newMemberIds;
  518. for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
  519. if (*mid != memberId)
  520. newMemberIds.push_back(*mid);
  521. }
  522. _memberIds = newMemberIds;
  523. }
// Determine whether another cluster member is geographically closer to a peer
// and, if so, pick one of that member's endpoints to redirect the peer to.
//
// redirectTo: (output) set to the better endpoint when true is returned
// peerAddress: ZeroTier address of the peer (used for trace output)
// peerPhysicalAddress: the peer's physical IP, geo-located via the callback
// offload: if true, consider any live located member (starting from an
//          effectively infinite distance) instead of only members strictly
//          closer to the peer than we are
// Returns true if redirectTo was filled in with a better endpoint.
bool Cluster::findBetterEndpoint(InetAddress &redirectTo,const Address &peerAddress,const InetAddress &peerPhysicalAddress,bool offload)
{
	if (_addressToLocationFunction) {
		// Pick based on location if it can be determined
		// (the callback returns zero when it has no coordinates for this address)
		int px = 0,py = 0,pz = 0;
		if (_addressToLocationFunction(_addressToLocationFunctionArg,reinterpret_cast<const struct sockaddr_storage *>(&peerPhysicalAddress),&px,&py,&pz) == 0) {
			TRACE("no geolocation data for %s (geo-lookup is lazy/async so it may work next time)",peerPhysicalAddress.toIpString().c_str());
			return false;
		}

		// Find member closest to this peer
		const uint64_t now = RR->node->now();
		std::vector<InetAddress> best;
		const double currentDistance = _dist3d(_x,_y,_z,px,py,pz);
		double bestDistance = (offload ? 2147483648.0 : currentDistance);
		unsigned int bestMember = _id;
		{
			Mutex::Lock _l(_memberIds_m);
			for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
				_Member &m = _members[*mid];
				Mutex::Lock _ml(m.lock);

				// Consider member if it's alive and has sent us a location and one or more physical endpoints to send peers to
				// (0,0,0 is the "no location" sentinel sent by members without geo capability)
				if ( ((now - m.lastReceivedAliveAnnouncement) < ZT_CLUSTER_TIMEOUT) && ((m.x != 0)||(m.y != 0)||(m.z != 0)) && (m.zeroTierPhysicalEndpoints.size() > 0) ) {
					const double mdist = _dist3d(m.x,m.y,m.z,px,py,pz);
					if (mdist < bestDistance) {
						bestDistance = mdist;
						bestMember = *mid;
						best = m.zeroTierPhysicalEndpoints;
					}
				}
			}
		}

		// Redirect to a closer member if it has a ZeroTier endpoint address in the same ss_family
		for(std::vector<InetAddress>::const_iterator a(best.begin());a!=best.end();++a) {
			if (a->ss_family == peerPhysicalAddress.ss_family) {
				TRACE("%s at [%d,%d,%d] is %f from us but %f from %u, can redirect to %s",peerAddress.toString().c_str(),px,py,pz,currentDistance,bestDistance,bestMember,a->toString().c_str());
				redirectTo = *a;
				return true;
			}
		}

		TRACE("%s at [%d,%d,%d] is %f from us, no better endpoints found",peerAddress.toString().c_str(),px,py,pz,currentDistance);
		return false;
	} else {
		// TODO: pick based on load if no location info?
		return false;
	}
}
  570. void Cluster::status(ZT_ClusterStatus &status) const
  571. {
  572. const uint64_t now = RR->node->now();
  573. memset(&status,0,sizeof(ZT_ClusterStatus));
  574. status.myId = _id;
  575. {
  576. ZT_ClusterMemberStatus *const s = &(status.members[status.clusterSize++]);
  577. s->id = _id;
  578. s->alive = 1;
  579. s->x = _x;
  580. s->y = _y;
  581. s->z = _z;
  582. s->load = 0; // TODO
  583. s->peers = RR->topology->countActive();
  584. for(std::vector<InetAddress>::const_iterator ep(_zeroTierPhysicalEndpoints.begin());ep!=_zeroTierPhysicalEndpoints.end();++ep) {
  585. if (s->numZeroTierPhysicalEndpoints >= ZT_CLUSTER_MAX_ZT_PHYSICAL_ADDRESSES) // sanity check
  586. break;
  587. memcpy(&(s->zeroTierPhysicalEndpoints[s->numZeroTierPhysicalEndpoints++]),&(*ep),sizeof(struct sockaddr_storage));
  588. }
  589. }
  590. {
  591. Mutex::Lock _l1(_memberIds_m);
  592. for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
  593. if (status.clusterSize >= ZT_CLUSTER_MAX_MEMBERS) // sanity check
  594. break;
  595. _Member &m = _members[*mid];
  596. Mutex::Lock ml(m.lock);
  597. ZT_ClusterMemberStatus *const s = &(status.members[status.clusterSize++]);
  598. s->id = *mid;
  599. s->msSinceLastHeartbeat = (unsigned int)std::min((uint64_t)(~((unsigned int)0)),(now - m.lastReceivedAliveAnnouncement));
  600. s->alive = (s->msSinceLastHeartbeat < ZT_CLUSTER_TIMEOUT) ? 1 : 0;
  601. s->x = m.x;
  602. s->y = m.y;
  603. s->z = m.z;
  604. s->load = m.load;
  605. s->peers = m.peers;
  606. for(std::vector<InetAddress>::const_iterator ep(m.zeroTierPhysicalEndpoints.begin());ep!=m.zeroTierPhysicalEndpoints.end();++ep) {
  607. if (s->numZeroTierPhysicalEndpoints >= ZT_CLUSTER_MAX_ZT_PHYSICAL_ADDRESSES) // sanity check
  608. break;
  609. memcpy(&(s->zeroTierPhysicalEndpoints[s->numZeroTierPhysicalEndpoints++]),&(*ep),sizeof(struct sockaddr_storage));
  610. }
  611. }
  612. }
  613. }
  614. void Cluster::_send(uint16_t memberId,StateMessageType type,const void *msg,unsigned int len)
  615. {
  616. if ((len + 3) > (ZT_CLUSTER_MAX_MESSAGE_LENGTH - (24 + 2 + 2))) // sanity check
  617. return;
  618. _Member &m = _members[memberId];
  619. // assumes m.lock is locked!
  620. if ((m.q.size() + len + 3) > ZT_CLUSTER_MAX_MESSAGE_LENGTH)
  621. _flush(memberId);
  622. m.q.append((uint16_t)(len + 1));
  623. m.q.append((uint8_t)type);
  624. m.q.append(msg,len);
  625. }
// Encrypt, authenticate, and transmit a member's batched state messages.
// Caller MUST already hold _members[memberId].lock.
// Output format: <[16] IV><[8] MAC><[2] from member ID><[2] to member ID><... data>,
// where everything after the MAC field is encrypted and then MACed.
void Cluster::_flush(uint16_t memberId)
{
	_Member &m = _members[memberId];
	// assumes m.lock is locked!
	if (m.q.size() > (24 + 2 + 2)) { // 16-byte IV + 8-byte MAC + 2 byte from-member-ID + 2 byte to-member-ID
		// Create key from member's key and IV: first 8 IV bytes are XORed into
		// the key, last 8 IV bytes become the Salsa20 64-bit nonce (mirrors the
		// decrypt side in handleIncomingStateMessage())
		char keytmp[32];
		memcpy(keytmp,m.key,32);
		for(int i=0;i<8;++i)
			keytmp[i] ^= m.q[i];
		Salsa20 s20(keytmp,256,m.q.field(8,8));
		Utils::burn(keytmp,sizeof(keytmp)); // wipe derived key from the stack

		// One-time-use Poly1305 key from first 32 bytes of Salsa20 keystream (as per DJB/NaCl "standard")
		char polykey[ZT_POLY1305_KEY_LEN];
		memset(polykey,0,sizeof(polykey));
		s20.encrypt12(polykey,polykey,sizeof(polykey));

		// Encrypt m.q in place
		s20.encrypt12(reinterpret_cast<const char *>(m.q.data()) + 24,const_cast<char *>(reinterpret_cast<const char *>(m.q.data())) + 24,m.q.size() - 24);

		// Add MAC for authentication (encrypt-then-MAC); only the first 8 MAC
		// bytes are placed in the message
		char mac[ZT_POLY1305_MAC_LEN];
		Poly1305::compute(mac,reinterpret_cast<const char *>(m.q.data()) + 24,m.q.size() - 24,polykey);
		memcpy(m.q.field(16,8),mac,8);

		// Send!
		_sendFunction(_sendFunctionArg,memberId,m.q.data(),m.q.size());

		// Prepare for more: restart the batch with a fresh random IV and header
		m.q.clear();
		char iv[16];
		Utils::getSecureRandom(iv,16);
		m.q.append(iv,16);
		m.q.addSize(8); // room for MAC
		m.q.append((uint16_t)_id); // from member ID
		m.q.append((uint16_t)memberId); // to member ID
	}
}
  660. void Cluster::_doREMOTE_WHOIS(uint64_t fromMemberId,const Packet &remotep)
  661. {
  662. if (remotep.payloadLength() >= ZT_ADDRESS_LENGTH) {
  663. Identity queried(RR->topology->getIdentity(Address(remotep.payload(),ZT_ADDRESS_LENGTH)));
  664. if (queried) {
  665. Buffer<1024> routp;
  666. remotep.source().appendTo(routp);
  667. routp.append((uint8_t)Packet::VERB_OK);
  668. routp.addSize(2); // space for length
  669. routp.append((uint8_t)Packet::VERB_WHOIS);
  670. routp.append(remotep.packetId());
  671. queried.serialize(routp);
  672. routp.setAt<uint16_t>(ZT_ADDRESS_LENGTH + 1,(uint16_t)(routp.size() - ZT_ADDRESS_LENGTH - 3));
  673. TRACE("responding to remote WHOIS from %s @ %u with identity of %s",remotep.source().toString().c_str(),(unsigned int)fromMemberId,queried.address().toString().c_str());
  674. Mutex::Lock _l2(_members[fromMemberId].lock);
  675. _send(fromMemberId,CLUSTER_MESSAGE_PROXY_SEND,routp.data(),routp.size());
  676. _flush(fromMemberId);
  677. }
  678. }
  679. }
// Handle a MULTICAST_GATHER relayed from another member: gather known members
// of the multicast group and reply to the requesting peer with
// OK(MULTICAST_GATHER) delivered via PROXY_SEND through the relaying member.
void Cluster::_doREMOTE_MULTICAST_GATHER(uint64_t fromMemberId,const Packet &remotep)
{
	const uint64_t nwid = remotep.at<uint64_t>(ZT_PROTO_VERB_MULTICAST_GATHER_IDX_NETWORK_ID);
	const MulticastGroup mg(MAC(remotep.field(ZT_PROTO_VERB_MULTICAST_GATHER_IDX_MAC,6),6),remotep.at<uint32_t>(ZT_PROTO_VERB_MULTICAST_GATHER_IDX_ADI));
	unsigned int gatherLimit = remotep.at<uint32_t>(ZT_PROTO_VERB_MULTICAST_GATHER_IDX_GATHER_LIMIT);
	const Address remotePeerAddress(remotep.source());

	if (gatherLimit) {
		// PROXY_SEND payload: <[5] destination><[1] verb><[2] length><... OK payload>
		Buffer<ZT_PROTO_MAX_PACKET_LENGTH> routp;
		remotePeerAddress.appendTo(routp);
		routp.append((uint8_t)Packet::VERB_OK);
		routp.addSize(2); // space for length
		routp.append((uint8_t)Packet::VERB_MULTICAST_GATHER);
		routp.append(remotep.packetId());
		routp.append(nwid);
		mg.mac().appendTo(routp);
		routp.append((uint32_t)mg.adi());

		// Cap the gather limit so the reply still fits in a cluster message
		if (gatherLimit > ((ZT_CLUSTER_MAX_MESSAGE_LENGTH - 80) / 5))
			gatherLimit = ((ZT_CLUSTER_MAX_MESSAGE_LENGTH - 80) / 5);

		// Only reply if we actually gathered something
		if (RR->mc->gather(remotePeerAddress,nwid,mg,routp,gatherLimit)) {
			routp.setAt<uint16_t>(ZT_ADDRESS_LENGTH + 1,(uint16_t)(routp.size() - ZT_ADDRESS_LENGTH - 3));
			TRACE("responding to remote MULTICAST_GATHER from %s @ %u with %u bytes",remotePeerAddress.toString().c_str(),(unsigned int)fromMemberId,routp.size());
			Mutex::Lock _l2(_members[fromMemberId].lock);
			_send(fromMemberId,CLUSTER_MESSAGE_PROXY_SEND,routp.data(),routp.size());
		}
	}
}
  706. } // namespace ZeroTier
  707. #endif // ZT_ENABLE_CLUSTER