client.go
package mux

import (
	"context"
	goerrors "errors"
	"io"
	"sync"
	"time"

	"github.com/xtls/xray-core/common"
	"github.com/xtls/xray-core/common/buf"
	"github.com/xtls/xray-core/common/errors"
	"github.com/xtls/xray-core/common/net"
	"github.com/xtls/xray-core/common/protocol"
	"github.com/xtls/xray-core/common/session"
	"github.com/xtls/xray-core/common/signal/done"
	"github.com/xtls/xray-core/common/task"
	"github.com/xtls/xray-core/common/xudp"
	"github.com/xtls/xray-core/proxy"
	"github.com/xtls/xray-core/transport"
	"github.com/xtls/xray-core/transport/internet"
	"github.com/xtls/xray-core/transport/pipe"
)

type ClientManager struct {
	Enabled bool // whether mux is enabled from user config
	Picker  WorkerPicker
}

func (m *ClientManager) Dispatch(ctx context.Context, link *transport.Link) error {
	// Try a bounded number of times to find (or create) a worker that accepts the link.
	for i := 0; i < 16; i++ {
		worker, err := m.Picker.PickAvailable()
		if err != nil {
			return err
		}
		if worker.Dispatch(ctx, link) {
			return nil
		}
	}
	return errors.New("unable to find an available mux client").AtWarning()
}
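// A minimal wiring sketch (not part of this file: the proxy.Outbound and
// internet.Dialer values and the strategy numbers below are placeholder
// assumptions supplied by the caller):
//
//	picker := &IncrementalWorkerPicker{
//		Factory: &DialingWorkerFactory{
//			Proxy:    outboundProxy, // hypothetical proxy.Outbound
//			Dialer:   systemDialer,  // hypothetical internet.Dialer
//			Strategy: ClientStrategy{MaxConcurrency: 8, MaxConnection: 128},
//		},
//	}
//	manager := &ClientManager{Enabled: true, Picker: picker}
//	err := manager.Dispatch(ctx, link) // multiplexes one link onto a worker
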
type WorkerPicker interface {
	PickAvailable() (*ClientWorker, error)
}

type IncrementalWorkerPicker struct {
	Factory ClientWorkerFactory

	access      sync.Mutex
	workers     []*ClientWorker
	cleanupTask *task.Periodic
}

func (p *IncrementalWorkerPicker) cleanupFunc() error {
	p.access.Lock()
	defer p.access.Unlock()

	if len(p.workers) == 0 {
		return errors.New("no worker")
	}

	p.cleanup()
	return nil
}

// cleanup drops closed workers. The caller must hold p.access.
func (p *IncrementalWorkerPicker) cleanup() {
	var activeWorkers []*ClientWorker
	for _, w := range p.workers {
		if !w.Closed() {
			activeWorkers = append(activeWorkers, w)
		}
	}
	p.workers = activeWorkers
}

func (p *IncrementalWorkerPicker) findAvailable() int {
	for idx, w := range p.workers {
		if !w.IsFull() {
			return idx
		}
	}
	return -1
}
func (p *IncrementalWorkerPicker) pickInternal() (*ClientWorker, bool, error) {
	p.access.Lock()
	defer p.access.Unlock()

	idx := p.findAvailable()
	if idx >= 0 {
		// Capture the available worker before swapping it to the end of the
		// slice, so the swap cannot change which worker is returned.
		worker := p.workers[idx]
		n := len(p.workers)
		if n > 1 && idx != n-1 {
			p.workers[n-1], p.workers[idx] = p.workers[idx], p.workers[n-1]
		}
		return worker, false, nil
	}

	p.cleanup()

	worker, err := p.Factory.Create()
	if err != nil {
		return nil, false, err
	}
	p.workers = append(p.workers, worker)

	if p.cleanupTask == nil {
		p.cleanupTask = &task.Periodic{
			Interval: time.Second * 30,
			Execute:  p.cleanupFunc,
		}
	}

	return worker, true, nil
}
func (p *IncrementalWorkerPicker) PickAvailable() (*ClientWorker, error) {
	worker, start, err := p.pickInternal()
	if start {
		common.Must(p.cleanupTask.Start())
	}

	return worker, err
}

type ClientWorkerFactory interface {
	Create() (*ClientWorker, error)
}
type DialingWorkerFactory struct {
	Proxy    proxy.Outbound
	Dialer   internet.Dialer
	Strategy ClientStrategy
}

func (f *DialingWorkerFactory) Create() (*ClientWorker, error) {
	opts := []pipe.Option{pipe.WithSizeLimit(64 * 1024)}
	uplinkReader, uplinkWriter := pipe.New(opts...)
	downlinkReader, downlinkWriter := pipe.New(opts...)

	c, err := NewClientWorker(transport.Link{
		Reader: downlinkReader,
		Writer: uplinkWriter,
	}, f.Strategy)
	if err != nil {
		return nil, err
	}

	go func(p proxy.Outbound, d internet.Dialer, c common.Closable) {
		outbounds := []*session.Outbound{{
			Target: net.TCPDestination(muxCoolAddress, muxCoolPort),
		}}
		ctx := session.ContextWithOutbounds(context.Background(), outbounds)
		ctx, cancel := context.WithCancel(ctx)

		if errP := p.Process(ctx, &transport.Link{Reader: uplinkReader, Writer: downlinkWriter}, d); errP != nil {
			errC := errors.Cause(errP)
			if !(goerrors.Is(errC, io.EOF) || goerrors.Is(errC, io.ErrClosedPipe) || goerrors.Is(errC, context.Canceled)) {
				errors.LogInfoInner(ctx, errP, "failed to handle mux client connection")
			}
		}
		common.Must(c.Close())
		cancel()
	}(f.Proxy, f.Dialer, c.done)

	return c, nil
}
type ClientStrategy struct {
	// MaxConcurrency caps the number of simultaneously active sessions on one
	// worker (see IsFull); 0 means no limit.
	MaxConcurrency uint32
	// MaxConnection caps the total number of sessions a worker handles over
	// its lifetime (see IsClosing); 0 means no limit.
	MaxConnection uint32
}
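// As a sketch of how the two limits interact (the values are illustrative
// assumptions, not defaults): with the strategy below, IsFull reports true
// once 8 sessions are active at the same time, and IsClosing permanently
// retires the worker after it has carried 128 sessions in total.
//
//	strategy := ClientStrategy{MaxConcurrency: 8, MaxConnection: 128}
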
type ClientWorker struct {
	sessionManager *SessionManager
	link           transport.Link
	done           *done.Instance
	timer          *time.Ticker
	strategy       ClientStrategy
}

var (
	muxCoolAddress = net.DomainAddress("v1.mux.cool")
	muxCoolPort    = net.Port(9527)
)

// NewClientWorker creates a new mux.Client.
func NewClientWorker(stream transport.Link, s ClientStrategy) (*ClientWorker, error) {
	c := &ClientWorker{
		sessionManager: NewSessionManager(),
		link:           stream,
		done:           done.New(),
		timer:          time.NewTicker(time.Second * 16),
		strategy:       s,
	}

	go c.fetchOutput()
	go c.monitor()

	return c, nil
}
func (m *ClientWorker) TotalConnections() uint32 {
	return uint32(m.sessionManager.Count())
}

func (m *ClientWorker) ActiveConnections() uint32 {
	return uint32(m.sessionManager.Size())
}

// Closed returns true if this Client is closed.
func (m *ClientWorker) Closed() bool {
	return m.done.Done()
}

func (m *ClientWorker) WaitClosed() <-chan struct{} {
	return m.done.Wait()
}

func (m *ClientWorker) Close() error {
	return m.done.Close()
}

func (m *ClientWorker) monitor() {
	defer m.timer.Stop()

	for {
		checkSize := m.sessionManager.Size()
		checkCount := m.sessionManager.Count()
		select {
		case <-m.done.Wait():
			m.sessionManager.Close()
			common.Interrupt(m.link.Writer)
			common.Interrupt(m.link.Reader)
			return
		case <-m.timer.C:
			if m.sessionManager.CloseIfNoSessionAndIdle(checkSize, checkCount) {
				common.Must(m.done.Close())
			}
		}
	}
}
// writeFirstPayload waits briefly for the session's first chunk of data so it
// can be flushed together with the session's opening frame; if nothing arrives
// within 100ms (or the reader cannot time out), an empty MultiBuffer is
// written to flush the frame header on its own.
func writeFirstPayload(reader buf.Reader, writer *Writer) error {
	err := buf.CopyOnceTimeout(reader, writer, time.Millisecond*100)
	if err == buf.ErrNotTimeoutReader || err == buf.ErrReadTimeout {
		return writer.WriteMultiBuffer(buf.MultiBuffer{})
	}

	if err != nil {
		return err
	}

	return nil
}
func fetchInput(ctx context.Context, s *Session, output buf.Writer) {
	outbounds := session.OutboundsFromContext(ctx)
	ob := outbounds[len(outbounds)-1]
	transferType := protocol.TransferTypeStream
	if ob.Target.Network == net.Network_UDP {
		transferType = protocol.TransferTypePacket
	}
	s.transferType = transferType

	var inbound *session.Inbound
	if session.IsReverseMuxFromContext(ctx) {
		inbound = session.InboundFromContext(ctx)
	}
	writer := NewWriter(s.ID, ob.Target, output, transferType, xudp.GetGlobalID(ctx), inbound)
	defer s.Close(false)
	defer writer.Close()

	errors.LogInfo(ctx, "dispatching request to ", ob.Target)
	if err := writeFirstPayload(s.input, writer); err != nil {
		errors.LogInfoInner(ctx, err, "failed to write first payload")
		writer.hasError = true
		return
	}

	if err := buf.Copy(s.input, writer); err != nil {
		errors.LogInfoInner(ctx, err, "failed to fetch all input")
		writer.hasError = true
		return
	}
}
func (m *ClientWorker) IsClosing() bool {
	sm := m.sessionManager
	if m.strategy.MaxConnection > 0 && sm.Count() >= int(m.strategy.MaxConnection) {
		return true
	}
	return false
}

// IsFull returns true if this ClientWorker is unable to accept more connections.
// It might be because the worker is closing, or because the number of
// connections has reached the limit.
func (m *ClientWorker) IsFull() bool {
	if m.IsClosing() || m.Closed() {
		return true
	}

	sm := m.sessionManager
	if m.strategy.MaxConcurrency > 0 && sm.Size() >= int(m.strategy.MaxConcurrency) {
		return true
	}
	return false
}
func (m *ClientWorker) Dispatch(ctx context.Context, link *transport.Link) bool {
	if m.IsFull() {
		return false
	}

	sm := m.sessionManager
	s := sm.Allocate(&m.strategy)
	if s == nil {
		return false
	}
	s.input = link.Reader
	s.output = link.Writer
	go fetchInput(ctx, s, m.link.Writer)

	// For readers that are not pipes, block until the session ends (or the
	// context is canceled) before handing control back to the caller.
	if _, ok := link.Reader.(*pipe.Reader); !ok {
		select {
		case <-ctx.Done():
		case <-s.done.Wait():
		}
	}
	return true
}
func (m *ClientWorker) handleStatusKeepAlive(meta *FrameMetadata, reader *buf.BufferedReader) error {
	if meta.Option.Has(OptionData) {
		return buf.Copy(NewStreamReader(reader), buf.Discard)
	}
	return nil
}
func (m *ClientWorker) handleStatusNew(meta *FrameMetadata, reader *buf.BufferedReader) error {
	if meta.Option.Has(OptionData) {
		return buf.Copy(NewStreamReader(reader), buf.Discard)
	}
	return nil
}

func (m *ClientWorker) handleStatusKeep(meta *FrameMetadata, reader *buf.BufferedReader) error {
	if !meta.Option.Has(OptionData) {
		return nil
	}

	s, found := m.sessionManager.Get(meta.SessionID)
	if !found {
		// Notify remote peer to close this session.
		closingWriter := NewResponseWriter(meta.SessionID, m.link.Writer, protocol.TransferTypeStream)
		closingWriter.Close()

		return buf.Copy(NewStreamReader(reader), buf.Discard)
	}

	rr := s.NewReader(reader, &meta.Target)
	err := buf.Copy(rr, s.output)
	if err != nil && buf.IsWriteError(err) {
		errors.LogInfoInner(context.Background(), err, "failed to write to downstream. closing session ", s.ID)
		s.Close(false)
		return buf.Copy(rr, buf.Discard)
	}

	return err
}

func (m *ClientWorker) handleStatusEnd(meta *FrameMetadata, reader *buf.BufferedReader) error {
	if s, found := m.sessionManager.Get(meta.SessionID); found {
		s.Close(false)
	}
	if meta.Option.Has(OptionData) {
		return buf.Copy(NewStreamReader(reader), buf.Discard)
	}
	return nil
}
func (m *ClientWorker) fetchOutput() {
	defer func() {
		common.Must(m.done.Close())
	}()

	reader := &buf.BufferedReader{Reader: m.link.Reader}

	var meta FrameMetadata
	for {
		err := meta.Unmarshal(reader, false)
		if err != nil {
			if errors.Cause(err) != io.EOF {
				errors.LogInfoInner(context.Background(), err, "failed to read metadata")
			}
			break
		}

		switch meta.SessionStatus {
		case SessionStatusKeepAlive:
			err = m.handleStatusKeepAlive(&meta, reader)
		case SessionStatusEnd:
			err = m.handleStatusEnd(&meta, reader)
		case SessionStatusNew:
			err = m.handleStatusNew(&meta, reader)
		case SessionStatusKeep:
			err = m.handleStatusKeep(&meta, reader)
		default:
			status := meta.SessionStatus
			errors.LogError(context.Background(), "unknown status: ", status)
			return
		}

		if err != nil {
			errors.LogInfoInner(context.Background(), err, "failed to process data")
			return
		}
	}
}