client.go

package mux

import (
	"context"
	"io"
	"sync"
	"time"

	"github.com/xtls/xray-core/common"
	"github.com/xtls/xray-core/common/buf"
	"github.com/xtls/xray-core/common/errors"
	"github.com/xtls/xray-core/common/net"
	"github.com/xtls/xray-core/common/protocol"
	"github.com/xtls/xray-core/common/session"
	"github.com/xtls/xray-core/common/signal/done"
	"github.com/xtls/xray-core/common/task"
	"github.com/xtls/xray-core/common/xudp"
	"github.com/xtls/xray-core/proxy"
	"github.com/xtls/xray-core/transport"
	"github.com/xtls/xray-core/transport/internet"
	"github.com/xtls/xray-core/transport/pipe"
)
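
// ClientManager dispatches outbound links onto multiplexed client workers,
// asking the configured WorkerPicker repeatedly until a worker accepts the
// link. A typical wiring looks like the sketch below (a sketch only; the
// outboundHandler and dialer values are assumptions, not defined in this
// file):
//
//	manager := &ClientManager{
//		Enabled: true,
//		Picker: &IncrementalWorkerPicker{
//			Factory: &DialingWorkerFactory{
//				Proxy:    outboundHandler, // proxy.Outbound carrying the mux stream
//				Dialer:   dialer,          // internet.Dialer for the carrier connection
//				Strategy: ClientStrategy{MaxConcurrency: 8, MaxConnection: 128},
//			},
//		},
//	}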
type ClientManager struct {
	Enabled bool // whether mux is enabled from user config
	Picker  WorkerPicker
}

func (m *ClientManager) Dispatch(ctx context.Context, link *transport.Link) error {
	for i := 0; i < 16; i++ {
		worker, err := m.Picker.PickAvailable()
		if err != nil {
			return err
		}
		if worker.Dispatch(ctx, link) {
			return nil
		}
	}

	return errors.New("unable to find an available mux client").AtWarning()
}
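
// WorkerPicker selects a ClientWorker that can accept one more sub-connection.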
type WorkerPicker interface {
	PickAvailable() (*ClientWorker, error)
}

// IncrementalWorkerPicker reuses an existing non-full worker when possible and
// creates a new one through Factory otherwise; closed workers are pruned by a
// periodic cleanup task.
type IncrementalWorkerPicker struct {
	Factory ClientWorkerFactory

	access      sync.Mutex
	workers     []*ClientWorker
	cleanupTask *task.Periodic
}
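
// cleanupFunc is the body of the periodic cleanup task: it prunes closed
// workers, and returns an error once the pool is empty, which a task.Periodic
// treats as a signal to stop rescheduling itself.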
func (p *IncrementalWorkerPicker) cleanupFunc() error {
	p.access.Lock()
	defer p.access.Unlock()

	if len(p.workers) == 0 {
		return errors.New("no worker")
	}

	p.cleanup()
	return nil
}

// cleanup removes closed workers from the pool. The caller must hold p.access.
func (p *IncrementalWorkerPicker) cleanup() {
	var activeWorkers []*ClientWorker
	for _, w := range p.workers {
		if !w.Closed() {
			activeWorkers = append(activeWorkers, w)
		}
	}
	p.workers = activeWorkers
}

// findAvailable returns the index of the first worker that can take another
// sub-connection, or -1 if every worker is full. The caller must hold p.access.
func (p *IncrementalWorkerPicker) findAvailable() int {
	for idx, w := range p.workers {
		if !w.IsFull() {
			return idx
		}
	}

	return -1
}
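
// pickInternal returns an existing non-full worker if one exists, swapping it
// to the tail of the slice so later scans skip the full workers sooner. When
// every worker is full, closed workers are pruned and a fresh worker is
// created. The second return value reports whether a new worker was created,
// in which case the caller must start the cleanup task.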
func (p *IncrementalWorkerPicker) pickInternal() (*ClientWorker, bool, error) {
	p.access.Lock()
	defer p.access.Unlock()

	idx := p.findAvailable()
	if idx >= 0 {
		n := len(p.workers)
		if n > 1 && idx != n-1 {
			// Move the available worker to the tail, and update idx so the
			// available worker is returned rather than the displaced one.
			p.workers[n-1], p.workers[idx] = p.workers[idx], p.workers[n-1]
			idx = n - 1
		}
		return p.workers[idx], false, nil
	}

	p.cleanup()

	worker, err := p.Factory.Create()
	if err != nil {
		return nil, false, err
	}
	p.workers = append(p.workers, worker)

	if p.cleanupTask == nil {
		p.cleanupTask = &task.Periodic{
			Interval: time.Second * 30,
			Execute:  p.cleanupFunc,
		}
	}

	return worker, true, nil
}

// PickAvailable returns a worker with spare capacity, creating one when
// necessary and starting the periodic cleanup task when a fresh worker
// was created.
func (p *IncrementalWorkerPicker) PickAvailable() (*ClientWorker, error) {
	worker, start, err := p.pickInternal()
	if start {
		common.Must(p.cleanupTask.Start())
	}

	return worker, err
}
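
// ClientWorkerFactory creates ClientWorkers on demand.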
type ClientWorkerFactory interface {
	Create() (*ClientWorker, error)
}

// DialingWorkerFactory creates workers whose mux stream is carried over a new
// outbound connection made through Proxy and Dialer.
type DialingWorkerFactory struct {
	Proxy    proxy.Outbound
	Dialer   internet.Dialer
	Strategy ClientStrategy
}
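
// Create wires two in-memory pipes between the worker and the outbound proxy:
//
//	worker (sub-connection uplink)  -> uplinkWriter   | uplinkReader   -> proxy
//	proxy (carrier downlink)        -> downlinkWriter | downlinkReader -> worker.fetchOutput
//
// The proxy runs in its own goroutine; when Process returns, the worker's done
// instance is closed so the picker's cleanup can prune the worker.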
func (f *DialingWorkerFactory) Create() (*ClientWorker, error) {
	opts := []pipe.Option{pipe.WithSizeLimit(64 * 1024)}
	uplinkReader, uplinkWriter := pipe.New(opts...)
	downlinkReader, downlinkWriter := pipe.New(opts...)

	c, err := NewClientWorker(transport.Link{
		Reader: downlinkReader,
		Writer: uplinkWriter,
	}, f.Strategy)
	if err != nil {
		return nil, err
	}

	go func(p proxy.Outbound, d internet.Dialer, c common.Closable) {
		outbounds := []*session.Outbound{{
			Target: net.TCPDestination(muxCoolAddress, muxCoolPort),
		}}
		ctx := session.ContextWithOutbounds(context.Background(), outbounds)
		ctx, cancel := context.WithCancel(ctx)

		if err := p.Process(ctx, &transport.Link{Reader: uplinkReader, Writer: downlinkWriter}, d); err != nil {
			errors.LogInfoInner(ctx, err, "failed to handle mux client connection")
		}
		common.Must(c.Close())
		cancel()
	}(f.Proxy, f.Dialer, c.done)

	return c, nil
}
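
// ClientStrategy caps how much traffic a single worker may carry:
// MaxConcurrency limits simultaneously open sub-connections (see IsFull),
// while MaxConnection limits the total opened over the worker's lifetime
// (see IsClosing). A zero value disables the corresponding limit.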
type ClientStrategy struct {
	MaxConcurrency uint32
	MaxConnection  uint32
}

type ClientWorker struct {
	sessionManager *SessionManager
	link           transport.Link
	done           *done.Instance
	timer          *time.Ticker
	strategy       ClientStrategy
}

var (
	muxCoolAddress = net.DomainAddress("v1.mux.cool")
	muxCoolPort    = net.Port(9527)
)

// NewClientWorker creates a new mux.Client.
func NewClientWorker(stream transport.Link, s ClientStrategy) (*ClientWorker, error) {
	c := &ClientWorker{
		sessionManager: NewSessionManager(),
		link:           stream,
		done:           done.New(),
		timer:          time.NewTicker(time.Second * 16),
		strategy:       s,
	}

	go c.fetchOutput()
	go c.monitor()

	return c, nil
}
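
// TotalConnections reports how many sub-connections this worker has carried
// in total, including finished ones; ActiveConnections reports only the
// currently open ones.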
func (m *ClientWorker) TotalConnections() uint32 {
	return uint32(m.sessionManager.Count())
}

func (m *ClientWorker) ActiveConnections() uint32 {
	return uint32(m.sessionManager.Size())
}

// Closed returns true if this Client is closed.
func (m *ClientWorker) Closed() bool {
	return m.done.Done()
}

func (m *ClientWorker) GetTimer() *time.Ticker {
	return m.timer
}
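
// monitor drives the worker's lifecycle: every 16 seconds it checks whether
// the worker still has open sessions and closes it when idle; once done is
// signaled it tears down the session manager and both ends of the link.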
func (m *ClientWorker) monitor() {
	defer m.timer.Stop()

	for {
		select {
		case <-m.done.Wait():
			m.sessionManager.Close()
			common.Close(m.link.Writer)
			common.Interrupt(m.link.Reader)
			return
		case <-m.timer.C:
			size := m.sessionManager.Size()
			if size == 0 && m.sessionManager.CloseIfNoSession() {
				common.Must(m.done.Close())
			}
		}
	}
}
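
// writeFirstPayload waits up to 100ms for the first chunk of data so that it
// can be bundled with the session's opening frame; if the reader cannot
// provide data in time (or does not support timed reads), an empty buffer is
// written so the frame header is still sent promptly.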
func writeFirstPayload(reader buf.Reader, writer *Writer) error {
	err := buf.CopyOnceTimeout(reader, writer, time.Millisecond*100)
	if err == buf.ErrNotTimeoutReader || err == buf.ErrReadTimeout {
		return writer.WriteMultiBuffer(buf.MultiBuffer{})
	}

	if err != nil {
		return err
	}

	return nil
}
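
// fetchInput pumps one session's uplink: it derives the transfer type from
// the target network, sends the first payload together with the opening
// frame, then copies the remaining input into mux frames until the session
// ends.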
func fetchInput(ctx context.Context, s *Session, output buf.Writer) {
	outbounds := session.OutboundsFromContext(ctx)
	ob := outbounds[len(outbounds)-1]
	transferType := protocol.TransferTypeStream
	if ob.Target.Network == net.Network_UDP {
		transferType = protocol.TransferTypePacket
	}
	s.transferType = transferType
	writer := NewWriter(s.ID, ob.Target, output, transferType, xudp.GetGlobalID(ctx))
	defer s.Close(false)
	defer writer.Close()

	errors.LogInfo(ctx, "dispatching request to ", ob.Target)
	if err := writeFirstPayload(s.input, writer); err != nil {
		errors.LogInfoInner(ctx, err, "failed to write first payload")
		writer.hasError = true
		return
	}

	if err := buf.Copy(s.input, writer); err != nil {
		errors.LogInfoInner(ctx, err, "failed to fetch all input")
		writer.hasError = true
		return
	}
}
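
// IsClosing reports whether the worker has used up its MaxConnection budget
// and should be retired once its remaining sessions finish.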
func (m *ClientWorker) IsClosing() bool {
	sm := m.sessionManager
	if m.strategy.MaxConnection > 0 && sm.Count() >= int(m.strategy.MaxConnection) {
		return true
	}
	return false
}

// IsFull returns true if this ClientWorker is unable to accept more connections.
// It might be because it is closing, or the number of connections has reached the limit.
func (m *ClientWorker) IsFull() bool {
	if m.IsClosing() || m.Closed() {
		return true
	}

	sm := m.sessionManager
	if m.strategy.MaxConcurrency > 0 && sm.Size() >= int(m.strategy.MaxConcurrency) {
		return true
	}
	return false
}
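
// Dispatch claims a session slot for the link and starts pumping its uplink.
// It returns false when the worker is full or a session cannot be allocated.
// Pipe-backed readers are pumped on a separate goroutine; other readers are
// pumped synchronously, so the caller blocks until the uplink is drained.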
func (m *ClientWorker) Dispatch(ctx context.Context, link *transport.Link) bool {
	if m.IsFull() {
		return false
	}

	sm := m.sessionManager
	s := sm.Allocate(&m.strategy)
	if s == nil {
		return false
	}
	s.input = link.Reader
	s.output = link.Writer
	if _, ok := link.Reader.(*pipe.Reader); ok {
		go fetchInput(ctx, s, m.link.Writer)
	} else {
		fetchInput(ctx, s, m.link.Writer)
	}
	return true
}
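
// The handleStatus* methods below each process one inbound frame, as
// dispatched by fetchOutput. KeepAlive and New frames carry no useful payload
// on the client side, so any attached data is drained and discarded.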
func (m *ClientWorker) handleStatusKeepAlive(meta *FrameMetadata, reader *buf.BufferedReader) error {
	if meta.Option.Has(OptionData) {
		return buf.Copy(NewStreamReader(reader), buf.Discard)
	}
	return nil
}

func (m *ClientWorker) handleStatusNew(meta *FrameMetadata, reader *buf.BufferedReader) error {
	if meta.Option.Has(OptionData) {
		return buf.Copy(NewStreamReader(reader), buf.Discard)
	}
	return nil
}
func (m *ClientWorker) handleStatusKeep(meta *FrameMetadata, reader *buf.BufferedReader) error {
	if !meta.Option.Has(OptionData) {
		return nil
	}

	s, found := m.sessionManager.Get(meta.SessionID)
	if !found {
		// Notify remote peer to close this session.
		closingWriter := NewResponseWriter(meta.SessionID, m.link.Writer, protocol.TransferTypeStream)
		closingWriter.Close()

		return buf.Copy(NewStreamReader(reader), buf.Discard)
	}

	rr := s.NewReader(reader, &meta.Target)
	err := buf.Copy(rr, s.output)
	if err != nil && buf.IsWriteError(err) {
		errors.LogInfoInner(context.Background(), err, "failed to write to downstream. closing session ", s.ID)
		s.Close(false)
		return buf.Copy(rr, buf.Discard)
	}
	return err
}

func (m *ClientWorker) handleStatusEnd(meta *FrameMetadata, reader *buf.BufferedReader) error {
	if s, found := m.sessionManager.Get(meta.SessionID); found {
		s.Close(false)
	}
	if meta.Option.Has(OptionData) {
		return buf.Copy(NewStreamReader(reader), buf.Discard)
	}
	return nil
}
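
// fetchOutput is the worker's downlink read loop: it decodes frame metadata
// from the carrier connection and routes each frame to the matching status
// handler, closing the worker when the stream ends or a frame is malformed.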
func (m *ClientWorker) fetchOutput() {
	defer func() {
		common.Must(m.done.Close())
	}()

	reader := &buf.BufferedReader{Reader: m.link.Reader}

	var meta FrameMetadata
	for {
		err := meta.Unmarshal(reader)
		if err != nil {
			if errors.Cause(err) != io.EOF {
				errors.LogInfoInner(context.Background(), err, "failed to read metadata")
			}
			break
		}

		switch meta.SessionStatus {
		case SessionStatusKeepAlive:
			err = m.handleStatusKeepAlive(&meta, reader)
		case SessionStatusEnd:
			err = m.handleStatusEnd(&meta, reader)
		case SessionStatusNew:
			err = m.handleStatusNew(&meta, reader)
		case SessionStatusKeep:
			err = m.handleStatusKeep(&meta, reader)
		default:
			status := meta.SessionStatus
			errors.LogError(context.Background(), "unknown status: ", status)
			return
		}

		if err != nil {
			errors.LogInfoInner(context.Background(), err, "failed to process data")
			return
		}
	}
}