client.go 9.6 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414
  1. package mux
  2. import (
  3. "context"
  4. "io"
  5. "sync"
  6. "time"
  7. "github.com/v2fly/v2ray-core/v5/common"
  8. "github.com/v2fly/v2ray-core/v5/common/buf"
  9. "github.com/v2fly/v2ray-core/v5/common/errors"
  10. "github.com/v2fly/v2ray-core/v5/common/net"
  11. "github.com/v2fly/v2ray-core/v5/common/protocol"
  12. "github.com/v2fly/v2ray-core/v5/common/session"
  13. "github.com/v2fly/v2ray-core/v5/common/signal/done"
  14. "github.com/v2fly/v2ray-core/v5/common/task"
  15. "github.com/v2fly/v2ray-core/v5/proxy"
  16. "github.com/v2fly/v2ray-core/v5/transport"
  17. "github.com/v2fly/v2ray-core/v5/transport/internet"
  18. "github.com/v2fly/v2ray-core/v5/transport/pipe"
  19. )
// ClientManager dispatches outbound links onto mux client workers chosen by
// its Picker.
type ClientManager struct {
	Enabled bool // whether mux is enabled from user config
	Picker  WorkerPicker
}
  24. func (m *ClientManager) Dispatch(ctx context.Context, link *transport.Link) error {
  25. for i := 0; i < 16; i++ {
  26. worker, err := m.Picker.PickAvailable()
  27. if err != nil {
  28. return err
  29. }
  30. if worker.Dispatch(ctx, link) {
  31. return nil
  32. }
  33. }
  34. return newError("unable to find an available mux client").AtWarning()
  35. }
// WorkerPicker selects a ClientWorker that can accept a new session.
type WorkerPicker interface {
	PickAvailable() (*ClientWorker, error)
}
// IncrementalWorkerPicker grows a pool of workers on demand: it reuses a
// non-full worker when one exists and otherwise creates a fresh one via
// Factory. Closed workers are pruned by a lazily started periodic task.
type IncrementalWorkerPicker struct {
	Factory ClientWorkerFactory // creates a worker when none is available

	access      sync.Mutex      // guards workers and cleanupTask
	workers     []*ClientWorker // current pool; may contain closed workers between cleanups
	cleanupTask *task.Periodic  // 30s pruning job, created on first worker
}
  45. func (p *IncrementalWorkerPicker) cleanupFunc() error {
  46. p.access.Lock()
  47. defer p.access.Unlock()
  48. if len(p.workers) == 0 {
  49. return newError("no worker")
  50. }
  51. p.cleanup()
  52. return nil
  53. }
  54. func (p *IncrementalWorkerPicker) cleanup() {
  55. var activeWorkers []*ClientWorker
  56. for _, w := range p.workers {
  57. if !w.Closed() {
  58. activeWorkers = append(activeWorkers, w)
  59. }
  60. }
  61. p.workers = activeWorkers
  62. }
  63. func (p *IncrementalWorkerPicker) findAvailable() int {
  64. for idx, w := range p.workers {
  65. if !w.IsFull() {
  66. return idx
  67. }
  68. }
  69. return -1
  70. }
  71. func (p *IncrementalWorkerPicker) pickInternal() (*ClientWorker, bool, error) {
  72. p.access.Lock()
  73. defer p.access.Unlock()
  74. idx := p.findAvailable()
  75. if idx >= 0 {
  76. n := len(p.workers)
  77. if n > 1 && idx != n-1 {
  78. p.workers[n-1], p.workers[idx] = p.workers[idx], p.workers[n-1]
  79. }
  80. return p.workers[idx], false, nil
  81. }
  82. p.cleanup()
  83. worker, err := p.Factory.Create()
  84. if err != nil {
  85. return nil, false, err
  86. }
  87. p.workers = append(p.workers, worker)
  88. if p.cleanupTask == nil {
  89. p.cleanupTask = &task.Periodic{
  90. Interval: time.Second * 30,
  91. Execute: p.cleanupFunc,
  92. }
  93. }
  94. return worker, true, nil
  95. }
  96. func (p *IncrementalWorkerPicker) PickAvailable() (*ClientWorker, error) {
  97. worker, start, err := p.pickInternal()
  98. if start {
  99. common.Must(p.cleanupTask.Start())
  100. }
  101. return worker, err
  102. }
// ClientWorkerFactory creates new mux client workers.
type ClientWorkerFactory interface {
	Create() (*ClientWorker, error)
}
// DialingWorkerFactory builds ClientWorkers whose mux stream is carried by
// an outbound proxy handler.
type DialingWorkerFactory struct {
	Proxy    proxy.Outbound  // outbound handler that processes the mux stream
	Dialer   internet.Dialer // dialer handed to the proxy's Process call
	Strategy ClientStrategy  // limits applied to each new worker

	// ctx is the base context for the per-worker Process goroutine.
	// NOTE(review): storing a context in a struct is normally discouraged;
	// kept because callers supply it through NewDialingWorkerFactory.
	ctx context.Context
}
  112. func NewDialingWorkerFactory(ctx context.Context, proxy proxy.Outbound, dialer internet.Dialer, strategy ClientStrategy) *DialingWorkerFactory {
  113. return &DialingWorkerFactory{
  114. Proxy: proxy,
  115. Dialer: dialer,
  116. Strategy: strategy,
  117. ctx: ctx,
  118. }
  119. }
  120. func (f *DialingWorkerFactory) Create() (*ClientWorker, error) {
  121. opts := []pipe.Option{pipe.WithSizeLimit(64 * 1024)}
  122. uplinkReader, upLinkWriter := pipe.New(opts...)
  123. downlinkReader, downlinkWriter := pipe.New(opts...)
  124. c, err := NewClientWorker(transport.Link{
  125. Reader: downlinkReader,
  126. Writer: upLinkWriter,
  127. }, f.Strategy)
  128. if err != nil {
  129. return nil, err
  130. }
  131. go func(p proxy.Outbound, d internet.Dialer, c common.Closable) {
  132. ctx := session.ContextWithOutbound(f.ctx, &session.Outbound{
  133. Target: net.TCPDestination(muxCoolAddress, muxCoolPort),
  134. })
  135. ctx, cancel := context.WithCancel(ctx)
  136. if err := p.Process(ctx, &transport.Link{Reader: uplinkReader, Writer: downlinkWriter}, d); err != nil {
  137. errors.New("failed to handler mux client connection").Base(err).WriteToLog()
  138. }
  139. common.Must(c.Close())
  140. cancel()
  141. }(f.Proxy, f.Dialer, c.done)
  142. return c, nil
  143. }
// ClientStrategy carries the per-worker limits from user config.
type ClientStrategy struct {
	MaxConcurrency uint32 // max simultaneous sessions per worker; 0 disables the limit
	MaxConnection  uint32 // max total sessions over a worker's lifetime; 0 disables the limit
}
// ClientWorker multiplexes several logical sessions over one underlying
// transport link.
type ClientWorker struct {
	sessionManager *SessionManager // tracks live sessions by ID
	link           transport.Link  // the shared mux stream
	done           *done.Instance  // signaled when this worker shuts down
	strategy       ClientStrategy  // limits applied to this worker
}
// Virtual destination used when asking an outbound proxy to carry a
// mux.cool stream.
var (
	muxCoolAddress = net.DomainAddress("v1.mux.cool")
	muxCoolPort    = net.Port(9527)
)
  158. // NewClientWorker creates a new mux.Client.
  159. func NewClientWorker(stream transport.Link, s ClientStrategy) (*ClientWorker, error) {
  160. c := &ClientWorker{
  161. sessionManager: NewSessionManager(),
  162. link: stream,
  163. done: done.New(),
  164. strategy: s,
  165. }
  166. go c.fetchOutput()
  167. go c.monitor()
  168. return c, nil
  169. }
  170. func (m *ClientWorker) TotalConnections() uint32 {
  171. return uint32(m.sessionManager.Count())
  172. }
  173. func (m *ClientWorker) ActiveConnections() uint32 {
  174. return uint32(m.sessionManager.Size())
  175. }
  176. // Closed returns true if this Client is closed.
  177. func (m *ClientWorker) Closed() bool {
  178. return m.done.Done()
  179. }
  180. func (m *ClientWorker) monitor() {
  181. timer := time.NewTicker(time.Second * 16)
  182. defer timer.Stop()
  183. for {
  184. select {
  185. case <-m.done.Wait():
  186. m.sessionManager.Close()
  187. common.Close(m.link.Writer)
  188. common.Interrupt(m.link.Reader)
  189. return
  190. case <-timer.C:
  191. size := m.sessionManager.Size()
  192. if size == 0 && m.sessionManager.CloseIfNoSession() {
  193. common.Must(m.done.Close())
  194. }
  195. }
  196. }
  197. }
  198. func writeFirstPayload(reader buf.Reader, writer *Writer) error {
  199. err := buf.CopyOnceTimeout(reader, writer, time.Millisecond*100)
  200. if err == buf.ErrNotTimeoutReader || err == buf.ErrReadTimeout {
  201. return writer.WriteMultiBuffer(buf.MultiBuffer{})
  202. }
  203. if err != nil {
  204. return err
  205. }
  206. return nil
  207. }
// fetchInput pumps one session's upstream data (s.input) into the shared mux
// output writer, framing it for the session ID. It runs in its own goroutine
// per dispatched session and closes both the session and the frame writer on
// return.
func fetchInput(ctx context.Context, s *Session, output buf.Writer) {
	dest := session.OutboundFromContext(ctx).Target
	// UDP targets are framed as packets, everything else as a stream.
	transferType := protocol.TransferTypeStream
	if dest.Network == net.Network_UDP {
		transferType = protocol.TransferTypePacket
	}
	s.transferType = transferType
	writer := NewWriter(s.ID, dest, output, transferType)
	// Deferred in this order so the writer is closed before the session.
	defer s.Close()
	defer writer.Close()

	newError("dispatching request to ", dest).WriteToLog(session.ExportIDToError(ctx))
	// Try to piggyback the first payload chunk on the session's opening frame.
	if err := writeFirstPayload(s.input, writer); err != nil {
		newError("failed to write first payload").Base(err).WriteToLog(session.ExportIDToError(ctx))
		// Mark the error before the deferred writer.Close runs, and stop the
		// upstream reader.
		writer.hasError = true
		common.Interrupt(s.input)
		return
	}

	// Stream the remainder of the input until EOF or error.
	if err := buf.Copy(s.input, writer); err != nil {
		newError("failed to fetch all input").Base(err).WriteToLog(session.ExportIDToError(ctx))
		writer.hasError = true
		common.Interrupt(s.input)
		return
	}
}
  232. func (m *ClientWorker) IsClosing() bool {
  233. sm := m.sessionManager
  234. if m.strategy.MaxConnection > 0 && sm.Count() >= int(m.strategy.MaxConnection) {
  235. return true
  236. }
  237. return false
  238. }
  239. func (m *ClientWorker) IsFull() bool {
  240. if m.IsClosing() || m.Closed() {
  241. return true
  242. }
  243. sm := m.sessionManager
  244. if m.strategy.MaxConcurrency > 0 && sm.Size() >= int(m.strategy.MaxConcurrency) {
  245. return true
  246. }
  247. return false
  248. }
  249. func (m *ClientWorker) Dispatch(ctx context.Context, link *transport.Link) bool {
  250. if m.IsFull() || m.Closed() {
  251. return false
  252. }
  253. sm := m.sessionManager
  254. s := sm.Allocate()
  255. if s == nil {
  256. return false
  257. }
  258. s.input = link.Reader
  259. s.output = link.Writer
  260. go fetchInput(ctx, s, m.link.Writer)
  261. return true
  262. }
  263. func (m *ClientWorker) handleStatueKeepAlive(meta *FrameMetadata, reader *buf.BufferedReader) error {
  264. if meta.Option.Has(OptionData) {
  265. return buf.Copy(NewStreamReader(reader), buf.Discard)
  266. }
  267. return nil
  268. }
  269. func (m *ClientWorker) handleStatusNew(meta *FrameMetadata, reader *buf.BufferedReader) error {
  270. if meta.Option.Has(OptionData) {
  271. return buf.Copy(NewStreamReader(reader), buf.Discard)
  272. }
  273. return nil
  274. }
// handleStatusKeep forwards the payload of a Keep frame to the matching
// session's downstream writer. If the session is unknown, or writing
// downstream fails, it tells the peer to close the session and drains the
// remaining frame data so the mux stream stays in sync.
func (m *ClientWorker) handleStatusKeep(meta *FrameMetadata, reader *buf.BufferedReader) error {
	if !meta.Option.Has(OptionData) {
		return nil
	}

	s, found := m.sessionManager.Get(meta.SessionID)
	if !found {
		// Notify remote peer to close this session.
		closingWriter := NewResponseWriter(meta.SessionID, m.link.Writer, protocol.TransferTypeStream)
		closingWriter.Close()

		// Discard the payload so the next frame can be parsed.
		return buf.Copy(NewStreamReader(reader), buf.Discard)
	}

	rr := s.NewReader(reader)
	err := buf.Copy(rr, s.output)
	// Only a downstream write failure tears the session down; read errors
	// propagate to the caller unchanged.
	if err != nil && buf.IsWriteError(err) {
		newError("failed to write to downstream. closing session ", s.ID).Base(err).WriteToLog()

		// Notify remote peer to close this session.
		closingWriter := NewResponseWriter(meta.SessionID, m.link.Writer, protocol.TransferTypeStream)
		closingWriter.Close()

		// Drain what is left of this frame before closing, so the shared
		// stream is not left mid-frame.
		drainErr := buf.Copy(rr, buf.Discard)
		common.Interrupt(s.input)
		s.Close()
		return drainErr
	}

	return err
}
  300. func (m *ClientWorker) handleStatusEnd(meta *FrameMetadata, reader *buf.BufferedReader) error {
  301. if s, found := m.sessionManager.Get(meta.SessionID); found {
  302. if meta.Option.Has(OptionError) {
  303. common.Interrupt(s.input)
  304. common.Interrupt(s.output)
  305. }
  306. s.Close()
  307. }
  308. if meta.Option.Has(OptionData) {
  309. return buf.Copy(NewStreamReader(reader), buf.Discard)
  310. }
  311. return nil
  312. }
// fetchOutput is the worker's read loop: it parses frame metadata off the
// shared link and dispatches each frame to the matching status handler. It
// runs in its own goroutine and marks the worker done when it exits for any
// reason (EOF, parse error, handler error, or unknown status).
func (m *ClientWorker) fetchOutput() {
	defer func() {
		common.Must(m.done.Close())
	}()

	reader := &buf.BufferedReader{Reader: m.link.Reader}

	// meta is reused across iterations; Unmarshal overwrites it each time.
	var meta FrameMetadata
	for {
		err := meta.Unmarshal(reader)
		if err != nil {
			// EOF is the normal end of the stream; only log other errors.
			if errors.Cause(err) != io.EOF {
				newError("failed to read metadata").Base(err).WriteToLog()
			}
			break
		}

		switch meta.SessionStatus {
		case SessionStatusKeepAlive:
			err = m.handleStatueKeepAlive(&meta, reader)
		case SessionStatusEnd:
			err = m.handleStatusEnd(&meta, reader)
		case SessionStatusNew:
			err = m.handleStatusNew(&meta, reader)
		case SessionStatusKeep:
			err = m.handleStatusKeep(&meta, reader)
		default:
			// Unknown status leaves the stream position undefined; bail out.
			status := meta.SessionStatus
			newError("unknown status: ", status).AtError().WriteToLog()
			return
		}

		if err != nil {
			newError("failed to process data").Base(err).WriteToLog()
			return
		}
	}
}