  1. package mux
  2. import (
  3. "context"
  4. "io"
  5. "sync"
  6. "time"
  7. "v2ray.com/core/common"
  8. "v2ray.com/core/common/buf"
  9. "v2ray.com/core/common/errors"
  10. "v2ray.com/core/common/net"
  11. "v2ray.com/core/common/protocol"
  12. "v2ray.com/core/common/session"
  13. "v2ray.com/core/common/signal/done"
  14. "v2ray.com/core/common/task"
  15. "v2ray.com/core/common/vio"
  16. "v2ray.com/core/proxy"
  17. "v2ray.com/core/transport/internet"
  18. "v2ray.com/core/transport/pipe"
  19. )
// ClientManager dispatches outbound links onto mux client workers
// selected by Picker, retrying until a worker accepts the link.
type ClientManager struct {
	Picker WorkerPicker
}
  23. func (m *ClientManager) Dispatch(ctx context.Context, link *vio.Link) error {
  24. for {
  25. worker, err := m.Picker.PickAvailable()
  26. if err != nil {
  27. return err
  28. }
  29. if worker.Dispatch(ctx, link) {
  30. return nil
  31. }
  32. }
  33. }
// WorkerPicker selects a ClientWorker that can accept a new session.
type WorkerPicker interface {
	// PickAvailable returns a worker with spare capacity, or an error when
	// no worker can be obtained.
	PickAvailable() (*ClientWorker, error)
}
// IncrementalWorkerPicker grows a pool of ClientWorkers on demand: it reuses
// a non-full worker when one exists and asks Factory for a new one otherwise.
type IncrementalWorkerPicker struct {
	Factory ClientWorkerFactory // creates a worker when the pool has no spare capacity

	access      sync.Mutex      // guards workers and cleanupTask
	workers     []*ClientWorker // pool of created workers; closed ones are pruned periodically
	cleanupTask *task.Periodic  // created lazily with the first worker; prunes every 30s
}
  43. func (p *IncrementalWorkerPicker) cleanupFunc() error {
  44. p.access.Lock()
  45. defer p.access.Unlock()
  46. if len(p.workers) == 0 {
  47. return newError("no worker")
  48. }
  49. p.cleanup()
  50. return nil
  51. }
  52. func (p *IncrementalWorkerPicker) cleanup() {
  53. var activeWorkers []*ClientWorker
  54. for _, w := range p.workers {
  55. if !w.Closed() {
  56. activeWorkers = append(activeWorkers, w)
  57. }
  58. }
  59. p.workers = activeWorkers
  60. }
  61. func (p *IncrementalWorkerPicker) findAvailable() int {
  62. for idx, w := range p.workers {
  63. if !w.IsFull() {
  64. return idx
  65. }
  66. }
  67. return -1
  68. }
  69. func (p *IncrementalWorkerPicker) pickInternal() (*ClientWorker, error, bool) {
  70. p.access.Lock()
  71. defer p.access.Unlock()
  72. idx := p.findAvailable()
  73. if idx >= 0 {
  74. n := len(p.workers)
  75. if n > 1 && idx != n-1 {
  76. p.workers[n-1], p.workers[idx] = p.workers[idx], p.workers[n-1]
  77. }
  78. return p.workers[idx], nil, false
  79. }
  80. p.cleanup()
  81. worker, err := p.Factory.Create()
  82. if err != nil {
  83. return nil, err, false
  84. }
  85. p.workers = append(p.workers, worker)
  86. if p.cleanupTask == nil {
  87. p.cleanupTask = &task.Periodic{
  88. Interval: time.Second * 30,
  89. Execute: p.cleanupFunc,
  90. }
  91. }
  92. return worker, nil, true
  93. }
  94. func (p *IncrementalWorkerPicker) PickAvailable() (*ClientWorker, error) {
  95. worker, err, start := p.pickInternal()
  96. if start {
  97. p.cleanupTask.Start()
  98. }
  99. return worker, err
  100. }
// ClientWorkerFactory creates ClientWorkers for a picker's pool.
type ClientWorkerFactory interface {
	Create() (*ClientWorker, error)
}
// DialingWorkerFactory creates ClientWorkers whose mux stream is carried by
// an outbound proxy handler driven through Dialer.
type DialingWorkerFactory struct {
	Proxy    proxy.Outbound  // outbound handler that processes the mux connection
	Dialer   internet.Dialer // transport dialer passed to Proxy.Process
	Strategy ClientStrategy  // limits applied to each created worker
}
  109. func (f *DialingWorkerFactory) Create() (*ClientWorker, error) {
  110. opts := []pipe.Option{pipe.WithSizeLimit(64 * 1024)}
  111. uplinkReader, upLinkWriter := pipe.New(opts...)
  112. downlinkReader, downlinkWriter := pipe.New(opts...)
  113. c, err := NewClientWorker(vio.Link{
  114. Reader: downlinkReader,
  115. Writer: upLinkWriter,
  116. }, f.Strategy)
  117. if err != nil {
  118. return nil, err
  119. }
  120. go func(p proxy.Outbound, d internet.Dialer, c common.Closable) {
  121. ctx := session.ContextWithOutbound(context.Background(), &session.Outbound{
  122. Target: net.TCPDestination(muxCoolAddress, muxCoolPort),
  123. })
  124. ctx, cancel := context.WithCancel(ctx)
  125. if err := p.Process(ctx, &vio.Link{Reader: uplinkReader, Writer: downlinkWriter}, d); err != nil {
  126. errors.New("failed to handler mux client connection").Base(err).WriteToLog()
  127. }
  128. common.Must(c.Close())
  129. cancel()
  130. }(f.Proxy, f.Dialer, c.done)
  131. return c, nil
  132. }
// ClientStrategy holds the capacity limits applied to a ClientWorker.
// For both fields, zero means "no limit".
type ClientStrategy struct {
	// MaxConcurrency caps the number of live sessions on one worker
	// (compared against SessionManager.Size in IsFull).
	MaxConcurrency uint32
	// MaxConnection is compared against SessionManager.Count in IsClosing —
	// presumably a cumulative session count; TODO confirm Count's semantics.
	MaxConnection uint32
}
// ClientWorker multiplexes several sub-connections (sessions) over a single
// outbound link.
type ClientWorker struct {
	sessionManager *SessionManager // live sessions keyed by session ID
	link           vio.Link        // the underlying mux transport stream
	done           *done.Instance  // signalled when the worker shuts down
	strategy       ClientStrategy  // concurrency/connection limits
}
// muxCoolAddress and muxCoolPort form the virtual destination that identifies
// a Mux.Cool connection to the server side.
var muxCoolAddress = net.DomainAddress("v1.mux.cool")
var muxCoolPort = net.Port(9527)
  145. // NewClientWorker creates a new mux.Client.
  146. func NewClientWorker(stream vio.Link, s ClientStrategy) (*ClientWorker, error) {
  147. c := &ClientWorker{
  148. sessionManager: NewSessionManager(),
  149. link: stream,
  150. done: done.New(),
  151. strategy: s,
  152. }
  153. go c.fetchOutput()
  154. go c.monitor()
  155. return c, nil
  156. }
  157. func (m *ClientWorker) ActiveConnections() uint32 {
  158. return uint32(m.sessionManager.Size())
  159. }
  160. // Closed returns true if this Client is closed.
  161. func (m *ClientWorker) Closed() bool {
  162. return m.done.Done()
  163. }
  164. func (m *ClientWorker) monitor() {
  165. timer := time.NewTicker(time.Second * 16)
  166. defer timer.Stop()
  167. for {
  168. select {
  169. case <-m.done.Wait():
  170. m.sessionManager.Close()
  171. common.Close(m.link.Writer) // nolint: errcheck
  172. pipe.CloseError(m.link.Reader) // nolint: errcheck
  173. return
  174. case <-timer.C:
  175. size := m.sessionManager.Size()
  176. if size == 0 && m.sessionManager.CloseIfNoSession() {
  177. common.Must(m.done.Close())
  178. }
  179. }
  180. }
  181. }
  182. func writeFirstPayload(reader buf.Reader, writer *Writer) error {
  183. err := buf.CopyOnceTimeout(reader, writer, time.Millisecond*100)
  184. if err == buf.ErrNotTimeoutReader || err == buf.ErrReadTimeout {
  185. return writer.WriteMultiBuffer(buf.MultiBuffer{})
  186. }
  187. if err != nil {
  188. return err
  189. }
  190. return nil
  191. }
// fetchInput pumps one session's upstream data (s.input) into the shared mux
// output writer, framed with the session ID and the target destination taken
// from the outbound context. It runs as a goroutine per dispatched session
// and closes both the session and its frame writer on exit.
func fetchInput(ctx context.Context, s *Session, output buf.Writer) {
	dest := session.OutboundFromContext(ctx).Target
	// UDP targets are framed as packets; everything else as a stream.
	transferType := protocol.TransferTypeStream
	if dest.Network == net.Network_UDP {
		transferType = protocol.TransferTypePacket
	}
	s.transferType = transferType
	writer := NewWriter(s.ID, dest, output, transferType)
	defer s.Close()      // nolint: errcheck
	defer writer.Close() // nolint: errcheck

	newError("dispatching request to ", dest).WriteToLog(session.ExportIDToError(ctx))
	// First payload is attempted with a short timeout (see writeFirstPayload)
	// so the session's opening frame is not delayed by a slow upstream.
	if err := writeFirstPayload(s.input, writer); err != nil {
		newError("failed to write first payload").Base(err).WriteToLog(session.ExportIDToError(ctx))
		// Mark the writer errored before closing so the deferred Close emits
		// an error-end frame rather than a clean end; then unblock the input.
		writer.hasError = true
		pipe.CloseError(s.input)
		return
	}
	if err := buf.Copy(s.input, writer); err != nil {
		newError("failed to fetch all input").Base(err).WriteToLog(session.ExportIDToError(ctx))
		writer.hasError = true
		pipe.CloseError(s.input)
		return
	}
}
  216. func (m *ClientWorker) IsClosing() bool {
  217. sm := m.sessionManager
  218. if m.strategy.MaxConnection > 0 && sm.Count() >= int(m.strategy.MaxConnection) {
  219. return true
  220. }
  221. return false
  222. }
  223. func (m *ClientWorker) IsFull() bool {
  224. if m.IsClosing() {
  225. return true
  226. }
  227. sm := m.sessionManager
  228. if m.strategy.MaxConcurrency > 0 && sm.Size() >= int(m.strategy.MaxConcurrency) {
  229. return true
  230. }
  231. return false
  232. }
  233. func (m *ClientWorker) Dispatch(ctx context.Context, link *vio.Link) bool {
  234. if m.IsFull() || m.Closed() {
  235. return false
  236. }
  237. sm := m.sessionManager
  238. s := sm.Allocate()
  239. if s == nil {
  240. return false
  241. }
  242. s.input = link.Reader
  243. s.output = link.Writer
  244. go fetchInput(ctx, s, m.link.Writer)
  245. return true
  246. }
  247. func (m *ClientWorker) handleStatueKeepAlive(meta *FrameMetadata, reader *buf.BufferedReader) error {
  248. if meta.Option.Has(OptionData) {
  249. return buf.Copy(NewStreamReader(reader), buf.Discard)
  250. }
  251. return nil
  252. }
  253. func (m *ClientWorker) handleStatusNew(meta *FrameMetadata, reader *buf.BufferedReader) error {
  254. if meta.Option.Has(OptionData) {
  255. return buf.Copy(NewStreamReader(reader), buf.Discard)
  256. }
  257. return nil
  258. }
  259. func (m *ClientWorker) handleStatusKeep(meta *FrameMetadata, reader *buf.BufferedReader) error {
  260. if !meta.Option.Has(OptionData) {
  261. return nil
  262. }
  263. s, found := m.sessionManager.Get(meta.SessionID)
  264. if !found {
  265. return buf.Copy(NewStreamReader(reader), buf.Discard)
  266. }
  267. rr := s.NewReader(reader)
  268. err := buf.Copy(rr, s.output)
  269. if err != nil && buf.IsWriteError(err) {
  270. newError("failed to write to downstream. closing session ", s.ID).Base(err).WriteToLog()
  271. drainErr := buf.Copy(rr, buf.Discard)
  272. pipe.CloseError(s.input)
  273. s.Close()
  274. return drainErr
  275. }
  276. return err
  277. }
  278. func (m *ClientWorker) handleStatusEnd(meta *FrameMetadata, reader *buf.BufferedReader) error {
  279. if s, found := m.sessionManager.Get(meta.SessionID); found {
  280. if meta.Option.Has(OptionError) {
  281. pipe.CloseError(s.input)
  282. pipe.CloseError(s.output)
  283. }
  284. s.Close()
  285. }
  286. if meta.Option.Has(OptionData) {
  287. return buf.Copy(NewStreamReader(reader), buf.Discard)
  288. }
  289. return nil
  290. }
  291. func (m *ClientWorker) fetchOutput() {
  292. defer func() {
  293. common.Must(m.done.Close())
  294. }()
  295. reader := &buf.BufferedReader{Reader: m.link.Reader}
  296. var meta FrameMetadata
  297. for {
  298. err := meta.Unmarshal(reader)
  299. if err != nil {
  300. if errors.Cause(err) != io.EOF {
  301. newError("failed to read metadata").Base(err).WriteToLog()
  302. }
  303. break
  304. }
  305. switch meta.SessionStatus {
  306. case SessionStatusKeepAlive:
  307. err = m.handleStatueKeepAlive(&meta, reader)
  308. case SessionStatusEnd:
  309. err = m.handleStatusEnd(&meta, reader)
  310. case SessionStatusNew:
  311. err = m.handleStatusNew(&meta, reader)
  312. case SessionStatusKeep:
  313. err = m.handleStatusKeep(&meta, reader)
  314. default:
  315. status := meta.SessionStatus
  316. newError("unknown status: ", status).AtError().WriteToLog()
  317. return
  318. }
  319. if err != nil {
  320. newError("failed to process data").Base(err).WriteToLog()
  321. return
  322. }
  323. }
  324. }