// Package kcp - A Fast and Reliable ARQ Protocol
//
// Acknowledgement:
// skywind3000@github for inventing the KCP protocol
// xtaci@github for translating to Golang
package kcp

import (
	"encoding/binary"

	"github.com/v2ray/v2ray-core/common/alloc"
)

const (
	IKCP_RTO_NDL     = 30  // no delay min rto
	IKCP_RTO_MIN     = 100 // normal min rto
	IKCP_RTO_DEF     = 200
	IKCP_RTO_MAX     = 60000
	IKCP_CMD_PUSH    = 81 // cmd: push data
	IKCP_CMD_ACK     = 82 // cmd: ack
	IKCP_WND_SND     = 32
	IKCP_WND_RCV     = 32
	IKCP_MTU_DEF     = 1350
	IKCP_ACK_FAST    = 3
	IKCP_INTERVAL    = 100
	IKCP_OVERHEAD    = 24
	IKCP_DEADLINK    = 20
	IKCP_THRESH_INIT = 2
	IKCP_THRESH_MIN  = 2
	IKCP_PROBE_INIT  = 7000   // 7 secs to probe window size
	IKCP_PROBE_LIMIT = 120000 // up to 120 secs to probe window
)

// Output is a closure which captures conn and calls conn.Write
type Output func(buf []byte)

/* encode 8 bits unsigned int */
func ikcp_encode8u(p []byte, c byte) []byte {
	p[0] = c
	return p[1:]
}

/* decode 8 bits unsigned int */
func ikcp_decode8u(p []byte, c *byte) []byte {
	*c = p[0]
	return p[1:]
}

/* encode 16 bits unsigned int (lsb) */
func ikcp_encode16u(p []byte, w uint16) []byte {
	binary.LittleEndian.PutUint16(p, w)
	return p[2:]
}

/* decode 16 bits unsigned int (lsb) */
func ikcp_decode16u(p []byte, w *uint16) []byte {
	*w = binary.LittleEndian.Uint16(p)
	return p[2:]
}

/* encode 32 bits unsigned int (lsb) */
func ikcp_encode32u(p []byte, l uint32) []byte {
	binary.LittleEndian.PutUint32(p, l)
	return p[4:]
}

/* decode 32 bits unsigned int (lsb) */
func ikcp_decode32u(p []byte, l *uint32) []byte {
	*l = binary.LittleEndian.Uint32(p)
	return p[4:]
}

func _imin_(a, b uint32) uint32 {
	if a <= b {
		return a
	}
	return b
}

func _imax_(a, b uint32) uint32 {
	if a >= b {
		return a
	}
	return b
}

func _itimediff(later, earlier uint32) int32 {
	return (int32)(later - earlier)
}

// Segment defines a KCP segment
type Segment struct {
	conv     uint32
	cmd      uint32
	frg      uint32
	wnd      uint32
	ts       uint32
	sn       uint32
	una      uint32
	resendts uint32
	fastack  uint32
	xmit     uint32
	data     *alloc.Buffer
}

// encode a segment into buffer
func (seg *Segment) encode(ptr []byte) []byte {
	ptr = ikcp_encode32u(ptr, seg.conv)
	ptr = ikcp_encode8u(ptr, uint8(seg.cmd))
	ptr = ikcp_encode8u(ptr, uint8(seg.frg))
	ptr = ikcp_encode16u(ptr, uint16(seg.wnd))
	ptr = ikcp_encode32u(ptr, seg.ts)
	ptr = ikcp_encode32u(ptr, seg.sn)
	ptr = ikcp_encode32u(ptr, seg.una)
	ptr = ikcp_encode16u(ptr, uint16(seg.data.Len()))
	return ptr
}
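
// For reference, the wire layout produced by Segment.encode above is, in
// order and little endian: conv (4 bytes), cmd (1), frg (1), wnd (2), ts (4),
// sn (4), una (4), data length (2), for 22 bytes of header followed by the
// payload. Size checks elsewhere in this file budget IKCP_OVERHEAD (24) bytes
// per segment header, slightly more than what encode actually writes.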

// Release releases the data buffer held by this segment.
func (seg *Segment) Release() {
	seg.data.Release()
	seg.data = nil
}

// NewSegment creates a KCP segment
func NewSegment() *Segment {
	return &Segment{
		data: alloc.NewSmallBuffer().Clear(),
	}
}

// KCP defines a single KCP connection
type KCP struct {
	conv, mtu, mss, state                  uint32
	snd_una, snd_nxt, rcv_nxt              uint32
	ts_recent, ts_lastack, ssthresh        uint32
	rx_rttvar, rx_srtt, rx_rto             uint32
	snd_wnd, rcv_wnd, rmt_wnd, cwnd, probe uint32
	current, interval, ts_flush, xmit      uint32
	updated                                bool
	ts_probe, probe_wait                   uint32
	dead_link, incr                        uint32

	snd_queue *SendingQueue
	rcv_queue []*Segment
	snd_buf   []*Segment
	rcv_buf   *ReceivingWindow

	acklist []uint32

	buffer            []byte
	fastresend        int32
	congestionControl bool
	output            Output
}

// NewKCP creates a new KCP control object. 'conv' must be equal in both
// endpoints of the same connection.
func NewKCP(conv uint32, mtu uint32, sendingWindowSize uint32, receivingWindowSize uint32, sendingQueueSize uint32, output Output) *KCP {
	kcp := new(KCP)
	kcp.conv = conv
	kcp.snd_wnd = sendingWindowSize
	kcp.rcv_wnd = receivingWindowSize
	kcp.rmt_wnd = IKCP_WND_RCV
	kcp.mtu = mtu
	kcp.mss = kcp.mtu - IKCP_OVERHEAD
	kcp.buffer = make([]byte, (kcp.mtu+IKCP_OVERHEAD)*3)
	kcp.rx_rto = IKCP_RTO_DEF
	kcp.interval = IKCP_INTERVAL
	kcp.ts_flush = IKCP_INTERVAL
	kcp.ssthresh = IKCP_THRESH_INIT
	kcp.dead_link = IKCP_DEADLINK
	kcp.output = output
	kcp.rcv_buf = NewReceivingWindow(receivingWindowSize)
	kcp.snd_queue = NewSendingQueue(sendingQueueSize)
	return kcp
}
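
// A minimal usage sketch (illustrative only; udpConn and conv are assumed to
// be provided by the caller and are not defined in this file):
//
//	output := func(payload []byte) {
//		udpConn.Write(payload) // forward each flushed KCP datagram over UDP
//	}
//	kcp := NewKCP(conv, IKCP_MTU_DEF, IKCP_WND_SND, IKCP_WND_RCV, 1024, output)
//	kcp.NoDelay(10, 2, true) // 10ms interval, fast resend after 2 skips, congestion control on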

// Recv is the user/upper level receive function. It returns the number of
// bytes read, or a value below zero for EAGAIN.
func (kcp *KCP) Recv(buffer []byte) (n int) {
	if len(kcp.rcv_queue) == 0 {
		return -1
	}

	// merge fragments
	count := 0
	for _, seg := range kcp.rcv_queue {
		dataLen := seg.data.Len()
		if dataLen > len(buffer) {
			break
		}
		copy(buffer, seg.data.Value)
		seg.Release()
		buffer = buffer[dataLen:]
		n += dataLen
		count++
	}
	kcp.rcv_queue = kcp.rcv_queue[count:]
	kcp.DumpReceivingBuf()
	return
}

// DumpReceivingBuf moves available data from rcv_buf -> rcv_queue
// @Private
func (kcp *KCP) DumpReceivingBuf() {
	for {
		seg := kcp.rcv_buf.RemoveFirst()
		if seg == nil {
			break
		}
		kcp.rcv_queue = append(kcp.rcv_queue, seg)
		kcp.rcv_buf.Advance()
		kcp.rcv_nxt++
	}
}

// Send is the user/upper level send function. It returns the number of bytes
// accepted into the sending queue, which may be less than len(buffer) when
// the queue is full.
func (kcp *KCP) Send(buffer []byte) int {
	nBytes := 0
	for len(buffer) > 0 && !kcp.snd_queue.IsFull() {
		var size int
		if len(buffer) > int(kcp.mss) {
			size = int(kcp.mss)
		} else {
			size = len(buffer)
		}
		seg := NewSegment()
		seg.data.Append(buffer[:size])
		kcp.snd_queue.Push(seg)
		buffer = buffer[size:]
		nBytes += size
	}
	return nBytes
}

// https://tools.ietf.org/html/rfc6298
func (kcp *KCP) update_ack(rtt int32) {
	var rto uint32
	if kcp.rx_srtt == 0 {
		kcp.rx_srtt = uint32(rtt)
		kcp.rx_rttvar = uint32(rtt) / 2
	} else {
		delta := rtt - int32(kcp.rx_srtt)
		if delta < 0 {
			delta = -delta
		}
		kcp.rx_rttvar = (3*kcp.rx_rttvar + uint32(delta)) / 4
		kcp.rx_srtt = (7*kcp.rx_srtt + uint32(rtt)) / 8
		if kcp.rx_srtt < kcp.interval {
			kcp.rx_srtt = kcp.interval
		}
	}
	rto = kcp.rx_srtt + _imax_(kcp.interval, 4*kcp.rx_rttvar)
	if rto > IKCP_RTO_MAX {
		rto = IKCP_RTO_MAX
	}
	kcp.rx_rto = rto * 3 / 2
}
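
// In the notation of RFC 6298, the smoothing above is:
//
//	RTTVAR = (3*RTTVAR + |RTT - SRTT|) / 4
//	SRTT   = (7*SRTT + RTT) / 8              (floored at kcp.interval)
//	RTO    = min(SRTT + max(interval, 4*RTTVAR), IKCP_RTO_MAX) * 3 / 2
//
// The trailing *3/2 is a safety margin of this implementation; since it is
// applied after the IKCP_RTO_MAX clamp, rx_rto may exceed IKCP_RTO_MAX.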

func (kcp *KCP) shrink_buf() {
	if len(kcp.snd_buf) > 0 {
		seg := kcp.snd_buf[0]
		kcp.snd_una = seg.sn
	} else {
		kcp.snd_una = kcp.snd_nxt
	}
}

func (kcp *KCP) parse_ack(sn uint32) {
	if _itimediff(sn, kcp.snd_una) < 0 || _itimediff(sn, kcp.snd_nxt) >= 0 {
		return
	}

	for k, seg := range kcp.snd_buf {
		if sn == seg.sn {
			kcp.snd_buf = append(kcp.snd_buf[:k], kcp.snd_buf[k+1:]...)
			seg.Release()
			break
		}
		if _itimediff(sn, seg.sn) < 0 {
			break
		}
	}
}

func (kcp *KCP) parse_fastack(sn uint32) {
	if _itimediff(sn, kcp.snd_una) < 0 || _itimediff(sn, kcp.snd_nxt) >= 0 {
		return
	}

	for _, seg := range kcp.snd_buf {
		if _itimediff(sn, seg.sn) < 0 {
			break
		} else if sn != seg.sn {
			seg.fastack++
		}
	}
}

func (kcp *KCP) parse_una(una uint32) {
	count := 0
	for _, seg := range kcp.snd_buf {
		if _itimediff(una, seg.sn) > 0 {
			seg.Release()
			count++
		} else {
			break
		}
	}
	kcp.snd_buf = kcp.snd_buf[count:]
}

// ack append
func (kcp *KCP) ack_push(sn, ts uint32) {
	kcp.acklist = append(kcp.acklist, sn, ts)
}

func (kcp *KCP) ack_get(p int) (sn, ts uint32) {
	return kcp.acklist[p*2+0], kcp.acklist[p*2+1]
}

func (kcp *KCP) parse_data(newseg *Segment) {
	sn := newseg.sn
	if _itimediff(sn, kcp.rcv_nxt+kcp.rcv_wnd) >= 0 ||
		_itimediff(sn, kcp.rcv_nxt) < 0 {
		return
	}

	idx := sn - kcp.rcv_nxt
	if !kcp.rcv_buf.Set(idx, newseg) {
		newseg.Release()
	}
	kcp.DumpReceivingBuf()
}

// Input is to be called when a low-level packet (e.g. a UDP packet) is
// received. It returns 0 on success, or a negative value for malformed input.
func (kcp *KCP) Input(data []byte) int {
	//una := kcp.snd_una
	if len(data) < IKCP_OVERHEAD {
		return -1
	}

	var maxack uint32
	var flag int
	for {
		var ts, sn, una, conv uint32
		var wnd, length uint16
		var cmd, frg uint8

		if len(data) < int(IKCP_OVERHEAD) {
			break
		}

		data = ikcp_decode32u(data, &conv)
		if conv != kcp.conv {
			return -1
		}

		data = ikcp_decode8u(data, &cmd)
		data = ikcp_decode8u(data, &frg)
		data = ikcp_decode16u(data, &wnd)
		data = ikcp_decode32u(data, &ts)
		data = ikcp_decode32u(data, &sn)
		data = ikcp_decode32u(data, &una)
		data = ikcp_decode16u(data, &length)

		if len(data) < int(length) {
			return -2
		}

		if cmd != IKCP_CMD_PUSH && cmd != IKCP_CMD_ACK {
			return -3
		}

		if kcp.rmt_wnd < uint32(wnd) {
			kcp.rmt_wnd = uint32(wnd)
		}
		kcp.parse_una(una)
		kcp.shrink_buf()

		if cmd == IKCP_CMD_ACK {
			if _itimediff(kcp.current, ts) >= 0 {
				kcp.update_ack(_itimediff(kcp.current, ts))
			}
			kcp.parse_ack(sn)
			kcp.shrink_buf()
			if flag == 0 {
				flag = 1
				maxack = sn
			} else if _itimediff(sn, maxack) > 0 {
				maxack = sn
			}
		} else if cmd == IKCP_CMD_PUSH {
			if _itimediff(sn, kcp.rcv_nxt+kcp.rcv_wnd) < 0 {
				kcp.ack_push(sn, ts)
				if _itimediff(sn, kcp.rcv_nxt) >= 0 {
					seg := NewSegment()
					seg.conv = conv
					seg.cmd = uint32(cmd)
					seg.frg = uint32(frg)
					seg.wnd = uint32(wnd)
					seg.ts = ts
					seg.sn = sn
					seg.una = una
					seg.data.Append(data[:length])
					kcp.parse_data(seg)
				}
			}
		} else {
			return -3
		}

		data = data[length:]
	}

	if flag != 0 {
		kcp.parse_fastack(maxack)
	}

	/*
		if _itimediff(kcp.snd_una, una) > 0 {
			if kcp.cwnd < kcp.rmt_wnd {
				mss := kcp.mss
				if kcp.cwnd < kcp.ssthresh {
					kcp.cwnd++
					kcp.incr += mss
				} else {
					if kcp.incr < mss {
						kcp.incr = mss
					}
					kcp.incr += (mss*mss)/kcp.incr + (mss / 16)
					if (kcp.cwnd+1)*mss <= kcp.incr {
						kcp.cwnd++
					}
				}
				if kcp.cwnd > kcp.rmt_wnd {
					kcp.cwnd = kcp.rmt_wnd
					kcp.incr = kcp.rmt_wnd * mss
				}
			}
		}*/
	return 0
}
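
// Illustrative receive path (a sketch, not part of the original file): each
// datagram read from the transport is fed to Input, and application data is
// then drained with Recv into a caller-provided buffer:
//
//	kcp.Input(packet)         // packet holds one raw datagram from the peer
//	buf := make([]byte, 4096) // 4096 is an arbitrary example size
//	if n := kcp.Recv(buf); n > 0 {
//		handle(buf[:n]) // handle is a hypothetical application callback
//	}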

// flush pending data
func (kcp *KCP) flush() {
	current := kcp.current
	buffer := kcp.buffer
	change := 0
	//lost := false

	if !kcp.updated {
		return
	}

	var seg Segment
	seg.conv = kcp.conv
	seg.cmd = IKCP_CMD_ACK
	seg.wnd = uint32(kcp.rcv_nxt + kcp.rcv_wnd)
	seg.una = kcp.rcv_nxt

	// flush acknowledges
	count := len(kcp.acklist) / 2
	ptr := buffer
	for i := 0; i < count; i++ {
		size := len(buffer) - len(ptr)
		if size+IKCP_OVERHEAD > int(kcp.mtu) {
			kcp.output(buffer[:size])
			ptr = buffer
		}
		seg.sn, seg.ts = kcp.ack_get(i)
		ptr = seg.encode(ptr)
	}
	kcp.acklist = nil

	// calculate window size
	cwnd := _imin_(kcp.snd_una+kcp.snd_wnd, kcp.rmt_wnd)
	if kcp.congestionControl {
		cwnd = _imin_(kcp.cwnd, cwnd)
	}

	// move segments from the sending queue into the sending buffer while the
	// window allows
	for !kcp.snd_queue.IsEmpty() && _itimediff(kcp.snd_nxt, cwnd) < 0 {
		newseg := kcp.snd_queue.Pop()
		newseg.conv = kcp.conv
		newseg.cmd = IKCP_CMD_PUSH
		newseg.wnd = seg.wnd
		newseg.ts = current
		newseg.sn = kcp.snd_nxt
		newseg.una = kcp.rcv_nxt
		newseg.resendts = current
		newseg.fastack = 0
		newseg.xmit = 0
		kcp.snd_buf = append(kcp.snd_buf, newseg)
		kcp.snd_nxt++
	}

	// calculate resent
	resent := uint32(kcp.fastresend)
	if kcp.fastresend <= 0 {
		resent = 0xffffffff
	}

	// flush data segments
	for _, segment := range kcp.snd_buf {
		needsend := false
		if segment.xmit == 0 {
			// first transmission
			needsend = true
			segment.xmit++
			segment.resendts = current + kcp.rx_rto
		} else if _itimediff(current, segment.resendts) >= 0 {
			// retransmission on timeout
			needsend = true
			segment.xmit++
			kcp.xmit++
			segment.resendts = current + kcp.rx_rto
			//lost = true
		} else if segment.fastack >= resent {
			// fast retransmission: enough later segments were acknowledged ahead of this one
			needsend = true
			segment.xmit++
			segment.fastack = 0
			segment.resendts = current + kcp.rx_rto
			change++
		}

		if needsend {
			segment.ts = current
			segment.wnd = seg.wnd
			segment.una = kcp.rcv_nxt

			size := len(buffer) - len(ptr)
			need := IKCP_OVERHEAD + segment.data.Len()
			if size+need >= int(kcp.mtu) {
				kcp.output(buffer[:size])
				ptr = buffer
			}

			ptr = segment.encode(ptr)
			copy(ptr, segment.data.Value)
			ptr = ptr[segment.data.Len():]

			if segment.xmit >= kcp.dead_link {
				kcp.state = 0xFFFFFFFF
			}
		}
	}

	// flush remaining segments
	size := len(buffer) - len(ptr)
	if size > 0 {
		kcp.output(buffer[:size])
	}

	// update ssthresh
	// rate halving, https://tools.ietf.org/html/rfc6937
	/*
		if change != 0 {
			inflight := kcp.snd_nxt - kcp.snd_una
			kcp.ssthresh = inflight / 2
			if kcp.ssthresh < IKCP_THRESH_MIN {
				kcp.ssthresh = IKCP_THRESH_MIN
			}
			kcp.cwnd = kcp.ssthresh + resent
			kcp.incr = kcp.cwnd * kcp.mss
		}*/

	// congestion control, https://tools.ietf.org/html/rfc5681
	/*
		if lost {
			kcp.ssthresh = cwnd / 2
			if kcp.ssthresh < IKCP_THRESH_MIN {
				kcp.ssthresh = IKCP_THRESH_MIN
			}
			kcp.cwnd = 1
			kcp.incr = kcp.mss
		}
		if kcp.cwnd < 1 {
			kcp.cwnd = 1
			kcp.incr = kcp.mss
		}*/
}

// Update updates state (call it repeatedly, every 10ms-100ms), or you can ask
// Check when to call it again (if there is no Input/Send call in between).
// 'current' is the current timestamp in milliseconds.
func (kcp *KCP) Update(current uint32) {
	var slap int32

	kcp.current = current
	if !kcp.updated {
		kcp.updated = true
		kcp.ts_flush = kcp.current
	}

	slap = _itimediff(kcp.current, kcp.ts_flush)
	if slap >= 10000 || slap < -10000 {
		kcp.ts_flush = kcp.current
		slap = 0
	}

	if slap >= 0 {
		kcp.ts_flush += kcp.interval
		if _itimediff(kcp.current, kcp.ts_flush) >= 0 {
			kcp.ts_flush = kcp.current + kcp.interval
		}
		kcp.flush()
	}
}
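
// A minimal driver loop (illustrative sketch; the 10ms tick is an assumption
// within the 10ms-100ms range suggested above, and a time.Now based
// millisecond timestamp is one possible clock source):
//
//	for range time.Tick(10 * time.Millisecond) {
//		kcp.Update(uint32(time.Now().UnixNano() / int64(time.Millisecond)))
//	}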

// Check determines when you should invoke Update: it returns the time, in
// milliseconds, at which Update should be called next, assuming there is no
// Input/Send call in between. You can schedule a single Update call at that
// time instead of calling Update repeatedly.
// This is important to reduce unnecessary Update invocations; use it to
// schedule Update (e.g. when implementing an epoll-like mechanism, or to
// optimize Update when handling a massive number of KCP connections).
func (kcp *KCP) Check(current uint32) uint32 {
	ts_flush := kcp.ts_flush
	tm_flush := int32(0x7fffffff)
	tm_packet := int32(0x7fffffff)
	minimal := uint32(0)

	if !kcp.updated {
		return current
	}

	if _itimediff(current, ts_flush) >= 10000 ||
		_itimediff(current, ts_flush) < -10000 {
		ts_flush = current
	}

	if _itimediff(current, ts_flush) >= 0 {
		return current
	}

	tm_flush = _itimediff(ts_flush, current)

	for _, seg := range kcp.snd_buf {
		diff := _itimediff(seg.resendts, current)
		if diff <= 0 {
			return current
		}
		if diff < tm_packet {
			tm_packet = diff
		}
	}

	minimal = uint32(tm_packet)
	if tm_packet >= tm_flush {
		minimal = uint32(tm_flush)
	}
	if minimal >= kcp.interval {
		minimal = kcp.interval
	}

	return current + minimal
}
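
// Sketch of scheduling driven by Check instead of a fixed ticker (millis is a
// hypothetical helper returning the current time in milliseconds as uint32):
//
//	now := millis()
//	next := kcp.Check(now)
//	time.Sleep(time.Duration(next-now) * time.Millisecond)
//	kcp.Update(millis())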

// NoDelay sets the tuning options of this connection. In the original C API
// this is ikcp_nodelay(kcp, nodelay, interval, resend, nc), whose fastest
// preset is ikcp_nodelay(kcp, 1, 20, 2, 1). This Go port exposes three of
// those knobs:
// interval: internal update timer interval in milliseconds, default is 100ms
// resend: 0 disables fast resend (default); N resends a segment once N later
// segments have been acknowledged ahead of it
// congestionControl: true enables the congestion window limit in flush,
// false disables it
func (kcp *KCP) NoDelay(interval uint32, resend int, congestionControl bool) int {
	kcp.interval = interval
	if resend >= 0 {
		kcp.fastresend = int32(resend)
	}
	kcp.congestionControl = congestionControl
	return 0
}
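
// As an example, an aggressive low-latency configuration roughly matching the
// C preset above could be (whether to keep congestion control enabled is a
// policy choice for the caller; false here mirrors nc=1):
//
//	kcp.NoDelay(20, 2, false)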

// WaitSnd returns how many packets are waiting to be sent
func (kcp *KCP) WaitSnd() uint32 {
	return uint32(len(kcp.snd_buf)) + kcp.snd_queue.Len()
}

// ClearSendQueue drops all pending and unacknowledged outgoing segments.
func (kcp *KCP) ClearSendQueue() {
	kcp.snd_queue.Clear()
	for _, seg := range kcp.snd_buf {
		seg.Release()
	}
	kcp.snd_buf = nil
}