kcp.go
// Package kcp - A Fast and Reliable ARQ Protocol
//
// Acknowledgement:
// skywind3000@github for inventing the KCP protocol
// xtaci@github for translating to Golang
package kcp

import (
	"encoding/binary"

	"github.com/v2ray/v2ray-core/common/alloc"
)

const (
	IKCP_RTO_NDL     = 30  // no delay min rto
	IKCP_RTO_MIN     = 100 // normal min rto
	IKCP_RTO_DEF     = 200
	IKCP_RTO_MAX     = 60000
	IKCP_CMD_PUSH    = 81 // cmd: push data
	IKCP_CMD_ACK     = 82 // cmd: ack
	IKCP_CMD_WASK    = 83 // cmd: window probe (ask)
	IKCP_CMD_WINS    = 84 // cmd: window size (tell)
	IKCP_ASK_SEND    = 1  // need to send IKCP_CMD_WASK
	IKCP_ASK_TELL    = 2  // need to send IKCP_CMD_WINS
	IKCP_WND_SND     = 32
	IKCP_WND_RCV     = 32
	IKCP_MTU_DEF     = 1350
	IKCP_ACK_FAST    = 3
	IKCP_INTERVAL    = 100
	IKCP_OVERHEAD    = 24
	IKCP_DEADLINK    = 20
	IKCP_THRESH_INIT = 2
	IKCP_THRESH_MIN  = 2
	IKCP_PROBE_INIT  = 7000   // 7 secs to probe window size
	IKCP_PROBE_LIMIT = 120000 // up to 120 secs to probe window
)

// Output is a closure which captures conn and calls conn.Write
type Output func(buf []byte)
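
// A minimal sketch of wiring Output to a UDP socket (illustrative only; conn,
// remoteAddr and the ignored errors are assumptions, not part of this file):
//
//	conn, _ := net.Dial("udp", remoteAddr)
//	output := Output(func(buf []byte) {
//		conn.Write(buf) // KCP hands fully assembled datagrams to this callback
//	})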

/* encode 8 bits unsigned int */
func ikcp_encode8u(p []byte, c byte) []byte {
	p[0] = c
	return p[1:]
}

/* decode 8 bits unsigned int */
func ikcp_decode8u(p []byte, c *byte) []byte {
	*c = p[0]
	return p[1:]
}

/* encode 16 bits unsigned int (lsb) */
func ikcp_encode16u(p []byte, w uint16) []byte {
	binary.LittleEndian.PutUint16(p, w)
	return p[2:]
}

/* decode 16 bits unsigned int (lsb) */
func ikcp_decode16u(p []byte, w *uint16) []byte {
	*w = binary.LittleEndian.Uint16(p)
	return p[2:]
}

/* encode 32 bits unsigned int (lsb) */
func ikcp_encode32u(p []byte, l uint32) []byte {
	binary.LittleEndian.PutUint32(p, l)
	return p[4:]
}

/* decode 32 bits unsigned int (lsb) */
func ikcp_decode32u(p []byte, l *uint32) []byte {
	*l = binary.LittleEndian.Uint32(p)
	return p[4:]
}

func _imin_(a, b uint32) uint32 {
	if a <= b {
		return a
	}
	return b
}

func _imax_(a, b uint32) uint32 {
	if a >= b {
		return a
	}
	return b
}

func _itimediff(later, earlier uint32) int32 {
	return (int32)(later - earlier)
}

// Segment defines a KCP segment
type Segment struct {
	conv     uint32
	cmd      uint32
	frg      uint32
	wnd      uint32
	ts       uint32
	sn       uint32
	una      uint32
	resendts uint32
	rto      uint32
	fastack  uint32
	xmit     uint32
	data     *alloc.Buffer
}

// encode a segment into buffer
func (seg *Segment) encode(ptr []byte) []byte {
	ptr = ikcp_encode32u(ptr, seg.conv)
	ptr = ikcp_encode8u(ptr, uint8(seg.cmd))
	ptr = ikcp_encode8u(ptr, uint8(seg.frg))
	ptr = ikcp_encode16u(ptr, uint16(seg.wnd))
	ptr = ikcp_encode32u(ptr, seg.ts)
	ptr = ikcp_encode32u(ptr, seg.sn)
	ptr = ikcp_encode32u(ptr, seg.una)
	ptr = ikcp_encode32u(ptr, uint32(seg.data.Len()))
	return ptr
}
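
// For reference, the header laid out by Segment.encode is 24 bytes
// (IKCP_OVERHEAD), multi-byte fields little-endian:
//
//	conv(4) cmd(1) frg(1) wnd(2) ts(4) sn(4) una(4) len(4)
//
// followed by len bytes of payload.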

func (seg *Segment) Release() {
	seg.data.Release()
	seg.data = nil
}

// NewSegment creates a KCP segment
func NewSegment() *Segment {
	return &Segment{
		data: alloc.NewSmallBuffer().Clear(),
	}
}

// KCP defines a single KCP connection
type KCP struct {
	conv, mtu, mss, state                  uint32
	snd_una, snd_nxt, rcv_nxt              uint32
	ts_recent, ts_lastack, ssthresh        uint32
	rx_rttval, rx_srtt, rx_rto             uint32
	snd_wnd, rcv_wnd, rmt_wnd, cwnd, probe uint32
	current, interval, ts_flush, xmit      uint32
	updated                                uint32
	ts_probe, probe_wait                   uint32
	dead_link, incr                        uint32
	snd_queue                              []*Segment
	rcv_queue                              []*Segment
	snd_buf                                []*Segment
	rcv_buf                                []*Segment
	acklist                                []uint32
	buffer                                 []byte
	fastresend                             int32
	congestionControl                      bool
	output                                 Output
}

// NewKCP creates a new KCP control object. 'conv' must be identical on both
// endpoints of the same connection.
func NewKCP(conv uint32, mtu uint32, output Output) *KCP {
	kcp := new(KCP)
	kcp.conv = conv
	kcp.snd_wnd = IKCP_WND_SND
	kcp.rcv_wnd = IKCP_WND_RCV
	kcp.rmt_wnd = IKCP_WND_RCV
	kcp.mtu = mtu
	kcp.mss = kcp.mtu - IKCP_OVERHEAD
	kcp.buffer = make([]byte, (kcp.mtu+IKCP_OVERHEAD)*3)
	kcp.rx_rto = IKCP_RTO_DEF
	kcp.interval = IKCP_INTERVAL
	kcp.ts_flush = IKCP_INTERVAL
	kcp.ssthresh = IKCP_THRESH_INIT
	kcp.dead_link = IKCP_DEADLINK
	kcp.output = output
	return kcp
}
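
// A minimal usage sketch (assumptions: sendToPeer is a caller-supplied function
// that writes a datagram to the remote endpoint; 0x11223344 is an arbitrary
// example conversation id shared by both sides):
//
//	kcp := NewKCP(0x11223344, IKCP_MTU_DEF, func(buf []byte) {
//		sendToPeer(buf)
//	})
//	kcp.WndSize(128, 128)
//	kcp.NoDelay(20, 2, false)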

// Recv is the user/upper level receive function: it returns the number of
// bytes read, or a value below zero for EAGAIN.
func (kcp *KCP) Recv(buffer []byte) (n int) {
	if len(kcp.rcv_queue) == 0 {
		return -1
	}

	var fast_recover bool
	if len(kcp.rcv_queue) >= int(kcp.rcv_wnd) {
		fast_recover = true
	}

	// merge fragments
	count := 0
	for _, seg := range kcp.rcv_queue {
		dataLen := seg.data.Len()
		if dataLen > len(buffer) {
			break
		}
		copy(buffer, seg.data.Value)
		seg.Release()
		buffer = buffer[dataLen:]
		n += dataLen
		count++
	}
	kcp.rcv_queue = kcp.rcv_queue[count:]

	// move available data from rcv_buf -> rcv_queue
	count = 0
	for _, seg := range kcp.rcv_buf {
		if seg.sn == kcp.rcv_nxt && len(kcp.rcv_queue) < int(kcp.rcv_wnd) {
			kcp.rcv_queue = append(kcp.rcv_queue, seg)
			kcp.rcv_nxt++
			count++
		} else {
			break
		}
	}
	kcp.rcv_buf = kcp.rcv_buf[count:]

	// fast recover
	if len(kcp.rcv_queue) < int(kcp.rcv_wnd) && fast_recover {
		// ready to send back IKCP_CMD_WINS in ikcp_flush
		// tell remote my window size
		kcp.probe |= IKCP_ASK_TELL
	}
	return
}
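
// A sketch of draining received data (assumption: buf is sized to hold at
// least one reassembled message, since Recv only copies whole segments;
// handlePayload is a hypothetical consumer):
//
//	buf := make([]byte, 64*1024)
//	for {
//		n := kcp.Recv(buf)
//		if n < 0 {
//			break // EAGAIN: nothing more queued for the application
//		}
//		handlePayload(buf[:n])
//	}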

// Send is the user/upper level send function; it returns a value below zero on
// error.
func (kcp *KCP) Send(buffer []byte) int {
	var count int
	if len(buffer) == 0 {
		return -1
	}

	if len(buffer) < int(kcp.mss) {
		count = 1
	} else {
		count = (len(buffer) + int(kcp.mss) - 1) / int(kcp.mss)
	}

	if count > 255 {
		return -2
	}

	if count == 0 {
		count = 1
	}

	for i := 0; i < count; i++ {
		var size int
		if len(buffer) > int(kcp.mss) {
			size = int(kcp.mss)
		} else {
			size = len(buffer)
		}
		seg := NewSegment()
		seg.data.Append(buffer[:size])
		seg.frg = uint32(count - i - 1)
		kcp.snd_queue = append(kcp.snd_queue, seg)
		buffer = buffer[size:]
	}
	return 0
}
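
// Worked example of the fragmentation above (numbers follow from the defaults
// in this file): with mtu = IKCP_MTU_DEF = 1350 the mss is 1350-24 = 1326, so a
// 4000-byte Send produces ceil(4000/1326) = 4 segments of 1326, 1326, 1326 and
// 22 bytes, tagged frg = 3, 2, 1, 0 respectively. Payloads larger than 255*mss
// are rejected with -2.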

// https://tools.ietf.org/html/rfc6298
func (kcp *KCP) update_ack(rtt int32) {
	var rto uint32 = 0
	if kcp.rx_srtt == 0 {
		kcp.rx_srtt = uint32(rtt)
		kcp.rx_rttval = uint32(rtt) / 2
	} else {
		delta := rtt - int32(kcp.rx_srtt)
		if delta < 0 {
			delta = -delta
		}
		kcp.rx_rttval = (3*kcp.rx_rttval + uint32(delta)) / 4
		kcp.rx_srtt = (7*kcp.rx_srtt + uint32(rtt)) / 8
		if kcp.rx_srtt < 1 {
			kcp.rx_srtt = 1
		}
	}
	rto = kcp.rx_srtt + _imax_(kcp.interval, 4*kcp.rx_rttval)
	if rto > IKCP_RTO_MAX {
		rto = IKCP_RTO_MAX
	}
	kcp.rx_rto = rto
}
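
// Worked example of the RFC 6298 style smoothing above (sample values are made
// up for illustration): with rx_srtt = 100 and rx_rttval = 20, a new rtt sample
// of 140 gives delta = 40, rx_rttval = (3*20+40)/4 = 25, rx_srtt = (7*100+140)/8
// = 105, and with the default interval of 100ms, rx_rto = 105 + max(100, 4*25)
// = 205.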

func (kcp *KCP) shrink_buf() {
	if len(kcp.snd_buf) > 0 {
		seg := kcp.snd_buf[0]
		kcp.snd_una = seg.sn
	} else {
		kcp.snd_una = kcp.snd_nxt
	}
}

func (kcp *KCP) parse_ack(sn uint32) {
	if _itimediff(sn, kcp.snd_una) < 0 || _itimediff(sn, kcp.snd_nxt) >= 0 {
		return
	}

	for k, seg := range kcp.snd_buf {
		if sn == seg.sn {
			kcp.snd_buf = append(kcp.snd_buf[:k], kcp.snd_buf[k+1:]...)
			seg.Release()
			break
		}
		if _itimediff(sn, seg.sn) < 0 {
			break
		}
	}
}

func (kcp *KCP) parse_fastack(sn uint32) {
	if _itimediff(sn, kcp.snd_una) < 0 || _itimediff(sn, kcp.snd_nxt) >= 0 {
		return
	}

	for _, seg := range kcp.snd_buf {
		if _itimediff(sn, seg.sn) < 0 {
			break
		} else if sn != seg.sn {
			seg.fastack++
		}
	}
}

func (kcp *KCP) parse_una(una uint32) {
	count := 0
	for _, seg := range kcp.snd_buf {
		if _itimediff(una, seg.sn) > 0 {
			seg.Release()
			count++
		} else {
			break
		}
	}
	kcp.snd_buf = kcp.snd_buf[count:]
}

// ack append
func (kcp *KCP) ack_push(sn, ts uint32) {
	kcp.acklist = append(kcp.acklist, sn, ts)
}

func (kcp *KCP) ack_get(p int) (sn, ts uint32) {
	return kcp.acklist[p*2+0], kcp.acklist[p*2+1]
}

func (kcp *KCP) parse_data(newseg *Segment) {
	sn := newseg.sn
	if _itimediff(sn, kcp.rcv_nxt+kcp.rcv_wnd) >= 0 ||
		_itimediff(sn, kcp.rcv_nxt) < 0 {
		return
	}

	n := len(kcp.rcv_buf) - 1
	insert_idx := 0
	repeat := false
	for i := n; i >= 0; i-- {
		seg := kcp.rcv_buf[i]
		if seg.sn == sn {
			repeat = true
			break
		}
		if _itimediff(sn, seg.sn) > 0 {
			insert_idx = i + 1
			break
		}
	}

	if !repeat {
		if insert_idx == n+1 {
			kcp.rcv_buf = append(kcp.rcv_buf, newseg)
		} else {
			kcp.rcv_buf = append(kcp.rcv_buf, &Segment{})
			copy(kcp.rcv_buf[insert_idx+1:], kcp.rcv_buf[insert_idx:])
			kcp.rcv_buf[insert_idx] = newseg
		}
	}

	// move available data from rcv_buf -> rcv_queue
	count := 0
	for k, seg := range kcp.rcv_buf {
		if seg.sn == kcp.rcv_nxt && len(kcp.rcv_queue) < int(kcp.rcv_wnd) {
			kcp.rcv_queue = append(kcp.rcv_queue, kcp.rcv_buf[k])
			kcp.rcv_nxt++
			count++
		} else {
			break
		}
	}
	kcp.rcv_buf = kcp.rcv_buf[count:]
}

// Input feeds a low-level packet (e.g. a UDP payload) into the state machine;
// it returns a value below zero on error.
func (kcp *KCP) Input(data []byte) int {
	//una := kcp.snd_una
	if len(data) < IKCP_OVERHEAD {
		return -1
	}

	var maxack uint32
	var flag int

	for {
		var ts, sn, length, una, conv uint32
		var wnd uint16
		var cmd, frg uint8

		if len(data) < int(IKCP_OVERHEAD) {
			break
		}

		data = ikcp_decode32u(data, &conv)
		if conv != kcp.conv {
			return -1
		}

		data = ikcp_decode8u(data, &cmd)
		data = ikcp_decode8u(data, &frg)
		data = ikcp_decode16u(data, &wnd)
		data = ikcp_decode32u(data, &ts)
		data = ikcp_decode32u(data, &sn)
		data = ikcp_decode32u(data, &una)
		data = ikcp_decode32u(data, &length)
		if len(data) < int(length) {
			return -2
		}

		if cmd != IKCP_CMD_PUSH && cmd != IKCP_CMD_ACK &&
			cmd != IKCP_CMD_WASK && cmd != IKCP_CMD_WINS {
			return -3
		}

		if kcp.rmt_wnd < uint32(wnd) {
			kcp.rmt_wnd = uint32(wnd)
		}
		//kcp.rmt_wnd = uint32(wnd)
		kcp.parse_una(una)
		kcp.shrink_buf()

		if cmd == IKCP_CMD_ACK {
			if _itimediff(kcp.current, ts) >= 0 {
				kcp.update_ack(_itimediff(kcp.current, ts))
			}
			kcp.parse_ack(sn)
			kcp.shrink_buf()
			if flag == 0 {
				flag = 1
				maxack = sn
			} else if _itimediff(sn, maxack) > 0 {
				maxack = sn
			}
		} else if cmd == IKCP_CMD_PUSH {
			if _itimediff(sn, kcp.rcv_nxt+kcp.rcv_wnd) < 0 {
				kcp.ack_push(sn, ts)
				if _itimediff(sn, kcp.rcv_nxt) >= 0 {
					seg := NewSegment()
					seg.conv = conv
					seg.cmd = uint32(cmd)
					seg.frg = uint32(frg)
					seg.wnd = uint32(wnd)
					seg.ts = ts
					seg.sn = sn
					seg.una = una
					seg.data.Append(data[:length])
					kcp.parse_data(seg)
				}
			}
		} else if cmd == IKCP_CMD_WASK {
			// ready to send back IKCP_CMD_WINS in Ikcp_flush
			// tell remote my window size
			kcp.probe |= IKCP_ASK_TELL
		} else if cmd == IKCP_CMD_WINS {
			// do nothing
		} else {
			return -3
		}

		data = data[length:]
	}

	if flag != 0 {
		kcp.parse_fastack(maxack)
	}

	/*
		if _itimediff(kcp.snd_una, una) > 0 {
			if kcp.cwnd < kcp.rmt_wnd {
				mss := kcp.mss
				if kcp.cwnd < kcp.ssthresh {
					kcp.cwnd++
					kcp.incr += mss
				} else {
					if kcp.incr < mss {
						kcp.incr = mss
					}
					kcp.incr += (mss*mss)/kcp.incr + (mss / 16)
					if (kcp.cwnd+1)*mss <= kcp.incr {
						kcp.cwnd++
					}
				}
				if kcp.cwnd > kcp.rmt_wnd {
					kcp.cwnd = kcp.rmt_wnd
					kcp.incr = kcp.rmt_wnd * mss
				}
			}
		}*/
	return 0
}
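
// A sketch of feeding received datagrams into the state machine (assumptions:
// conn is a UDP connection created by the caller, and the read loop runs in
// the same goroutine that later calls Update/Recv, since KCP itself does no
// locking):
//
//	pkt := make([]byte, 2048)
//	n, _ := conn.Read(pkt)
//	if kcp.Input(pkt[:n]) < 0 {
//		// malformed packet or conversation id mismatch; drop it
//	}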

// flush pending data
func (kcp *KCP) flush() {
	current := kcp.current
	buffer := kcp.buffer
	change := 0
	//lost := false

	if kcp.updated == 0 {
		return
	}
	var seg Segment
	seg.conv = kcp.conv
	seg.cmd = IKCP_CMD_ACK
	seg.wnd = uint32(kcp.rcv_nxt + kcp.rcv_wnd)
	seg.una = kcp.rcv_nxt

	// flush acknowledges
	count := len(kcp.acklist) / 2
	ptr := buffer
	for i := 0; i < count; i++ {
		size := len(buffer) - len(ptr)
		if size+IKCP_OVERHEAD > int(kcp.mtu) {
			kcp.output(buffer[:size])
			ptr = buffer
		}
		seg.sn, seg.ts = kcp.ack_get(i)
		ptr = seg.encode(ptr)
	}
	kcp.acklist = nil

	// probe window size (if remote window size equals zero)
	/*
		if kcp.rmt_wnd == 0 {
			if kcp.probe_wait == 0 {
				kcp.probe_wait = IKCP_PROBE_INIT
				kcp.ts_probe = kcp.current + kcp.probe_wait
			} else {
				if _itimediff(kcp.current, kcp.ts_probe) >= 0 {
					if kcp.probe_wait < IKCP_PROBE_INIT {
						kcp.probe_wait = IKCP_PROBE_INIT
					}
					kcp.probe_wait += kcp.probe_wait / 2
					if kcp.probe_wait > IKCP_PROBE_LIMIT {
						kcp.probe_wait = IKCP_PROBE_LIMIT
					}
					kcp.ts_probe = kcp.current + kcp.probe_wait
					kcp.probe |= IKCP_ASK_SEND
				}
			}
		} else {
			kcp.ts_probe = 0
			kcp.probe_wait = 0
		}*/

	// flush window probing commands
	/*
		if (kcp.probe & IKCP_ASK_SEND) != 0 {
			seg.cmd = IKCP_CMD_WASK
			size := len(buffer) - len(ptr)
			if size+IKCP_OVERHEAD > int(kcp.mtu) {
				kcp.output(buffer[:size])
				ptr = buffer
			}
			ptr = seg.encode(ptr)
		}*/

	// flush window probing commands
	/*
		if (kcp.probe & IKCP_ASK_TELL) != 0 {
			seg.cmd = IKCP_CMD_WINS
			size := len(buffer) - len(ptr)
			if size+IKCP_OVERHEAD > int(kcp.mtu) {
				kcp.output(buffer[:size])
				ptr = buffer
			}
			ptr = seg.encode(ptr)
		}
		kcp.probe = 0*/

	// calculate window size
	cwnd := _imin_(kcp.snd_nxt+kcp.snd_wnd, kcp.rmt_wnd)
	if kcp.congestionControl {
		cwnd = _imin_(kcp.cwnd, cwnd)
	}

	count = 0
	for k := range kcp.snd_queue {
		if _itimediff(kcp.snd_nxt, cwnd) >= 0 {
			break
		}
		newseg := kcp.snd_queue[k]
		newseg.conv = kcp.conv
		newseg.cmd = IKCP_CMD_PUSH
		newseg.wnd = seg.wnd
		newseg.ts = current
		newseg.sn = kcp.snd_nxt
		newseg.una = kcp.rcv_nxt
		newseg.resendts = current
		newseg.rto = kcp.rx_rto
		newseg.fastack = 0
		newseg.xmit = 0
		kcp.snd_buf = append(kcp.snd_buf, newseg)
		kcp.snd_nxt++
		count++
	}
	kcp.snd_queue = kcp.snd_queue[count:]

	// calculate resent
	resent := uint32(kcp.fastresend)
	if kcp.fastresend <= 0 {
		resent = 0xffffffff
	}

	// flush data segments
	for _, segment := range kcp.snd_buf {
		needsend := false
		if segment.xmit == 0 {
			needsend = true
			segment.xmit++
			segment.rto = kcp.rx_rto
			segment.resendts = current + segment.rto + kcp.interval
		} else if _itimediff(current, segment.resendts) >= 0 {
			needsend = true
			segment.xmit++
			kcp.xmit++
			segment.rto += kcp.rx_rto
			segment.resendts = current + segment.rto + kcp.interval
			//lost = true
		} else if segment.fastack >= resent {
			needsend = true
			segment.xmit++
			segment.fastack = 0
			segment.resendts = current + segment.rto + kcp.interval
			change++
		}

		if needsend {
			segment.ts = current
			segment.wnd = seg.wnd
			segment.una = kcp.rcv_nxt

			size := len(buffer) - len(ptr)
			need := IKCP_OVERHEAD + segment.data.Len()
			if size+need >= int(kcp.mtu) {
				kcp.output(buffer[:size])
				ptr = buffer
			}

			ptr = segment.encode(ptr)
			copy(ptr, segment.data.Value)
			ptr = ptr[segment.data.Len():]

			if segment.xmit >= kcp.dead_link {
				kcp.state = 0xFFFFFFFF
			}
		}
	}

	// flush remaining segments
	size := len(buffer) - len(ptr)
	if size > 0 {
		kcp.output(buffer[:size])
	}

	// update ssthresh
	// rate halving, https://tools.ietf.org/html/rfc6937
	/*
		if change != 0 {
			inflight := kcp.snd_nxt - kcp.snd_una
			kcp.ssthresh = inflight / 2
			if kcp.ssthresh < IKCP_THRESH_MIN {
				kcp.ssthresh = IKCP_THRESH_MIN
			}
			kcp.cwnd = kcp.ssthresh + resent
			kcp.incr = kcp.cwnd * kcp.mss
		}*/

	// congestion control, https://tools.ietf.org/html/rfc5681
	/*
		if lost {
			kcp.ssthresh = cwnd / 2
			if kcp.ssthresh < IKCP_THRESH_MIN {
				kcp.ssthresh = IKCP_THRESH_MIN
			}
			kcp.cwnd = 1
			kcp.incr = kcp.mss
		}

		if kcp.cwnd < 1 {
			kcp.cwnd = 1
			kcp.incr = kcp.mss
		}*/
}

// Update updates state (call it repeatedly, every 10ms-100ms), or you can ask
// Check when to call it again (when there is no Input/Send in between).
// 'current' is the current timestamp in milliseconds.
func (kcp *KCP) Update(current uint32) {
	var slap int32

	kcp.current = current

	if kcp.updated == 0 {
		kcp.updated = 1
		kcp.ts_flush = kcp.current
	}

	slap = _itimediff(kcp.current, kcp.ts_flush)

	if slap >= 10000 || slap < -10000 {
		kcp.ts_flush = kcp.current
		slap = 0
	}

	if slap >= 0 {
		kcp.ts_flush += kcp.interval
		if _itimediff(kcp.current, kcp.ts_flush) >= 0 {
			kcp.ts_flush = kcp.current + kcp.interval
		}
		kcp.flush()
	}
}
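
// A minimal driving loop sketch (assumptions: nowMillis is a caller-supplied
// millisecond clock; the 10ms tick is just an example value):
//
//	ticker := time.NewTicker(10 * time.Millisecond)
//	for range ticker.C {
//		kcp.Update(nowMillis())
//	}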

// Check determines when you should invoke Update: it returns the time, in
// milliseconds, at which Update should next be called, assuming no Input/Send
// happens in the meantime. You can call Update at that time instead of calling
// it repeatedly. This is important to reduce unnecessary Update invocations;
// use it to schedule Update (e.g. implementing an epoll-like mechanism, or to
// optimize Update when handling massive numbers of kcp connections).
func (kcp *KCP) Check(current uint32) uint32 {
	ts_flush := kcp.ts_flush
	tm_flush := int32(0x7fffffff)
	tm_packet := int32(0x7fffffff)
	minimal := uint32(0)

	if kcp.updated == 0 {
		return current
	}

	if _itimediff(current, ts_flush) >= 10000 ||
		_itimediff(current, ts_flush) < -10000 {
		ts_flush = current
	}

	if _itimediff(current, ts_flush) >= 0 {
		return current
	}

	tm_flush = _itimediff(ts_flush, current)

	for _, seg := range kcp.snd_buf {
		diff := _itimediff(seg.resendts, current)
		if diff <= 0 {
			return current
		}
		if diff < tm_packet {
			tm_packet = diff
		}
	}

	minimal = uint32(tm_packet)
	if tm_packet >= tm_flush {
		minimal = uint32(tm_flush)
	}
	if minimal >= kcp.interval {
		minimal = kcp.interval
	}

	return current + minimal
}
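
// Check can replace the fixed tick shown after Update: sleep until the
// returned timestamp instead of updating blindly (sketch; nowMillis is the
// same assumed clock as above):
//
//	now := nowMillis()
//	next := kcp.Check(now)
//	time.Sleep(time.Duration(next-now) * time.Millisecond)
//	kcp.Update(nowMillis())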

// NoDelay configures protocol behaviour. It mirrors ikcp_nodelay from the
// original C implementation (fastest there: ikcp_nodelay(kcp, 1, 20, 2, 1)).
// interval: internal update timer interval in milliseconds, default 100ms,
// clamped to the range [10, 5000].
// resend: 0 disables fast resend (default); N > 0 resends a segment after it
// has been skipped by N later ACKs.
// congestionControl: when true, the congestion window also limits the amount
// of data in flight.
func (kcp *KCP) NoDelay(interval, resend int, congestionControl bool) int {
	if interval >= 0 {
		if interval > 5000 {
			interval = 5000
		} else if interval < 10 {
			interval = 10
		}
		kcp.interval = uint32(interval)
	}

	if resend >= 0 {
		kcp.fastresend = int32(resend)
	}

	kcp.congestionControl = congestionControl
	return 0
}
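
// An illustrative low-latency tuning (the specific numbers are examples, not a
// recommendation from this file): 20ms update interval, fast resend after 2
// skips, congestion window disabled, larger windows.
//
//	kcp.NoDelay(20, 2, false)
//	kcp.WndSize(256, 256)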

// WndSize sets the maximum window size: sndwnd=32, rcvwnd=32 by default
func (kcp *KCP) WndSize(sndwnd, rcvwnd int) int {
	if sndwnd > 0 {
		kcp.snd_wnd = uint32(sndwnd)
	}

	if rcvwnd > 0 {
		kcp.rcv_wnd = uint32(rcvwnd)
	}
	return 0
}

// WaitSnd reports how many packets are waiting to be sent
func (kcp *KCP) WaitSnd() int {
	return len(kcp.snd_buf) + len(kcp.snd_queue)
}

// ClearSendQueue releases all pending outbound segments.
func (kcp *KCP) ClearSendQueue() {
	for _, seg := range kcp.snd_queue {
		seg.Release()
	}
	kcp.snd_queue = nil

	for _, seg := range kcp.snd_buf {
		seg.Release()
	}
	kcp.snd_buf = nil
}