@@ -6,9 +6,9 @@
 package kcp
 
 import (
-    "encoding/binary"
-
     "github.com/v2ray/v2ray-core/common/alloc"
+    v2io "github.com/v2ray/v2ray-core/common/io"
+    "github.com/v2ray/v2ray-core/common/log"
 )
 
 const (
@@ -31,45 +31,6 @@ const (
     IKCP_PROBE_LIMIT = 120000 // up to 120 secs to probe window
 )
 
-// Output is a closure which captures conn and calls conn.Write
-type Output func(buf []byte)
-
-/* encode 8 bits unsigned int */
-func ikcp_encode8u(p []byte, c byte) []byte {
-    p[0] = c
-    return p[1:]
-}
-
-/* decode 8 bits unsigned int */
-func ikcp_decode8u(p []byte, c *byte) []byte {
-    *c = p[0]
-    return p[1:]
-}
-
-/* encode 16 bits unsigned int (lsb) */
-func ikcp_encode16u(p []byte, w uint16) []byte {
-    binary.LittleEndian.PutUint16(p, w)
-    return p[2:]
-}
-
-/* decode 16 bits unsigned int (lsb) */
-func ikcp_decode16u(p []byte, w *uint16) []byte {
-    *w = binary.LittleEndian.Uint16(p)
-    return p[2:]
-}
-
-/* encode 32 bits unsigned int (lsb) */
-func ikcp_encode32u(p []byte, l uint32) []byte {
-    binary.LittleEndian.PutUint32(p, l)
-    return p[4:]
-}
-
-/* decode 32 bits unsigned int (lsb) */
-func ikcp_decode32u(p []byte, l *uint32) []byte {
-    *l = binary.LittleEndian.Uint32(p)
-    return p[4:]
-}
-
 func _imin_(a, b uint32) uint32 {
     if a <= b {
         return a
@@ -90,49 +51,22 @@ func _itimediff(later, earlier uint32) int32 {
     return (int32)(later - earlier)
 }
 
-// Segment defines a KCP segment
-type Segment struct {
-    conv     uint32
-    cmd      uint32
-    frg      uint32
-    wnd      uint32
-    ts       uint32
-    sn       uint32
-    una      uint32
-    resendts uint32
-    fastack  uint32
-    xmit     uint32
-    data     *alloc.Buffer
-}
-
-// encode a segment into buffer
-func (seg *Segment) encode(ptr []byte) []byte {
-    ptr = ikcp_encode32u(ptr, seg.conv)
-    ptr = ikcp_encode8u(ptr, uint8(seg.cmd))
-    ptr = ikcp_encode8u(ptr, uint8(seg.frg))
-    ptr = ikcp_encode16u(ptr, uint16(seg.wnd))
-    ptr = ikcp_encode32u(ptr, seg.ts)
-    ptr = ikcp_encode32u(ptr, seg.sn)
-    ptr = ikcp_encode32u(ptr, seg.una)
-    ptr = ikcp_encode16u(ptr, uint16(seg.data.Len()))
-    return ptr
-}
-
-func (this *Segment) Release() {
-    this.data.Release()
-    this.data = nil
-}
+type State int
 
-// NewSegment creates a KCP segment
-func NewSegment() *Segment {
-    return &Segment{
-        data: alloc.NewSmallBuffer().Clear(),
-    }
-}
+const (
+    StateActive       State = 0
+    StateReadyToClose State = 1
+    StatePeerClosed   State = 2
+    StateTerminating  State = 3
+    StateTerminated   State = 4
+)
 
 // KCP defines a single KCP connection
 type KCP struct {
-    conv, mtu, mss, state           uint32
+    conv                            uint16
+    state                           State
+    stateBeginTime                  uint32
+    mtu, mss                        uint32
     snd_una, snd_nxt, rcv_nxt       uint32
     ts_recent, ts_lastack, ssthresh uint32
     rx_rttvar, rx_srtt, rx_rto      uint32
@@ -143,21 +77,21 @@ type KCP struct {
     dead_link, incr                 uint32
 
     snd_queue *SendingQueue
-    rcv_queue []*Segment
-    snd_buf   []*Segment
+    rcv_queue []*DataSegment
+    snd_buf   []*DataSegment
     rcv_buf   *ReceivingWindow
 
-    acklist []uint32
+    acklist *ACKList
 
     buffer            []byte
    fastresend        int32
     congestionControl bool
-    output            Output
+    output            *SegmentWriter
 }
 
 // NewKCP create a new kcp control object, 'conv' must equal in two endpoint
 // from the same connection.
-func NewKCP(conv uint32, mtu uint32, sendingWindowSize uint32, receivingWindowSize uint32, sendingQueueSize uint32, output Output) *KCP {
+func NewKCP(conv uint16, mtu uint32, sendingWindowSize uint32, receivingWindowSize uint32, sendingQueueSize uint32, output v2io.Writer) *KCP {
     kcp := new(KCP)
     kcp.conv = conv
     kcp.snd_wnd = sendingWindowSize
@@ -165,18 +99,51 @@ func NewKCP(conv uint32, mtu uint32, sendingWindowSize uint32, receivingWindowSi
     kcp.rmt_wnd = IKCP_WND_RCV
     kcp.mtu = mtu
     kcp.mss = kcp.mtu - IKCP_OVERHEAD
-    kcp.buffer = make([]byte, (kcp.mtu+IKCP_OVERHEAD)*3)
     kcp.rx_rto = IKCP_RTO_DEF
     kcp.interval = IKCP_INTERVAL
     kcp.ts_flush = IKCP_INTERVAL
     kcp.ssthresh = IKCP_THRESH_INIT
     kcp.dead_link = IKCP_DEADLINK
-    kcp.output = output
+    kcp.output = NewSegmentWriter(mtu, output)
     kcp.rcv_buf = NewReceivingWindow(receivingWindowSize)
     kcp.snd_queue = NewSendingQueue(sendingQueueSize)
+    kcp.acklist = new(ACKList)
     return kcp
 }
 
+func (kcp *KCP) HandleOption(opt SegmentOption) {
+    if (opt & SegmentOptionClose) == SegmentOptionClose {
+        kcp.OnPeerClosed()
+    }
+}
+
+func (kcp *KCP) OnPeerClosed() {
+    if kcp.state == StateReadyToClose {
+        kcp.state = StateTerminating
+        kcp.stateBeginTime = kcp.current
+        log.Info("KCP terminating at ", kcp.current)
+    }
+    if kcp.state == StateActive {
+        kcp.ClearSendQueue()
+        kcp.state = StatePeerClosed
+        kcp.stateBeginTime = kcp.current
+        log.Info("KCP peer close at ", kcp.current)
+    }
+}
+
+func (kcp *KCP) OnClose() {
+    if kcp.state == StateActive {
+        kcp.state = StateReadyToClose
+        kcp.stateBeginTime = kcp.current
+        log.Info("KCP ready close at ", kcp.current)
+    }
+    if kcp.state == StatePeerClosed {
+        kcp.state = StateTerminating
+        kcp.stateBeginTime = kcp.current
+        log.Info("KCP terminating at ", kcp.current)
+    }
+}
+
 // Recv is user/upper level recv: returns size, returns below zero for EAGAIN
 func (kcp *KCP) Recv(buffer []byte) (n int) {
     if len(kcp.rcv_queue) == 0 {
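
For readers tracing the new close handshake, the transitions driven by OnClose, OnPeerClosed and the SegmentCommandTerminated handling in Input can be read as a small state machine (flush additionally forces StateReadyToClose into StateTerminating after roughly 15 seconds and StateTerminating into StateTerminated after roughly 8 seconds). The standalone sketch below is editorial, not part of the patch; the `event` and `next` names are invented purely for illustration.

```go
package main

import "fmt"

// State mirrors the constants introduced by this change.
type State int

const (
	StateActive State = iota
	StateReadyToClose
	StatePeerClosed
	StateTerminating
	StateTerminated
)

// event is a name invented for this sketch only.
type event int

const (
	localClose event = iota // OnClose: the local side wants to close
	peerClose               // OnPeerClosed: SegmentOptionClose seen from the peer
	terminated              // a SegmentCommandTerminated segment arrived
)

// next reproduces the transitions visible in the diff; the timer-driven
// transitions in flush() are omitted here.
func next(s State, e event) State {
	switch e {
	case localClose:
		if s == StateActive {
			return StateReadyToClose
		}
		if s == StatePeerClosed {
			return StateTerminating
		}
	case peerClose:
		if s == StateActive {
			return StatePeerClosed
		}
		if s == StateReadyToClose {
			return StateTerminating
		}
	case terminated:
		if s == StateActive || s == StateReadyToClose || s == StatePeerClosed {
			return StateTerminating
		}
		if s == StateTerminating {
			return StateTerminated
		}
	}
	return s
}

func main() {
	s := StateActive
	for _, e := range []event{localClose, peerClose, terminated} {
		s = next(s, e)
		fmt.Println(s) // prints 1, 3 and 4: ReadyToClose, Terminating, Terminated
	}
}
```
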
@@ -186,11 +153,11 @@ func (kcp *KCP) Recv(buffer []byte) (n int) {
     // merge fragment
     count := 0
     for _, seg := range kcp.rcv_queue {
-        dataLen := seg.data.Len()
+        dataLen := seg.Data.Len()
         if dataLen > len(buffer) {
             break
         }
-        copy(buffer, seg.data.Value)
+        copy(buffer, seg.Data.Value)
         seg.Release()
         buffer = buffer[dataLen:]
         n += dataLen
@@ -226,8 +193,9 @@ func (kcp *KCP) Send(buffer []byte) int {
         } else {
             size = len(buffer)
         }
-        seg := NewSegment()
-        seg.data.Append(buffer[:size])
+        seg := &DataSegment{
+            Data: alloc.NewSmallBuffer().Clear().Append(buffer[:size]),
+        }
         kcp.snd_queue.Push(seg)
         buffer = buffer[size:]
         nBytes += size
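
As a usage note, Send only queues data: it copies the caller's bytes into one or more DataSegments on snd_queue, and flush later hands them to the SegmentWriter, while Recv copies whole reassembled segments into the caller's buffer and reports a value below zero (EAGAIN) when nothing is ready, per the doc comments above. A minimal caller-side sketch, written as if it lived in this package; the helper names are invented and error handling is omitted:

```go
// writeToPeer is illustrative only; it shows the calling convention of Send.
func writeToPeer(conn *KCP, payload []byte) {
	// Send queues payload as one or more DataSegments; the actual transmission
	// happens later when flush() feeds them to the SegmentWriter.
	conn.Send(payload)
}

// readFromPeer is illustrative only; it shows the calling convention of Recv.
func readFromPeer(conn *KCP, buf []byte) ([]byte, bool) {
	// Recv returns the number of bytes copied into buf, or a value below zero
	// (EAGAIN) when no complete data has been reassembled yet.
	n := conn.Recv(buf)
	if n < 0 {
		return nil, false
	}
	return buf[:n], true
}
```
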
@@ -262,7 +230,7 @@ func (kcp *KCP) update_ack(rtt int32) {
 func (kcp *KCP) shrink_buf() {
     if len(kcp.snd_buf) > 0 {
         seg := kcp.snd_buf[0]
-        kcp.snd_una = seg.sn
+        kcp.snd_una = seg.Number
     } else {
         kcp.snd_una = kcp.snd_nxt
     }
@@ -274,12 +242,12 @@ func (kcp *KCP) parse_ack(sn uint32) {
     }
 
     for k, seg := range kcp.snd_buf {
-        if sn == seg.sn {
+        if sn == seg.Number {
             kcp.snd_buf = append(kcp.snd_buf[:k], kcp.snd_buf[k+1:]...)
             seg.Release()
             break
         }
-        if _itimediff(sn, seg.sn) < 0 {
+        if _itimediff(sn, seg.Number) < 0 {
             break
         }
     }
@@ -291,18 +259,18 @@ func (kcp *KCP) parse_fastack(sn uint32) {
     }
 
     for _, seg := range kcp.snd_buf {
-        if _itimediff(sn, seg.sn) < 0 {
+        if _itimediff(sn, seg.Number) < 0 {
             break
-        } else if sn != seg.sn {
-            seg.fastack++
+        } else if sn != seg.Number {
+            seg.ackSkipped++
         }
     }
 }
 
-func (kcp *KCP) parse_una(una uint32) {
+func (kcp *KCP) HandleReceivingNext(receivingNext uint32) {
     count := 0
     for _, seg := range kcp.snd_buf {
-        if _itimediff(una, seg.sn) > 0 {
+        if _itimediff(receivingNext, seg.Number) > 0 {
             seg.Release()
             count++
         } else {
@@ -312,17 +280,12 @@ func (kcp *KCP) parse_una(una uint32) {
     kcp.snd_buf = kcp.snd_buf[count:]
 }
 
-// ack append
-func (kcp *KCP) ack_push(sn, ts uint32) {
-    kcp.acklist = append(kcp.acklist, sn, ts)
-}
-
-func (kcp *KCP) ack_get(p int) (sn, ts uint32) {
-    return kcp.acklist[p*2+0], kcp.acklist[p*2+1]
+func (kcp *KCP) HandleSendingNext(sendingNext uint32) {
+    kcp.acklist.Clear(sendingNext)
 }
 
-func (kcp *KCP) parse_data(newseg *Segment) {
-    sn := newseg.sn
+func (kcp *KCP) parse_data(newseg *DataSegment) {
+    sn := newseg.Number
     if _itimediff(sn, kcp.rcv_nxt+kcp.rcv_wnd) >= 0 ||
         _itimediff(sn, kcp.rcv_nxt) < 0 {
         return
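
HandleReceivingNext above, like parse_ack and parse_fastack, relies on _itimediff to order 32-bit sequence numbers that may wrap around. A standalone worked example (editorial, not part of the patch) of why the unsigned subtraction followed by a signed cast gives the right ordering:

```go
package main

import "fmt"

// _itimediff as defined earlier in this file: subtract as uint32, then
// reinterpret the result as a signed 32-bit distance.
func _itimediff(later, earlier uint32) int32 {
	return int32(later - earlier)
}

func main() {
	// Plain case: 1005 is 5 ahead of 1000.
	fmt.Println(_itimediff(1005, 1000)) // 5

	// Wraparound case: the later number has wrapped past MaxUint32.
	const max = ^uint32(0)
	fmt.Println(_itimediff(2, max-2)) // 5 as well, not a huge negative number

	// So "receivingNext is ahead of seg.Number" is simply a positive diff,
	// which is exactly the test HandleReceivingNext uses.
	fmt.Println(_itimediff(2, max-2) > 0) // true
}
```
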
@@ -338,163 +301,132 @@ func (kcp *KCP) parse_data(newseg *Segment) {
 
 // Input when you received a low level packet (eg. UDP packet), call it
 func (kcp *KCP) Input(data []byte) int {
-    //una := kcp.snd_una
-    if len(data) < IKCP_OVERHEAD {
-        return -1
-    }
-
+    log.Info("KCP input at ", kcp.current)
+    var seg ISegment
     var maxack uint32
     var flag int
     for {
-        var ts, sn, una, conv uint32
-        var wnd, length uint16
-        var cmd, frg uint8
-
-        if len(data) < int(IKCP_OVERHEAD) {
+        seg, data = ReadSegment(data)
+        if seg == nil {
             break
         }
 
-        data = ikcp_decode32u(data, &conv)
-        if conv != kcp.conv {
-            return -1
-        }
-
-        data = ikcp_decode8u(data, &cmd)
-        data = ikcp_decode8u(data, &frg)
-        data = ikcp_decode16u(data, &wnd)
-        data = ikcp_decode32u(data, &ts)
-        data = ikcp_decode32u(data, &sn)
-        data = ikcp_decode32u(data, &una)
-        data = ikcp_decode16u(data, &length)
-        if len(data) < int(length) {
-            return -2
-        }
-
-        if cmd != IKCP_CMD_PUSH && cmd != IKCP_CMD_ACK {
-            return -3
-        }
-
-        if kcp.rmt_wnd < uint32(wnd) {
-            kcp.rmt_wnd = uint32(wnd)
-        }
-
-        kcp.parse_una(una)
-        kcp.shrink_buf()
-
-        if cmd == IKCP_CMD_ACK {
-            if _itimediff(kcp.current, ts) >= 0 {
-                kcp.update_ack(_itimediff(kcp.current, ts))
-            }
-            kcp.parse_ack(sn)
+        switch seg := seg.(type) {
+        case *DataSegment:
+            kcp.HandleOption(seg.Opt)
+            kcp.HandleSendingNext(seg.SendingNext)
             kcp.shrink_buf()
-            if flag == 0 {
-                flag = 1
-                maxack = sn
-            } else if _itimediff(sn, maxack) > 0 {
-                maxack = sn
+            kcp.acklist.Add(seg.Number, seg.Timestamp)
+            kcp.parse_data(seg)
+        case *ACKSegment:
+            kcp.HandleOption(seg.Opt)
+            if kcp.rmt_wnd < seg.ReceivingWindow {
+                kcp.rmt_wnd = seg.ReceivingWindow
             }
-        } else if cmd == IKCP_CMD_PUSH {
-            if _itimediff(sn, kcp.rcv_nxt+kcp.rcv_wnd) < 0 {
-                kcp.ack_push(sn, ts)
-                if _itimediff(sn, kcp.rcv_nxt) >= 0 {
-                    seg := NewSegment()
-                    seg.conv = conv
-                    seg.cmd = uint32(cmd)
-                    seg.frg = uint32(frg)
-                    seg.wnd = uint32(wnd)
-                    seg.ts = ts
-                    seg.sn = sn
-                    seg.una = una
-                    seg.data.Append(data[:length])
-                    kcp.parse_data(seg)
+            kcp.HandleReceivingNext(seg.ReceivingNext)
+            for i := 0; i < int(seg.Count); i++ {
+                ts := seg.TimestampList[i]
+                sn := seg.NumberList[i]
+                if _itimediff(kcp.current, ts) >= 0 {
+                    kcp.update_ack(_itimediff(kcp.current, ts))
+                }
+                kcp.parse_ack(sn)
+                if flag == 0 {
+                    flag = 1
+                    maxack = sn
+                } else if _itimediff(sn, maxack) > 0 {
+                    maxack = sn
                 }
             }
-        } else {
-            return -3
+            kcp.shrink_buf()
+        case *CmdOnlySegment:
+            kcp.HandleOption(seg.Opt)
+            if seg.Cmd == SegmentCommandTerminated {
+                if kcp.state == StateActive ||
+                    kcp.state == StateReadyToClose ||
+                    kcp.state == StatePeerClosed {
+                    kcp.state = StateTerminating
+                    kcp.stateBeginTime = kcp.current
+                    log.Info("KCP terminating at ", kcp.current)
+                } else if kcp.state == StateTerminating {
+                    kcp.state = StateTerminated
+                    kcp.stateBeginTime = kcp.current
+                    log.Info("KCP terminated at ", kcp.current)
+                }
+            }
+            kcp.HandleReceivingNext(seg.ReceivinNext)
+            kcp.HandleSendingNext(seg.SendingNext)
+        default:
         }
-
-        data = data[length:]
     }
 
     if flag != 0 {
         kcp.parse_fastack(maxack)
     }
 
-    /*
-        if _itimediff(kcp.snd_una, una) > 0 {
-            if kcp.cwnd < kcp.rmt_wnd {
-                mss := kcp.mss
-                if kcp.cwnd < kcp.ssthresh {
-                    kcp.cwnd++
-                    kcp.incr += mss
-                } else {
-                    if kcp.incr < mss {
-                        kcp.incr = mss
-                    }
-                    kcp.incr += (mss*mss)/kcp.incr + (mss / 16)
-                    if (kcp.cwnd+1)*mss <= kcp.incr {
-                        kcp.cwnd++
-                    }
-                }
-                if kcp.cwnd > kcp.rmt_wnd {
-                    kcp.cwnd = kcp.rmt_wnd
-                    kcp.incr = kcp.rmt_wnd * mss
-                }
-            }
-        }*/
-
     return 0
 }
 
 // flush pending data
 func (kcp *KCP) flush() {
+    if kcp.state == StateTerminated {
+        return
+    }
+    if kcp.state == StateTerminating {
+        kcp.output.Write(&CmdOnlySegment{
+            Conv: kcp.conv,
+            Cmd:  SegmentCommandTerminated,
+        })
+        kcp.output.Flush()
+
+        if _itimediff(kcp.current, kcp.stateBeginTime) > 8000 {
+            kcp.state = StateTerminated
+            log.Info("KCP terminated at ", kcp.current)
+            kcp.stateBeginTime = kcp.current
+        }
+        return
+    }
+
+    if kcp.state == StateReadyToClose && _itimediff(kcp.current, kcp.stateBeginTime) > 15000 {
+        kcp.state = StateTerminating
+        log.Info("KCP terminating at ", kcp.current)
+        kcp.stateBeginTime = kcp.current
+    }
+
     current := kcp.current
-    buffer := kcp.buffer
-    change := 0
+    segSent := false
     //lost := false
 
-    if !kcp.updated {
-        return
-    }
-    var seg Segment
-    seg.conv = kcp.conv
-    seg.cmd = IKCP_CMD_ACK
-    seg.wnd = uint32(kcp.rcv_nxt + kcp.rcv_wnd)
-    seg.una = kcp.rcv_nxt
+    //var seg Segment
+    //seg.conv = kcp.conv
+    //seg.cmd = IKCP_CMD_ACK
+    //seg.wnd = uint32(kcp.rcv_nxt + kcp.rcv_wnd)
+    //seg.una = kcp.rcv_nxt
 
     // flush acknowledges
-    count := len(kcp.acklist) / 2
-    ptr := buffer
-    for i := 0; i < count; i++ {
-        size := len(buffer) - len(ptr)
-        if size+IKCP_OVERHEAD > int(kcp.mtu) {
-            kcp.output(buffer[:size])
-            ptr = buffer
-        }
-        seg.sn, seg.ts = kcp.ack_get(i)
-        ptr = seg.encode(ptr)
+    ackSeg := kcp.acklist.AsSegment()
+    if ackSeg != nil {
+        ackSeg.Conv = kcp.conv
+        ackSeg.ReceivingWindow = uint32(kcp.rcv_nxt + kcp.rcv_wnd)
+        ackSeg.ReceivingNext = kcp.rcv_nxt
+        kcp.output.Write(ackSeg)
+        segSent = true
     }
-    kcp.acklist = nil
 
     // calculate window size
-
     cwnd := _imin_(kcp.snd_una+kcp.snd_wnd, kcp.rmt_wnd)
     if kcp.congestionControl {
         cwnd = _imin_(kcp.cwnd, cwnd)
     }
 
     for !kcp.snd_queue.IsEmpty() && _itimediff(kcp.snd_nxt, cwnd) < 0 {
-        newseg := kcp.snd_queue.Pop()
-        newseg.conv = kcp.conv
-        newseg.cmd = IKCP_CMD_PUSH
-        newseg.wnd = seg.wnd
-        newseg.ts = current
-        newseg.sn = kcp.snd_nxt
-        newseg.una = kcp.rcv_nxt
-        newseg.resendts = current
-        newseg.fastack = 0
-        newseg.xmit = 0
-        kcp.snd_buf = append(kcp.snd_buf, newseg)
+        seg := kcp.snd_queue.Pop()
+        seg.Conv = kcp.conv
+        seg.Number = kcp.snd_nxt
+        seg.timeout = current
+        seg.ackSkipped = 0
+        seg.transmit = 0
+        kcp.snd_buf = append(kcp.snd_buf, seg)
         kcp.snd_nxt++
     }
 
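
The acklist field replaces the old flat []uint32 of (sn, ts) pairs. Its concrete implementation is defined elsewhere in this change; the sketch below only illustrates, from the call sites in Input and flush (Add, Clear, AsSegment and the nil check), how such a list could behave. Field names and types here are guesses, not the real definitions.

```go
package main

import "fmt"

// Stand-in for the ACKSegment used elsewhere in this change; only the fields
// referenced by Input and flush are reproduced, and their types are guesses.
type ACKSegment struct {
	Count         uint32
	NumberList    []uint32
	TimestampList []uint32
}

// ackList is a sketch inferred from the call sites; the real ACKList may differ.
type ackList struct {
	numbers    []uint32
	timestamps []uint32
}

// Add records one received data segment to be acknowledged later.
func (l *ackList) Add(number, timestamp uint32) {
	l.numbers = append(l.numbers, number)
	l.timestamps = append(l.timestamps, timestamp)
}

// Clear drops entries the peer no longer needs acknowledged, i.e. everything
// strictly below the peer's sending-next value (see HandleSendingNext).
func (l *ackList) Clear(sendingNext uint32) {
	n := 0
	for i := range l.numbers {
		if int32(l.numbers[i]-sendingNext) >= 0 {
			l.numbers[n], l.timestamps[n] = l.numbers[i], l.timestamps[i]
			n++
		}
	}
	l.numbers, l.timestamps = l.numbers[:n], l.timestamps[:n]
}

// AsSegment packs pending acknowledgements into one ACKSegment, or returns nil
// when there is nothing to acknowledge (matching the nil check in flush).
func (l *ackList) AsSegment() *ACKSegment {
	if len(l.numbers) == 0 {
		return nil
	}
	return &ACKSegment{
		Count:         uint32(len(l.numbers)),
		NumberList:    l.numbers,
		TimestampList: l.timestamps,
	}
}

func main() {
	list := &ackList{}
	list.Add(10, 1000)
	list.Add(11, 1002)
	list.Clear(11)                            // peer already advanced past 10
	fmt.Println(list.AsSegment().NumberList) // [11]
}
```
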
@@ -507,51 +439,75 @@ func (kcp *KCP) flush() {
     // flush data segments
     for _, segment := range kcp.snd_buf {
         needsend := false
-        if segment.xmit == 0 {
+        if segment.transmit == 0 {
             needsend = true
-            segment.xmit++
-            segment.resendts = current + kcp.rx_rto
-        } else if _itimediff(current, segment.resendts) >= 0 {
+            segment.transmit++
+            segment.timeout = current + kcp.rx_rto
+        } else if _itimediff(current, segment.timeout) >= 0 {
             needsend = true
-            segment.xmit++
+            segment.transmit++
             kcp.xmit++
-            segment.resendts = current + kcp.rx_rto
+            segment.timeout = current + kcp.rx_rto
             //lost = true
-        } else if segment.fastack >= resent {
+        } else if segment.ackSkipped >= resent {
             needsend = true
-            segment.xmit++
-            segment.fastack = 0
-            segment.resendts = current + kcp.rx_rto
-            change++
+            segment.transmit++
+            segment.ackSkipped = 0
+            segment.timeout = current + kcp.rx_rto
         }
 
         if needsend {
-            segment.ts = current
-            segment.wnd = seg.wnd
-            segment.una = kcp.rcv_nxt
-
-            size := len(buffer) - len(ptr)
-            need := IKCP_OVERHEAD + segment.data.Len()
-
-            if size+need >= int(kcp.mtu) {
-                kcp.output(buffer[:size])
-                ptr = buffer
+            segment.Timestamp = current
+            segment.SendingNext = kcp.snd_una
+            segment.Opt = 0
+            if kcp.state == StateReadyToClose {
+                segment.Opt = SegmentOptionClose
             }
 
-            ptr = segment.encode(ptr)
-            copy(ptr, segment.data.Value)
-            ptr = ptr[segment.data.Len():]
+            kcp.output.Write(segment)
+            segSent = true
 
-            if segment.xmit >= kcp.dead_link {
+            if segment.transmit >= kcp.dead_link {
                 kcp.state = 0xFFFFFFFF
             }
         }
     }
 
     // flash remain segments
-    size := len(buffer) - len(ptr)
-    if size > 0 {
-        kcp.output(buffer[:size])
+    kcp.output.Flush()
+
+    if !segSent && kcp.state == StateReadyToClose {
+        kcp.output.Write(&CmdOnlySegment{
+            Conv:         kcp.conv,
+            Cmd:          SegmentCommandPing,
+            Opt:          SegmentOptionClose,
+            ReceivinNext: kcp.rcv_nxt,
+            SendingNext:  kcp.snd_nxt,
+        })
+        kcp.output.Flush()
+        segSent = true
+    }
+
+    if !segSent && kcp.state == StateTerminating {
+        kcp.output.Write(&CmdOnlySegment{
+            Conv:         kcp.conv,
+            Cmd:          SegmentCommandTerminated,
+            ReceivinNext: kcp.rcv_nxt,
+            SendingNext:  kcp.snd_una,
+        })
+        kcp.output.Flush()
+        segSent = true
+    }
+
+    if !segSent {
+        kcp.output.Write(&CmdOnlySegment{
+            Conv:         kcp.conv,
+            Cmd:          SegmentCommandPing,
+            ReceivinNext: kcp.rcv_nxt,
+            SendingNext:  kcp.snd_una,
+        })
+        kcp.output.Flush()
+        segSent = true
     }
 
     // update ssthresh
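
kcp.output is now a SegmentWriter built with NewSegmentWriter(mtu, output), which takes over the MTU-sized batching that flush used to do by hand with buffer and ptr. The real type is defined elsewhere in this change; the standalone sketch below shows one plausible way such a writer could batch Write calls and emit on Flush, with segments reduced to raw bytes purely for the example.

```go
package main

import (
	"bytes"
	"fmt"
)

// segmentWriter is a guess at the batching behaviour, not the real type.
// Here a segment is plain bytes and output is any datagram-sending callback.
type segmentWriter struct {
	mtu    uint32
	buffer bytes.Buffer
	output func([]byte)
}

// Write appends a segment, starting a new datagram when adding it would
// exceed the MTU.
func (w *segmentWriter) Write(seg []byte) {
	if w.buffer.Len() > 0 && uint32(w.buffer.Len()+len(seg)) > w.mtu {
		w.Flush()
	}
	w.buffer.Write(seg)
}

// Flush hands the accumulated datagram to the output and resets the buffer.
func (w *segmentWriter) Flush() {
	if w.buffer.Len() == 0 {
		return
	}
	w.output(w.buffer.Bytes())
	w.buffer.Reset()
}

func main() {
	w := &segmentWriter{
		mtu:    32,
		output: func(b []byte) { fmt.Println("datagram of", len(b), "bytes") },
	}
	w.Write(make([]byte, 20))
	w.Write(make([]byte, 20)) // would exceed 32 bytes, so the first 20 flush first
	w.Flush()
}
```
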
@@ -613,54 +569,6 @@ func (kcp *KCP) Update(current uint32) {
     }
 }
 
-// Check determines when should you invoke ikcp_update:
-// returns when you should invoke ikcp_update in millisec, if there
-// is no ikcp_input/_send calling. you can call ikcp_update in that
-// time, instead of call update repeatly.
-// Important to reduce unnacessary ikcp_update invoking. use it to
-// schedule ikcp_update (eg. implementing an epoll-like mechanism,
-// or optimize ikcp_update when handling massive kcp connections)
-func (kcp *KCP) Check(current uint32) uint32 {
-    ts_flush := kcp.ts_flush
-    tm_flush := int32(0x7fffffff)
-    tm_packet := int32(0x7fffffff)
-    minimal := uint32(0)
-    if !kcp.updated {
-        return current
-    }
-
-    if _itimediff(current, ts_flush) >= 10000 ||
-        _itimediff(current, ts_flush) < -10000 {
-        ts_flush = current
-    }
-
-    if _itimediff(current, ts_flush) >= 0 {
-        return current
-    }
-
-    tm_flush = _itimediff(ts_flush, current)
-
-    for _, seg := range kcp.snd_buf {
-        diff := _itimediff(seg.resendts, current)
-        if diff <= 0 {
-            return current
-        }
-        if diff < tm_packet {
-            tm_packet = diff
-        }
-    }
-
-    minimal = uint32(tm_packet)
-    if tm_packet >= tm_flush {
-        minimal = uint32(tm_flush)
-    }
-    if minimal >= kcp.interval {
-        minimal = kcp.interval
-    }
-
-    return current + minimal
-}
-
 // NoDelay options
 // fastest: ikcp_nodelay(kcp, 1, 20, 2, 1)
 // nodelay: 0:disable(default), 1:enable