cubic.go

package congestion

import (
	"math"
	"time"

	"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/protocol"
	"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/utils"
)

// This cubic implementation is based on the one found in Chromium's QUIC
// implementation, in the files net/quic/congestion_control/cubic.{hh,cc}.

// Constants based on TCP defaults.
// The following constants are in 2^10 fractions of a second instead of ms to
// allow a right shift by 10 to perform the division.
// 1024*1024^3 (first 1024 is from 0.100^3)
// where 0.100 is 100 ms which is the scaling round trip time.
const cubeScale = 40
const cubeCongestionWindowScale = 410
const cubeFactor protocol.ByteCount = 1 << cubeScale / cubeCongestionWindowScale / protocol.DefaultTCPMSS
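
// Explanatory note (a sketch of the scaling, not from the original source):
// with the CUBIC constant C expressed as cubeCongestionWindowScale/2^10 ≈ 0.4
// and time measured in 2^10 fractions of a second, the time to the origin
// point K = cbrt((W_max - cwnd) / C) (window in MSS units) can be evaluated
// in integer arithmetic as K = cbrt(cubeFactor * (W_max - cwnd)) with the
// window difference in bytes, which is how CongestionWindowAfterAck uses
// cubeFactor below.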

const defaultNumConnections = 2

// Default Cubic backoff factor
const beta float32 = 0.7

// Additional backoff factor when loss occurs in the concave part of the Cubic
// curve. This additional backoff factor is expected to give up bandwidth to
// new concurrent flows and speed up convergence.
const betaLastMax float32 = 0.85

// Cubic implements the cubic algorithm from TCP
type Cubic struct {
	clock Clock

	// Number of connections to simulate.
	numConnections int

	// Time when this cycle started, after last loss event.
	epoch time.Time

	// Max congestion window used just before last loss event.
	// Note: to improve fairness to other streams an additional back off is
	// applied to this value if the new value is below our latest value.
	lastMaxCongestionWindow protocol.ByteCount

	// Number of acked bytes since the cycle started (epoch).
	ackedBytesCount protocol.ByteCount

	// TCP Reno equivalent congestion window in bytes.
	estimatedTCPcongestionWindow protocol.ByteCount

	// Origin point of cubic function.
	originPointCongestionWindow protocol.ByteCount

	// Time to origin point of cubic function in 2^10 fractions of a second.
	timeToOriginPoint uint32

	// Last congestion window in bytes computed by cubic function.
	lastTargetCongestionWindow protocol.ByteCount
}

// NewCubic returns a new Cubic instance
func NewCubic(clock Clock) *Cubic {
	c := &Cubic{
		clock:          clock,
		numConnections: defaultNumConnections,
	}
	c.Reset()
	return c
}

// Reset is called after a timeout to reset the cubic state
func (c *Cubic) Reset() {
	c.epoch = time.Time{}
	c.lastMaxCongestionWindow = 0
	c.ackedBytesCount = 0
	c.estimatedTCPcongestionWindow = 0
	c.originPointCongestionWindow = 0
	c.timeToOriginPoint = 0
	c.lastTargetCongestionWindow = 0
}

func (c *Cubic) alpha() float32 {
	// TCPFriendly alpha is described in Section 3.3 of the CUBIC paper. Note that
	// beta here is a cwnd multiplier, and is equal to 1-beta from the paper.
	// We derive the equivalent alpha for an N-connection emulation as:
	b := c.beta()
	return 3 * float32(c.numConnections) * float32(c.numConnections) * (1 - b) / (1 + b)
}
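
// Worked example (illustrative, not part of the original source): with the
// default numConnections = 2 and beta = 0.7, beta() below evaluates to
// (2-1+0.7)/2 = 0.85, so alpha() = 3*2*2*(1-0.85)/(1+0.85) ≈ 0.97. For a
// single connection this reduces to 3*(1-0.7)/(1+0.7) ≈ 0.53, the
// TCP-friendly additive factor from the CUBIC paper / RFC 8312.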

func (c *Cubic) beta() float32 {
	// kNConnectionBeta is the backoff factor after loss for our N-connection
	// emulation, which emulates the effective backoff of an ensemble of N
	// TCP-Reno connections on a single loss event. The effective multiplier is
	// computed as:
	return (float32(c.numConnections) - 1 + beta) / float32(c.numConnections)
}

func (c *Cubic) betaLastMax() float32 {
	// betaLastMax is the additional backoff factor after loss for our
	// N-connection emulation, which emulates the additional backoff of
	// an ensemble of N TCP-Reno connections on a single loss event. The
	// effective multiplier is computed as:
	return (float32(c.numConnections) - 1 + betaLastMax) / float32(c.numConnections)
}
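
// Worked example (illustrative, not part of the original source): with the
// default numConnections = 2, betaLastMax() = (2-1+0.85)/2 = 0.925, i.e. the
// extra backoff applied to lastMaxCongestionWindow is milder than the regular
// beta() = 0.85 multiplier applied to the congestion window itself.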

// OnApplicationLimited is called on ack arrival when the sender is unable to
// use the available congestion window. Resets Cubic state during quiescence.
func (c *Cubic) OnApplicationLimited() {
	// When the sender is not using the available congestion window, the window
	// does not grow. But to be RTT-independent, Cubic assumes that the sender
	// has been using the entire window during the time since the beginning of
	// the current "epoch" (the end of the last loss recovery period). Since
	// application-limited periods break this assumption, we reset the epoch when
	// in such a period. This reset effectively freezes congestion window growth
	// through application-limited periods and allows Cubic growth to continue
	// when the entire window is being used.
	c.epoch = time.Time{}
}

// CongestionWindowAfterPacketLoss computes a new congestion window to use
// after a loss event. Returns the new congestion window in bytes. The new
// congestion window is a multiplicative decrease of our current window.
func (c *Cubic) CongestionWindowAfterPacketLoss(currentCongestionWindow protocol.ByteCount) protocol.ByteCount {
	if currentCongestionWindow+protocol.DefaultTCPMSS < c.lastMaxCongestionWindow {
		// We never reached the old max, so assume we are competing with another
		// flow. Use our extra back off factor to allow the other flow to go up.
		c.lastMaxCongestionWindow = protocol.ByteCount(c.betaLastMax() * float32(currentCongestionWindow))
	} else {
		c.lastMaxCongestionWindow = currentCongestionWindow
	}
	c.epoch = time.Time{} // Reset time.
	return protocol.ByteCount(float32(currentCongestionWindow) * c.beta())
}
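
// Worked example (illustrative, not part of the original source): with the
// default two-connection emulation (beta() = 0.85), a loss at a congestion
// window of 100 kB returns a new window of 85 kB. If the window never got
// within one MSS of the previous lastMaxCongestionWindow, that record is
// additionally reduced to 0.925 * 100 kB = 92.5 kB, yielding bandwidth to the
// competing flow.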

// CongestionWindowAfterAck computes a new congestion window to use after a received ACK.
// Returns the new congestion window in bytes. The new congestion window
// follows a cubic function that depends on the time passed since the last
// packet loss.
func (c *Cubic) CongestionWindowAfterAck(
	ackedBytes protocol.ByteCount,
	currentCongestionWindow protocol.ByteCount,
	delayMin time.Duration,
	eventTime time.Time,
) protocol.ByteCount {
	c.ackedBytesCount += ackedBytes
	if c.epoch.IsZero() {
		// First ACK after a loss event.
		c.epoch = eventTime            // Start of epoch.
		c.ackedBytesCount = ackedBytes // Reset count.
		// Reset estimated_tcp_congestion_window_ to be in sync with cubic.
		c.estimatedTCPcongestionWindow = currentCongestionWindow
		if c.lastMaxCongestionWindow <= currentCongestionWindow {
			c.timeToOriginPoint = 0
			c.originPointCongestionWindow = currentCongestionWindow
		} else {
			c.timeToOriginPoint = uint32(math.Cbrt(float64(cubeFactor * (c.lastMaxCongestionWindow - currentCongestionWindow))))
			c.originPointCongestionWindow = c.lastMaxCongestionWindow
		}
	}

	// Change the time unit from microseconds to 2^10 fractions per second. Take
	// the round trip time into account. This is done to allow us to use shift
	// as a divide operator.
	elapsedTime := int64(eventTime.Add(delayMin).Sub(c.epoch)/time.Microsecond) << 10 / (1000 * 1000)
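	// Explanatory note (not in the original source): the conversion is
	// elapsedTime = (elapsed_in_microseconds << 10) / 10^6, i.e. the time since
	// the epoch (plus the minimum RTT) expressed in 1/1024-second units. For
	// example, 100 ms elapsed gives 100000 * 1024 / 1000000 = 102 units.
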
	// Right-shifts of negative, signed numbers have implementation-dependent
	// behavior, so force the offset to be positive, as is done in the kernel.
	offset := int64(c.timeToOriginPoint) - elapsedTime
	if offset < 0 {
		offset = -offset
	}
	deltaCongestionWindow := protocol.ByteCount(cubeCongestionWindowScale*offset*offset*offset) * protocol.DefaultTCPMSS >> cubeScale
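	// Explanatory note (not in the original source): in the notation of the
	// CUBIC paper / RFC 8312 this is C*|t-K|^3 scaled to bytes, with
	// C = cubeCongestionWindowScale / 2^10 ≈ 0.4 and both t and K in 2^10
	// fractions of a second. The branch below then forms
	// W_cubic(t) = W_max ± C*|t-K|^3.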
	var targetCongestionWindow protocol.ByteCount
	if elapsedTime > int64(c.timeToOriginPoint) {
		targetCongestionWindow = c.originPointCongestionWindow + deltaCongestionWindow
	} else {
		targetCongestionWindow = c.originPointCongestionWindow - deltaCongestionWindow
	}

	// Limit the CWND increase to half the acked bytes.
	targetCongestionWindow = utils.MinByteCount(targetCongestionWindow, currentCongestionWindow+c.ackedBytesCount/2)

	// Increase the window by approximately Alpha * 1 MSS of bytes every
	// time we ack an estimated tcp window of bytes. For small
	// congestion windows (less than 25 packets), the formula below will
	// increase slightly slower than linearly per estimated tcp window
	// of bytes.
	c.estimatedTCPcongestionWindow += protocol.ByteCount(float32(c.ackedBytesCount) * c.alpha() * float32(protocol.DefaultTCPMSS) / float32(c.estimatedTCPcongestionWindow))
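	// Explanatory note (not in the original source): this is the Reno-style
	// "TCP friendly" estimate, corresponding to W_est in RFC 8312: the window
	// grows by roughly alpha() * MSS bytes for every estimated window's worth
	// of acknowledged bytes.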
	c.ackedBytesCount = 0

	// We have a new cubic congestion window.
	c.lastTargetCongestionWindow = targetCongestionWindow

	// Compute target congestion_window based on cubic target and estimated TCP
	// congestion_window, use highest (fastest).
	if targetCongestionWindow < c.estimatedTCPcongestionWindow {
		targetCongestionWindow = c.estimatedTCPcongestionWindow
	}
	return targetCongestionWindow
}

// SetNumConnections sets the number of emulated connections
func (c *Cubic) SetNumConnections(n int) {
	c.numConnections = n
}
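
// Usage sketch (illustrative only; cwnd, ackedBytes, minRTT and someClock are
// hypothetical names, not part of this file): a sender would typically ask
// Cubic for a new window on every ACK during congestion avoidance and apply
// the multiplicative decrease on loss:
//
//	c := NewCubic(someClock) // someClock: any Clock implementation
//	// On ACK, outside slow start:
//	cwnd = c.CongestionWindowAfterAck(ackedBytes, cwnd, minRTT, time.Now())
//	// On packet loss:
//	cwnd = c.CongestionWindowAfterPacketLoss(cwnd)
//	// After a retransmission timeout or a long idle period:
//	c.Reset()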