WebRTC Delay-Based Bitrate Control: Trendline

1. Trendline Algorithm Analysis

1.1 Test Cases

To understand Google's trendline estimator, we use WebRTC's trendline_estimator_unittest.cc as the entry point; the unit tests give a good picture of the estimator's behavior and of what its input parameters mean.

The trendline estimator test cases:

// Normal network: arrival intervals at the receiver match the send intervals
TEST_F(TrendlineEstimatorTest, Normal)
// Overused network: the receiver sees arrival intervals 10% longer than the send intervals
TEST_F(TrendlineEstimatorTest, Overusing)
// Underused network: the receiver sees arrival intervals 15% shorter than the send intervals
TEST_F(TrendlineEstimatorTest, Underusing)
// Small packets: the test uses 100-byte packets, below the configured 1200-byte size; they are still included by default
TEST_F(TrendlineEstimatorTest, IncludesSmallPacketsByDefault)

The test driver below shows which inputs TrendlineEstimator expects and what each of them means.

  void RunTestUntilStateChange() {
    RTC_DCHECK_EQ(send_times.size(), kPacketCount);
    RTC_DCHECK_EQ(recv_times.size(), kPacketCount);
    RTC_DCHECK_EQ(packet_sizes.size(), kPacketCount);
    RTC_DCHECK_GE(count, 1);
    RTC_DCHECK_LT(count, kPacketCount);

    auto initial_state = estimator.State();
    for (; count < kPacketCount; count++) {
      // inter-arrival time relative to the previous packet, ms
      double recv_delta = recv_times[count] - recv_times[count - 1];
      // inter-send time relative to the previous packet, ms
      double send_delta = send_times[count] - send_times[count - 1];
      /* Arguments:
      recv_delta: inter-arrival time vs. the previous packet, ms
      send_delta: inter-send time vs. the previous packet, ms
      send_times[count]: send time of the current packet, ms
      recv_times[count]: arrival time of the current packet, ms
      packet_sizes[count]: size of the current packet, bytes
      */
      estimator.Update(recv_delta, send_delta, send_times[count],
                       recv_times[count], packet_sizes[count], true);
      if (estimator.State() != initial_state) {
        // the estimator detected a network-state change; stop the test
        return;
      }
    }
  }
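
The timeline arrays themselves are filled in by helpers in the test fixture. A minimal sketch of how the Overusing case can be constructed (the 20 ms spacing, 10% slowdown, 1200-byte size, and 25-packet count here are illustrative assumptions, not the test's actual constants):

#include <cstddef>
#include <vector>

void FillOverusingTimeline(std::vector<double>* send_times,
                           std::vector<double>* recv_times,
                           std::vector<size_t>* packet_sizes) {
  const size_t kPacketCount = 25;
  double send_ms = 0.0;
  double recv_ms = 0.0;
  for (size_t i = 0; i < kPacketCount; ++i) {
    send_ms += 20.0;        // packets leave every 20 ms
    recv_ms += 20.0 * 1.1;  // but arrive every 22 ms: queuing delay grows 2 ms per packet
    send_times->push_back(send_ms);
    recv_times->push_back(recv_ms);
    packet_sizes->push_back(1200);
  }
}

Feeding such a timeline through estimator.Update() drives State() from kBwNormal to kBwOverusing, which is exactly what RunTestUntilStateChange() waits for.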

1.2 Algorithm Analysis

From the test cases we can see that TrendlineEstimator exposes two important external interfaces:

/**
 * Feed in one new sample.
 * recv_delta_ms: inter-arrival time vs. the previous packet group, ms (computed at the receiver, fed back to the sender)
 * send_delta_ms: inter-send time vs. the previous packet group, ms (computed at the sender)
 * send_time_ms: send time of the current packet group, ms (sender side)
 * arrival_time_ms: arrival time of the current packet group, ms (receiver side, fed back)
 * packet_size: size of the current packet group, bytes
 * calculated_deltas: the unit tests pass true
 */
void TrendlineEstimator::Update(double recv_delta_ms,
                                double send_delta_ms,
                                int64_t send_time_ms,
                                int64_t arrival_time_ms,
                                size_t packet_size,
                                bool calculated_deltas);

enum class BandwidthUsage {
  kBwNormal = 0,    // normal
  kBwUnderusing = 1,// under-using (link not saturated)
  kBwOverusing = 2, // over-using (congested)
  kLast
};
/**
 * Returns the current network state.
 */
BandwidthUsage State() const override;

1.2.1 Updating the Sample Data

void TrendlineEstimator::Update(double recv_delta_ms,
                                double send_delta_ms,
                                int64_t send_time_ms,
                                int64_t arrival_time_ms,
                                size_t packet_size,
                                bool calculated_deltas) {
  if (calculated_deltas) {
    UpdateTrendline(recv_delta_ms, send_delta_ms, send_time_ms, arrival_time_ms,
                    packet_size);
  }
  if (network_state_predictor_) {
    hypothesis_predicted_ = network_state_predictor_->Update(
        send_time_ms, arrival_time_ms, hypothesis_);
  }
}
void TrendlineEstimator::UpdateTrendline(double recv_delta_ms,
                                         double send_delta_ms,
                                         int64_t send_time_ms,
                                         int64_t arrival_time_ms,
                                         size_t packet_size) {
  // delay variation: inter-arrival time minus inter-send time
  const double delta_ms = recv_delta_ms - send_delta_ms;
  // one more delta sampled
  ++num_of_deltas_;
  num_of_deltas_ = std::min(num_of_deltas_, kDeltaCounterMax);// capped at 1000
  // arrival time of the first packet
  if (first_arrival_time_ms_ == -1)
    first_arrival_time_ms_ = arrival_time_ms;

  // Exponential backoff filter.
  // accumulate the delay variation
  accumulated_delay_ += delta_ms;
  BWE_TEST_LOGGING_PLOT(1, "accumulated_delay_ms", arrival_time_ms,
                        accumulated_delay_);
  // smoothed accumulated delay; smoothing factor 0.9
  smoothed_delay_ = smoothing_coef_ * smoothed_delay_ +
                    (1 - smoothing_coef_) * accumulated_delay_;
  BWE_TEST_LOGGING_PLOT(1, "smoothed_delay_ms", arrival_time_ms,
                        smoothed_delay_);

  // Maintain packet window
  // arrival_time_ms - first_arrival_time_ms_: arrival time relative to the first packet
  // push the sample (relative arrival time, smoothed accumulated delay, raw accumulated delay) into the window
  delay_hist_.emplace_back(
      static_cast<double>(arrival_time_ms - first_arrival_time_ms_),
      smoothed_delay_, accumulated_delay_);
  // insertion-sort the window by arrival time, ascending
  if (settings_.enable_sort) {
    for (size_t i = delay_hist_.size() - 1;
         i > 0 &&
         delay_hist_[i].arrival_time_ms < delay_hist_[i - 1].arrival_time_ms;
         --i) {
      std::swap(delay_hist_[i], delay_hist_[i - 1]);
    }
  }
  // cap the window at window_size samples
  if (delay_hist_.size() > settings_.window_size)
    delay_hist_.pop_front();

  // Simple linear regression.
  double trend = prev_trend_;
  // once delay_hist_ holds window_size samples, fit a line through them to get the delay-trend slope
  if (delay_hist_.size() == settings_.window_size) {
    // Update trend_ if it is possible to fit a line to the data. The delay
    // trend can be seen as an estimate of (send_rate - capacity)/capacity.
    // 0 < trend < 1   ->  the delay increases, queues are filling up
    //   trend == 0    ->  the delay does not change
    //   trend < 0     ->  the delay decreases, queues are being emptied
    // i.e., roughly how fast the send rate is outgrowing the link capacity
    trend = LinearFitSlope(delay_hist_).value_or(trend);
    if (settings_.enable_cap) {
      absl::optional<double> cap = ComputeSlopeCap(delay_hist_, settings_);
      // We only use the cap to filter out overuse detections, not
      // to detect additional underuses.
      if (trend >= 0 && cap.has_value() && trend > cap.value()) {
        trend = cap.value();
      }
    }
  }
  BWE_TEST_LOGGING_PLOT(1, "trendline_slope", arrival_time_ms, trend);

  // with the delay gradient in hand, run the detector
  Detect(trend, send_delta_ms, arrival_time_ms);
}

accumulated_delay_: the raw accumulated delay

smoothed_delay_: the exponentially smoothed accumulated delay
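
The smoothing is a plain first-order IIR filter. A standalone sketch of the same arithmetic, assuming the default smoothing factor of 0.9 and made-up per-packet deltas:

#include <cstdio>

int main() {
  const double smoothing_coef = 0.9;  // default smoothing factor
  double accumulated_delay = 0.0;
  double smoothed_delay = 0.0;
  // Made-up per-packet delay variations (recv_delta - send_delta), in ms.
  const double deltas_ms[] = {2.0, 2.0, -1.0, 3.0, 2.0};
  for (double delta : deltas_ms) {
    accumulated_delay += delta;
    smoothed_delay = smoothing_coef * smoothed_delay +
                     (1 - smoothing_coef) * accumulated_delay;
    std::printf("accumulated=%.2f ms, smoothed=%.2f ms\n",
                accumulated_delay, smoothed_delay);
  }
  return 0;
}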

struct PacketTiming {
    double arrival_time_ms;   // arrival time
    double smoothed_delay_ms; // smoothed accumulated delay
    double raw_delay_ms;      // raw accumulated delay
  };
absl::optional<double> LinearFitSlope(
    const std::deque<TrendlineEstimator::PacketTiming>& packets) {
  RTC_DCHECK(packets.size() >= 2);// at least 2 samples are required
  // Compute the "center of mass".
  // Linear regression: y = k * x + b
  // x : packet-group arrival time
  // y : smoothed accumulated delay
  double sum_x = 0;
  double sum_y = 0;
  for (const auto& packet : packets) {
    sum_x += packet.arrival_time_ms;
    sum_y += packet.smoothed_delay_ms;
  }
  double x_avg = sum_x / packets.size();// mean of x
  double y_avg = sum_y / packets.size();// mean of y
  // Compute the slope k = \sum (x_i-x_avg)(y_i-y_avg) / \sum (x_i-x_avg)^2
  double numerator = 0;
  double denominator = 0;
  for (const auto& packet : packets) {
    double x = packet.arrival_time_ms;
    double y = packet.smoothed_delay_ms;
    numerator += (x - x_avg) * (y - y_avg);
    denominator += (x - x_avg) * (x - x_avg);
  }
  if (denominator == 0)
    return absl::nullopt;
  // Return the slope k of the fitted delay-trend line:
  // > 0: congestion, queues are filling up;
  // = 0: the send rate matches the available bandwidth;
  // < 0: the network is under-used, queues are draining.
  return numerator / denominator;
}
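
As a quick sanity check of the formula: for the points (0, 0), (20, 2), (40, 4) the fitted slope is 0.1, i.e. the queuing delay grows by 0.1 ms per millisecond of elapsed time. The same computation in a standalone form:

#include <cstdio>
#include <utility>
#include <vector>

int main() {
  // (arrival_time_ms, smoothed_delay_ms) pairs: delay rising 2 ms per 20 ms.
  const std::vector<std::pair<double, double>> pts = {{0, 0}, {20, 2}, {40, 4}};
  double sum_x = 0.0, sum_y = 0.0;
  for (const auto& p : pts) {
    sum_x += p.first;
    sum_y += p.second;
  }
  const double x_avg = sum_x / pts.size();
  const double y_avg = sum_y / pts.size();
  double numerator = 0.0, denominator = 0.0;
  for (const auto& p : pts) {
    numerator += (p.first - x_avg) * (p.second - y_avg);
    denominator += (p.first - x_avg) * (p.first - x_avg);
  }
  std::printf("slope = %.3f\n", numerator / denominator);  // prints 0.100
  return 0;
}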

1.2.2 Detecting the Network State

/**
 * trend : slope of the fitted delay-trend line
 * ts_delta : inter-send time vs. the previous packet group
 * now_ms : arrival time of the packet group
 */
void TrendlineEstimator::Detect(double trend, double ts_delta, int64_t now_ms) {
  if (num_of_deltas_ < 2) {
    // at least 2 samples are required
    hypothesis_ = BandwidthUsage::kBwNormal;
    return;
  }
  // kMinNumDeltas :    60
  // trend :          the fitted slope passed in
  // threshold_gain_ : 4.0
  const double modified_trend =
      std::min(num_of_deltas_, kMinNumDeltas) * trend * threshold_gain_;
  prev_modified_trend_ = modified_trend;
  BWE_TEST_LOGGING_PLOT(1, "T", now_ms, modified_trend);
  BWE_TEST_LOGGING_PLOT(1, "threshold", now_ms, threshold_);
  // threshold_ starts at 12.5
  /**
   * Compare modified_trend against the adaptive threshold threshold_ to classify the network state:
   * modified_trend > threshold_  -> overuse
   * modified_trend < -threshold_ -> underuse
   * -threshold_ <= modified_trend <= threshold_ -> normal
   */
  if (modified_trend > threshold_) {
    if (time_over_using_ == -1) {
      // Initialize the timer. Assume that we've been
      // over-using half of the time since the previous
      // sample.
      time_over_using_ = ts_delta / 2;
    } else {
      // Increment timer
      time_over_using_ += ts_delta;
    }
    overuse_counter_++;
    if (time_over_using_ > overusing_time_threshold_ && overuse_counter_ > 1) {
      if (trend >= prev_trend_) {
        time_over_using_ = 0;
        overuse_counter_ = 0;
        hypothesis_ = BandwidthUsage::kBwOverusing;
      }
    }
  } else if (modified_trend < -threshold_) {
    time_over_using_ = -1;
    overuse_counter_ = 0;
    hypothesis_ = BandwidthUsage::kBwUnderusing;
  } else {
    time_over_using_ = -1;
    overuse_counter_ = 0;
    hypothesis_ = BandwidthUsage::kBwNormal;
  }
  prev_trend_ = trend;
  // threshold_ is adaptive; the adjustment is implemented in UpdateThreshold
  UpdateThreshold(modified_trend, now_ms);
}
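
Plugging in the defaults makes the scale concrete: once enough samples have accumulated (num_of_deltas_ >= 60), a fitted slope of trend = 0.1 gives

modified_trend = min(num_of_deltas_, 60) * trend * threshold_gain_ = 60 * 0.1 * 4.0 = 24

which is above the initial threshold_ of 12.5, so the sample counts toward overuse (still subject to the time_over_using_ and overuse_counter_ checks above).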
/**
 The threshold adapts in order to tune the detector's sensitivity to the delay gradient, for two reasons:
 1) The delay gradient varies over time; against a fixed threshold it may be too large or too small, leaving the detector
 either too insensitive to spot congestion or so sensitive that it flags congestion constantly;
 2) a fixed threshold would let concurrent TCP flows (which use loss-based congestion control) starve this flow.
 */
void TrendlineEstimator::UpdateThreshold(double modified_trend,
                                         int64_t now_ms) {
  if (last_update_ms_ == -1)
    last_update_ms_ = now_ms;

  // kMaxAdaptOffsetMs : 15.0
  if (fabs(modified_trend) > threshold_ + kMaxAdaptOffsetMs) {
    // Avoid adapting the threshold to big latency spikes, caused e.g.,
    // by a sudden capacity drop.
    last_update_ms_ = now_ms;
    return;
  }

  // k_down_ : 0.039
  // k_up_   : 0.0087
  // the gain kγ(ti): k_down_ or k_up_
  const double k = fabs(modified_trend) < threshold_ ? k_down_ : k_up_;
  const int64_t kMaxTimeDeltaMs = 100;
  // elapsed time since the last threshold update, ∆T
  int64_t time_delta_ms = std::min(now_ms - last_update_ms_, kMaxTimeDeltaMs);
  threshold_ += k * (fabs(modified_trend) - threshold_) * time_delta_ms;
  threshold_ = rtc::SafeClamp(threshold_, 6.f, 600.f);
  last_update_ms_ = now_ms;
}
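
UpdateThreshold implements the adaptive-threshold rule from the GCC draft:

threshold(t_i) = threshold(t_i-1) + ΔT * k * (|modified_trend(t_i)| - threshold(t_i-1))

where k is k_up_ (0.0087) when |modified_trend| is above the threshold and k_down_ (0.039) otherwise, and ΔT is capped at 100 ms. Continuing the example above: with threshold_ = 12.5 and modified_trend = 24 (inside the 12.5 + 15 spike-rejection band, so the update runs), ΔT = 100 ms raises the threshold by 0.0087 * (24 - 12.5) * 100 ≈ 10.0, to about 22.5, chasing the observed trend. Since k_down_ > k_up_, the threshold falls back toward small trends faster than it climbs toward large ones.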

2. Packet-Group Delay

Packet-group delays are computed by the InterArrival class.

2.1 Test Cases

To understand how InterArrival is used and what its parameters mean, we analyze its test code in modules/remote_bitrate_estimator/inter_arrival_unittest.cc.

TEST_F(InterArrivalTest, OutOfOrderPacket) {
  // G1
  int64_t arrival_time = 17;// packet arrival time, ms
  int64_t timestamp = 0;// packet send timestamp, us
  ExpectFalse(timestamp, arrival_time, 1);
  int64_t g1_timestamp = timestamp;
  int64_t g1_arrival_time = arrival_time;

  // G2
  //11 packets are sent practically at once; this group's send time is more than
  //5 ms past the previous group's, and it starts arriving 11 ms later.
  //Consecutive packets are sent only 20 us apart (negligible)
  //but arrive 6 ms apart.
  arrival_time += 11;// packet arrival time, ms
  timestamp += kTriggerNewGroupUs;// packet send timestamp, us; 5 ms past G1, which separates the two groups
  ExpectFalse(timestamp, 28, 2);
  for (int i = 0; i < 10; ++i) {
    arrival_time += kBurstThresholdMs + 1;
    timestamp += kMinStep;// 20 us
    ExpectFalse(timestamp, arrival_time, 1);
  }
  int64_t g2_timestamp = timestamp;// send timestamp of the last packet in the group
  int64_t g2_arrival_time = arrival_time;// arrival time of the last packet in the group

  // This packet is out of order and should be dropped.
  //a packet from group 1 arrives out of order and must be dropped
  arrival_time = 281;
  ExpectFalse(g1_timestamp, arrival_time, 100);

  // G3
  //group 3 arrives at 500 ms; its send time is more than 5 ms past the previous group's
  arrival_time = 500;
  timestamp = 2 * kTriggerNewGroupUs;
  ExpectTrue(timestamp, arrival_time, 100,
             // Delta G2-G1
             // send and arrival time deltas between group 2 and group 1
             g2_timestamp - g1_timestamp, g2_arrival_time - g1_arrival_time,
             // packet-size delta between group 2 and group 1
             (2 + 10) - 1, 0);
}

// Test that neither inter_arrival instance complete the timestamp group from
// the given data.
void ExpectFalse(int64_t timestamp_us,
                 int64_t arrival_time_ms,
                 size_t packet_size) {
  InternalExpectFalse(inter_arrival_rtp_.get(),
                      MakeRtpTimestamp(timestamp_us), arrival_time_ms,
                      packet_size);
  InternalExpectFalse(inter_arrival_ast_.get(), MakeAbsSendTime(timestamp_us),
                      arrival_time_ms, packet_size);
}

// Test that both inter_arrival instances complete the timestamp group from
// the given data and that all returned deltas are as expected (except
// timestamp delta, which is rounded from us to different ranges and must
// match within an interval, given in |timestamp_near|.
void ExpectTrue(int64_t timestamp_us,
                int64_t arrival_time_ms,
                size_t packet_size,
                int64_t expected_timestamp_delta_us,
                int64_t expected_arrival_time_delta_ms,
                int expected_packet_size_delta,
                uint32_t timestamp_near) {
    //convert us to an RTP-style timestamp
    InternalExpectTrue(inter_arrival_rtp_.get(), MakeRtpTimestamp(timestamp_us),
                       arrival_time_ms, packet_size,
                       MakeRtpTimestamp(expected_timestamp_delta_us),
                       expected_arrival_time_delta_ms,
                       expected_packet_size_delta, timestamp_near);
    //cf. AbsoluteSendTime::MsTo24Bits(now_ms)
    InternalExpectTrue(inter_arrival_ast_.get(), MakeAbsSendTime(timestamp_us),
                       arrival_time_ms, packet_size,
                       MakeAbsSendTime(expected_timestamp_delta_us),
                       expected_arrival_time_delta_ms,
                       expected_packet_size_delta, timestamp_near << 8);
}

/**
   timestamp: send timestamp
   arrival_time_ms: arrival time
   packet_size: packet size
*/
static void InternalExpectFalse(InterArrival* inter_arrival,
                                uint32_t timestamp,
                                int64_t arrival_time_ms,
                                size_t packet_size) {
    uint32_t dummy_timestamp = 101;
    int64_t dummy_arrival_time_ms = 303;
    int dummy_packet_size = 909;
    bool computed = inter_arrival->ComputeDeltas(
        timestamp, arrival_time_ms, arrival_time_ms, packet_size,
        &dummy_timestamp, &dummy_arrival_time_ms, &dummy_packet_size);
    EXPECT_EQ(computed, false);
    EXPECT_EQ(101ul, dummy_timestamp);
    EXPECT_EQ(303, dummy_arrival_time_ms);
    EXPECT_EQ(909, dummy_packet_size);
}

/**
timestamp: send timestamp
arrival_time_ms: arrival time
packet_size: packet size
expected_timestamp_delta: expected send-timestamp delta
expected_arrival_time_delta_ms: expected arrival-time delta
expected_packet_size_delta: expected packet-size delta
timestamp_near: 0 here
*/
static void InternalExpectTrue(InterArrival* inter_arrival,
                               uint32_t timestamp,
                               int64_t arrival_time_ms,
                               size_t packet_size,
                               uint32_t expected_timestamp_delta,
                               int64_t expected_arrival_time_delta_ms,
                               int expected_packet_size_delta,
                               uint32_t timestamp_near) {
    uint32_t delta_timestamp = 101;// send-time delta
    int64_t delta_arrival_time_ms = 303;// arrival-time delta
    int delta_packet_size = 909;// packet-size delta
    bool computed = inter_arrival->ComputeDeltas(
        timestamp, arrival_time_ms, arrival_time_ms, packet_size,
        &delta_timestamp, &delta_arrival_time_ms, &delta_packet_size);
    EXPECT_EQ(true, computed);
    EXPECT_NEAR(expected_timestamp_delta, delta_timestamp, timestamp_near);
    EXPECT_EQ(expected_arrival_time_delta_ms, delta_arrival_time_ms);
    EXPECT_EQ(expected_packet_size_delta, delta_packet_size);
}

2.2 InterArrival Analysis

InterArrival takes each packet's send time, arrival time, and size, and computes the send-time, arrival-time, and size deltas between packet groups; the grouping itself is handled internally.

The data-input interface is InterArrival::ComputeDeltas:

/**
 Detects when a new group starts, updates the group bookkeeping, and computes the inter-group deltas.

 timestamp: packet send timestamp (the unit depends on the timestamp source; the tests exercise RTP and abs-send-time variants)
 arrival_time_ms: packet arrival time, ms
 system_time_ms: local system time; equal to arrival_time_ms in the tests
 packet_size: packet size
 timestamp_delta: output, send-timestamp delta
 arrival_time_delta_ms: output, arrival-time delta, ms
 packet_size_delta: output, packet-size delta
 */
bool InterArrival::ComputeDeltas(uint32_t timestamp,
                                 int64_t arrival_time_ms,
                                 int64_t system_time_ms,
                                 size_t packet_size,
                                 uint32_t* timestamp_delta,
                                 int64_t* arrival_time_delta_ms,
                                 int* packet_size_delta) {
  assert(timestamp_delta != NULL);
  assert(arrival_time_delta_ms != NULL);
  assert(packet_size_delta != NULL);
  bool calculated_deltas = false;
  if (current_timestamp_group_.IsFirstPacket()) {
    // We don't have enough data to update the filter, so we store it until we
    // have two frames of data to process.
    current_timestamp_group_.timestamp = timestamp;
    current_timestamp_group_.first_timestamp = timestamp;
    current_timestamp_group_.first_arrival_ms = arrival_time_ms;
  } else if (!PacketInOrder(timestamp)) {
    return false;
  } else if (NewTimestampGroup(arrival_time_ms, timestamp)) {
    // First packet of a later frame, the previous frame sample is ready.
    // a new group starts here; compute the deltas between prev_group and current_group
    if (prev_timestamp_group_.complete_time_ms >= 0) {
      *timestamp_delta =
          current_timestamp_group_.timestamp - prev_timestamp_group_.timestamp;
      *arrival_time_delta_ms = current_timestamp_group_.complete_time_ms -
                               prev_timestamp_group_.complete_time_ms;
      // Check system time differences to see if we have an unproportional jump
      // in arrival time. In that case reset the inter-arrival computations.
      int64_t system_time_delta_ms =
          current_timestamp_group_.last_system_time_ms -
          prev_timestamp_group_.last_system_time_ms;
      if (*arrival_time_delta_ms - system_time_delta_ms >=
          kArrivalTimeOffsetThresholdMs) {
        RTC_LOG(LS_WARNING)
            << "The arrival time clock offset has changed (diff = "
            << *arrival_time_delta_ms - system_time_delta_ms
            << " ms), resetting.";
        Reset();
        return false;
      }
      if (*arrival_time_delta_ms < 0) {
        // The group of packets has been reordered since receiving its local
        // arrival timestamp.
        ++num_consecutive_reordered_packets_;
        if (num_consecutive_reordered_packets_ >= kReorderedResetThreshold) {
          RTC_LOG(LS_WARNING)
              << "Packets are being reordered on the path from the "
                 "socket to the bandwidth estimator. Ignoring this "
                 "packet for bandwidth estimation, resetting.";
          Reset();
        }
        return false;
      } else {
        num_consecutive_reordered_packets_ = 0;
      }
      assert(*arrival_time_delta_ms >= 0);
      *packet_size_delta = static_cast<int>(current_timestamp_group_.size) -
                           static_cast<int>(prev_timestamp_group_.size);
      calculated_deltas = true;
    }
    prev_timestamp_group_ = current_timestamp_group_;
    // The new timestamp is now the current frame.
    current_timestamp_group_.first_timestamp = timestamp;
    current_timestamp_group_.timestamp = timestamp;
    current_timestamp_group_.first_arrival_ms = arrival_time_ms;
    current_timestamp_group_.size = 0;
  } else {
    current_timestamp_group_.timestamp =
        LatestTimestamp(current_timestamp_group_.timestamp, timestamp);
  }
  // Accumulate the frame size.
  current_timestamp_group_.size += packet_size;
  current_timestamp_group_.complete_time_ms = arrival_time_ms;
  current_timestamp_group_.last_system_time_ms = system_time_ms;

  return calculated_deltas;
}
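
A hedged usage sketch of the interface (the wrapper below and its callback are illustrative, not WebRTC code). ComputeDeltas only returns true when a packet group has just completed, and the deltas it outputs are between the two most recently completed groups:

#include <cstddef>
#include <cstdint>

#include "modules/remote_bitrate_estimator/inter_arrival.h"

// Illustrative helper: feed one packet in, forward completed-group deltas.
template <typename OnDeltas>
void ProcessPacket(webrtc::InterArrival* inter_arrival,
                   uint32_t send_timestamp,
                   int64_t arrival_time_ms,
                   size_t packet_size,
                   OnDeltas&& on_deltas) {
  uint32_t timestamp_delta = 0;
  int64_t arrival_time_delta_ms = 0;
  int packet_size_delta = 0;
  if (inter_arrival->ComputeDeltas(send_timestamp, arrival_time_ms,
                                   /*system_time_ms=*/arrival_time_ms,
                                   packet_size, &timestamp_delta,
                                   &arrival_time_delta_ms, &packet_size_delta)) {
    // A group just completed; these deltas are what DelayBasedBwe hands to
    // TrendlineEstimator::Update (see section 3).
    on_deltas(timestamp_delta, arrival_time_delta_ms, packet_size_delta);
  }
}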

InterArrival::NewTimestampGroup

// Assumes that |timestamp| is not reordered compared to
// |current_timestamp_group_|.
/**
 WebRTC does not measure the delay gradient between individual packets; instead it groups packets
 by send interval and by arrival interval, and computes the delay gradient between whole groups.
 The grouping rules:
 1. Packets sent within 5 ms of each other form one group, because the pacer on the sending
 side sends a batch of packets every 5 ms.
 2. Packets arriving within 5 ms of each other are also merged into one group: under Wi-Fi,
 some devices only get to forward packets in fixed time slices that can be as long as 100 ms,
 so up to 100 ms worth of packets pile up and then arrive as a burst;
 every packet in that burst is treated as part of one group.
 */
bool InterArrival::NewTimestampGroup(int64_t arrival_time_ms,
                                     uint32_t timestamp) const {
  if (current_timestamp_group_.IsFirstPacket()) {
    return false;
  } else if (BelongsToBurst(arrival_time_ms, timestamp)) {
    return false;
  } else {
    //a new group starts when the send timestamp is more than 5 ms past the group's first timestamp
    uint32_t timestamp_diff =
        timestamp - current_timestamp_group_.first_timestamp;
    return timestamp_diff > kTimestampGroupLengthTicks;
  }
}
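
kTimestampGroupLengthTicks is simply the 5 ms window expressed in the timestamp's own unit. For the abs-send-time-derived timestamps that DelayBasedBwe feeds in (see section 3), the constant falls out of the Q26 fixed-point format; a sketch using the constant names from the WebRTC source:

#include <cstdint>
#include <cstdio>

int main() {
  const int kTimestampGroupLengthMs = 5;
  const int kAbsSendTimeFraction = 18;
  const int kAbsSendTimeInterArrivalUpshift = 8;
  const int kInterArrivalShift =
      kAbsSendTimeFraction + kAbsSendTimeInterArrivalUpshift;  // 26
  // 5 ms in Q26 ticks: 5 * 2^26 / 1000.
  const int64_t kTimestampGroupTicks =
      (int64_t{kTimestampGroupLengthMs} << kInterArrivalShift) / 1000;
  std::printf("5 ms == %lld ticks\n",
              static_cast<long long>(kTimestampGroupTicks));  // 335544
  return 0;
}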

InterArrival::BelongsToBurst

/**
 Packets arriving close together are merged into the current group: under Wi-Fi, some devices
 only forward packets in fixed time slices (which can be as long as 100 ms), so queued packets
 arrive as a burst, and the whole burst is treated as one group.
 */
bool InterArrival::BelongsToBurst(int64_t arrival_time_ms,
                                  uint32_t timestamp) const {
  if (!burst_grouping_) {
    return false;
  }
  assert(current_timestamp_group_.complete_time_ms >= 0);
  int64_t arrival_time_delta_ms =
      arrival_time_ms - current_timestamp_group_.complete_time_ms;
  uint32_t timestamp_diff = timestamp - current_timestamp_group_.timestamp;
  int64_t ts_delta_ms = timestamp_to_ms_coeff_ * timestamp_diff + 0.5;
  if (ts_delta_ms == 0)
    return true;
  int propagation_delta_ms = arrival_time_delta_ms - ts_delta_ms;
  if (propagation_delta_ms < 0 &&
      arrival_time_delta_ms <= kBurstDeltaThresholdMs &&
      arrival_time_ms - current_timestamp_group_.first_arrival_ms <
          kMaxBurstDurationMs)
    return true;
  return false;
}

3. Delay-Based Bitrate Control

A TrendlineEstimator lives as a member of DelayBasedBwe; the call path into its Update interface looks like this (abridged):

void DelayBasedBwe::IncomingPacketFeedback(const PacketResult& packet_feedback,
                                           Timestamp at_time) {
  uint32_t send_time_24bits =
      static_cast<uint32_t>(
          ((static_cast<uint64_t>(packet_feedback.sent_packet.send_time.ms())
            << kAbsSendTimeFraction) +
           500) /
          1000) &
      0x00FFFFFF;
  // Shift up send time to use the full 32 bits that inter_arrival works with,
  // so wrapping works properly.
  uint32_t timestamp = send_time_24bits << kAbsSendTimeInterArrivalUpshift;

  uint32_t timestamp_delta = 0;
  int64_t recv_delta_ms = 0;
  int size_delta = 0;
  bool calculated_deltas = inter_arrival_for_packet->ComputeDeltas(
      timestamp, packet_feedback.receive_time.ms(), at_time.ms(),
      packet_size.bytes(), &timestamp_delta, &recv_delta_ms, &size_delta);
  double send_delta_ms = (1000.0 * timestamp_delta) / (1 << kInterArrivalShift);
  delay_detector_for_packet->Update(recv_delta_ms, send_delta_ms,
                                    packet_feedback.sent_packet.send_time.ms(),
                                    packet_feedback.receive_time.ms(),
                                    packet_size.bytes(), calculated_deltas);
}
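
The 24-bit value mimics the abs-send-time RTP header extension: seconds in 6.18 fixed point, then shifted up by 8 bits so wrap-around arithmetic works on the full 32-bit range. A standalone sketch of the round trip from milliseconds to timestamp and back (same constants as in section 2; the 1234 ms input is arbitrary):

#include <cstdint>
#include <cstdio>

int main() {
  const int kAbsSendTimeFraction = 18;
  const int kAbsSendTimeInterArrivalUpshift = 8;
  const int kInterArrivalShift =
      kAbsSendTimeFraction + kAbsSendTimeInterArrivalUpshift;  // 26

  const int64_t send_time_ms = 1234;
  // ms -> 6.18 fixed-point seconds (24 bits), rounded to nearest.
  const uint32_t send_time_24bits =
      static_cast<uint32_t>(
          ((static_cast<uint64_t>(send_time_ms) << kAbsSendTimeFraction) +
           500) /
          1000) &
      0x00FFFFFF;
  // Shift into the top of the 32 bits so wrap-around works.
  const uint32_t timestamp = send_time_24bits << kAbsSendTimeInterArrivalUpshift;
  // A timestamp delta converts back to ms with the inverse scale.
  const double send_delta_ms = (1000.0 * timestamp) / (1 << kInterArrivalShift);
  std::printf("%.3f ms\n", send_delta_ms);  // ~1234.001: fixed-point rounding
  return 0;
}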

3.1 Test Cases

To see the overall rate-control flow, we can walk through DelayBasedBwe's test cases in delay_based_bwe_unittest.cc.

Packets arriving slower than they were sent, at a high bitrate:

TEST_F(DelayBasedBweTest, ProbeDetectionSlowerArrivalHighBitrate) {
  int64_t now_ms = clock_.TimeInMilliseconds();
  // Burst sent at 8 * 1000 / 1 = 8000 kbps.
  // Arriving at 8 * 1000 / 2 = 4000 kbps.
  // Since the receive rate is significantly below the send rate, we expect to
  // use 95% of the estimated capacity.
  int64_t send_time_ms = 0;
  for (int i = 0; i < kNumProbesCluster1; ++i) {
    clock_.AdvanceTimeMilliseconds(2);// advance the clock by 2 ms
    send_time_ms += 1;// a packet is sent every 1 ms
    now_ms = clock_.TimeInMilliseconds();// and received every 2 ms
    // const PacedPacketInfo kPacingInfo1(1, 8, 4000);
    IncomingFeedback(now_ms, send_time_ms, 1000, kPacingInfo1);
  }

  EXPECT_TRUE(bitrate_observer_.updated());
  EXPECT_NEAR(bitrate_observer_.latest_bitrate(),
              kTargetUtilizationFraction * 4000000u, 10000u);
}

/**
 arrival_time_ms : arrival time
 send_time_ms : send time
 payload_size : packet size
 pacing_info :  pacer-related info, not analyzed here
 */
void DelayBasedBweTest::IncomingFeedback(int64_t arrival_time_ms,
                                         int64_t send_time_ms,
                                         size_t payload_size,
                                         const PacedPacketInfo& pacing_info) {
  //check that the arrival timestamp is non-negative;
  //arrival_time_offset_ms_ starts at 0, not analyzed here
  RTC_CHECK_GE(arrival_time_ms + arrival_time_offset_ms_, 0);
  PacketResult packet; // holds the packet's info
  //receive time
  packet.receive_time =
      Timestamp::Millis(arrival_time_ms + arrival_time_offset_ms_);
  //send time
  packet.sent_packet.send_time = Timestamp::Millis(send_time_ms);
  //packet size
  packet.sent_packet.size = DataSize::Bytes(payload_size);
  packet.sent_packet.pacing_info = pacing_info;
  if (packet.sent_packet.pacing_info.probe_cluster_id !=
      PacedPacketInfo::kNotAProbe)
    probe_bitrate_estimator_->HandleProbeAndEstimateBitrate(packet);

  TransportPacketsFeedback msg;
  //arrival time of the feedback packet, local clock
  msg.feedback_time = Timestamp::Millis(clock_.TimeInMilliseconds());
  //queue the packet's info
  msg.packet_feedbacks.push_back(packet);
  //feed the acknowledged-bitrate estimator to track the measured throughput
  acknowledged_bitrate_estimator_->IncomingPacketFeedbackVector(
      msg.SortedByReceiveTime());
  //bitrate_estimator_ is the DelayBasedBwe: feed it the feedback and the current measured send bitrate
  DelayBasedBwe::Result result =
      bitrate_estimator_->IncomingPacketFeedbackVector(
          msg, acknowledged_bitrate_estimator_->bitrate(),
          probe_bitrate_estimator_->FetchAndResetLastEstimatedBitrate(),
          /*network_estimate*/ absl::nullopt, /*in_alr*/ false);
  if (result.updated) {
    bitrate_observer_.OnReceiveBitrateChanged(result.target_bitrate.bps());
  }
}

3.2 DelayBasedBwe Analysis

DelayBasedBwe::IncomingPacketFeedbackVector is the entry point for the feedback data:

/**
 msg : the packet feedback
 acked_bitrate : the current measured (acknowledged) send bitrate
 probe_bitrate : bitrate estimated from probing, analyzed separately
 network_estimate : null here
 in_alr : false here
 */
DelayBasedBwe::Result DelayBasedBwe::IncomingPacketFeedbackVector(
    const TransportPacketsFeedback& msg,
    absl::optional<DataRate> acked_bitrate,
    absl::optional<DataRate> probe_bitrate,
    absl::optional<NetworkStateEstimate> network_estimate,
    bool in_alr) {
  RTC_DCHECK_RUNS_SERIALIZED(&network_race_);

  //sort by receive time, ascending
  auto packet_feedback_vector = msg.SortedByReceiveTime();
  // TODO(holmer): An empty feedback vector here likely means that
  // all acks were too late and that the send time history had
  // timed out. We should reduce the rate when this occurs.
  if (packet_feedback_vector.empty()) {
    RTC_LOG(LS_WARNING) << "Very late feedback received.";
    return DelayBasedBwe::Result();
  }

  if (!uma_recorded_) {
    RTC_HISTOGRAM_ENUMERATION(kBweTypeHistogram,
                              BweNames::kSendSideTransportSeqNum,
                              BweNames::kBweNamesMax);
    uma_recorded_ = true;
  }
  bool delayed_feedback = true;
  bool recovered_from_overuse = false;
  //remember the current bandwidth-usage state
  BandwidthUsage prev_detector_state = active_delay_detector_->State();
  for (const auto& packet_feedback : packet_feedback_vector) {
    delayed_feedback = false;
    //feed in each feedback entry
    IncomingPacketFeedback(packet_feedback, msg.feedback_time);
    if (prev_detector_state == BandwidthUsage::kBwUnderusing &&
        active_delay_detector_->State() == BandwidthUsage::kBwNormal) {
      //the state flipped from under-using back to normal: we have recovered from an overuse
      recovered_from_overuse = true;
    }
    prev_detector_state = active_delay_detector_->State();
  }

  if (delayed_feedback) {
    // TODO(bugs.webrtc.org/10125): Design a better mechanism to safe-guard
    // against building very large network queues.
    return Result();
  }
  rate_control_.SetInApplicationLimitedRegion(in_alr);
  rate_control_.SetNetworkStateEstimate(network_estimate);
  return MaybeUpdateEstimate(acked_bitrate, probe_bitrate,
                             std::move(network_estimate),
                             recovered_from_overuse, in_alr, msg.feedback_time);
}

DelayBasedBwe::IncomingPacketFeedback

/**
 packet_feedback: feedback for one packet: its receive time, send time, and size
 at_time: arrival time of the feedback packet, local clock
 */
void DelayBasedBwe::IncomingPacketFeedback(const PacketResult& packet_feedback,
                                           Timestamp at_time) {
  // Reset if the stream has timed out.
  // (judged by the feedback arrival time; the timeout kStreamTimeOut is 2 s)
  if (last_seen_packet_.IsInfinite() ||
      at_time - last_seen_packet_ > kStreamTimeOut) {
    video_inter_arrival_.reset(
        new InterArrival(kTimestampGroupTicks, kTimestampToMs, true));
    video_delay_detector_.reset(
        new TrendlineEstimator(key_value_config_, network_state_predictor_));
    audio_inter_arrival_.reset(
        new InterArrival(kTimestampGroupTicks, kTimestampToMs, true));
    audio_delay_detector_.reset(
        new TrendlineEstimator(key_value_config_, network_state_predictor_));
    active_delay_detector_ = video_delay_detector_.get();
  }
  last_seen_packet_ = at_time;

  // Ignore "small" packets if many/most packets in the call are "large". The
  // packet size may have a significant effect on the propagation delay,
  // especially at low bandwidths. Variations in packet size will then show up
  // as noise in the delay measurement. By default, we include all packets.
  // packet size
  DataSize packet_size = packet_feedback.sent_packet.size;
  if (!ignore_small_.small_threshold.IsZero()) {
    double is_large =
        static_cast<double>(packet_size >= ignore_small_.large_threshold);
    fraction_large_packets_ +=
        ignore_small_.smoothing_factor * (is_large - fraction_large_packets_);
    if (packet_size <= ignore_small_.small_threshold &&
        fraction_large_packets_ >= ignore_small_.fraction_large) {
      return;
    }
  }

  // As an alternative to ignoring small packets, we can separate audio and
  // video packets for overuse detection.
  InterArrival* inter_arrival_for_packet = video_inter_arrival_.get();
  DelayIncreaseDetectorInterface* delay_detector_for_packet =
      video_delay_detector_.get();
  if (separate_audio_.enabled) {
    if (packet_feedback.sent_packet.audio) {
      inter_arrival_for_packet = audio_inter_arrival_.get();
      delay_detector_for_packet = audio_delay_detector_.get();
      audio_packets_since_last_video_++;
      if (audio_packets_since_last_video_ > separate_audio_.packet_threshold &&
          packet_feedback.receive_time - last_video_packet_recv_time_ >
              separate_audio_.time_threshold) {
        active_delay_detector_ = audio_delay_detector_.get();
      }
    } else {
      audio_packets_since_last_video_ = 0;
      last_video_packet_recv_time_ =
          std::max(last_video_packet_recv_time_, packet_feedback.receive_time);
      active_delay_detector_ = video_delay_detector_.get();
    }
  }

  //convert the send time into the timestamp format InterArrival expects (InterArrival is analyzed in section 2)
  uint32_t send_time_24bits =
      static_cast<uint32_t>(
          ((static_cast<uint64_t>(packet_feedback.sent_packet.send_time.ms())
            << kAbsSendTimeFraction) +
           500) /
          1000) &
      0x00FFFFFF;
  // Shift up send time to use the full 32 bits that inter_arrival works with,
  // so wrapping works properly.
  uint32_t timestamp = send_time_24bits << kAbsSendTimeInterArrivalUpshift;

  uint32_t timestamp_delta = 0;
  int64_t recv_delta_ms = 0; // inter-arrival time vs. the previous packet group, ms
  int size_delta = 0;
  bool calculated_deltas = inter_arrival_for_packet->ComputeDeltas(
      timestamp, packet_feedback.receive_time.ms(), at_time.ms(),
      packet_size.bytes(), &timestamp_delta, &recv_delta_ms, &size_delta);
  //inter-send time vs. the previous packet group, ms
  double send_delta_ms = (1000.0 * timestamp_delta) / (1 << kInterArrivalShift);
  delay_detector_for_packet->Update(recv_delta_ms, send_delta_ms,
                                    packet_feedback.sent_packet.send_time.ms(),// send time of the current packet group, ms
                                    packet_feedback.receive_time.ms(),// receive time of the current packet group, ms
                                    packet_size.bytes(), calculated_deltas);
}

DelayBasedBwe::MaybeUpdateEstimate

/**
 acked_bitrate: current measured throughput
 probe_bitrate: bitrate estimated from probing
 state_estimate: null
 recovered_from_overuse: whether we just recovered from overuse back to normal
 in_alr: false
 at_time: arrival time of the feedback packet, local clock
 */
DelayBasedBwe::Result DelayBasedBwe::MaybeUpdateEstimate(
    absl::optional<DataRate> acked_bitrate,
    absl::optional<DataRate> probe_bitrate,
    absl::optional<NetworkStateEstimate> state_estimate,
    bool recovered_from_overuse,
    bool in_alr,
    Timestamp at_time) {
  Result result;

  // Currently overusing the bandwidth.
  if (active_delay_detector_->State() == BandwidthUsage::kBwOverusing) {
    //the network is currently over-used
    if (has_once_detected_overuse_ && in_alr && alr_limited_backoff_enabled_) {
      if (rate_control_.TimeToReduceFurther(at_time, prev_bitrate_)) {
        result.updated =
            UpdateEstimate(at_time, prev_bitrate_, &result.target_bitrate);
        result.backoff_in_alr = true;
      }
    } else if (acked_bitrate &&
               rate_control_.TimeToReduceFurther(at_time, *acked_bitrate)) {
      result.updated =
          UpdateEstimate(at_time, acked_bitrate, &result.target_bitrate);
    } else if (!acked_bitrate && rate_control_.ValidEstimate() &&
               rate_control_.InitialTimeToReduceFurther(at_time)) {
      // Overusing before we have a measured acknowledged bitrate. Reduce send
      // rate by 50% every 200 ms.
      // TODO(tschumim): Improve this and/or the acknowledged bitrate estimator
      // so that we (almost) always have a bitrate estimate.
      rate_control_.SetEstimate(rate_control_.LatestEstimate() / 2, at_time);
      result.updated = true;
      result.probe = false;
      result.target_bitrate = rate_control_.LatestEstimate();
    }
    has_once_detected_overuse_ = true;
  } else {
    //the network is not over-used
    if (probe_bitrate) {
      result.probe = true;
      result.updated = true;
      result.target_bitrate = *probe_bitrate;
      rate_control_.SetEstimate(*probe_bitrate, at_time);
    } else {
      result.updated =
          UpdateEstimate(at_time, acked_bitrate, &result.target_bitrate);
      result.recovered_from_overuse = recovered_from_overuse;
    }
  }
  BandwidthUsage detector_state = active_delay_detector_->State();
  if ((result.updated && prev_bitrate_ != result.target_bitrate) ||
      detector_state != prev_state_) {
    DataRate bitrate = result.updated ? result.target_bitrate : prev_bitrate_;

    BWE_TEST_LOGGING_PLOT(1, "target_bitrate_bps", at_time.ms(), bitrate.bps());

    if (event_log_) {
      event_log_->Log(std::make_unique<RtcEventBweUpdateDelayBased>(
          bitrate.bps(), detector_state));
    }

    prev_bitrate_ = bitrate;
    prev_state_ = detector_state;
  }
  return result;
}

DelayBasedBwe::UpdateEstimate

/**
 at_time: current time
 acked_bitrate: current measured receive rate
 target_rate: output, the final bandwidth estimate
 return: whether the bandwidth estimate is valid
 */
bool DelayBasedBwe::UpdateEstimate(Timestamp at_time,
                                   absl::optional<DataRate> acked_bitrate,
                                   DataRate* target_rate) {
  const RateControlInput input(active_delay_detector_->State(), acked_bitrate);
  //feed the current network state into AimdRateControl
  *target_rate = rate_control_.Update(&input, at_time);
  return rate_control_.ValidEstimate();
}

3.3 AimdRateControl Analysis

AIMD stands for Additive Increase, Multiplicative Decrease: increase additively, decrease multiplicatively.

AimdRateControl::TimeToReduceFurther

/**
 Decides whether it is time to reduce the bitrate further.
 */
bool AimdRateControl::TimeToReduceFurther(Timestamp at_time,
                                          DataRate estimated_throughput) const {
  //the check interval is one RTT, clamped to [10, 200] ms
  const TimeDelta bitrate_reduction_interval =
      rtt_.Clamped(TimeDelta::Millis(10), TimeDelta::Millis(200));
  if (at_time - time_last_bitrate_change_ >= bitrate_reduction_interval) {
    return true;
  }
  if (ValidEstimate()) {
    // TODO(terelius/holmer): Investigate consequences of increasing
    // the threshold to 0.95 * LatestEstimate().
    const DataRate threshold = 0.5 * LatestEstimate();
    return estimated_throughput < threshold;//the current throughput fell below half of the last estimate
  }
  return false;
}

AimdRateControl::Update

/**
 input: the current network-state information
 at_time: current time
 */
DataRate AimdRateControl::Update(const RateControlInput* input,
                                 Timestamp at_time) {
  RTC_CHECK(input);

  // Set the initial bit rate value to what we're receiving the first half
  // second.
  // TODO(bugs.webrtc.org/9379): The comment above doesn't match to the code.
  if (!bitrate_is_initialized_) {
    //the bitrate has not been initialized yet
    const TimeDelta kInitializationTime = TimeDelta::Seconds(5);
    RTC_DCHECK_LE(kBitrateWindowMs, kInitializationTime.ms());
    if (time_first_throughput_estimate_.IsInfinite()) {
      //record the time of the first throughput estimate
      if (input->estimated_throughput)
        time_first_throughput_estimate_ = at_time;
    } else if (at_time - time_first_throughput_estimate_ >
                   kInitializationTime &&
               input->estimated_throughput) {
      //after 5 s, seed the current bitrate from the measured throughput
      current_bitrate_ = *input->estimated_throughput;
      bitrate_is_initialized_ = true;
    }
  }

  ChangeBitrate(*input, at_time);
  return current_bitrate_;
}

AimdRateControl::ChangeState

/**
 State transition: map the detected network state onto the rate-control state (hold, decrease, increase).
 The controller keeps three states (increase, hold, decrease); transitions are driven by the detector's three states (Normal, Overuse, Underuse):
 - On Overuse, enter decrease, whatever the current state.
 - On Underuse, enter hold, whatever the current state.
 - On Normal, move from hold to increase.
 */
void AimdRateControl::ChangeState(const RateControlInput& input,
                                  Timestamp at_time) {
  switch (input.bw_state) {
    case BandwidthUsage::kBwNormal:
      if (rate_control_state_ == kRcHold) {
        time_last_bitrate_change_ = at_time;
        rate_control_state_ = kRcIncrease;
      }
      break;
    case BandwidthUsage::kBwOverusing:
      if (rate_control_state_ != kRcDecrease) {
        rate_control_state_ = kRcDecrease;
      }
      break;
    case BandwidthUsage::kBwUnderusing:
      rate_control_state_ = kRcHold;
      break;
    default:
      assert(false);
  }
}

AimdRateControl::ChangeBitrate

// Update the rate-control state from the current network state, and adjust the target bitrate.
void AimdRateControl::ChangeBitrate(const RateControlInput& input,
                                    Timestamp at_time) {
  absl::optional<DataRate> new_bitrate;
  DataRate estimated_throughput =
      input.estimated_throughput.value_or(latest_estimated_throughput_);
  if (input.estimated_throughput)
    latest_estimated_throughput_ = *input.estimated_throughput;

  // An over-use should always trigger us to reduce the bitrate, even though
  // we have not yet established our first estimate. By acting on the over-use,
  // we will end up with a valid estimate.
  if (!bitrate_is_initialized_ &&
      input.bw_state != BandwidthUsage::kBwOverusing)
    return;

  //state transition driven by the detected network state
  ChangeState(input, at_time);

  // We limit the new bitrate based on the troughput to avoid unlimited bitrate
  // increases. We allow a bit more lag at very low rates to not too easily get
  // stuck if the encoder produces uneven outputs.
  //cap how fast the bitrate may rise: at most 1.5x the measured throughput (plus 10 kbps)
  const DataRate troughput_based_limit =
      1.5 * estimated_throughput + DataRate::KilobitsPerSec(10);

  switch (rate_control_state_) {
    case kRcHold:
      //hold: nothing to do
      break;

    case kRcIncrease:
      //link_capacity_.UpperBound() is the average throughput plus 3 standard deviations;
      //if the measured throughput exceeds it, the average is considered stale and is reset
      if (estimated_throughput > link_capacity_.UpperBound())
        link_capacity_.Reset();

      // Do not increase the delay based estimate in alr since the estimator
      // will not be able to get transport feedback necessary to detect if
      // the new estimate is correct.
      // If we have previously increased above the limit (for instance due to
      // probing), we don't allow further changes.
      if (current_bitrate_ < troughput_based_limit &&
          !(send_side_ && in_alr_ && no_bitrate_increase_in_alr_)) {
        DataRate increased_bitrate = DataRate::MinusInfinity();
        if (link_capacity_.has_estimate()) {
          // The link_capacity estimate is reset if the measured throughput
          // is too far from the estimate. We can therefore assume that our
          // target rate is reasonably close to link capacity and use additive
          // increase.
          // If the current bitrate were far above the measured average, the estimate would have been reset above,
          // so the target rate can be assumed close to the link capacity; increase cautiously, additively.
          DataRate additive_increase =
              AdditiveRateIncrease(at_time, time_last_bitrate_change_);
          increased_bitrate = current_bitrate_ + additive_increase;
        } else {
          // If we don't have an estimate of the link capacity, use faster ramp
          // up to discover the capacity.
          // no estimate of the capacity's upper bound yet: ramp up multiplicatively to discover it
          DataRate multiplicative_increase = MultiplicativeRateIncrease(
              at_time, time_last_bitrate_change_, current_bitrate_);
          increased_bitrate = current_bitrate_ + multiplicative_increase;
        }
        new_bitrate = std::min(increased_bitrate, troughput_based_limit);
      }

      time_last_bitrate_change_ = at_time;
      break;

    case kRcDecrease: {
      DataRate decreased_bitrate = DataRate::PlusInfinity();

      // Set bit rate to something slightly lower than the measured throughput
      // to get rid of any self-induced delay.
      // beta_=0.85
      decreased_bitrate = estimated_throughput * beta_;
      if (decreased_bitrate > current_bitrate_ && !link_capacity_fix_) {
        // TODO(terelius): The link_capacity estimate may be based on old
        // throughput measurements. Relying on them may lead to unnecessary
        // BWE drops.
        if (link_capacity_.has_estimate()) {
          //back off to 85% of the average (link-capacity) estimate
          decreased_bitrate = beta_ * link_capacity_.estimate();
        }
      }
      if (estimate_bounded_backoff_ && network_estimate_) {
        decreased_bitrate = std::max(
            decreased_bitrate, network_estimate_->link_capacity_lower * beta_);
      }

      // Avoid increasing the rate when over-using.
      // take the lower of the current bitrate and the computed one
      if (decreased_bitrate < current_bitrate_) {
        new_bitrate = decreased_bitrate;
      }

      if (bitrate_is_initialized_ && estimated_throughput < current_bitrate_) {
        if (!new_bitrate.has_value()) {
          last_decrease_ = DataRate::Zero();
        } else {
          last_decrease_ = current_bitrate_ - *new_bitrate;
        }
      }
      if (estimated_throughput < link_capacity_.LowerBound()) {
        // The current throughput is far from the estimated link capacity. Clear
        // the estimate to allow an immediate update in OnOveruseDetected.
        // the throughput is far below the average; reset the statistics
        link_capacity_.Reset();
      }

      bitrate_is_initialized_ = true;
      // update the link-capacity estimate
      // (link_capacity_ is only updated here, in the kRcDecrease state)
      link_capacity_.OnOveruseDetected(estimated_throughput);
      // Stay on hold until the pipes are cleared.
      // switch to hold
      rate_control_state_ = kRcHold;
      time_last_bitrate_change_ = at_time;
      time_last_bitrate_decrease_ = at_time;
      break;
    }
    default:
      assert(false);
  }

  //commit the new target bitrate
  current_bitrate_ = ClampBitrate(new_bitrate.value_or(current_bitrate_));
}

AimdRateControl::MultiplicativeRateIncrease

Multiplicative rate increase: the growth factor scales with the time elapsed since the last change.

//multiplicative increase: the growth factor scales with the elapsed time
DataRate AimdRateControl::MultiplicativeRateIncrease(
    Timestamp at_time,
    Timestamp last_time,
    DataRate current_bitrate) const {
  double alpha = 1.08;
  if (last_time.IsFinite()) {
    auto time_since_last_update = at_time - last_time;
    //exponent: the elapsed time in seconds, capped at 1.0; base: 1.08
    alpha = pow(alpha, std::min(time_since_last_update.seconds<double>(), 1.0));
  }
  DataRate multiplicative_increase =
      std::max(current_bitrate * (alpha - 1.0), DataRate::BitsPerSec(1000));
  return multiplicative_increase;
}
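
For example (made-up numbers): with current_bitrate = 1 Mbps and 0.5 s since the last change, alpha = 1.08^0.5 ≈ 1.039, so the increase is max(1 Mbps * 0.039, 1000 bps) ≈ 39 kbps.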

AimdRateControl::AdditiveRateIncrease

Additive rate increase: the increment scales with the time elapsed since the last change.

/**
 Maximum bitrate increase per second: roughly one average packet per response time (RTT + 100 ms).
 */
double AimdRateControl::GetNearMaxIncreaseRateBpsPerSecond() const {
  RTC_DCHECK(!current_bitrate_.IsZero());
  // assume 30 fps and packets of at most one MTU (~1200 bytes) to estimate the packets per frame and the average packet size
  const TimeDelta kFrameInterval = TimeDelta::Seconds(1) / 30;
  DataSize frame_size = current_bitrate_ * kFrameInterval;
  const DataSize kPacketSize = DataSize::Bytes(1200);
  // packets per frame
  double packets_per_frame = std::ceil(frame_size / kPacketSize);
  // average packet size
  DataSize avg_packet_size = frame_size / packets_per_frame;

  // Approximate the over-use estimator delay to 100 ms.
  TimeDelta response_time = rtt_ + TimeDelta::Millis(100);
  if (in_experiment_)
    response_time = response_time * 2;
  double increase_rate_bps_per_second =
      (avg_packet_size / response_time).bps<double>();
  double kMinIncreaseRateBpsPerSecond = 4000;
  return std::max(kMinIncreaseRateBpsPerSecond, increase_rate_bps_per_second);
}
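
A worked example with made-up numbers: at current_bitrate_ = 2.4 Mbps, one 30-fps frame is 2.4e6 / 30 = 80,000 bits = 10,000 bytes, i.e. ceil(10000 / 1200) = 9 packets of roughly 1,111 bytes each. With rtt_ = 100 ms the response time is 200 ms, so the near-max increase rate is 1111 bytes / 0.2 s ≈ 44.4 kbps per second, well above the 4000 bps/s floor.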

//additive increase: the increment scales with the elapsed time
DataRate AimdRateControl::AdditiveRateIncrease(Timestamp at_time,
                                               Timestamp last_time) const {
  double time_period_seconds = (at_time - last_time).seconds<double>();
  //GetNearMaxIncreaseRateBpsPerSecond() gives the near-max increase per second
  double data_rate_increase_bps =
      GetNearMaxIncreaseRateBpsPerSecond() * time_period_seconds;
  return DataRate::BitsPerSec(data_rate_increase_bps);
}

 
