コード例 #1
0
    def __init__(self, external_add_task, config, set_option,
                       get_remote_endpoints, get_rates):
        """Wire up RTT monitoring and initialize rate-control state.

        external_add_task -- scheduler callback (delay, fn, *args)
        config            -- mapping read for the current rate limits
        set_option        -- setter used to push new rate limits
        get_remote_endpoints, get_rates -- callables supplied by the caller
        """
        self.external_add_task = external_add_task
        self.config = config
        self.set_option = set_option
        self.get_rates = get_rates

        # Bounce each RTT sample onto the main scheduler before acting on
        # it, so _inspect_rates never runs in the monitor's own context.
        def on_rtt_sample(rtt):
            self.external_add_task(0, self._inspect_rates, rtt)

        self.rttmonitor = RTTMonitor(on_rtt_sample)
        self.nodefeeder = NodeFeeder(external_add_task=external_add_task,
                                     get_remote_endpoints=get_remote_endpoints,
                                     rttmonitor=self.rttmonitor)

        # Set on the first RTT sample; drives the slow-start grace period.
        self.start_time = None

        # Bounded sample windows: u/d hold throughput samples, t holds RTT
        # samples (a 10x longer window), ur/dr hold the configured limits.
        self.max_samples = 10  # window length; original author unsure ("hmm")
        self.u = SizedList(self.max_samples)
        self.d = SizedList(self.max_samples)
        self.t = SizedList(self.max_samples * 10)
        self.ur = SizedList(self.max_samples)
        self.dr = SizedList(self.max_samples)

        # Seed the deviation trackers with a tiny non-zero value so early
        # comparisons have something to work against.
        self.current_std = 0.001
        self.max_std = 0.001

        self.max_rates = {"upload": 1.0, "download": 1.0}
        self.max_p = 1.0
        # min_p starts effectively at +infinity so any real sample replaces
        # it; mid_p is therefore hugely negative until samples arrive.
        self.min_p = 2 ** 500
        self.mid_p = ((self.max_p - self.min_p) / 2.0) + self.min_p
        self.old_p = None
コード例 #2
0
class AverageOfLastWindow(object):
    """Running mean over the most recent window_size samples."""

    def __init__(self, window_size):
        # SizedList presumably evicts the oldest entry once window_size
        # samples have been appended -- confirm against its definition.
        self._window = SizedList(window_size)

    def __call__(self, sample=None):
        """Record *sample* if given, then return the current window mean."""
        if sample is None:
            return mean(self._window)
        self._window.append(sample)
        return mean(self._window)
コード例 #3
0
    def __init__(self, window_size, drop_every_nth,
                 false_pos_prob, max_consecutive, max_thresh, ewma ):
        """Configure the estimators and (optionally) stats trace files.

        window_size     -- number of delay samples kept in the window.
        drop_every_nth  -- passed to the median-of-min/max estimators.
        false_pos_prob  -- target bound P on false-positive detections.
        max_consecutive -- cap on n, the consecutive-sample trigger count.
        max_thresh      -- upper limit applied to the computed threshold.
        ewma            -- smoothing factor for conditional mean/variance.
        """
        assert drop_every_nth > 0
        assert false_pos_prob > 0.0 and false_pos_prob < 1.0
        assert max_consecutive > 1
        assert max_thresh > 0.0 and max_thresh < 1.0
        assert ewma > 0.0 and ewma < 1.0

        # parameters
        self._window_size = window_size
        self._false_pos_prob = false_pos_prob
        self._max_consecutive = max_consecutive
        # BUG FIX: this was assigned to self._thresh, which is reset to
        # None below; the threshold-update code reads self._max_thresh,
        # which was never set (AttributeError).
        self._max_thresh = max_thresh

        # estimators
        self._window = SizedList(window_size)
        self._propagation_estimator = \
            MedianOfMinWindow(window_size, drop_every_nth)
        self._delay_on_full_estimator = \
            MedianOfMaxWindow(window_size, drop_every_nth)
        self._cond_var = EWMA(alpha = ewma)   # variance when uncongested.
        self._cond_mean = EWMA(alpha = ewma)  # mean when uncongested.

        # counters
        self._init_samples = 0  # count of first samples.
        self._consecutive = 0   # consecutive samples above the threshold.

        # computed thresholds; established once enough samples arrive.
        self._n = None
        self._thresh = None

        if stats:
            # Wrap each estimator in a StreamTracer that logs its output
            # over time into a gnuplot-friendly data file.
            prop_vs_time = os.path.join( stats_dir, "prop_vs_time.plotdata" )
            fp = open( prop_vs_time, "w" )
            self._propagation_estimator = \
                StreamTracer( self._propagation_estimator, fp )

            full_vs_time = os.path.join( stats_dir, "full_vs_time.plotdata" )
            fp = open( full_vs_time, "w" )
            self._delay_on_full_estimator = \
                StreamTracer( self._delay_on_full_estimator, fp )

            cmean_vs_time = os.path.join( stats_dir, "cmean_vs_time.plotdata" )
            fp = open( cmean_vs_time, "w" )
            self._cond_mean = StreamTracer( self._cond_mean, fp )

            cvar_vs_time = os.path.join( stats_dir, "cvar_vs_time.plotdata" )
            fp = open( cvar_vs_time, "w" )
            self._cond_var = StreamTracer( self._cond_var, fp )

            thresh_vs_time = os.path.join(stats_dir,"thresh_vs_time.plotdata")
            self._thfp = open( thresh_vs_time, "w" )

            # BUG FIX: the variable was named n_v_time but open() referenced
            # n_vs_time, raising NameError whenever stats was enabled.
            n_vs_time = os.path.join( stats_dir, "n_vs_time.plotdata" )
            self._nfp = open( n_vs_time, "w" )
コード例 #4
0
 def __init__(self, window_size):
     """Track delay variance over a bounded window of RTT samples."""
     self._window = SizedList(window_size)
     self._window_size = window_size
     self._max_var = 0.0  # largest variance observed so far
     if stats:
         # Emit plot data for offline analysis of the variance signal.
         var_path = os.path.join(stats_dir, "delay_var_vs_time.plotdata")
         self._var_fp = open(var_path, "w")
         max_var_path = os.path.join(stats_dir, "max_var_vs_time.plotdata")
         self._max_var_fp = open(max_var_path, "w")
         _copy_gnuplot("var_vs_time.gnuplot")
コード例 #5
0
class BandwidthManager(object):
    """Adaptive bandwidth limiter.

    Feeds RTT samples (delivered by RTTMonitor through the got_new_rtt
    closure) into _inspect_rates, which adjusts the 'max_upload_rate'
    config option, backing off when the RTT standard deviation approaches
    the largest deviation seen so far.  The various _method_* functions
    are alternative control laws kept for experimentation; only
    _method_stddev is currently wired in (via _affect_rate).
    """

    def __init__(self, external_add_task, config, set_option,
                       get_remote_endpoints, get_rates):
        # external_add_task(delay, fn, *args) schedules fn on the main
        # event loop; config/set_option read and write the rate options.
        self.external_add_task = external_add_task
        self.config = config
        self.set_option = set_option
        self.get_rates = get_rates
        def got_new_rtt(rtt):
            # Bounce onto the scheduler so rate inspection runs on the
            # main loop rather than in the RTT monitor's context.
            self.external_add_task(0, self._inspect_rates, rtt)
        self.rttmonitor = RTTMonitor(got_new_rtt)
        self.nodefeeder = NodeFeeder(external_add_task=external_add_task,
                                     get_remote_endpoints=get_remote_endpoints,
                                     rttmonitor=self.rttmonitor)

        # Set on the first RTT sample; drives the slow-start grace period.
        self.start_time = None

        # Bounded sample windows: u/d = upload/download throughput,
        # t = RTT samples (a 10x longer window), ur/dr = configured limits.
        self.max_samples = 10 # hmm...
        self.u = SizedList(self.max_samples)
        self.d = SizedList(self.max_samples)
        self.t = SizedList(self.max_samples * 10)
        self.ur = SizedList(self.max_samples)
        self.dr = SizedList(self.max_samples)

        # Seeded with a tiny non-zero value so early comparisons have
        # something to work against.
        self.current_std = 0.001        
        self.max_std = 0.001

        self.max_rates = {}
        self.max_rates["upload"] = 1.0
        self.max_rates["download"] = 1.0
        # min_p starts effectively at +infinity so any real sample
        # replaces it; mid_p is hugely negative until then.
        self.max_p = 1.0
        self.min_p = 2**500
        self.mid_p = ((self.max_p - self.min_p) / 2.0) + self.min_p
        self.old_p = None

    # The tuning constants below are unprincipled guesses (the original
    # author's own admission).

    def _method_1(self, type, t, c, old_c, rate):
        """Experimental control law: halve the rate when correlation and
        latency are both high; otherwise creep the rate upward."""
        # This concept is:
        # if the correlation is high and the latency is high
        # then lower the bandwidth limit.
        # otherwise, raise it.

        if ((c > 0.96) and (t > 100)): 
            rate /= 2.0
            if debug:
                print type, "down to", rate
        else:
            rate += 500 # hmm
            if debug:
                print type, "up to", rate
        return rate
    
    def _method_2(self, type, t, c, old_c, rate):
        """Experimental control law: halve the rate when correlation is
        LOW but latency is high; otherwise creep the rate upward."""
        # This concept is:
        # if the correlation is low and the latency is high, lower the limit
        # otherwise raise it

        if ((c < 0.60) and (t > 100)): 
            rate /= 2.0
            if debug: 
                print type, "down to", rate
        else:
            rate += 500 # hmm
            if debug:
                print type, "up to", rate
        return rate

    def _method_vegasish(self, type, t, c, old_c, rate):
        """TCP-Vegas-like law: back off sharply (to 1/8) when the RTT
        exceeds the midpoint of the observed min/max RTT, else creep up."""

        middle_rtt = ((self.rttmonitor.get_min_rtt() +
                       self.rttmonitor.get_max_rtt()) / 2.0)
        if t > middle_rtt:
            rate *= 1.0/8.0
            if debug:
                print type, "down to", rate
        else:
            rate += 1000 # hmm
            if debug:
                print type, "up to", rate
        return rate            

    def _method_vegas_greg(self, type, t, c, old_c, rate):
        """Variant of _method_vegasish that additionally requires low
        correlation (c < 0.5) before backing off."""

        middle_rtt = ((self.rttmonitor.get_min_rtt() +
                       self.rttmonitor.get_max_rtt()) / 2.0)
        if t > middle_rtt and c < 0.5:
            rate *= 1.0/8.0
            if debug:
                print type, "down to", rate
        else:
            rate += 1000 # hmm
            if debug:
                print type, "up to", rate
        return rate            

    def _method_ratio(self, type, t, p, min_p, max_p, rate):
        """Rate control driven by the ratio p/max_p: jump down to a
        fraction of the observed maximum rate when the ratio is low,
        otherwise grow proportionally.  Floor of 1000.
        (min_p is accepted but unused.)"""
        ratio = p / max_p
        if debug:
            print "RATIO", ratio
        if ratio < 0.5:
            rate = ratio * self.max_rates[type]
            if debug:
                print type.upper(), "SET to", rate
        else:
            rate += rate * (ratio/10.0) # hmm
            if debug:
                print type.upper(), "UP to", rate
                
        return max(rate, 1000)


    def _method_stddev(self, type, std, max_std, rate):
        """The control law actually in use: shrink the rate by 20% when
        the current std deviation nears (80% of) the max seen; otherwise
        add a flat 1000.  Never returns less than 1000."""
        if std > (max_std * 0.80): # FUDGE
            rate *= 0.80 # FUDGE
            if debug:
                print type.upper(), "DOWN to", rate
        else:
            rate += 1000 # FUDGE
            if debug:
                print type.upper(), "UP to", rate

        return max(rate, 1000) # FUDGE
    
    
    def _affect_rate(self, type, std, max_std, rate, set):
        """Run _method_stddev, clamp the result to a 4096 floor, and push
        it through the supplied setter.  Returns True if the floor
        ("rock bottom") was hit."""
        rate = self._method_stddev(type, std, max_std, rate)

        rock_bottom = False
        if rate <= 4096:
            if debug:
                print "Rock bottom"
            rock_bottom = True
            rate = 4096
    
        set(int(rate))

        return rock_bottom
        

    def _inspect_rates(self, t = None):
        """Main control step, re-invoked for every RTT sample.

        t is the latest RTT; None means "ask the monitor", and a missing
        reading (timeout) decays max_std instead of adjusting rates.
        Collects throughput/RTT samples, updates the std-deviation
        statistics, and adjusts the upload rate cap via _affect_rate.
        The download rate is deliberately left alone (see the
        commented-out block below)."""

        if t == None:
            t = self.rttmonitor.get_current_rtt()

        if t == None:
            # this makes timeouts reduce the maximum std deviation
            self.max_std *= 0.80 # FUDGE
            return

        if self.start_time == None:
            self.start_time = bttime()
        def set_if_enabled(option, value):
            # No-op unless bandwidth management is switched on in config.
            if not self.config['bandwidth_management']:
                return
            #print "Setting rate to ", value
            self.set_option(option, value)

        # TODO: slow start should be smarter than this
        # For the first 20 seconds, open the caps wide.
        if self.start_time + 20 > bttime():
            set_if_enabled('max_upload_rate', 10000000)
            set_if_enabled('max_download_rate', 10000000)

        if t < 3:
            # I simply don't believe you. Go away.
            # (RTT under 3ms is treated as a bogus measurement.)
            return

        tup = self.get_rates()
        if tup == None:
            return
        u, d = tup
        #print "udt", u, d, t
        #print "uprate, downrate=", u, d

        self.u.append(u)
        self.d.append(d)
        self.t.append(t)
        self.ur.append(self.config['max_upload_rate'])
        self.dr.append(self.config['max_download_rate'])

        #s = time.time()
        #cu = correlation(self.u, self.t)
        #cd = correlation(self.d, self.t)
        #cur = correlation(self.u, self.ur)
        #cdr = correlation(self.d, self.dr)
        #e = time.time()

        self.current_std = standard_deviation(self.t)
        
        pu = ratio_sum_lists(self.u, self.t)
        pd = ratio_sum_lists(self.d, self.t)
        if len(self.u) > 2:
            lp = [ x/y for x, y in itertools.izip(self.u, self.t) ]
            min_pu = min(*lp)
            max_pu = max(*lp)
        else:
            min_pu = u / t
            max_pu = u / t
        # NOTE(review): this overwrites the ratio_sum_lists result computed
        # above with the instantaneous ratio -- possibly intentional
        # experimentation; verify before relying on pu.
        pu = u / t

        self.max_rates["upload"] = max(max(self.u), self.max_rates["upload"])
        self.max_rates["download"] = max(max(self.d), self.max_rates["download"])

        if debug:
            print "STDDEV", u, self.config['max_upload_rate'], \
                  self.max_std, self.current_std, pu, pd
        
        rb = self._affect_rate("upload", self.current_std, self.max_std,
                               self.config['max_upload_rate'],
                               lambda r : set_if_enabled('max_upload_rate', r))
        # don't adjust download rate, it's not nearly correlated enough
##        if rb:
##            v = int(self.config['max_download_rate'] * 0.90) # FUDGE
##            v = max(v, 2000) # FUDGE
##            set_if_enabled('max_download_rate', v) 
##        else:
##            v = int(self.config['max_download_rate'] + 6000) # FUDGE
##            set_if_enabled('max_download_rate', v) 
##        if debug:
##            print "DOWNLOAD SET to", v
            
        #self._affect_rate("download", t, cd, self.last_cd, pd,
        #                  self.config['max_download_rate'],
        #                  lambda r : self.set_option('max_download_rate', r))

        self.max_std = max(self.max_std, self.current_std)

        #self.last_cu = cu
        #self.last_cd = cd

        # we're re-called by the pinging thing
        #self.external_add_task(0.1, self._inspect_rates)
コード例 #6
0
class ChebyshevCongestionEstimator(BinaryCongestionEstimator):
    """This congestion estimator first estimates the variance
       and mean of the conditional delay distribution for the condition
       when the bottleneck is uncongested.  We then test for the
       congested state based on delay samples exceeding a threshold.
       We set the threshold based on the Chebyshev Inequality:       
       
       Chebysev Inequality:
           P[(X-u)^2 >= k^2] <= sigma^2 / k^2           (1)

       More details are given in source code comments."""
    #
    #  Here u and sigma are the conditional mean and stddev, X is a single
    #  sample.  We thus can bound the probability of a single sample exceeding
    #  a threshold.  If we set the delay threshold to k such that we trigger
    #  congestion detection whenever the delay threshold is exceeded then
    #  this inequality can be interpreted as an UPPER BOUND ON THE PROBABILITY
    #  OF A FALSE POSITIVE CONGESTION DETECTION.
    #
    #  Because we are dealing with a single bottleneck, we can know the
    #  min (propagation) delay and max (buffer full) delay.  We thus know
    #  the worst case variance occurs when half the samples are at the upper
    #  bound and half at the lower bound resulting in
    #  
    #                                 2
    #                        (max-min)
    #      var_sample approx  -------                  (2)
    #                            4
    #
    #  Substituting (2) into (1) yields
    #                                    2
    #                           (max-min)
    #      P[(X-u)^2 >= k^2] <= ---------              (3)
    #                             4 k^2
    #  
    #  When the worst-case variance is achieved, the mean u = (max-min)/2.
    #  If we then set the threshold equal to max then k = (max-min)/2 and 
    #  (3) becomes
    #                           
    #      P[(X-u)^2 >= k^2] <= 1                      (4)
    #                             
    #  This means that in the worst-case, the Chebyshev inequality provides
    #  a useless bound.  However, for anything less than the worst-case,
    #  the false positive probability is less than 1.  But wait, how is it
    #  useful to have a false positive probability near 1?
    #
    #  If delay samples are independent when in the uncongested state,
    #  the probability that n consecutive samples exceed the theshold
    #  becomes,
    #  
    #      P[For n consecutive samples, (X-u)^2 >= k^2] <= (sigma^2 / k^2)^n
    #                                                  (5)
    #  
    #  If we only signal congestion when n consecutive samples are
    #  above the threshold and if sigma^2 / k^2 < 1, then we can set
    #  the probability of a false positive to anything we like by
    #  setting n sufficiently large.
    #  
    #  I argue that during the uncongested state, the samples should be
    #  approximately independent, because when the network is
    #  uncongested, delay is not driven by queueing or at least not
    #  by queueing that persists across a significant portion of our
    #  sample window.
    #  
    #  To allow for the most distance above and below the threshold,
    #  we try to set the threshold to (max-min)/2, but we will increase
    #  the threshold if necessary should the number of consecutive samples
    #  needed become too large (i.e., exceed max_consecutive) or if
    #  the conditional mean exceeds (max-min)/2.
    #  
    #      k = thresh - u
    #      k = (max-min)/2 - u
    #      let P = P[For n consecutive samples,(X-u)^2 >= k^2] = false_pos_prob
    #
    #                     2n
    #                sigma
    #      P <=  -----------------
    #             max-min        2n
    #           ( --------  - u )
    #                2
    #
    #      log P <=  2n log ( sigma / ((max-min)/2 - u))
    #
    #            1              log P
    #      n >=  -  ------------------------------  .     (6) 
    #            2  log (sigma / ((max-min)/2 - u)
    #
    #  We then turn >= in (6) to an assignment to derive the n used in
    #  detecting congestion.  If n exceeds max_consecutive then we adjust
    #  the threshold keeping n at max_consecutive.
    #
    #                    2n
    #               sigma
    #      P >= ---------------
    #                        2n
    #           (thresh - u )
    #
    #  
    #                 sigma
    #      thresh >=  ------- + u                          (7)
    #                 P^(1/2n)
    #
    #  The threshold is not allowed to exceed max_thresh of the way
    #  between min and max.  When threshold reaches this point, the
    #  thresholds become less meaningful and the performance of the
    #  congestion estimator is likely to suffer.
    #
    # Implementation Complexity:
    #  The computational burden of this congestion estimator is
    #  significantly higher than the TCP Reno/Tahoe loss based or TCP Vegas
    #  delay-based congestion estimators, but this algorithm is applied
    #  on the aggregate of ALL connections passing through the access point.
    #  State maintainence for the 100+ connections created by BitTorrent
    #  dwarfs the computational burden of this estimator.
    def __init__(self, window_size, drop_every_nth,
                 false_pos_prob, max_consecutive, max_thresh, ewma ):
        """Configure the estimators and (optionally) stats trace files.

        window_size     -- number of delay samples kept in the window.
        drop_every_nth  -- passed to the median-of-min/max estimators.
        false_pos_prob  -- target bound P on false-positive detections.
        max_consecutive -- cap on n, the consecutive-sample trigger count.
        max_thresh      -- fraction (0..1) of the min-to-max delay span
                           that the threshold may not exceed.
        ewma            -- smoothing factor for conditional mean/variance.
        """
        assert drop_every_nth > 0
        assert false_pos_prob > 0.0 and false_pos_prob < 1.0
        assert max_consecutive > 1
        assert max_thresh > 0.0 and max_thresh < 1.0
        assert ewma > 0.0 and ewma < 1.0

        # parameters
        self._window_size = window_size
        self._false_pos_prob = false_pos_prob
        self._max_consecutive = max_consecutive
        # BUG FIX: this was assigned to self._thresh, which is reset to
        # None below; _update() reads self._max_thresh, which was never
        # set and raised AttributeError.
        self._max_thresh = max_thresh

        # estimators
        self._window = SizedList(window_size)
        self._propagation_estimator = \
            MedianOfMinWindow(window_size, drop_every_nth)
        self._delay_on_full_estimator = \
            MedianOfMaxWindow(window_size, drop_every_nth)
        self._cond_var = EWMA(alpha = ewma)   # variance when uncongested.
        self._cond_mean = EWMA(alpha = ewma)  # mean when uncongested.

        # counters
        self._init_samples = 0  # count of first samples.
        self._consecutive = 0   # consecutive samples above the threshold.

        # computed thresholds; established by the first call to _update().
        self._n = None
        self._thresh = None

        if stats:
            # Wrap each estimator in a StreamTracer that logs its output
            # into a gnuplot-friendly data file.
            prop_vs_time = os.path.join( stats_dir, "prop_vs_time.plotdata" )
            fp = open( prop_vs_time, "w" )
            self._propagation_estimator = \
                StreamTracer( self._propagation_estimator, fp )

            full_vs_time = os.path.join( stats_dir, "full_vs_time.plotdata" )
            fp = open( full_vs_time, "w" )
            self._delay_on_full_estimator = \
                StreamTracer( self._delay_on_full_estimator, fp )

            cmean_vs_time = os.path.join( stats_dir, "cmean_vs_time.plotdata" )
            fp = open( cmean_vs_time, "w" )
            self._cond_mean = StreamTracer( self._cond_mean, fp )

            cvar_vs_time = os.path.join( stats_dir, "cvar_vs_time.plotdata" )
            fp = open( cvar_vs_time, "w" )
            self._cond_var = StreamTracer( self._cond_var, fp )

            thresh_vs_time = os.path.join(stats_dir,"thresh_vs_time.plotdata")
            self._thfp = open( thresh_vs_time, "w" )

            # BUG FIX: the variable was named n_v_time but open() referenced
            # n_vs_time, raising NameError whenever stats was enabled.
            n_vs_time = os.path.join( stats_dir, "n_vs_time.plotdata" )
            self._nfp = open( n_vs_time, "w" )
        
    def timeout(self):
        """Inform the delay-on-full estimator that a sample timed out."""
        self._delay_on_full_estimator.timeout()

    def __call__( self, rtt, rate ):
        """Feed one rtt sample; return True iff congestion is detected.

        Returns False unconditionally until a full window of samples has
        been accumulated and the conditional estimators initialized."""
        self._window.append(rtt)
        full = self._delay_on_full_estimator(rtt)
        prop = self._propagation_estimator(rtt)
        if ( self._init_samples < self._window_size ):
            # too few samples to determine whether there is congestion...
            self._init_samples += 1
            return False

        # enough samples to initialize conditional estimators.
        elif self._init_samples == self._window_size:
            self._init_samples += 1
            self._update(rtt)
            return False

        assert self._n is not None and self._thresh is not None
        epsilon = ( full - prop ) * 0.05

        # if delay is within epsilon of the propagation delay then
        # assume that we are in the uncongested state.  We use the
        # window's middle sample to reduce bias.  (// = explicit floor
        # division; the index must be an integer.)
        if self._window[len(self._window)//2] < prop + epsilon:
            self._update(rtt)   # updates thresholds.

        if rtt > self._thresh:
            self._consecutive += 1
            if self._consecutive >= self._n:
                self._consecutive = 0  # don't generate multiple detections
                                       # for single period of congestion unless
                                       # it persists across separate trials
                                       # of n samples each.
                return True   # congestion detected
        else:
            self._consecutive = 0
        return False          # no congestion detected
        
            
    def _update(self, rtt):
        """update thresholds when delay is within epsilon of the
           estimated propagation delay."""
        
        var = self._cond_var(variance(self._window))
        u = self._cond_mean(mean(self._window))

        #         1              log P
        #   n >=  -  ------------------------------  .     (6) 
        #         2  log (sigma / ((max-min)/2 - u)

        sigma = math.sqrt(var)
        # Renamed from max/min so the builtins are not shadowed.
        full = self._delay_on_full_estimator()
        prop = self._propagation_estimator()
        p = self._false_pos_prob
        thresh = (full - prop) / 2
        if thresh > u:
            n = int(0.5 * math.log(p) / math.log(sigma / (thresh - u)))
            if n < 1:
                n = 1
        
        if thresh <= u or n > self._max_consecutive:
            n = self._max_consecutive
            
            #             sigma
            # thresh >=  ------- + u                       (7)
            #            P^(1/2n)
            # BUG FIX: was p**(0.5*n), i.e. P^(n/2), which contradicts the
            # derivation of (7) above (thresh - u >= sigma / P^(1/(2n))).
            thresh = sigma / p ** (1.0 / (2 * n)) + u

            # BUG FIX: max_thresh is a FRACTION of the way between the
            # propagation delay and the delay-when-full (asserted
            # 0 < max_thresh < 1 in __init__, and described that way in
            # the class comments), so convert it to an absolute delay
            # before comparing.
            thresh_limit = prop + self._max_thresh * (full - prop)
            if thresh > thresh_limit:

                # this is a bad state.  if we are forced to set thresh to
                # max thresh then the rate of false positives will
                # inexorably increase.  What else to do?
                thresh = thresh_limit

        self._thresh = thresh
        self._n = n
        if stats:
            self._thfp.write( "%f\t%f\n" % (bttime(), self._thresh) )
            self._nfp.write( "%f\t%d\n" % (bttime(), self._n) )
コード例 #7
0
class VarianceCongestionEstimator(BinaryCongestionEstimator):
    """Congestion is assumed iff the stddev exceeds a threshold
       fraction of the maximum standard deviation."""
    # OBJECTION:  Variance maximization works so long as the rate remains
    # below (to the left) of the peak in the variance versus rate curve.
    # In this regime, increasing rate causes an increase in variance.
    # When measured variance exceeds a threshold the system backs off causing
    # the variance to diminish.  The system oscillates about this 
    # optimal point.
    #
    # HOWEVER, the system behaves quite differently when rates are high, i.e.,
    # close to the bottleneck capacity.  The graph of delay versus send rate
    # looks similar to a bellcurve.  Our system increases the send rate
    # whenever the variance is below a threshold and decreases when 
    # above the threshold.  This is the correct behavior when on the
    # left-hand side of the bell curve.  However, it is the OPPOSITE
    # of the desired behavior when to the right of the peak of the
    # bell curve.  As a result, when rates get too high the system
    # continues to increase send rate until loss occurs.
    #
    # When loss occurs, the system backs off.  When the control law is 
    # AIMDControlLaw, the system backs off multiplicatively.  This backoff
    # moves the system to the left on the bell curve.  If the move is large
    # enough the system climbs over the hump and the system resumes
    # the proper behavior of increasing rate whenever variance is below
    # a threshold.  If however, the backoff is insufficient to reach
    # the peak of the bell curve then the system slides back to the right
    # and resumes increasing send rate until loss occurs.
    #
    # This system is not guaranteed to converge on the equilibrium from all
    # feasible points.     
    #    -- David Harrison
    def __init__(self, window_size):
        """Track delay variance over a bounded window of window_size
        RTT samples; optionally trace the signal to plot files."""
        self._window = SizedList(window_size)
        self._window_size = window_size
        self._max_var = 0.0   # largest variance observed so far.
        if stats:
            delay_var_vs_time = os.path.join( stats_dir,
                                        "delay_var_vs_time.plotdata" )
            self._var_fp = open( delay_var_vs_time, "w" )
            max_var_vs_time = os.path.join( stats_dir, 
              "max_var_vs_time.plotdata" )
            self._max_var_fp = open( max_var_vs_time, "w" )
            _copy_gnuplot( "var_vs_time.gnuplot" )

    def timeout(self):
        """Decay the variance ceiling after a sample timeout."""
        self._max_var *= 0.64  # FUDGE. same as using 0.8 * max stddev.
        if stats:
            self._max_var_fp.write( "%f\t%f\n" % (bttime(), self._max_var) )
        

    def __call__( self, rtt, rate ):
        """Record one rtt sample; return True iff congestion is assumed
        (variance above 64% of the maximum variance seen)."""
        self._window.append(rtt)
        var = variance(self._window)
        if stats:
            self._var_fp.write( "%f\t%f\n" % (bttime(), var) )
        if var > self._max_var: 
            self._max_var = var
            if stats:
                self._max_var_fp.write( "%f\t%f\n" % (bttime(), self._max_var))

        # won't signal congestion until we have at least a full window's
        # worth of samples.
        # BUG FIX: the original compared the SizedList object itself to an
        # int (self._window < self._window_size), which under Python 2
        # cross-type comparison never reflects the sample count, so the
        # warm-up guard never worked.  Compare the number of samples.
        if len(self._window) < self._window_size:
            return False
        if var > (self._max_var * 0.64): # FUDGE
            return True
        else:
            return False
コード例 #8
0
 def __init__( self, window_size ):
     """Create a bounded sample window holding window_size entries."""
     # SizedList presumably evicts the oldest entry once full -- confirm
     # against its definition.
     self._window = SizedList(window_size)
コード例 #9
0
class VarianceCongestionEstimator(BinaryCongestionEstimator):
    """Congestion is assumed iff the stddev exceeds a threshold
       fraction of the maximum standard deviation."""
    # OBJECTION:  Variance maximization works so long as the rate remains
    # below (to the left) of the peak in the variance versus rate curve.
    # In this regime, increasing rate causes an increase in variance.
    # When measured variance exceeds a threshold the system backs off causing
    # the variance to diminish.  The system oscillates about this 
    # optimal point.
    #
    # HOWEVER, the system behaves quite differently when rates are high, i.e.,
    # close to the bottleneck capacity.  The graph of delay versus send rate
    # looks similar to a bellcurve.  Our system increases the send rate
    # whenever the variance is below a threshold and decreases when 
    # above the threshold.  This is the correct behavior when on the
    # left-hand side of the bell curve.  However, it is the OPPOSITE
    # of the desired behavior when to the right of the peak of the
    # bell curve.  As a result, when rates get too high the system
    # continues to increase send rate until loss occurs.
    #
    # When loss occurs, the system backs off.  When the control law is 
    # AIMDControlLaw, the system backs off multiplicatively.  This backoff
    # moves the system to the left on the bell curve.  If the move is large
    # enough the system climbs over the hump and the system resumes
    # the proper behavior of increasing rate whenever variance is below
    # a threshold.  If however, the backoff is insufficient to reach
    # the peak of the bell curve then the system slides back to the right
    # and resumes increasing send rate until loss occurs.
    #
    # This system is not guaranteed to converge on the equilibrium from all
    # feasible points.     
    #    -- David Harrison
    def __init__(self, window_size):
        """Track delay variance over a bounded window of window_size
        RTT samples; optionally trace the signal to plot files."""
        self._window = SizedList(window_size)
        self._window_size = window_size
        self._max_var = 0.0   # largest variance observed so far.
        if stats:
            delay_var_vs_time = os.path.join( stats_dir,
                                        "delay_var_vs_time.plotdata" )
            self._var_fp = open( delay_var_vs_time, "w" )
            max_var_vs_time = os.path.join( stats_dir, 
              "max_var_vs_time.plotdata" )
            self._max_var_fp = open( max_var_vs_time, "w" )
            _copy_gnuplot( "var_vs_time.gnuplot" )

    def timeout(self):
        """Decay the variance ceiling after a sample timeout."""
        self._max_var *= 0.64  # FUDGE. same as using 0.8 * max stddev.
        if stats:
            self._max_var_fp.write( "%f\t%f\n" % (bttime(), self._max_var) )
        

    def __call__( self, rtt, rate ):
        """Record one rtt sample; return True iff congestion is assumed
        (variance above 64% of the maximum variance seen)."""
        self._window.append(rtt)
        var = variance(self._window)
        if stats:
            self._var_fp.write( "%f\t%f\n" % (bttime(), var) )
        if var > self._max_var: 
            self._max_var = var
            if stats:
                self._max_var_fp.write( "%f\t%f\n" % (bttime(), self._max_var))

        # won't signal congestion until we have at least a full window's
        # worth of samples.
        # BUG FIX: the original compared the SizedList object itself to an
        # int (self._window < self._window_size), which under Python 2
        # cross-type comparison never reflects the sample count, so the
        # warm-up guard never worked.  Compare the number of samples.
        if len(self._window) < self._window_size:
            return False
        if var > (self._max_var * 0.64): # FUDGE
            return True
        else:
            return False
コード例 #10
0
 def __init__( self, window_size ):
     """Create a bounded sample window holding window_size entries."""
     # SizedList presumably evicts the oldest entry once full -- confirm
     # against its definition.
     self._window = SizedList(window_size)