def _check(self):
    """Tracker announce state machine, run periodically.

    Decides whether to issue an announce and with which event code
    (0=started, 1=completed, 2=stopped for _announce's event arg).
    """
    if self.current_started is not None:
        # An announce is already in flight; warn if it is taking close
        # to a minute, but never start a second one concurrently.
        if self.current_started <= bttime() - 58:
            self.errorfunc(WARNING, "Tracker announce still not complete "
                           "%d seconds after starting it" %
                           int(bttime() - self.current_started))
        return
    if self.peerid is None:
        # First announce: adopt the wanted peer id and send 'started'.
        self.peerid = self.wanted_peerid
        self.url = self._makeurl(self.peerid, self.port)
        self._announce(0)
        return
    if self.peerid != self.wanted_peerid:
        # Identity changed: send 'stopped' for the old peer id and
        # snapshot transfer counters so the next session starts fresh.
        self._announce(2)
        self.peerid = None
        self.previous_up = self.up()
        self.previous_down = self.down()
        return
    if self.finish:
        # Download just completed: send 'completed' exactly once.
        self.finish = False
        self._announce(1)
        return
    if self.fail_wait is not None:
        # Previous announce failed; retry only after back-off expires.
        if self.last_time + self.fail_wait <= bttime():
            self._announce()
        return
    if self.last_time > bttime() - self.config['rerequest_interval']:
        # Too soon since the last announce.
        return
    # Ask for more peers when we are short of them; the threshold is
    # stricter once we know incoming connections can reach us.
    if self.ever_got_incoming():
        getmore = self.howmany() <= self.config['min_peers'] / 3
    else:
        getmore = self.howmany() < self.config['min_peers']
    if getmore or bttime() - self.last_time > self.announce_interval:
        self._announce()
def __init__(self, max_rate_period, fudge=5):
    """Rate measurer.

    max_rate_period -- longest window (seconds) over which the rate
                       is averaged.
    fudge           -- pretend measurement began this many seconds ago
                       so the first samples do not divide by ~zero.
    """
    self.max_rate_period = max_rate_period
    self.rate = 0.0
    self.total = 0
    self.ratesince = bttime() - fudge
    self.last = self.ratesince
    # no update is expected before this time
    self.when_next_expected = bttime() + fudge
def _check(self):
    """Tracker announce state machine (gettext-wrapped warning variant).

    Periodically decides whether to announce, and with which numeric
    event code (0=started, 1=completed, 2=stopped).
    """
    if self.current_started is not None:
        # An announce is already in flight; warn when it has been
        # running nearly a minute, but do not start another.
        if self.current_started <= bttime() - 58:
            self.errorfunc(WARNING,
                           _("Tracker announce still not complete "
                             "%d seconds after starting it") %
                           int(bttime() - self.current_started))
        return
    if self.peerid is None:
        # First announce with this identity: send 'started'.
        self.peerid = self.wanted_peerid
        self.url = self._makeurl(self.peerid, self.port)
        self._announce(0)
        return
    if self.peerid != self.wanted_peerid:
        # Identity changed: announce 'stopped' for the old peer id and
        # snapshot the transfer counters.
        self._announce(2)
        self.peerid = None
        self.previous_up = self.up()
        self.previous_down = self.down()
        return
    if self.finish:
        # Completed: announce once.
        self.finish = False
        self._announce(1)
        return
    if self.fail_wait is not None:
        # Failed previously: retry only after the back-off interval.
        if self.last_time + self.fail_wait <= bttime():
            self._announce()
        return
    if self.last_time > bttime() - self.config['rerequest_interval']:
        # Rate-limit announces.
        return
    # Request more peers when running low; threshold is stricter once
    # an incoming connection has proven we are reachable.
    if self.ever_got_incoming():
        getmore = self.howmany() <= self.config['min_peers'] / 3
    else:
        getmore = self.howmany() < self.config['min_peers']
    if getmore or bttime() - self.last_time > self.announce_interval:
        self._announce()
def data_came_in(self, amount):
    """Account for `amount` freshly-received bytes."""
    if self.got_anything:
        self.update(bttime(), amount)
        return
    # First data ever: start the clock slightly in the past so the
    # very first rate estimate is finite.
    self.got_anything = True
    self.start = bttime() - 2
    self.last = self.start
    self.left -= amount
def PFS_apply(self): #step 6 - 7: #calculate number of request collected reqs = len(self.r) #if (Number of request collected is <= 4) V (1 second has passed since I got the first request) SU = self.config['simultaneous_upload'] if reqs <= SU: # to step 7: With a FCFC policy, Seed sends PIECE requested return False if (self.tm_first_req != 0) and (self.tm_first_req - bttime() > 1.0): msg = 'PFS ' + str(self.tm_first_req - bttime()) + ' second has passed since first request' print msg self.logcollector.log(None, msg) self.PFS_clear_structure() # to step 7: With a FCFC policy, Seed sends PIECE requested return False #log output msg = 'PFS req_cnt (> SU=' + str(SU) + ') ' + str(reqs) + ' ' + str(self.r) print msg self.logcollector.log(None, msg) # step 9: wins_p = [] # wins piece wins_l = [] # select peers while (len(wins_p) < SU): # 9.1 (_rt, _p) = self.PFS_take_max() # (ratio, piece id) if _p not in wins_p: wins_p.append(_p) wins_l = self.PFS_select_leecher(_p, wins_l) # 9.2: update (decrease r_p) self.r[_p] -= 1.0 # 9.3: update round self.round += 1 # 9.4: update theta self.PFS_update_theta() # log output msg = 'PFS win pieces ' + str(wins_p) print msg self.logcollector.log(None, msg) # step 10, 11 self.PFS_choke(wins_l) self.PFS_clear_structure() return True
def _handle_events(self, events):
    """Dispatch poll() results: accept on listening sockets, then do
    reads/writes on connected sockets."""
    for sock, event in events:
        if sock in self.serversockets:
            s = self.serversockets[sock]
            if event & (POLLHUP | POLLERR) != 0:
                # listening socket died: unregister and report
                self.poll.unregister(s)
                s.close()
                self.errorfunc(CRITICAL, 'lost server socket')
            else:
                handler, context = self.listening_handlers[sock]
                try:
                    newsock, addr = s.accept()
                except socket.error:
                    continue
                try:
                    newsock.setblocking(0)
                    nss = SingleSocket(self, newsock, handler, context)
                    self.single_sockets[newsock.fileno()] = nss
                    self.poll.register(newsock, POLLIN)
                    self._make_wrapped_call(
                        handler.external_connection_made,
                        (nss,), context=context)
                except socket.error, e:
                    self.errorfunc(WARNING, "Error handling accepted "
                                   "connection: "+str(e))
        else:
            s = self.single_sockets.get(sock)
            if s is None:
                if sock == self.wakeupfds[0]:
                    # Another thread wrote this just to wake us up.
                    os.read(sock, 1)
                continue
            s.connected = True
            if event & POLLERR:
                self._close_socket(s)
                continue
            #Any message reinitializes the timer for the connection timeout.
            #The keep Alive message is a message with only a length prefix (4 bytes), no
            #message ID and no payload. This message will be passed to
            #data_came_in, but will have no action at all. Its only task is to
            #reset last_hit in this part of the code.
            if event & (POLLIN | POLLHUP):
                s.last_hit = bttime()
                try:
                    data = s.socket.recv(100000)
                except socket.error, e:
                    code, msg = e
                    if code != EWOULDBLOCK:
                        self._close_socket(s)
                    continue
                if data == '':
                    # zero-byte read: remote closed the connection
                    self._close_socket(s)
                else:
                    self._make_wrapped_call(s.handler.data_came_in,
                                            (s, data), s)
            # data_came_in could have closed the socket (s.socket = None)
            if event & POLLOUT and s.socket is not None:
                s.try_write()
                if s.is_flushed():
                    self._make_wrapped_call(s.handler.connection_flushed,
                                            (s,), s)
def _check_version(self):
    """Ask the UI to check for a new version, at most once per day."""
    now = bttime()
    one_day = 24 * 60 * 60
    if 0 < self.last_version_check and now - self.last_version_check < one_day:
        return
    self.last_version_check = now
    self.run_ui_task(self.ui.check_version)
def get_time_left(self):
    """Return the estimated seconds remaining, or None before any data
    has arrived."""
    if not self.got_anything:
        return None
    now = bttime()
    # Refresh a stale estimate (no data for over 15 seconds).
    if now - self.last > 15:
        self.update(now, 0)
    return self.remaining
def _dump_state(self):
    """Serialize the UI torrent lists to the 'ui_state' file (version 3
    text format: running, queued, then known torrents)."""
    self.last_save_time = bttime()
    r = []
    def write_entry(infohash, t):
        # Torrents still waiting for a save location get a bare hash line.
        if t.dlpath is None:
            assert t.state == ASKING_LOCATION
            r.append(infohash.encode('hex') + '\n')
        else:
            r.append(infohash.encode('hex') + ' ' + str(t.uptotal) + ' ' +
                     str(t.downtotal)+' '+t.dlpath.encode('string_escape')+'\n')
    r.append('BitTorrent UI state file, version 3\n')
    r.append('Running torrents\n')
    for infohash in self.running_torrents:
        write_entry(infohash, self.torrents[infohash])
    r.append('Queued torrents\n')
    for infohash in self.queue:
        write_entry(infohash, self.torrents[infohash])
    r.append('Known torrents\n')
    for infohash in self.other_torrents:
        write_entry(infohash, self.torrents[infohash])
    r.append('End\n')
    f = None
    try:
        # NOTE(review): write is not atomic -- a crash mid-write corrupts
        # the state file; compare the '.new' + rename variant elsewhere.
        f = file(os.path.join(self.config['data_dir'], 'ui_state'), 'wb')
        f.write(''.join(r))
        f.close()
    except Exception, e:
        self.global_error(ERROR, 'Could not save UI state: ' + str(e))
        if f is not None:
            f.close()
def __call__(self, *args, **kwargs):
    """Invoke the wrapped function and log a timestamped sample of its
    return value to the stats stream."""
    result = self._func(*args, **kwargs)
    self._stream.write("%f\t%f\n" % (bttime(), result))
    # flush a little more often.
    self._i = (self._i + 1) % 8
    if not self._i:
        self._stream.flush()
    return result
def _queue_loop(self):
    """Periodic (20s) queue manager: stop one seeding torrent that has
    met its time/ratio goals so the next queued torrent can start."""
    if self.doneflag.isSet():
        return
    self.rawserver.add_task(self._queue_loop, 20)
    now = bttime()
    if self.queue and self.starting_torrent is None:
        # something is waiting: seed until next_torrent_time/ratio
        mintime = now - self.config['next_torrent_time'] * 60
        # NOTE(review): '/ 100' converts a percent config value -- confirm
        # true division is in effect, otherwise int division truncates.
        minratio = self.config['next_torrent_ratio'] / 100
    else:
        # nothing waiting: only the last-torrent ratio applies
        mintime = 0
        minratio = self.config['last_torrent_ratio'] / 100
        if not minratio:
            # ratio 0 disables stopping entirely
            return
    for infohash in self.running_torrents:
        t = self.torrents[infohash]
        if t.state == RUN_QUEUED:
            continue
        totals = t.dl.get_total_transfer()
        # not updated for remaining torrents if one is stopped, who cares
        t.uptotal = t.uptotal_old + totals[0]
        t.downtotal = t.downtotal_old + totals[1]
        if t.finishtime is None or t.finishtime > now - 120:
            # not finished, or finished less than two minutes ago
            continue
        if t.finishtime > mintime:
            # hasn't seeded long enough yet; keep going unless the
            # ratio target is already met
            if t.uptotal < t.downtotal * minratio:
                continue
        self.change_torrent_state(infohash, RUNNING, KNOWN)
        break
    if self.running_torrents and self.last_save_time < now - 300:
        self._dump_state()
def request_status(self, infohash, want_spew, want_fileinfo):
    """Push a status dict for `infohash` to the UI, adding a seeding
    time estimate under 'timeEst' for finished torrents."""
    torrent = self.torrents.get(infohash)
    if torrent is None or torrent.state != RUNNING:
        return
    status = torrent.dl.get_status(want_spew, want_fileinfo)
    if torrent.finishtime is not None:
        # seeding: estimate time until the stop-seeding ratio/time is met
        now = bttime()
        uptotal = status['upTotal'] + torrent.uptotal_old
        downtotal = status['downTotal'] + torrent.downtotal_old
        ulspeed = status['upRate2']
        if self.queue:
            ratio = self.config['next_torrent_ratio'] / 100
        else:
            ratio = self.config['last_torrent_ratio'] / 100
        if ratio <= 0 or ulspeed <= 0:
            # cannot ever reach the target at this speed: "forever"
            rem = 1e99
        else:
            rem = (downtotal * ratio - uptotal) / ulspeed
        if self.queue:
            # with torrents queued, next_torrent_time also bounds seeding
            rem = min(
                rem,
                torrent.finishtime +
                self.config['next_torrent_time'] * 60 - now)
        # always seed at least two minutes after finishing
        rem = max(rem, torrent.finishtime + 120 - now)
        if rem <= 0:
            rem = 1
        if rem == 1e99:
            rem = None      # UI shows "forever"
        status['timeEst'] = rem
    self.run_ui_task(self.ui.update_status, infohash, status)
def request_status(self, infohash, want_spew, want_fileinfo):
    """Send the UI a status dict for `infohash`; finished torrents get a
    'timeEst' seconds-remaining-of-seeding estimate (None = forever)."""
    torrent = self.torrents.get(infohash)
    if torrent is None or torrent.state != RUNNING:
        return
    status = torrent.dl.get_status(want_spew, want_fileinfo)
    if torrent.finishtime is not None:
        # currently seeding
        now = bttime()
        uptotal = status['upTotal'] + torrent.uptotal_old
        downtotal = status['downTotal'] + torrent.downtotal_old
        ulspeed = status['upRate2']
        if self.queue:
            ratio = self.config['next_torrent_ratio'] / 100
        else:
            ratio = self.config['last_torrent_ratio'] / 100
        if ratio <= 0 or ulspeed <= 0:
            # unreachable target at this rate: effectively forever
            rem = 1e99
        else:
            rem = (downtotal * ratio - uptotal) / ulspeed
        if self.queue:
            # queued torrents also cap seeding by next_torrent_time
            rem = min(rem, torrent.finishtime +
                      self.config['next_torrent_time'] * 60 - now)
        # seed at least two minutes past completion
        rem = max(rem, torrent.finishtime + 120 - now)
        if rem <= 0:
            rem = 1
        if rem == 1e99:
            rem = None
        status['timeEst'] = rem
    self.run_ui_task(self.ui.update_status, infohash, status)
def got_piece(self, index, begin, piece):
    """Handle an incoming PIECE block on this connection.

    In endgame mode the same block may be requested from several peers,
    so duplicates are cancelled on the other connections.
    """
    req = (index, begin, len(piece))
    if req not in self.active_requests:
        # unsolicited block; with the fast extension that is a protocol
        # violation, so drop the peer
        self.multidownload.discarded_bytes += len(piece)
        if self.connection.uses_fast_extension:
            self.connection.close()
        return
    self.active_requests.remove(req)
    if self.multidownload.storage.endgame:
        if req not in self.multidownload.all_requests:
            # another connection already delivered this block
            self.multidownload.discarded_bytes += len(piece)
            return
        self.multidownload.all_requests.remove(req)
        # cancel the duplicate request on every other download
        for d in self.multidownload.downloads:
            if d.interested:
                if not d.choked:
                    if req in d.active_requests:
                        d.connection.send_cancel(*req)
                        if not self.connection.uses_fast_extension:
                            # without fast ext there is no reject reply,
                            # so forget the request immediately
                            d.active_requests.remove(req)
                d.fix_download_endgame()
    else:
        self._request_more()
    self.last = bttime()
    self.update_rate(len(piece))
    df = self.multidownload.storage.write(index, begin, piece, self.guard)
    df.addCallback(self._got_piece, index)
def __call__(self, *args, **kwargs):
    """Call through to the wrapped function and append a timestamped
    sample of its return value to the stats stream."""
    sample = self._func(*args, **kwargs)
    self._stream.write("%f\t%f\n" % (bttime(), sample))
    counter = (self._i + 1) % 8
    self._i = counter
    if counter == 0:
        # flush a little more often.
        self._stream.flush()
    return sample
def __init__(self, rawserver, sock, handler, context, addr=None):
    """Wrap a socket for use by the rawserver event loop.

    addr -- for UNIX sockets, a path string used directly as 'ip';
            otherwise the peer address may be resolved here or later.
    """
    self.rawserver = rawserver
    self.socket = sock
    self.handler = handler
    self.buffer = []
    # used by scan_for_timeouts to detect idle connections
    self.last_hit = bttime()
    self.fileno = sock.fileno()
    self.connected = False
    self.context = context
    self.ip = None
    self.port = None
    if isinstance(addr, basestring):
        # UNIX socket, not really ip
        self.ip = addr
    else:
        peername = (None, None)
        try:
            peername = self.socket.getpeername()
        except socket.error, e:
            # UDP raises (107, 'Transport endpoint is not connected')
            # but so can a TCP socket we just got from start_connection,
            # in which case addr is set and we use it later.
            if (e[0] == 107) and (addr == None):
                # lies.
                # the peer endpoint should be gathered from the
                # tuple passed to data_came_in
                try:
                    peername = self.socket.getsockname()
                except socket.error, e:
                    # best effort only; peername stays (None, None)
                    pass
def finished(self, torrent):
    """called when a download reaches 100%"""
    infohash = torrent.infohash
    t = self.torrents[infohash]
    totals = t.dl.get_total_transfer()
    if t.downtotal == 0 and t.downtotal_old == 0 and totals[1] == 0:
        # nothing was ever downloaded for this torrent, so ratio-based
        # stop rules make no sense: switch it to seed-forever
        self.set_config('seed_forever', True, infohash)
        self.set_config('seed_last_forever', True, infohash)
        self.request_status(infohash, False, False)
    if infohash == self.starting_torrent:
        t = self.torrents[infohash]
        if self.queue:
            ratio = t.config['next_torrent_ratio'] / 100
            if t.config['seed_forever']:
                ratio = 1e99
            msg = _("Not starting torrent as there are other torrents "
                    "waiting to run, and this one already meets the "
                    "settings for when to stop seeding.")
        else:
            ratio = t.config['last_torrent_ratio'] / 100
            if t.config['seed_last_forever']:
                ratio = 1e99
            msg = _("Not starting torrent as it already meets the "
                    "settings for when to stop seeding the last "
                    "completed torrent.")
        # refuse to start-seed a torrent that already met its target
        if ratio < 1e99 and t.uptotal >= t.metainfo.total_bytes * ratio:
            raise BTShutdown(msg)
    self.torrents[torrent.infohash].finishtime = bttime()
def update_rate(self, amount):
    """Fold `amount` bytes into the windowed average transfer rate."""
    self.total += amount
    now = bttime()
    self.rate = ((self.rate * (self.last - self.ratesince) + amount)
                 / (now - self.ratesince))
    self.last = now
    # keep the averaging window bounded to max_rate_period
    if self.ratesince < now - self.max_rate_period:
        self.ratesince = now - self.max_rate_period
def print_rate(self, size): self.total += size this_time = bttime() start_delta = this_time - self.start_time this_delta = this_time - self.last_time if start_delta > 0 and this_delta > 0: print "UPLOAD: This:", size / this_delta, "Total:", self.total / start_delta self.last_time = this_time
def _check(self):
    """Tracker announce state machine (annotated variant); event codes
    are 0=started, 1=completed, 2=stopped."""
    if self.current_started is not None:
        # an announce is in flight; warn if nearly a minute has passed
        if self.current_started <= bttime() - 58:
            self.errorfunc(WARNING, "Tracker announce still not complete "
                           "%d seconds after starting it" %
                           int(bttime() - self.current_started))
        return
    if self.peerid is None:
        # first announce with this identity
        self.peerid = self.wanted_peerid
        self.url = self._makeurl(self.peerid, self.port)
        self._announce(0)
        return
    if self.peerid != self.wanted_peerid:
        self._announce(2)
        self.peerid = None
        #up() is the upload measure
        self.previous_up = self.up()
        #down() is the download measure
        self.previous_down = self.down()
        return
    if self.finish:
        self.finish = False
        self._announce(1)
        return
    if self.fail_wait is not None:
        # back-off after a failed announce
        if self.last_time + self.fail_wait <= bttime():
            self._announce()
        return
    if self.last_time > bttime() - self.config['rerequest_interval']:
        return
    #if at least one connection was incoming (i.e., not initiated by the local peer),
    #it sends a request for peers to the tracker if there is less than min_peers/3 connections.
    #If no connection was ever incoming, it requests to the tracker new peers if
    #there is less than min_peers connections. Only active connections (i.e., with a validated
    #handshake) are counted.
    if self.ever_got_incoming():
        getmore = self.howmany() <= self.config['min_peers'] / 3
    else:
        getmore = self.howmany() < self.config['min_peers']
    #announce_interval is simply used to send statistics to the tracker
    #it is not used to rerequest peers to the tracker when needed.
    #in order to request peer if it is need (not enough peers),
    #a request can be sent at most every rerequest_interval that is
    #by default 5 minutes (300 seconds)
    if getmore or bttime() - self.last_time > self.announce_interval:
        self._announce()
def got_piece(self, index, begin, piece):
    """Handle an incoming PIECE block (instrumented variant with a
    'BF2-0-0' peer-id special case).

    Returns False when the block was discarded or the piece is still
    incomplete; otherwise returns whether the whole piece is now held.
    """
    print 'IN block: (piece %d[%d:%d] - SN: %d)' % (
        index, begin, begin + len(piece), begin / len(piece))
    try:
        self.active_requests.remove((index, begin, len(piece)))
    except ValueError:
        # unsolicited/duplicate block
        self.downloader.discarded_bytes += len(piece)
        return False
    if self.downloader.storage.endgame:
        self.downloader.all_requests.remove((index, begin, len(piece)))
    self.last = bttime()
    # update per-connection and global download rate measures
    self.measure.update_rate(len(piece))
    self.downloader.measurefunc(len(piece))
    self.downloader.downmeasure.update_rate(len(piece))
    if 'BF2-0-0' in self.connection.id:
        # special storage path for BF2-0-0 peers
        insert = self.downloader.storage.DF_piece_came_in(
            index, begin, piece, self.guard)
        if insert == 'request more':
            self._request_DF(index)
    else:
        insert = self.downloader.storage.piece_came_in(
            index, begin, piece, self.guard)
    if not insert:
        # piece not complete yet: queue further requests for it
        if self.downloader.storage.endgame:
            while self.downloader.storage.do_I_have_requests(index):
                nb, nl = self.downloader.storage.new_request(index)
                self.downloader.all_requests.append((index, nb, nl))
            for d in self.downloader.downloads:
                d.fix_download_endgame()
            return False
        ds = [d for d in self.downloader.downloads if not d.choked]
        shuffle(ds)
        for d in ds:
            d._request_more([index])
        return False
    if self.downloader.storage.do_I_have(index):
        # whole piece complete and verified
        self.downloader.picker.complete(index)
    if self.downloader.storage.endgame:
        # cancel this block on every other connection that requested it
        for d in self.downloader.downloads:
            if d is not self and d.interested:
                if d.choked:
                    d.fix_download_endgame()
                else:
                    try:
                        d.active_requests.remove(
                            (index, begin, len(piece)))
                    except ValueError:
                        continue
                    d.connection.send_cancel(index, begin, len(piece))
                    d.fix_download_endgame()
    self._request_more()
    if self.downloader.picker.am_I_complete():
        # we are a seed now; drop connections to other seeds
        for d in [
            i for i in self.downloader.downloads if i.have.numfalse == 0
        ]:
            d.connection.close()
    return self.downloader.storage.do_I_have(index)
def __call__(self, rtt, rate):
    """Congestion detector: True when the RTT variance over the sliding
    window exceeds 64% of the largest variance seen so far.

    rtt  -- latest round-trip-time sample.
    rate -- current rate (unused here; kept for interface parity).
    """
    self._window.append(rtt)
    var = variance(self._window)
    if stats:
        self._var_fp.write("%f\t%f\n" % (bttime(), var))
    if var > self._max_var:
        self._max_var = var
        if stats:
            self._max_var_fp.write("%f\t%f\n" % (bttime(), self._max_var))
    # won't signal congestion until we have at least a full window's
    # worth of samples.
    # BUG FIX: the original compared the window object itself to the int
    # window size, which never expresses the sample count the comment
    # intends; compare len(window) instead.
    if len(self._window) < self._window_size:
        return False
    if var > (self._max_var * 0.64):  # FUDGE
        return True
    else:
        return False
def __init__(self, left):
    """Progress/ETA tracker for a download with `left` bytes to go."""
    self.left = left
    self.start = None
    self.last = None
    self.rate = 0
    self.remaining = None
    self.broke = False
    self.got_anything = False
    # grace period before the first sample is expected
    self.when_next_expected = bttime() + 5
def _check_version(self):
    """Kick off a background version check, at most once per day."""
    now = bttime()
    if now - self.last_version_check < 24 * 3600:
        return
    self.last_version_check = now
    if not HAVE_DNS:
        self.global_error(WARNING, "Version check failed: no DNS library")
        return
    threading.Thread(target=self._version_thread).start()
def __call__(self, rtt, rate):
    """Congestion detector: True when the RTT variance over the sliding
    window exceeds 64% of the maximum variance observed so far.

    rtt  -- latest round-trip-time sample.
    rate -- current rate (unused here; kept for interface parity).
    """
    self._window.append(rtt)
    var = variance(self._window)
    if stats:
        self._var_fp.write("%f\t%f\n" % (bttime(), var))
    if var > self._max_var:
        self._max_var = var
        if stats:
            self._max_var_fp.write("%f\t%f\n" % (bttime(), self._max_var))
    # won't signal congestion until we have at least a full window's
    # worth of samples.
    # BUG FIX: compare the number of samples, not the window object
    # itself, against the integer window size.
    if len(self._window) < self._window_size:
        return False
    if var > (self._max_var * 0.64):  # FUDGE
        return True
    else:
        return False
def _check_version(self):
    """Spawn the version-check thread unless one ran within the last day."""
    now = bttime()
    one_day = 24 * 3600
    if self.last_version_check > now - one_day:
        return
    self.last_version_check = now
    if HAVE_DNS:
        threading.Thread(target=self._version_thread).start()
    else:
        self.global_error(WARNING, "Version check failed: no DNS library")
def listen_once(self, period=1e9):
    """Run one iteration of the event loop.

    Returns 0 normally, -1 on a fatal ENOBUFS (TCP stack) error.
    """
    try:
        self._pop_externally_added()
        if self.funcs:
            # wake exactly when the next scheduled task is due
            period = self.funcs[0][0] - bttime()
        if period < 0:
            period = 0
        events = self.poll.poll(period * timemult)
        if self.doneflag.isSet():
            return 0
        # run every task whose scheduled time has arrived
        while self.funcs and self.funcs[0][0] <= bttime():
            garbage, func, args, context = self.funcs.pop(0)
            self._make_wrapped_call(func, args, context=context)
        self._close_dead()
        self._handle_events(events)
        if self.doneflag.isSet():
            return 0
        self._close_dead()
    except error, e:
        if self.doneflag.isSet():
            return 0
        # I can't find a coherent explanation for what the behavior
        # should be here, and people report conflicting behavior,
        # so I'll just try all the possibilities
        code = None
        if hasattr(e, '__getitem__'):
            code = e[0]
        else:
            code = e
        if code == ENOBUFS:
            # log the traceback so we can see where the exception is coming from
            print_exc(file=sys.stderr)
            self.errorfunc(
                CRITICAL,
                _("Have to exit due to the TCP stack flaking "
                  "out. Please see the FAQ at %s") % FAQ_URL)
            return -1
        elif code in (EINTR, ):
            # add other ignorable error codes here
            pass
        else:
            self.errorfunc(CRITICAL, str(e))
    return 0
def update_rate(self, amount):
    """Fold `amount` into the windowed average rate.

    Zero-byte updates arriving before the next sample is expected are
    ignored (returning the current rate) so they do not dilute it.
    """
    self.total += amount
    now = bttime()
    if amount == 0 and now < self.when_next_expected:
        # nothing new and nothing overdue: keep the current estimate
        return self.rate
    window = now - self.ratesince
    self.rate = (self.rate * (self.last - self.ratesince) + amount) / window
    self.last = now
    self.ratesince = max(self.ratesince, now - self.max_rate_period)
    self.when_next_expected = now + min((amount / max(self.rate, 0.0001)), 5)
def scan_for_timeouts(self):
    """Close non-UDP sockets idle longer than socket_timeout.

    Reschedules itself every timeout_check_interval seconds.
    """
    self.add_task(self.scan_for_timeouts,
                  self.config["timeout_check_interval"])
    deadline = bttime() - self.config["socket_timeout"]
    stale = []
    for sock in self.single_sockets.values():
        # UDP sockets are connectionless and never time out
        if sock in self.udp_sockets.keys():
            continue
        if sock.last_hit < deadline:
            stale.append(sock)
    for sock in stale:
        if sock.socket is not None:
            self._close_socket(sock)
def _handle_events(self, events):
    """Dispatch poll() results: accept new connections on listening
    sockets, then service reads/writes on connected sockets."""
    for sock, event in events:
        if sock in self.serversockets:
            s = self.serversockets[sock]
            if event & (POLLHUP | POLLERR) != 0:
                # the listening socket itself failed
                self.poll.unregister(s)
                s.close()
                self.errorfunc(CRITICAL, 'lost server socket')
            else:
                handler, context = self.listening_handlers[sock]
                try:
                    newsock, addr = s.accept()
                except socket.error:
                    continue
                try:
                    newsock.setblocking(0)
                    nss = SingleSocket(self, newsock, handler, context)
                    self.single_sockets[newsock.fileno()] = nss
                    self.poll.register(newsock, POLLIN)
                    self._make_wrapped_call(
                        handler.external_connection_made,
                        (nss,), context=context)
                except socket.error, e:
                    self.errorfunc(WARNING, "Error handling accepted "
                                   "connection: "+str(e))
        else:
            s = self.single_sockets.get(sock)
            if s is None:
                if sock == self.wakeupfds[0]:
                    # Another thread wrote this just to wake us up.
                    os.read(sock, 1)
                continue
            s.connected = True
            if event & POLLERR:
                self._close_socket(s)
                continue
            if event & (POLLIN | POLLHUP):
                # any traffic resets the idle-timeout clock
                s.last_hit = bttime()
                try:
                    data = s.socket.recv(100000)
                except socket.error, e:
                    code, msg = e
                    if code != EWOULDBLOCK:
                        self._close_socket(s)
                    continue
                if data == '':
                    # zero-byte read: peer closed the connection
                    self._close_socket(s)
                else:
                    self._make_wrapped_call(s.handler.data_came_in,
                                            (s, data), s)
            # data_came_in could have closed the socket (s.socket = None)
            if event & POLLOUT and s.socket is not None:
                s.try_write()
                if s.is_flushed():
                    self._make_wrapped_call(s.handler.connection_flushed,
                                            (s,), s)
def listen_once(self, period=1e9):
    """Run a single event-loop iteration: poll, fire due tasks, handle
    socket events.  Returns 0 normally, -1 on fatal ENOBUFS."""
    try:
        self._pop_externally_added()
        if self.funcs:
            # shorten the poll timeout to the next scheduled task
            period = self.funcs[0][0] - bttime()
        if period < 0:
            period = 0
        events = self.poll.poll(period * timemult)
        if self.doneflag.isSet():
            return 0
        # fire everything that has come due
        while self.funcs and self.funcs[0][0] <= bttime():
            garbage, func, args, context = self.funcs.pop(0)
            self._make_wrapped_call(func, args, context=context)
        self._close_dead()
        self._handle_events(events)
        if self.doneflag.isSet():
            return 0
        self._close_dead()
    except error, e:
        if self.doneflag.isSet():
            return 0
        # I can't find a coherent explanation for what the behavior
        # should be here, and people report conflicting behavior,
        # so I'll just try all the possibilities
        code = None
        if hasattr(e, '__getitem__'):
            code = e[0]
        else:
            code = e
        if code == ENOBUFS:
            # log the traceback so we can see where the exception is coming from
            print_exc(file = sys.stderr)
            self.errorfunc(CRITICAL,
                           _("Have to exit due to the TCP stack flaking "
                             "out. Please see the FAQ at %s") % FAQ_URL)
            return -1
        elif code in (EINTR,):
            # add other ignorable error codes here
            pass
        else:
            self.errorfunc(CRITICAL, str(e))
    return 0
def listen_forever(self):
    """Main event loop: poll, dispatch timed tasks and socket events
    until doneflag is set or a fatal error occurs."""
    while not self.doneflag.isSet():
        try:
            self._pop_externally_added()
            if not self.funcs:
                period = 1e9
            else:
                # wake when the next scheduled task is due
                period = self.funcs[0][0] - bttime()
            if period < 0:
                period = 0
            events = self.poll.poll(period * timemult)
            if self.doneflag.isSet():
                return
            while self.funcs and self.funcs[0][0] <= bttime():
                # NOTE(review): 3-tuple unpack here, while other loop
                # variants schedule 4-tuples (with args) -- confirm this
                # matches the add_task used with this loop.
                garbage, func, context = self.funcs.pop(0)
                self._make_wrapped_call(func, (), context=context)
            self._close_dead()
            self._handle_events(events)
            if self.doneflag.isSet():
                return
            self._close_dead()
        except error, e:
            if self.doneflag.isSet():
                return
            # I can't find a coherent explanation for what the behavior
            # should be here, and people report conflicting behavior,
            # so I'll just try all the possibilities
            try:
                code, msg, desc = e
            except:
                try:
                    code, msg = e
                except:
                    code = ENOBUFS
            if code == ENOBUFS:
                self.errorfunc(CRITICAL, "Have to exit due to the TCP "
                               "stack flaking out. "
                               "Please see the FAQ at %s"%FAQ_URL)
                return
        except KeyboardInterrupt:
            print_exc()
            return
def scan_for_timeouts(self):
    """Close sockets idle longer than socket_timeout; reschedules
    itself every timeout_check_interval seconds."""
    self.add_task(self.scan_for_timeouts,
                  self.config['timeout_check_interval'])
    cutoff = bttime() - self.config['socket_timeout']
    victims = [s for s in self.single_sockets.values()
               if s.last_hit < cutoff]
    for victim in victims:
        if victim.socket is not None:
            self._close_socket(victim)
def __call__(self):
    """Return the number of tokens accrued since the previous call.

    Rate is in tokens per second; elapsed time is clamped to one second
    because longer gaps cannot be approximated cleanly.
    """
    now = bttime()
    elapsed = min(now - self.last_time, 1.0)
    if elapsed <= 0:
        # clock has not advanced (or went backwards): no tokens, and
        # leave last_time untouched
        return 0
    self.last_time = now
    return self.rate * elapsed
def _check(self):
    """Tracker announce state machine (string-event variant), run
    periodically on the rawserver thread."""
    assert thread.get_ident() == self.rawserver.ident
    assert not self.dead
    #self.errorfunc(logging.INFO, 'check: ' + str(self.current_started))
    if self.current_started is not None:
        # an announce is in flight; warn when nearly a minute has passed
        if self.current_started <= bttime() - 58:
            self.errorfunc(logging.WARNING,
                           _("Tracker announce still not complete "
                             "%d seconds after starting it") %
                           int(bttime() - self.current_started))
        return
    if self.peerid is None:
        # first announce with the new identity
        self.peerid = self.wanted_peerid
        self.url = self._makeurl(self.peerid, self.port)
        self._announce('started')
        return
    if self.peerid != self.wanted_peerid:
        # identity changed: bind the measure callables before the
        # 'stopped' announce, then snapshot the counters
        # _announce will clean up these
        up = self.up
        down = self.down
        self._announce('stopped')
        self.peerid = None
        self.previous_up = up()
        self.previous_down = down()
        return
    if self.finish:
        self.finish = False
        self._announce('completed')
        return
    if self.fail_wait is not None:
        # back-off after a failed announce
        if self.last_time + self.fail_wait <= bttime():
            self._announce()
        return
    if self.last_time > bttime() - self.config['rerequest_interval']:
        return
    # ask for more peers when running short; stricter threshold once we
    # know incoming connections reach us
    if self.ever_got_incoming():
        getmore = self.howmany() <= self.config['min_peers'] / 3
    else:
        getmore = self.howmany() < self.config['min_peers']
    if getmore or bttime() - self.last_time > self.announce_interval:
        self._announce()
def testExpire(self):
    """Entries older than the expire cutoff are dropped; newer kept."""
    self.k['foo'] = 'bar'
    self.k.expire(bttime() - 1)
    self.assertEqual(sorted(self.k['foo']), ['bar'])
    self.k['foo'] = 'bing'
    t = bttime()
    self.k.expire(bttime() - 1)
    self.assertEqual(sorted(self.k['foo']), ['bar', 'bing'])
    self.k['foo'] = 'ding'
    self.k['foo'] = 'dang'
    self.assertEqual(sorted(self.k['foo']),
                     ['bar', 'bing', 'dang', 'ding'])
    # expiring at t drops everything stored before 'ding'/'dang'
    self.k.expire(t)
    self.assertEqual(sorted(self.k['foo']), ['dang', 'ding'])
def finished(self, torrent):
    """Record a torrent's completion time; if it is the torrent being
    started and it already meets the configured stop-seeding ratio,
    abort its startup."""
    infohash = torrent.infohash
    if infohash == self.starting_torrent:
        t = self.torrents[infohash]
        if self.queue:
            ratio = self.config['next_torrent_ratio'] / 100
        else:
            ratio = self.config['last_torrent_ratio'] / 100
        # a ratio of 0 disables the check entirely
        if ratio and t.uptotal >= t.downtotal * ratio:
            raise BTShutdown("Not starting torrent as it already meets "
                             "the current settings for when to stop seeding")
    self.torrents[torrent.infohash].finishtime = bttime()
def _update(self, rtt):
    """update thresholds when delay is within epsilon of the estimated
    propagation delay.

    Recomputes the congestion threshold and the number of consecutive
    samples required, per equations (6) and (7).
    """
    var = self._cond_var(variance(self._window))
    u = self._cond_mean(mean(self._window))
    #        1            log P
    # n >= - - ------------------------------ .        (6)
    #        2 log (sigma / ((max-min)/2 - u)
    sigma = math.sqrt(var)
    # renamed from max/min: the original locals shadowed the builtins
    delay_max = self._delay_on_full_estimator()
    delay_min = self._propagation_estimator()
    p = self._false_pos_prob
    thresh = (delay_max - delay_min) / 2
    if thresh > u:
        n = int(0.5 * math.log(p) / math.log(sigma / (thresh - u)))
        if n < 1:
            n = 1
    if thresh <= u or n > self._max_consecutive:
        n = self._max_consecutive
    #             sigma
    # thresh >= -------- + u                           (7)
    #           P^(1/2n)
    thresh = sigma / p**(0.5 * n) + u
    if thresh > self._max_thresh:
        # this is a bad state. if we are forced to set thresh to
        # max thresh then the rate of false positives will
        # inexorably increase. What else to do?
        thresh = self._max_thresh
    self._thresh = thresh
    self._n = n
    if stats:
        self._thfp.write("%f\t%f\n" % (bttime(), self._thresh))
        self._nfp.write("%f\t%d\n" % (bttime(), self._n))
def send_message(self, message):
    """Drive the NAT-detection state machine with `message`.

    Unknown messages are ignored; a state change triggers
    change_state().
    """
    if message not in self.messages:
        return
    new_state = self.messages[message]
    overrides = self.transitions.get(self.mystate)
    if overrides is not None and message in overrides:
        new_state = overrides[message]
    # special pre-natted timeout logic
    if new_state == 'pre-natted':
        if (self.mystate == 'pre-natted'
                and bttime() - self.start_time > self.time_to_nat):
            # go to natted state after a while
            new_state = 'natted'
        elif self.mystate != 'pre-natted':
            # start pre-natted timer
            self.start_time = bttime()
    if new_state != self.mystate:
        self.mystate = new_state
        self.change_state()
def _postrequest(self, data=None, errormsg=None, peerid=None):
    """Process a finished tracker request: record timing, report
    transport errors, and bdecode/validate the response body."""
    self.current_started = None
    self.last_time = bttime()
    if errormsg is not None:
        # transport-level failure reported by the requester
        self.errorfunc(WARNING, errormsg)
        self._fail()
        return
    try:
        r = bdecode(data)
        check_peers(r)
    except BTFailure, e:
        if data != '':
            self.errorfunc(ERROR, _("bad data from tracker - ") + str(e))
        self._fail()
        return
    # NOTE(review): the decoded response `r` is unused here -- handling
    # of a valid tracker response appears to continue beyond this excerpt.
def _announce(self, event=None):
    """Build the announce URL query string and fire the request in a
    background thread.

    event -- None for a plain periodic announce, otherwise an index
             into ['started', 'completed', 'stopped'].
    """
    self.current_started = bttime()
    s = ('%s&uploaded=%s&downloaded=%s&left=%s' %
         (self.url, str(self.up() - self.previous_up),
          str(self.down() - self.previous_down),
          str(self.amount_left())))
    if self.last is not None:
        s += '&last=' + quote(str(self.last))
    if self.trackerid is not None:
        s += '&trackerid=' + quote(str(self.trackerid))
    if self.howmany() >= self.config['max_initiate']:
        # already have plenty of peers: ask the tracker for none
        s += '&numwant=0'
    else:
        s += '&compact=1'
    if event is not None:
        s += '&event=' + ['started', 'completed', 'stopped'][event]
    # the HTTP round-trip happens off-thread; _postrequest sees the result
    Thread(target=self._rerequest, args=[s, self.peerid]).start()
def finished(self, torrent):
    """Record a torrent's completion time; if it is the torrent being
    started and already meets the stop-seeding ratio, abort startup."""
    infohash = torrent.infohash
    if infohash == self.starting_torrent:
        t = self.torrents[infohash]
        if self.queue:
            ratio = self.config['next_torrent_ratio'] / 100
            msg = "Not starting torrent as there are other torrents "\
                  "waiting to run, and this one already meets the "\
                  "settings for when to stop seeding."
        else:
            ratio = self.config['last_torrent_ratio'] / 100
            msg = "Not starting torrent as it already meets the settings "\
                  "for when to stop seeding the last completed torrent."
        # a ratio of 0 disables the check
        if ratio and t.uptotal >= t.downtotal * ratio:
            raise BTShutdown(msg)
    self.torrents[torrent.infohash].finishtime = bttime()
def __call__(self, is_congested, rate): """Passed rate is the averaged upload rate. Returned rate is a rate limit.""" if is_congested: print "CONGESTION!" limit = rate * self._decrease_factor self._ssthresh = limit # congestion resets slow-start threshold elif rate > self._ssthresh - self._increase_delta: self._ssthresh += self._increase_delta # allow slow-start limit = min(self._ssthresh, 2.0 * rate) if debug: print "AIMD: time=%f rate=%f ssthresh=%f limit=%f" % \ (bttime(), rate, self._ssthresh, limit) return limit
def __init__(self, raw_server, sock, handler, context, ip=None):
    """Wrap a socket for the raw_server loop, resolving the peer 'ip'
    (a path string for UNIX sockets) unless one is supplied."""
    self.raw_server = raw_server
    self.socket = sock
    self.handler = handler
    self.buffer = []
    # consulted by the idle-timeout scan
    self.last_hit = bttime()
    self.fileno = sock.fileno()
    self.connected = False
    self.context = context
    if ip is not None:
        self.ip = ip
        return
    try:
        peername = self.socket.getpeername()
    except socket.error:
        self.ip = 'unknown'
        return
    try:
        self.ip = peername[0]
    except:
        assert isinstance(peername, basestring)
        self.ip = peername  # UNIX socket, not really ip
def _inspect_rates(self, t=None):
    """Called whenever an RTT sample arrives.  If t == None then a
    timeout occurred."""
    if t == None:
        t = self.rttmonitor.get_current_rtt()
    if t == None:
        # this makes timeouts reduce the maximum std deviation
        self.congestion_estimator.timeout()
        return
    if debug:
        print "BandwidthManager._inspect_rates: %d" % t
    if stats:
        self.dfp.write("%f\t%f\n" % (bttime(), t))
    if not self.config['bandwidth_management']:
        return
    # TODO: slow start should be smarter than this
    #if self.start_time < bttime() + 20:
    #    self.config['max_upload_rate'] = 10000000
    #    self.config['max_dowload_rate'] = 10000000
    #if t < 3:
    #    # I simply don't believe you. Go away.
    #    return
    tup = self.get_rates()
    if tup == None:
        return
    uprate, downrate = tup
    # proceed through the building blocks. (We can swap in various
    # implementations of each based on config).
    is_congested = self.congestion_estimator(t, uprate)
    rate_limit = self.control_law(is_congested, uprate)
    rate_limit = self.starvation_prevention(rate_limit)
    self._set_rate_limit(rate_limit)
def _dump_state(self):
    """Save UI torrent state (version 3 text format) by writing
    'ui_state.new' and renaming it over 'ui_state'."""
    self.last_save_time = bttime()
    r = []
    def write_entry(infohash, t):
        # torrents still awaiting a save location get a bare hash line
        if t.dlpath is None:
            assert t.state == ASKING_LOCATION
            r.append(infohash.encode('hex') + '\n')
        else:
            r.append(
                infohash.encode('hex') + ' ' + str(t.uptotal) + ' ' +
                str(t.downtotal) + ' ' +
                t.dlpath.encode('string_escape') + '\n')
    r.append('BitTorrent UI state file, version 3\n')
    r.append('Running torrents\n')
    for infohash in self.running_torrents:
        write_entry(infohash, self.torrents[infohash])
    r.append('Queued torrents\n')
    for infohash in self.queue:
        write_entry(infohash, self.torrents[infohash])
    r.append('Known torrents\n')
    for infohash in self.other_torrents:
        write_entry(infohash, self.torrents[infohash])
    r.append('End\n')
    f = None
    try:
        filename = os.path.join(self.config['data_dir'], 'ui_state')
        f = file(filename + '.new', 'wb')
        f.write(''.join(r))
        f.close()
        if os.access(filename, os.F_OK):
            os.remove(filename) # no atomic rename on win32
        os.rename(filename + '.new', filename)
    except Exception, e:
        self.global_error(ERROR, 'Could not save UI state: ' + str(e))
        if f is not None:
            f.close()
def _announce(self, event=None):
    """Start an announce: stamp the start time and issue the request.

    event -- accepted for interface parity with other announce
             variants but unused here.
    """
    self.current_started = bttime()
    self._rerequest("", self.peerid)
def add_task(self, func, delay, args=(), context=None):
    """Schedule func(*args) to run `delay` seconds from now.

    Must be called from the rawserver thread; tasks whose context is
    no longer live are silently dropped.
    """
    assert thread.get_ident() == self.ident
    assert type(args) in (list, tuple)
    if context in self.live_contexts:
        insort(self.funcs, (bttime() + delay, func, args, context))
self.errorfunc( WARNING, _("Error handling accepted connection: ") + str(e)) else: s = self.single_sockets.get(sock) if s is None: if sock == self.wakeupfds[0]: # Another thread wrote this just to wake us up. os.read(sock, 1) continue s.connected = True if event & POLLERR: self._close_socket(s) continue if event & (POLLIN | POLLHUP): s.last_hit = bttime() try: data, addr = s.socket.recvfrom(100000) except socket.error, e: code, msg = e if code != EWOULDBLOCK: self._close_socket(s) continue if data == '' and not self.udp_sockets.has_key(s): self._close_socket(s) else: if not self.udp_sockets.has_key(s): self._make_wrapped_call(s.handler.data_came_in, (s, data), s) else: self._make_wrapped_call(s.handler.data_came_in,