def __init__(self, sched, unitsize, slotsfunc=lambda x: None):
    """Initialize the rate limiter.

    sched     -- scheduler callable (fn, delay) used to re-run try_send later
    unitsize  -- minimum number of bytes handed to a connection per send
    slotsfunc -- callback invoked with the new slot count when it changes
    """
    self.sched = sched
    # Tail of the circular linked list of queued connections (None = empty).
    self.last = None
    self.unitsize = unitsize
    self.slotsfunc = slotsfunc
    self.measure = Measure(MAX_RATE_PERIOD)
    # True once automatic rate adjustment has been enabled.
    self.autoadjust = False
    # Upload rate in bytes/second (config value is in kB/s, hence * 1000).
    self.upload_rate = MAX_RATE * 1000
    # Only meaningful when autoadjust is on.
    self.slots = SLOTS_STARTING
def __init__(self, downloader, connection):
    """Track per-peer download state for one connection."""
    self.downloader = downloader
    self.connection = connection
    # Whether the peer is choking this client.
    self.choked = True
    # Whether this client is interested in data the peer has.
    self.interested = False
    # The (index, begin, length) tuples this client has requested from the peer.
    self.active_requests = []
    # Measures the download rate from the peer.
    self.measure = Measure(downloader.max_rate_period)
    # The pieces the peer has.
    self.have = Bitfield(downloader.numpieces)
    # The last time this client has gotten data from the peer.
    self.last = 0
    self.example_interest = None
def __init__(self, connection, choker, storage, max_slice_length,
             max_rate_period, fudge):
    """Track per-peer upload state for one connection."""
    self.connection = connection
    self.choker = choker
    self.storage = storage
    # Requests longer than this cause the connection to be closed.
    self.max_slice_length = max_slice_length
    self.max_rate_period = max_rate_period
    # Whether this client is choking the peer.
    self.choked = True
    # Whether the peer is interested in data on this client.
    self.interested = False
    # (index, begin, length) tuples the peer has requested from this client.
    self.buffer = []
    self.measure = Measure(max_rate_period, fudge)
    # Send our bitfield to the peer if we have any pieces.
    if storage.do_I_have_anything():
        connection.send_bitfield(storage.get_have_list())
def __init__(self, downloader, connection):
    """Track per-peer download state for one connection."""
    self.downloader = downloader
    self.connection = connection
    # Whether the peer is choking this client.
    self.choked = True
    # Whether this client is interested in data the peer has.
    self.interested = False
    # The (index, begin, length) tuples this client has requested from the peer.
    self.active_requests = []
    # Measures the download rate from the peer.
    self.measure = Measure(downloader.max_rate_period)
    # The pieces the peer has.
    self.have = Bitfield(downloader.numpieces)
    # The last time this client has gotten data from the peer.
    self.last = 0
    self.example_interest = None
def test_stops_at_backlog():
    """Requests to a single peer stop once the backlog (2) is reached."""
    # One piece made of four 2-byte blocks; backlog limit is 2.
    ds = DummyStorage([[(0, 2), (2, 2), (4, 2), (6, 2)]])
    events = []
    d = Downloader(ds, DummyPicker(len(ds.remaining), events), 2, 15, 1,
                   Measure(15), 10)
    sd = d.make_download(DummyConnection(events))
    # Nothing happens before the peer advertises any pieces.
    assert events == []
    assert ds.remaining == [[(0, 2), (2, 2), (4, 2), (6, 2)]]
    assert ds.active == [[]]
    # Peer has piece 0 -> we become interested but can't request yet (choked).
    sd.got_have_bitfield(Bitfield(1, chr(0x80)))
    assert events == ['got have', 'interested']
    del events[:]
    assert ds.remaining == [[(0, 2), (2, 2), (4, 2), (6, 2)]]
    assert ds.active == [[]]
    # Unchoke -> exactly 2 requests go out (the backlog), not all 4.
    sd.got_unchoke()
    assert events == [
        'requested', ('request', 0, 6, 2), 'requested', ('request', 0, 4, 2)
    ]
    del events[:]
    assert ds.remaining == [[(0, 2), (2, 2)]]
    assert ds.active == [[(4, 2), (6, 2)]]
    # Each block received frees one backlog slot -> one more request.
    sd.got_piece(0, 4, 'ab')
    assert events == ['requested', ('request', 0, 2, 2)]
    del events[:]
    assert ds.remaining == [[(0, 2)]]
    assert ds.active == [[(2, 2), (6, 2)]]
def test_stops_at_backlog_endgame():
    """Backlog is still honoured when endgame mode kicks in."""
    # Three pieces; endgame enabled, rate period 3; backlog limit is 3.
    ds = DummyStorage([[(2, 2), (0, 2)], [(2, 2), (0, 2)], [(0, 2)]], True, 3)
    events = []
    d = Downloader(ds, DummyPicker(len(ds.remaining), events), 3, 15, 3,
                   Measure(15), 10)
    ev1 = []
    ev2 = []
    ev3 = []
    sd1 = d.make_download(DummyConnection(ev1))
    sd2 = d.make_download(DummyConnection(ev2))
    sd3 = d.make_download(DummyConnection(ev3))
    # Peer 1 has piece 0 -> both of its blocks get requested.
    sd1.got_unchoke()
    sd1.got_have(0)
    assert ev1 == ['interested', ('request', 0, 0, 2), ('request', 0, 2, 2)]
    del ev1[:]
    # Peer 2: piece 0 is already fully requested from peer 1, so nothing
    # happens until it advertises piece 1.
    sd2.got_unchoke()
    sd2.got_have(0)
    assert ev2 == []
    sd2.got_have(1)
    assert ev2 == ['interested', ('request', 1, 0, 2), ('request', 1, 2, 2)]
    del ev2[:]
    # Peer 3 advertising the last piece triggers endgame: peer 2 duplicates
    # one of peer 1's outstanding piece-0 requests, but only one, because its
    # backlog of 3 is already at 2.
    sd3.got_unchoke()
    sd3.got_have(2)
    assert (ev2 == [('request', 0, 0, 2)] or ev2 == [('request', 0, 2, 2)])
    n = ev2[0][2]  # begin offset of the duplicated block
    del ev2[:]
    # When peer 1 delivers that block, peer 2 cancels its duplicate and uses
    # the freed backlog slot to request the other piece-0 block.
    sd1.got_piece(0, n, 'ab')
    assert ev1 == []
    assert ev2 == [('cancel', 0, n, 2), ('request', 0, 2 - n, 2)]
def __init__(self, connection, choker, storage, max_slice_length,
             max_rate_period, fudge):
    """Track per-peer upload state for one connection."""
    self.connection = connection
    self.choker = choker
    self.storage = storage
    # Requests longer than this cause the connection to be closed.
    self.max_slice_length = max_slice_length
    self.max_rate_period = max_rate_period
    # Whether this client is choking the peer.
    self.choked = True
    # Whether the peer is interested in data on this client.
    self.interested = False
    # (index, begin, length) tuples the peer has requested from this client.
    self.buffer = []
    self.measure = Measure(max_rate_period, fudge)
    # Send our bitfield to the peer if we have any pieces.
    if storage.do_I_have_anything():
        connection.send_bitfield(storage.get_have_list())
def test_operation():
    """End-to-end exercise of Connecter: feed every wire message in, drive
    every send_* method out, and compare the full event trace."""
    events = []
    cs = []
    co = Connecter(lambda c, events=events: DummyUpload(events),
                   DummyDownloader(events), DummyChoker(events, cs), 3,
                   Measure(10))
    assert events == []
    assert cs == []
    dc = DummyConnection(events)
    co.connection_made(dc)
    assert len(cs) == 1
    cc = cs[0]
    # Inbound: one of each message type.
    co.got_message(dc, BITFIELD + chr(0xc0))
    co.got_message(dc, CHOKE)
    co.got_message(dc, UNCHOKE)
    co.got_message(dc, INTERESTED)
    co.got_message(dc, NOT_INTERESTED)
    co.got_message(dc, HAVE + tobinary(2))
    co.got_message(dc, REQUEST + tobinary(1) + tobinary(5) + tobinary(6))
    co.got_message(dc, CANCEL + tobinary(2) + tobinary(3) + tobinary(4))
    co.got_message(dc, PIECE + tobinary(1) + tobinary(0) + 'abc')
    co.got_message(dc, PIECE + tobinary(1) + tobinary(3) + 'def')
    co.connection_flushed(dc)
    # Outbound: one of each send method.
    cc.send_bitfield(chr(0x60))
    cc.send_interested()
    cc.send_not_interested()
    cc.send_choke()
    cc.send_unchoke()
    cc.send_have(4)
    cc.send_request(0, 2, 1)
    cc.send_cancel(1, 2, 3)
    cc.send_piece(1, 2, 'abc')
    co.connection_lost(dc)
    # Expected event trace, in order.
    x = [
        'made upload', 'made download', 'made',
        ('bitfield', chr(0xC0)), 'choke', 'unchoke', 'interested',
        'not interested', ('have', 2), ('request', 1, 5, 6),
        ('cancel', 2, 3, 4), ('piece', 1, 0, 'abc'), ('piece', 1, 3, 'def'),
        ('m', HAVE + tobinary(1)), 'flushed',
        ('m', BITFIELD + chr(0x60)), ('m', INTERESTED), ('m', NOT_INTERESTED),
        ('m', CHOKE), ('m', UNCHOKE), ('m', HAVE + tobinary(4)),
        ('m', REQUEST + tobinary(0) + tobinary(2) + tobinary(1)),
        ('m', CANCEL + tobinary(1) + tobinary(2) + tobinary(3)),
        ('m', PIECE + tobinary(1) + tobinary(2) + 'abc'),
        'disconnected', 'lost'
    ]
    # zip() truncates to the shorter sequence, which would silently hide
    # missing or extra trailing events -- check the lengths explicitly first.
    assert len(events) == len(x), (len(events), len(x))
    for a, b in zip(events, x):
        assert a == b, repr((a, b))
def test_got_have_single():
    """A lone 'have' after unchoke triggers interest, a request, and a
    'lost have' on disconnect."""
    # One piece consisting of a single 2-byte block.
    storage = DummyStorage([[(0, 2)]])
    log = []
    dl = Downloader(storage, DummyPicker(len(storage.remaining), log), 2, 15,
                    1, Measure(15), 10)
    peer = dl.make_download(DummyConnection(log))
    # Fresh connection: no traffic, nothing requested.
    assert log == []
    assert storage.remaining == [[(0, 2)]]
    assert storage.active == [[]]
    # An unchoke alone does nothing -- the peer has advertised no pieces.
    peer.got_unchoke()
    assert log == []
    assert storage.remaining == [[(0, 2)]]
    assert storage.active == [[]]
    # Advertising piece 0 makes us interested and fires the request at once.
    peer.got_have(0)
    assert log == [
        'got have', 'interested', 'requested', ('request', 0, 0, 2)
    ]
    del log[:]
    assert storage.remaining == [[]]
    assert storage.active == [[(0, 2)]]
    # Dropping the peer retracts its availability.
    peer.disconnected()
    assert log == ['lost have']
def test_choke_clears_active():
    """A choke hands the choked peer's outstanding request to another peer."""
    ds = DummyStorage([[(0, 2)]])
    events = []
    d = Downloader(ds, DummyPicker(len(ds.remaining), events), 2, 15, 1,
                   Measure(15), 10)
    sd1 = d.make_download(DummyConnection(events))
    sd2 = d.make_download(DummyConnection(events))
    assert events == []
    assert ds.remaining == [[(0, 2)]]
    assert ds.active == [[]]
    # Peer 1 gets the request for the only block.
    sd1.got_unchoke()
    sd1.got_have(0)
    assert events == [
        'got have', 'interested', 'requested', ('request', 0, 0, 2)
    ]
    del events[:]
    assert ds.remaining == [[]]
    assert ds.active == [[(0, 2)]]
    # Peer 2 also has the piece, but the block is already requested.
    sd2.got_unchoke()
    sd2.got_have(0)
    assert events == ['got have']
    del events[:]
    assert ds.remaining == [[]]
    assert ds.active == [[(0, 2)]]
    # Peer 1 chokes us: the lost request is re-issued to peer 2, and we drop
    # interest in peer 1.
    sd1.got_choke()
    assert events == [
        'interested', 'requested', ('request', 0, 0, 2), 'not interested'
    ]
    del events[:]
    assert ds.remaining == [[]]
    assert ds.active == [[(0, 2)]]
    # Peer 2 delivers the block; the piece completes and interest is dropped.
    sd2.got_piece(0, 0, 'ab')
    assert events == ['complete', 'not interested']
    del events[:]
    assert ds.remaining == [[]]
    assert ds.active == [[]]
def test_endgame():
    """In endgame mode outstanding blocks are requested from every unchoked
    peer that has them, and cancelled everywhere once one copy arrives."""
    ds = DummyStorage([[(0, 2)], [(0, 2)], [(0, 2)]], True, 3)
    events = []
    d = Downloader(ds, DummyPicker(len(ds.remaining), events), 10, 15, 3,
                   Measure(15), 10)
    ev1 = []
    ev2 = []
    ev3 = []
    ev4 = []
    sd1 = d.make_download(DummyConnection(ev1))
    sd2 = d.make_download(DummyConnection(ev2))
    sd3 = d.make_download(DummyConnection(ev3))
    # Peers 1 and 2 each pick up one of the remaining pieces.
    sd1.got_unchoke()
    sd1.got_have(0)
    assert ev1 == ['interested', ('request', 0, 0, 2)]
    del ev1[:]
    sd2.got_unchoke()
    sd2.got_have(0)
    sd2.got_have(1)
    assert ev2 == ['interested', ('request', 1, 0, 2)]
    del ev2[:]
    # Peer 3 advertising the last piece starts endgame: it requests its piece
    # plus duplicates of both other outstanding blocks (order may vary).
    sd3.got_unchoke()
    sd3.got_have(0)
    sd3.got_have(1)
    sd3.got_have(2)
    assert (ev3 == [
        'interested', ('request', 2, 0, 2), ('request', 0, 0, 2),
        ('request', 1, 0, 2)
    ] or ev3 == [
        'interested', ('request', 2, 0, 2), ('request', 1, 0, 2),
        ('request', 0, 0, 2)
    ])
    del ev3[:]
    # Peer 2 duplicates peer 1's piece-0 request as well.
    assert ev2 == [('request', 0, 0, 2)]
    del ev2[:]
    # Piece 0 arrives from peer 2: everyone else cancels their duplicate;
    # peer 1 has nothing left we want.
    sd2.got_piece(0, 0, 'ab')
    assert ev1 == [('cancel', 0, 0, 2), 'not interested']
    del ev1[:]
    assert ev2 == []
    assert ev3 == [('cancel', 0, 0, 2)]
    del ev3[:]
    # A choke/unchoke cycle re-issues peer 3's endgame requests.
    sd3.got_choke()
    assert ev1 == []
    assert ev2 == []
    assert ev3 == []
    sd3.got_unchoke()
    assert (ev3 == [('request', 2, 0, 2), ('request', 1, 0, 2)]
            or ev3 == [('request', 1, 0, 2), ('request', 2, 0, 2)])
    del ev3[:]
    assert ev1 == []
    assert ev2 == []
    # A peer joining during endgame immediately requests all outstanding
    # blocks it has.
    sd4 = d.make_download(DummyConnection(ev4))
    sd4.got_have_bitfield([True, True, True])
    assert ev4 == ['interested']
    del ev4[:]
    sd4.got_unchoke()
    assert (ev4 == [('request', 2, 0, 2), ('request', 1, 0, 2)]
            or ev4 == [('request', 1, 0, 2), ('request', 2, 0, 2)])
    assert ev1 == []
    assert ev2 == []
    assert ev3 == []
class RateLimiter:
    """Global upload rate limiter.

    Connections wanting to send are kept in a circular singly linked list
    (threaded through conn.next_upload, with self.last pointing at the tail).
    try_send walks the ring, handing each connection a chunk to send, until
    the rate budget for the elapsed time is used up; it then reschedules
    itself. Optionally auto-adjusts the rate from ping-time samples.
    """

    def __init__(self, sched, unitsize, slotsfunc=lambda x: None):
        self.sched = sched
        # Tail of the circular list of queued connections (None = empty).
        self.last = None
        self.unitsize = unitsize
        self.slotsfunc = slotsfunc
        self.measure = Measure(MAX_RATE_PERIOD)
        self.autoadjust = False
        # Bytes/second (config values are kB/s, hence * 1000).
        self.upload_rate = MAX_RATE * 1000
        self.slots = SLOTS_STARTING  # garbage if not automatic

    def set_upload_rate(self, rate):
        """Set the upload rate in kB/s; rate < 0 enables auto-adjustment,
        rate == 0 means unlimited."""
        if DEBUG:
            print >> sys.stderr, "RateLimiter: set_upload_rate", rate
        # rate = -1 # test automatic
        if rate < 0:
            if self.autoadjust:
                return
            self.autoadjust = True
            self.autoadjustup = 0
            self.pings = []
            rate = MAX_RATE
            self.slots = SLOTS_STARTING
            self.slotsfunc(self.slots)
        else:
            self.autoadjust = False
        if not rate:
            rate = MAX_RATE
        self.upload_rate = rate * 1000
        self.lasttime = clock()
        self.bytes_sent = 0

    def queue(self, conn):
        """Append conn to the send ring; if the ring was empty, start
        sending immediately."""
        if DEBUG:
            print >> sys.stderr, "RateLimiter: queue", conn
        assert conn.next_upload is None
        if self.last is None:
            # First entry: conn forms a one-element ring pointing at itself.
            self.last = conn
            conn.next_upload = conn
            self.try_send(True)
        else:
            # Insert after the tail and make conn the new tail.
            conn.next_upload = self.last.next_upload
            self.last.next_upload = conn
            self.last = conn

    def try_send(self, check_time=False):
        """Send data round-robin until the rate budget is spent, then
        reschedule.  bytes_sent is a deficit counter: it is decremented by
        the bytes allowed since the last call and incremented per send, so
        sending continues while it is <= 0."""
        if DEBUG:
            print >> sys.stderr, "RateLimiter: try_send"
        t = clock()
        self.bytes_sent -= (t - self.lasttime) * self.upload_rate
        #print >> sys.stderr, 'try_send: bytes_sent: %s' % self.bytes_sent
        self.lasttime = t
        if check_time and (self.upload_rate < MAX_RATE * 1000):
            #do not set self.bytes_sent to 0 if we are unlimited...
            self.bytes_sent = max(self.bytes_sent, 0)
        cur = self.last.next_upload
        while self.bytes_sent <= 0:
            #we would like to send up to self.bytes_sent data to someone
            #why not try to send this at once?
            #bytes = cur.send_partial(self.unitsize)
            # Offer at least unitsize, or the whole remaining budget at once.
            remaining_bytes = max(self.unitsize, int(1 - self.bytes_sent))
            bytes = cur.send_partial(remaining_bytes)
            self.bytes_sent += bytes
            self.measure.update_rate(bytes)
            if bytes == 0 or cur.backlogged():
                # Nothing to send (or receiver backlogged): unlink cur.
                if self.last is cur:
                    self.last = None
                    cur.next_upload = None
                    break
                else:
                    self.last.next_upload = cur.next_upload
                    cur.next_upload = None
                    cur = self.last.next_upload
            else:
                #does this connection still have a buffer?
                if not cur.upload.buffer:
                    self.last = cur
                    cur = cur.next_upload  #switch to the next one
                else:
                    pass
        else:
            # 01/04/10 Boudewijn: because we use a -very- small value
            # to indicate a 0bps rate, we will schedule the call to be
            # made in a very long time. This results in no upload for
            # a very long time.
            #
            # the try_send method has protection again calling to
            # soon, so we can simply schedule the call to be made
            # sooner.
            delay = min(5.0, self.bytes_sent / self.upload_rate)
            self.sched(self.try_send, delay)

    def adjust_sent(self, bytes):
        """Account for bytes sent outside try_send (capped at 3 seconds'
        worth of budget)."""
        # if DEBUG: print >>sys.stderr, "RateLimiter: adjust_sent", bytes
        self.bytes_sent = min(self.bytes_sent + bytes, self.upload_rate * 3)
        self.measure.update_rate(bytes)

    def ping(self, delay):
        """Feed one ping-time sample to the auto-adjuster; every
        PING_SAMPLES samples the upload rate is moved up or down."""
        ##raise Exception('Is this called?')
        if DEBUG:
            print >> sys.stderr, delay
        if not self.autoadjust:
            return
        self.pings.append(delay > PING_BOUNDARY)
        if len(self.pings) < PING_SAMPLES + PING_DISCARDS:
            return
        if DEBUG:
            print >> sys.stderr, 'RateLimiter: cycle'
        # Count slow pings, ignoring the first PING_DISCARDS warm-up samples.
        pings = sum(self.pings[PING_DISCARDS:])
        del self.pings[:]
        if pings >= PING_THRESHHOLD:  # assume flooded
            if self.upload_rate == MAX_RATE:
                self.upload_rate = self.measure.get_rate() * ADJUST_DOWN
            else:
                self.upload_rate = min(self.upload_rate,
                                       self.measure.get_rate() * 1.1)
            self.upload_rate = max(int(self.upload_rate * ADJUST_DOWN), 2)
            self.slots = int(sqrt(self.upload_rate * SLOTS_FACTOR))
            self.slotsfunc(self.slots)
            if DEBUG:
                print >> sys.stderr, 'RateLimiter: adjust down to ' + str(
                    self.upload_rate)
            self.lasttime = clock()
            self.bytes_sent = 0
            self.autoadjustup = UP_DELAY_FIRST
        else:  # not flooded
            if self.upload_rate == MAX_RATE:
                return
            # Only adjust up every autoadjustup-th clean cycle.
            self.autoadjustup -= 1
            if self.autoadjustup:
                return
            self.upload_rate = int(self.upload_rate * ADJUST_UP)
            self.slots = int(sqrt(self.upload_rate * SLOTS_FACTOR))
            self.slotsfunc(self.slots)
            if DEBUG:
                print >> sys.stderr, 'RateLimiter: adjust up to ' + str(
                    self.upload_rate)
            self.lasttime = clock()
            self.bytes_sent = 0
            self.autoadjustup = UP_DELAY_NEXT
return e = 'maxport less than minport - no ports to check' for listen_port in xrange(config['minport'], config['maxport'] + 1): try: rawserver.bind(listen_port, config['bind']) break except socketerror, e: pass else: errorfunc("Couldn't listen - " + str(e)) return choker = Choker(config['max_uploads'], rawserver.add_task, finflag.isSet, config['min_uploads']) upmeasure = Measure(config['max_rate_period'], config['upload_rate_fudge']) downmeasure = Measure(config['max_rate_period']) def make_upload(connection, choker = choker, storagewrapper = storagewrapper, max_slice_length = config['max_slice_length'], max_rate_period = config['max_rate_period'], fudge = config['upload_rate_fudge']): return Upload(connection, choker, storagewrapper, max_slice_length, max_rate_period, fudge) ratemeasure = RateMeasure(storagewrapper.get_amount_left()) rm[0] = ratemeasure.data_rejected picker = PiecePicker(len(pieces), config['rarest_first_cutoff']) for i in xrange(len(pieces)): if storagewrapper.do_I_have(i): picker.complete(i) downloader = Downloader(storagewrapper, picker,
class BT1Download:
    """Coordinates a single torrent download/upload session.

    Wires together storage, piece picking, choking, the rate limiter, the
    tracker re-requester and the peer encoder.  Lifecycle: construct, then
    saveAs -> initFiles -> startEngine -> startRerequester, and shutdown()
    at the end.
    """

    def __init__(self, statusfunc, finfunc, errorfunc, excfunc, doneflag,
                 config, response, infohash, id, rawserver, port,
                 appdataobj=None):
        self.statusfunc = statusfunc
        self.finfunc = finfunc
        self.errorfunc = errorfunc
        self.excfunc = excfunc
        self.doneflag = doneflag
        self.config = config
        self.response = response
        self.infohash = infohash
        self.myid = id
        self.rawserver = rawserver
        self.port = port
        self.info = self.response['info']
        # The 'pieces' string is a concatenation of 20-byte SHA1 digests.
        self.pieces = [
            self.info['pieces'][x:x + 20]
            for x in xrange(0, len(self.info['pieces']), 20)
        ]
        self.len_pieces = len(self.pieces)
        self.argslistheader = argslistheader
        self.unpauseflag = threading.Event()
        self.unpauseflag.set()
        self.downloader = None
        self.storagewrapper = None
        self.fileselector = None
        self.super_seeding_active = False
        self.filedatflag = threading.Event()
        self.spewflag = threading.Event()
        self.superseedflag = threading.Event()
        self.whenpaused = None
        self.finflag = threading.Event()
        self.rerequest = None
        self.tcp_ack_fudge = config['tcp_ack_fudge']
        self.selector_enabled = config['selector_enabled']
        if appdataobj:
            self.appdataobj = appdataobj
        elif self.selector_enabled:
            self.appdataobj = ConfigDir()
            self.appdataobj.deleteOldCacheData(config['expire_cache_data'],
                                               [self.infohash])
        self.excflag = self.rawserver.get_exception_flag()
        self.failed = False
        self.checking = False
        self.started = False
        self.picker = PiecePicker(self.len_pieces,
                                  config['rarest_first_cutoff'],
                                  config['rarest_first_priority_cutoff'])
        self.choker = Choker(config, rawserver.add_task, self.picker,
                             self.finflag.isSet)

    def checkSaveLocation(self, loc):
        """Return whether any previously downloaded data exists at loc."""
        if 'length' in self.info:
            return os.path.exists(loc)
        return any(
            os.path.exists(os.path.join(loc, x['path'][0]))
            for x in self.info['files'])

    def saveAs(self, filefunc, pathfunc=None):
        """Resolve the on-disk location via filefunc and create the needed
        directories.  Returns the chosen path, or None on error/cancel."""
        try:
            def make(f, forcedir=False):
                # Create the parent dir (or f itself when forcedir) if absent.
                if not forcedir:
                    f = os.path.split(f)[0]
                if f != '' and not os.path.exists(f):
                    os.makedirs(f)

            if 'length' in self.info:
                # Single-file torrent.
                file_length = self.info['length']
                file = filefunc(self.info['name'], file_length,
                                self.config['saveas'], False)
                if file is None:
                    return None
                make(file)
                files = [(file, file_length)]
            else:
                # Multi-file torrent.
                file_length = sum(x['length'] for x in self.info['files'])
                file = filefunc(self.info['name'], file_length,
                                self.config['saveas'], True)
                if file is None:
                    return None
                # if this path exists, and no files from the info dict exist,
                # we assume it's a new download and the user wants to create a
                # new directory with the default name
                existing = 0
                if os.path.exists(file):
                    if not os.path.isdir(file):
                        # BUG FIX: message previously ran the path and the
                        # text together ("...is not a dir").
                        self.errorfunc(file + ' is not a dir')
                        return None
                    if len(os.listdir(file)) > 0:  # if it's not empty
                        existing = any(
                            os.path.exists(os.path.join(file, x['path'][0]))
                            for x in self.info['files'])
                        if not existing:
                            file = os.path.join(file, self.info['name'])
                            if os.path.exists(file) and \
                                    not os.path.isdir(file):
                                if file[-8:] == '.torrent':
                                    file = file[:-8]
                                if os.path.exists(file) and \
                                        not os.path.isdir(file):
                                    self.errorfunc("Can't create dir - " +
                                                   self.info['name'])
                                    return None
                make(file, True)
                # alert the UI to any possible change in path
                if pathfunc is not None:
                    pathfunc(file)
                files = []
                for x in self.info['files']:
                    n = os.path.join(file, *x['path'])
                    files.append((n, x['length']))
                    make(n)
        except OSError as e:
            self.errorfunc("Couldn't allocate dir - " + str(e))
            return None
        self.filename = file
        self.files = files
        self.datalength = file_length
        return file

    def getFilename(self):
        return self.filename

    def _finished(self):
        """Called once every piece has been downloaded and verified."""
        self.finflag.set()
        try:
            self.storage.set_readonly()
        except (IOError, OSError) as e:
            self.errorfunc('trouble setting readonly at end - ' + str(e))
        if self.superseedflag.isSet():
            self._set_super_seed()
        self.choker.set_round_robin_period(
            max(
                self.config['round_robin_period'],
                self.config['round_robin_period'] *
                self.info['piece length'] / 200000))
        self.rerequest_complete()
        self.finfunc()

    def _data_flunked(self, amount, index):
        """A downloaded piece failed its hash check."""
        self.ratemeasure_datarejected(amount)
        if not self.doneflag.isSet():
            self.errorfunc(
                'piece {:d} failed hash check, re-downloading it'.format(
                    index))

    def _failed(self, reason):
        """Abort the session, optionally reporting reason."""
        self.failed = True
        self.doneflag.set()
        if reason is not None:
            self.errorfunc(reason)

    def initFiles(self, old_style=False, statusfunc=None):
        """Open storage and begin hash-checking.  Returns None on error; the
        hashcheck initializer callable otherwise (or its result when
        old_style is set)."""
        if self.doneflag.isSet():
            return None
        if not statusfunc:
            statusfunc = self.statusfunc
        disabled_files = None
        if self.selector_enabled:
            self.priority = self.config['priority']
            if self.priority:
                try:
                    # Comma-separated per-file priorities in [-1, 2].
                    self.priority = self.priority.split(',')
                    assert len(self.priority) == len(self.files)
                    self.priority = [int(p) for p in self.priority]
                    for p in self.priority:
                        assert p >= -1
                        assert p <= 2
                except:
                    self.errorfunc('bad priority list given, ignored')
                    self.priority = None
            data = self.appdataobj.getTorrentData(self.infohash)
            try:
                d = data['resume data']['priority']
                assert len(d) == len(self.files)
                disabled_files = [x == -1 for x in d]
            except:
                try:
                    disabled_files = [x == -1 for x in self.priority]
                except:
                    pass
        try:
            try:
                self.storage = Storage(self.files, self.info['piece length'],
                                       self.doneflag, self.config,
                                       disabled_files)
            except IOError as e:
                self.errorfunc('trouble accessing files - ' + str(e))
                return None
            if self.doneflag.isSet():
                return None
            self.storagewrapper = StorageWrapper(
                self.storage, self.config['download_slice_size'], self.pieces,
                self.info['piece length'], self._finished, self._failed,
                statusfunc, self.doneflag, self.config['check_hashes'],
                self._data_flunked, self.rawserver.add_task, self.config,
                self.unpauseflag)
        except ValueError as e:
            self._failed('bad data - ' + str(e))
        except IOError as e:
            self._failed('IOError - ' + str(e))
        if self.doneflag.isSet():
            return None
        if self.selector_enabled:
            self.fileselector = FileSelector(
                self.files, self.info['piece length'],
                self.appdataobj.getPieceDir(self.infohash), self.storage,
                self.storagewrapper, self.rawserver.add_task, self._failed)
            if data:
                data = data.get('resume data')
                if data:
                    self.fileselector.unpickle(data)
        self.checking = True
        if old_style:
            return self.storagewrapper.old_style_init()
        # Note: returns the bound method itself for the caller to invoke.
        return self.storagewrapper.initialize

    def getCachedTorrentData(self):
        return self.appdataobj.getTorrentData(self.infohash)

    def _make_upload(self, connection, ratelimiter, totalup):
        return Upload(connection, ratelimiter, totalup, self.choker,
                      self.storagewrapper, self.picker, self.config)

    def _kick_peer(self, connection):
        self.rawserver.add_task(connection.close, 0)

    def _ban_peer(self, ip):
        self.encoder_ban(ip)

    def _received_raw_data(self, x):
        if self.tcp_ack_fudge:
            x = int(x * self.tcp_ack_fudge)
            self.ratelimiter.adjust_sent(x)

    def _received_data(self, x):
        self.downmeasure.update_rate(x)
        self.ratemeasure.data_came_in(x)

    def _received_http_data(self, x):
        self.downmeasure.update_rate(x)
        self.ratemeasure.data_came_in(x)
        self.downloader.external_data_received(x)

    def _cancelfunc(self, pieces):
        self.downloader.cancel_piece_download(pieces)
        self.httpdownloader.cancel_piece_download(pieces)

    def _reqmorefunc(self, pieces):
        self.downloader.requeue_piece_download(pieces)

    def startEngine(self, ratelimiter=None, statusfunc=None):
        """Build the transfer machinery (downloader, connecter, encoder,
        HTTP seeds).  Returns False if already aborted, True on success."""
        if self.doneflag.isSet():
            return False
        if not statusfunc:
            statusfunc = self.statusfunc
        self.checking = False
        if not CRYPTO_OK:
            if self.config['crypto_allowed']:
                self.errorfunc('warning - crypto library not installed')
            self.config['crypto_allowed'] = 0
            self.config['crypto_only'] = 0
            self.config['crypto_stealth'] = 0
        # Tell the picker which pieces we already have.
        for i in xrange(self.len_pieces):
            if self.storagewrapper.do_I_have(i):
                self.picker.complete(i)
        self.upmeasure = Measure(self.config['max_rate_period'],
                                 self.config['upload_rate_fudge'])
        self.downmeasure = Measure(self.config['max_rate_period'])
        if ratelimiter:
            self.ratelimiter = ratelimiter
        else:
            self.ratelimiter = RateLimiter(self.rawserver.add_task,
                                           self.config['upload_unit_size'],
                                           self.setConns)
            self.ratelimiter.set_upload_rate(self.config['max_upload_rate'])
        self.ratemeasure = RateMeasure()
        self.ratemeasure_datarejected = self.ratemeasure.data_rejected
        self.downloader = Downloader(
            self.storagewrapper, self.picker, self.config['request_backlog'],
            self.config['max_rate_period'], self.len_pieces,
            self.config['download_slice_size'], self._received_data,
            self.config['snub_time'], self.config['auto_kick'],
            self._kick_peer, self._ban_peer)
        self.downloader.set_download_rate(self.config['max_download_rate'])
        self.connecter = Connecter(self._make_upload, self.downloader,
                                   self.choker, self.len_pieces,
                                   self.upmeasure, self.config,
                                   self.ratelimiter, self.rawserver.add_task)
        self.encoder = Encoder(self.connecter, self.rawserver, self.myid,
                               self.config['max_message_length'],
                               self.rawserver.add_task,
                               self.config['keepalive_interval'],
                               self.infohash, self._received_raw_data,
                               self.config)
        self.encoder_ban = self.encoder.ban
        self.httpdownloader = HTTPDownloader(
            self.storagewrapper, self.picker, self.rawserver, self.finflag,
            self.errorfunc, self.downloader, self.config['max_rate_period'],
            self.infohash, self._received_http_data, self.connecter.got_piece)
        if 'httpseeds' in self.response and not self.finflag.isSet():
            for u in self.response['httpseeds']:
                self.httpdownloader.make_download(u)
        if self.selector_enabled:
            self.fileselector.tie_in(self.picker, self._cancelfunc,
                                     self._reqmorefunc,
                                     self.rerequest_ondownloadmore)
            if self.priority:
                self.fileselector.set_priorities_now(self.priority)
            # erase old data once you've started modifying it
            self.appdataobj.deleteTorrentData(self.infohash)
        if self.config['super_seeder']:
            self.set_super_seed()
        self.started = True
        return True

    def rerequest_complete(self):
        if self.rerequest:
            self.rerequest.announce(1)

    def rerequest_stopped(self):
        if self.rerequest:
            self.rerequest.announce(2)

    def rerequest_lastfailed(self):
        if self.rerequest:
            return self.rerequest.last_failed
        return False

    def rerequest_ondownloadmore(self):
        if self.rerequest:
            self.rerequest.hit()

    def startRerequester(self, seededfunc=None, force_rapid_update=False):
        """Create and start the tracker re-requester."""
        trackerlist = self.response.get('announce-list',
                                        [[self.response['announce']]])
        self.rerequest = Rerequester(
            self.port, self.myid, self.infohash, trackerlist, self.config,
            self.rawserver.add_task, self.rawserver.add_task, self.errorfunc,
            self.excfunc, self.encoder.start_connections,
            self.connecter.how_many_connections,
            self.storagewrapper.get_amount_left, self.upmeasure.get_total,
            self.downmeasure.get_total, self.upmeasure.get_rate,
            self.downmeasure.get_rate, self.doneflag, self.unpauseflag,
            seededfunc, force_rapid_update)
        self.rerequest.start()

    def _init_stats(self):
        self.statistics = Statistics(self.upmeasure, self.downmeasure,
                                     self.connecter, self.httpdownloader,
                                     self.ratelimiter,
                                     self.rerequest_lastfailed,
                                     self.filedatflag)
        if 'files' in self.info:
            self.statistics.set_dirstats(self.files,
                                         self.info['piece length'])
        if self.config['spew']:
            self.spewflag.set()

    def autoStats(self, displayfunc=None):
        """Start periodic status display via displayfunc."""
        if not displayfunc:
            displayfunc = self.statusfunc
        self._init_stats()
        DownloaderFeedback(self.choker, self.httpdownloader,
                           self.rawserver.add_task, self.upmeasure.get_rate,
                           self.downmeasure.get_rate, self.ratemeasure,
                           self.storagewrapper.get_stats, self.datalength,
                           self.finflag, self.spewflag, self.statistics,
                           displayfunc, self.config['display_interval'])

    def startStats(self):
        """Return a callable that gathers a stats snapshot on demand."""
        self._init_stats()
        d = DownloaderFeedback(self.choker, self.httpdownloader,
                               self.rawserver.add_task,
                               self.upmeasure.get_rate,
                               self.downmeasure.get_rate, self.ratemeasure,
                               self.storagewrapper.get_stats,
                               self.datalength, self.finflag, self.spewflag,
                               self.statistics)
        return d.gather

    def getPortHandler(self):
        return self.encoder

    def shutdown(self, torrentdata=None):
        """Flush and close storage, stop the tracker, persist resume data.
        Returns True when the session ended cleanly."""
        # BUG FIX: the default used to be a shared mutable dict ({}) that
        # this method writes into, leaking resume data between calls.
        if torrentdata is None:
            torrentdata = {}
        if self.checking or self.started:
            self.storagewrapper.sync()
            self.storage.close()
            self.rerequest_stopped()
        if self.fileselector and self.started:
            if not self.failed:
                self.fileselector.finish()
                torrentdata['resume data'] = self.fileselector.pickle()
            try:
                self.appdataobj.writeTorrentData(self.infohash, torrentdata)
            except:
                self.appdataobj.deleteTorrentData(self.infohash)  # clear it
        return not self.failed and not self.excflag.isSet()
        # if returns false, you may wish to auto-restart the torrent

    def setUploadRate(self, rate):
        try:
            def s(self=self, rate=rate):
                self.config['max_upload_rate'] = rate
                self.ratelimiter.set_upload_rate(rate)
            self.rawserver.add_task(s)
        except AttributeError:
            pass

    def setConns(self, conns, conns2=None):
        if not conns2:
            conns2 = conns
        try:
            def s(self=self, conns=conns, conns2=conns2):
                self.config['min_uploads'] = conns
                self.config['max_uploads'] = conns2
                if (conns > 30):
                    self.config['max_initiate'] = conns + 10
            self.rawserver.add_task(s)
        except AttributeError:
            pass

    def setDownloadRate(self, rate):
        try:
            def s(self=self, rate=rate):
                self.config['max_download_rate'] = rate
                self.downloader.set_download_rate(rate)
            self.rawserver.add_task(s)
        except AttributeError:
            pass

    def startConnection(self, ip, port, id):
        self.encoder._start_connection((ip, port), id)

    def _startConnection(self, ipandport, id):
        self.encoder._start_connection(ipandport, id)

    def setInitiate(self, initiate):
        try:
            def s(self=self, initiate=initiate):
                self.config['max_initiate'] = initiate
            self.rawserver.add_task(s)
        except AttributeError:
            pass

    def getConfig(self):
        return self.config

    def getDefaults(self):
        return defaultargs(defaults)

    def getUsageText(self):
        return self.argslistheader

    def reannounce(self, special=None):
        try:
            def r(self=self, special=special):
                if special is None:
                    self.rerequest.announce()
                else:
                    self.rerequest.announce(specialurl=special)
            self.rawserver.add_task(r)
        except AttributeError:
            pass

    def getResponse(self):
        try:
            return self.response
        except:
            return None

    def Pause(self):
        """Request a pause; returns False if storage isn't ready yet."""
        if not self.storagewrapper:
            return False
        self.unpauseflag.clear()
        self.rawserver.add_task(self.onPause)
        return True

    def onPause(self):
        self.whenpaused = clock()
        if not self.downloader:
            return
        self.downloader.pause(True)
        self.encoder.pause(True)
        self.choker.pause(True)

    def Unpause(self):
        self.unpauseflag.set()
        self.rawserver.add_task(self.onUnpause)

    def onUnpause(self):
        if not self.downloader:
            return
        self.downloader.pause(False)
        self.encoder.pause(False)
        self.choker.pause(False)
        # rerequest automatically if paused for >60 seconds
        if self.rerequest and self.whenpaused and \
                clock() - self.whenpaused > 60:
            self.rerequest.announce(3)

    def set_super_seed(self):
        try:
            self.superseedflag.set()

            def s(self=self):
                if self.finflag.isSet():
                    self._set_super_seed()
            self.rawserver.add_task(s)
        except AttributeError:
            pass

    def _set_super_seed(self):
        if not self.super_seeding_active:
            self.super_seeding_active = True
            self.errorfunc(' ** SUPER-SEED OPERATION ACTIVE **\n '
                           'please set Max uploads so each peer gets 6-8 kB/s')

            def s(self=self):
                self.downloader.set_super_seed()
                self.choker.set_super_seed()
            self.rawserver.add_task(s)
            # mode started when already finished
            if self.finflag.isSet():
                def r(self=self):
                    # so after kicking everyone off, reannounce
                    self.rerequest.announce(3)
                self.rawserver.add_task(r)

    def am_I_finished(self):
        return self.finflag.isSet()

    def get_transfer_stats(self):
        return self.upmeasure.get_total(), self.downmeasure.get_total()
def startEngine(self, ratelimiter=None, statusfunc=None):
    """Build the transfer machinery (downloader, connecter, encoder, HTTP
    seeds).  Returns False if already aborted, True on success.

    ratelimiter -- optional shared RateLimiter; a private one is created
                   (and given the configured rate) when omitted.
    """
    if self.doneflag.isSet():
        return False
    if not statusfunc:
        statusfunc = self.statusfunc
    self.checking = False
    if not CRYPTO_OK:
        # No crypto support available: disable all encryption options.
        if self.config['crypto_allowed']:
            self.errorfunc('warning - crypto library not installed')
        self.config['crypto_allowed'] = 0
        self.config['crypto_only'] = 0
        self.config['crypto_stealth'] = 0
    # Tell the picker which pieces we already have on disk.
    for i in xrange(self.len_pieces):
        if self.storagewrapper.do_I_have(i):
            self.picker.complete(i)
    self.upmeasure = Measure(self.config['max_rate_period'],
                             self.config['upload_rate_fudge'])
    self.downmeasure = Measure(self.config['max_rate_period'])
    if ratelimiter:
        self.ratelimiter = ratelimiter
    else:
        self.ratelimiter = RateLimiter(self.rawserver.add_task,
                                       self.config['upload_unit_size'],
                                       self.setConns)
        self.ratelimiter.set_upload_rate(self.config['max_upload_rate'])
    self.ratemeasure = RateMeasure()
    self.ratemeasure_datarejected = self.ratemeasure.data_rejected
    self.downloader = Downloader(
        self.storagewrapper, self.picker, self.config['request_backlog'],
        self.config['max_rate_period'], self.len_pieces,
        self.config['download_slice_size'], self._received_data,
        self.config['snub_time'], self.config['auto_kick'], self._kick_peer,
        self._ban_peer)
    self.downloader.set_download_rate(self.config['max_download_rate'])
    self.connecter = Connecter(self._make_upload, self.downloader,
                               self.choker, self.len_pieces, self.upmeasure,
                               self.config, self.ratelimiter,
                               self.rawserver.add_task)
    self.encoder = Encoder(self.connecter, self.rawserver, self.myid,
                           self.config['max_message_length'],
                           self.rawserver.add_task,
                           self.config['keepalive_interval'], self.infohash,
                           self._received_raw_data, self.config)
    self.encoder_ban = self.encoder.ban
    self.httpdownloader = HTTPDownloader(
        self.storagewrapper, self.picker, self.rawserver, self.finflag,
        self.errorfunc, self.downloader, self.config['max_rate_period'],
        self.infohash, self._received_http_data, self.connecter.got_piece)
    # HTTP seeds are only useful while we are still downloading.
    if 'httpseeds' in self.response and not self.finflag.isSet():
        for u in self.response['httpseeds']:
            self.httpdownloader.make_download(u)
    if self.selector_enabled:
        self.fileselector.tie_in(self.picker, self._cancelfunc,
                                 self._reqmorefunc,
                                 self.rerequest_ondownloadmore)
        if self.priority:
            self.fileselector.set_priorities_now(self.priority)
        # erase old data once you've started modifying it
        self.appdataobj.deleteTorrentData(self.infohash)
    if self.config['super_seeder']:
        self.set_super_seed()
    self.started = True
    return True
class Upload: def __init__(self, connection, choker, storage, max_slice_length, max_rate_period, fudge): self.connection = connection self.choker = choker self.storage = storage self.max_slice_length = max_slice_length self.max_rate_period = max_rate_period self.choked = True self.interested = False self.buffer = [] self.measure = Measure(max_rate_period, fudge) if storage.do_I_have_anything(): connection.send_bitfield(storage.get_have_list()) def got_not_interested(self): if self.interested: self.interested = False del self.buffer[:] self.choker.not_interested(self.connection) def got_interested(self): if not self.interested: self.interested = True self.choker.interested(self.connection) def flushed(self): while len(self.buffer) > 0 and self.connection.is_flushed(): index, begin, length = self.buffer[0] del self.buffer[0] piece = self.storage.get_piece(index, begin, length) if piece is None: self.connection.close() return self.measure.update_rate(len(piece)) self.connection.send_piece(index, begin, piece) def got_request(self, index, begin, length): if not self.interested or length > self.max_slice_length: self.connection.close() return if not self.choked: self.buffer.append((index, begin, length)) self.flushed() def got_cancel(self, index, begin, length): try: self.buffer.remove((index, begin, length)) except ValueError: pass def choke(self): if not self.choked: self.choked = True del self.buffer[:] self.connection.send_choke() def unchoke(self): if self.choked: self.choked = False self.connection.send_unchoke() def is_choked(self): return self.choked def is_interested(self): return self.interested def has_queries(self): return len(self.buffer) > 0 def get_rate(self): return self.measure.get_rate()
class SingleDownload:
    """Downloads piece data from a single peer; one instance per connection."""

    def __init__(self, downloader, connection):
        self.downloader = downloader
        self.connection = connection
        # Whether the peer is choking this client.
        self.choked = True
        # Whether this client is interested in data the peer has.
        self.interested = False
        # The (index, begin, length) tuples this client has requested from the peer.
        self.active_requests = []
        # Measures the download rate from the peer.
        self.measure = Measure(downloader.max_rate_period)
        # The pieces the peer has.
        self.have = Bitfield(downloader.numpieces)
        # The last time this client has gotten data from the peer.
        self.last = 0
        self.example_interest = None

    def disconnected(self):
        self.downloader.downloads.remove(self)
        # Decrement the availability of each piece this peer had.
        for i in xrange(len(self.have)):
            if self.have[i]:
                self.downloader.picker.lost_have(i)
        self._letgo()

    def _letgo(self):
        """Release all outstanding requests and redistribute them to other peers."""
        if not self.active_requests:
            return
        if self.downloader.storage.is_endgame():
            # In endgame mode the blocks in active_requests are being requested
            # from other peers anyway.
            self.active_requests = []
            return
        # The piece indexes that this client was requesting from the peer.
        lost = []
        for index, begin, length in self.active_requests:
            # No longer downloading this block.
            self.downloader.storage.request_lost(index, begin, length)
            if index not in lost:
                lost.append(index)
        self.active_requests = []
        # All other SingleDownload instances that are not choking us.
        ds = [d for d in self.downloader.downloads if not d.choked]
        shuffle(ds)
        for d in ds:
            d._request_more(lost)
        for d in self.downloader.downloads:
            # The instances that are choking us (none were in ds), filtered by
            # the ones we are not already interested in.
            if d.choked and not d.interested:
                for l in lost:
                    if d.have[l] and self.downloader.storage.do_I_have_requests(l):
                        # This other peer has a piece the client was downloading
                        # from this peer, so become interested.
                        d.interested = True
                        d.connection.send_interested()
                        break

    def got_choke(self):
        if not self.choked:
            # The peer choked this client.
            self.choked = True
            self._letgo()

    def got_unchoke(self):
        if self.choked:
            # The peer unchoked this client.
            self.choked = False
            if self.interested:
                # This peer has data we want, so request it.
                self._request_more()

    def is_choked(self):
        return self.choked

    def is_interested(self):
        return self.interested

    def got_piece(self, index, begin, piece):
        """Handle a received block; returns True when the piece is complete."""
        try:
            # This active request to the peer has been fulfilled.
            self.active_requests.remove((index, begin, len(piece)))
        except ValueError:
            # Not a block we asked this peer for; ignore it.
            return False
        if self.downloader.storage.is_endgame():
            # Remove it from the consolidated list of blocks sent to all peers.
            self.downloader.all_requests.remove((index, begin, len(piece)))
        # Update the download-rate measurements.
        self.last = time()
        self.measure.update_rate(len(piece))
        self.downloader.measurefunc(len(piece))
        self.downloader.downmeasure.update_rate(len(piece))
        if not self.downloader.storage.piece_came_in(index, begin, piece):
            # This block completed a piece but the piece failed validation.
            if self.downloader.storage.is_endgame():
                # Re-issue requests for the failed piece to everyone.
                while self.downloader.storage.do_I_have_requests(index):
                    nb, nl = self.downloader.storage.new_request(index)
                    self.downloader.all_requests.append((index, nb, nl))
                for d in self.downloader.downloads:
                    d.fix_download_endgame()
                return False
            # Decrease the priority of this piece...
            self.downloader.picker.bump(index)
            # ... but try downloading it again immediately from unchoked peers.
            ds = [d for d in self.downloader.downloads if not d.choked]
            shuffle(ds)
            for d in ds:
                d._request_more([index])
            return False
        if self.downloader.storage.do_I_have(index):
            # Notify the picker that this piece is complete.
            self.downloader.picker.complete(index)
        if self.downloader.storage.is_endgame():
            for d in self.downloader.downloads:
                if d is not self and d.interested:
                    if d.choked:
                        # Keep requesting pieces we're requesting from other peers.
                        d.fix_download_endgame()
                    else:
                        # Cancel the request for this block from this peer.
                        try:
                            d.active_requests.remove((index, begin, len(piece)))
                        except ValueError:
                            # Wasn't requesting this block from this peer.
                            continue
                        d.connection.send_cancel(index, begin, len(piece))
                        # Keep requesting pieces we're requesting from other peers.
                        d.fix_download_endgame()
        self._request_more()
        if self.downloader.picker.am_I_complete():
            # Download finished: drop peers that are also complete (seeds).
            for d in [i for i in self.downloader.downloads
                      if i.have.numfalse == 0]:
                d.connection.close()
        return self.downloader.storage.do_I_have(index)

    def _want(self, index):
        # Want a piece if the peer has it and blocks of it are still needed.
        return self.have[index] and self.downloader.storage.do_I_have_requests(index)

    def _request_more(self, indices=None):
        """Queue more block requests to this peer, up to the backlog limit."""
        assert not self.choked
        # Return if we already have the maximum outstanding requests to this peer.
        if len(self.active_requests) == self.downloader.backlog:
            return
        if self.downloader.storage.is_endgame():
            # Keep requesting pieces that we're requesting from other peers.
            self.fix_download_endgame()
            return
        lost_interests = []
        while len(self.active_requests) < self.downloader.backlog:
            # Fewer than the maximum outstanding requests to this peer...
            if indices is None:
                # No specific indexes given; let the picker choose a piece.
                interest = self.downloader.picker.next(self._want,
                                                       self.have.numfalse == 0)
            else:
                # Pick a piece from one of the given indexes to download.
                interest = None
                for i in indices:
                    if self.have[i] and self.downloader.storage.do_I_have_requests(i):
                        interest = i
                        break
            if interest is None:
                # Could not find anything we want, so stop.
                break
            if not self.interested:
                # Found a piece we want; tell the peer this client is interested.
                self.interested = True
                self.connection.send_interested()
            self.example_interest = interest
            # Get a block of the piece to request.
            begin, length = self.downloader.storage.new_request(interest)
            # Notify the PiecePicker that we're requesting this piece.
            self.downloader.picker.requested(interest, self.have.numfalse == 0)
            self.active_requests.append((interest, begin, length))
            self.connection.send_request(interest, begin, length)
            if not self.downloader.storage.do_I_have_requests(interest):
                # No blocks of this piece left to hand out; other peers whose
                # interest hinged on it may need re-evaluation below.
                lost_interests.append(interest)
        if not self.active_requests and self.interested:
            # Peer has no pieces this client wants, so no longer interested.
            self.interested = False
            self.connection.send_not_interested()
        if lost_interests:
            for d in self.downloader.downloads:
                if d.active_requests or not d.interested:
                    continue
                if d.example_interest is not None and \
                        self.downloader.storage.do_I_have_requests(d.example_interest):
                    # d's recorded reason for interest is still valid.
                    continue
                for lost in lost_interests:
                    if d.have[lost]:
                        break
                else:
                    continue
                # FIX: this assignment was commented out, leaving `interest`
                # holding a stale value from the request loop above; re-pick
                # what d should be interested in now.
                interest = self.downloader.picker.next(d._want,
                                                       d.have.numfalse == 0)
                if interest is None:
                    d.interested = False
                    d.connection.send_not_interested()
                else:
                    d.example_interest = interest
        if self.downloader.storage.is_endgame():
            # Now entering endgame mode: consolidate the block requests this
            # client has sent to all peers.
            self.downloader.all_requests = []
            for d in self.downloader.downloads:
                self.downloader.all_requests.extend(d.active_requests)
            # Request from each peer the pieces being requested from others.
            for d in self.downloader.downloads:
                d.fix_download_endgame()

    def fix_download_endgame(self):
        """In endgame mode, request blocks that are pending with other peers."""
        # Blocks this peer has which we're requesting from other peers.
        want = [a for a in self.downloader.all_requests
                if self.have[a[0]] and a not in self.active_requests]
        if self.interested and not self.active_requests and not want:
            # Nothing to get from this peer at all; become uninterested.
            self.interested = False
            self.connection.send_not_interested()
            return
        if not self.interested and want:
            # There are such blocks, so become interested.
            self.interested = True
            self.connection.send_interested()
        if self.choked:
            # Peer is choking us, so we can't send any requests yet.
            return
        # Don't exceed the maximum number of outstanding requests.
        shuffle(want)
        del want[self.downloader.backlog - len(self.active_requests):]
        # Request the blocks that we're requesting from other peers.
        self.active_requests.extend(want)
        for piece, begin, length in want:
            self.connection.send_request(piece, begin, length)

    def got_have(self, index):
        if self.have[index]:
            # Already knew the peer has this piece.
            return
        self.have[index] = True
        # Increase the availability of this piece.
        self.downloader.picker.got_have(index)
        if self.downloader.picker.am_I_complete() and self.have.numfalse == 0:
            # Both this client and the peer have every piece, so close.
            self.connection.close()
            return
        if self.downloader.storage.is_endgame():
            # Keep requesting pieces that we're requesting from other peers.
            self.fix_download_endgame()
        elif self.downloader.storage.do_I_have_requests(index):
            if not self.choked:
                self._request_more([index])
            else:
                # The peer is choking us; just express that we are interested.
                if not self.interested:
                    self.interested = True
                    self.connection.send_interested()

    def got_have_bitfield(self, have):
        # Assign the full bitfield of pieces the peer has.
        self.have = have
        for i in xrange(len(self.have)):
            # Increase the availability of each piece.
            if self.have[i]:
                self.downloader.picker.got_have(i)
        if self.downloader.picker.am_I_complete() and self.have.numfalse == 0:
            # Both this client and the peer have every piece, so close.
            self.connection.close()
            return
        if self.downloader.storage.is_endgame():
            for piece, begin, length in self.downloader.all_requests:
                # Endgame: express interest if the peer has any block we are
                # still missing.
                if self.have[piece]:
                    self.interested = True
                    self.connection.send_interested()
                    return
        for i in xrange(len(self.have)):
            # This peer has a piece that we want, so express interest.
            if self.have[i] and self.downloader.storage.do_I_have_requests(i):
                self.interested = True
                self.connection.send_interested()
                return

    def get_rate(self):
        return self.measure.get_rate()

    def is_snubbed(self):
        # Whether significant time has gone by without data from the peer.
        return time() - self.last > self.downloader.snub_time
class RateLimiter:
    """Round-robin upload scheduler that caps the aggregate upload rate.

    Queued connections form a circular linked list via their ``next_upload``
    attribute; ``self.last`` points at the tail. When autoadjust is enabled,
    ping latency samples drive the rate and slot count up or down.
    """

    def __init__(self, sched, unitsize, slotsfunc=lambda x: None):
        self.sched = sched
        self.last = None
        self.unitsize = unitsize
        self.slotsfunc = slotsfunc
        self.measure = Measure(MAX_RATE_PERIOD)
        self.autoadjust = False
        # Stored in bytes/sec (input rates are in kB/s, hence * 1000).
        self.upload_rate = MAX_RATE * 1000
        self.slots = SLOTS_STARTING  # meaningless unless autoadjust is on

    def set_upload_rate(self, rate):
        # A negative rate enables automatic adjustment.
        if rate < 0:
            if self.autoadjust:
                return
            self.autoadjust = True
            self.autoadjustup = 0
            self.pings = []
            rate = MAX_RATE
            self.slots = SLOTS_STARTING
            self.slotsfunc(self.slots)
        else:
            self.autoadjust = False
        if not rate:
            # A rate of 0 means unlimited.
            rate = MAX_RATE
        self.upload_rate = rate * 1000
        self.lasttime = clock()
        self.bytes_sent = 0

    def queue(self, conn):
        # conn must not already be in the rotation.
        assert conn.next_upload is None
        if self.last is None:
            # First entry: a one-element circular list; start sending now.
            self.last = conn
            conn.next_upload = conn
            self.try_send(True)
        else:
            # Insert conn after the tail.
            conn.next_upload = self.last.next_upload
            self.last.next_upload = conn
            # 2fastbt_
            # Coordinator connections are not made the tail, so they are
            # served first on the next rotation.
            if not conn.connection.is_coordinator_con():
                self.last = conn
            # _2fastbt

    def try_send(self, check_time=False):
        """Send data around the rotation until the rate budget is used up."""
        t = clock()
        # Credit the budget for the elapsed time (bytes_sent <= 0 means we
        # may send).
        self.bytes_sent -= (t - self.lasttime) * self.upload_rate
        self.lasttime = t
        if check_time:
            # Don't accumulate unbounded credit while idle.
            self.bytes_sent = max(self.bytes_sent, 0)
        cur = self.last.next_upload
        while self.bytes_sent <= 0:
            bytes = cur.send_partial(self.unitsize)
            self.bytes_sent += bytes
            self.measure.update_rate(bytes)
            if bytes == 0 or cur.backlogged():
                # cur has nothing to send (or its socket is backed up):
                # unlink it from the rotation.
                if self.last is cur:
                    # It was the only entry; rotation is now empty.
                    self.last = None
                    cur.next_upload = None
                    break
                else:
                    self.last.next_upload = cur.next_upload
                    cur.next_upload = None
                    cur = self.last.next_upload
            else:
                # 2fastbt_
                if not cur.connection.is_coordinator_con() or not cur.upload.buffer:
                    # _2fastbt
                    # Advance the rotation past cur.
                    self.last = cur
                    cur = cur.next_upload
                # 2fastbt_
                else:
                    # Coordinator connection with pending data: keep serving
                    # it without advancing.
                    pass
                # _2fastbt
        else:
            # Budget exhausted with the rotation non-empty: reschedule when
            # enough budget will have accrued.
            self.sched(self.try_send, self.bytes_sent / self.upload_rate)

    def adjust_sent(self, bytes):
        # Charge bytes sent outside try_send, capped at 3 seconds' worth.
        self.bytes_sent = min(self.bytes_sent + bytes, self.upload_rate * 3)
        self.measure.update_rate(bytes)

    def ping(self, delay):
        """Feed a latency sample into the automatic rate adjustment."""
        if DEBUG:
            print delay
        if not self.autoadjust:
            return
        self.pings.append(delay > PING_BOUNDARY)
        if len(self.pings) < PING_SAMPLES + PING_DISCARDS:
            return
        if DEBUG:
            print 'cycle'
        # Count slow pings, ignoring the warm-up samples.
        pings = sum(self.pings[PING_DISCARDS:])
        del self.pings[:]
        if pings >= PING_THRESHHOLD:
            # Too many slow pings: assume the uplink is flooded; back off.
            if self.upload_rate == MAX_RATE:
                self.upload_rate = self.measure.get_rate() * ADJUST_DOWN
            else:
                self.upload_rate = min(self.upload_rate,
                                       self.measure.get_rate() * 1.1)
            self.upload_rate = max(int(self.upload_rate * ADJUST_DOWN), 2)
            self.slots = int(sqrt(self.upload_rate * SLOTS_FACTOR))
            self.slotsfunc(self.slots)
            if DEBUG:
                print 'adjust down to ' + str(self.upload_rate)
            self.lasttime = clock()
            self.bytes_sent = 0
            self.autoadjustup = UP_DELAY_FIRST
        else:
            # Not flooded: ramp back up, but only after a hold-off period.
            if self.upload_rate == MAX_RATE:
                return
            self.autoadjustup -= 1
            if self.autoadjustup:
                return
            self.upload_rate = int(self.upload_rate * ADJUST_UP)
            self.slots = int(sqrt(self.upload_rate * SLOTS_FACTOR))
            self.slotsfunc(self.slots)
            if DEBUG:
                print 'adjust up to ' + str(self.upload_rate)
            self.lasttime = clock()
            self.bytes_sent = 0
            self.autoadjustup = UP_DELAY_NEXT
class RateLimiter:
    """Round-robin upload scheduler that caps the aggregate upload rate.

    Queued connections form a circular linked list via their ``next_upload``
    attribute; ``self.last`` points at the tail. When autoadjust is enabled,
    ping latency samples drive the rate and slot count up or down. This
    variant logs to stderr and caps the reschedule delay at 5 seconds.
    """

    def __init__(self, sched, unitsize, slotsfunc=lambda x: None):
        self.sched = sched
        self.last = None
        self.unitsize = unitsize
        self.slotsfunc = slotsfunc
        self.measure = Measure(MAX_RATE_PERIOD)
        self.autoadjust = False
        # Stored in bytes/sec (input rates are in kB/s, hence * 1000).
        self.upload_rate = MAX_RATE * 1000
        self.slots = SLOTS_STARTING

    def set_upload_rate(self, rate):
        if DEBUG:
            print >> sys.stderr, 'RateLimiter: set_upload_rate', rate
        # A negative rate enables automatic adjustment.
        if rate < 0:
            if self.autoadjust:
                return
            self.autoadjust = True
            self.autoadjustup = 0
            self.pings = []
            rate = MAX_RATE
            self.slots = SLOTS_STARTING
            self.slotsfunc(self.slots)
        else:
            self.autoadjust = False
        if not rate:
            # A rate of 0 means unlimited.
            rate = MAX_RATE
        self.upload_rate = rate * 1000
        self.lasttime = clock()
        self.bytes_sent = 0

    def queue(self, conn):
        if DEBUG:
            print >> sys.stderr, 'RateLimiter: queue', conn
        if self.last is None:
            # First entry: a one-element circular list; start sending now.
            self.last = conn
            conn.next_upload = conn
            self.try_send(True)
        else:
            # Insert conn after the tail. Coordinator connections are not
            # made the tail, so they are served first on the next rotation.
            conn.next_upload = self.last.next_upload
            self.last.next_upload = conn
            if not conn.connection.is_coordinator_con():
                self.last = conn

    def try_send(self, check_time=False):
        """Send data around the rotation until the rate budget is used up."""
        if DEBUG:
            print >> sys.stderr, 'RateLimiter: try_send'
        t = clock()
        # Credit the budget for the elapsed time (bytes_sent <= 0 means we
        # may send).
        self.bytes_sent -= (t - self.lasttime) * self.upload_rate
        self.lasttime = t
        if check_time:
            # Don't accumulate unbounded credit while idle.
            self.bytes_sent = max(self.bytes_sent, 0)
        cur = self.last.next_upload
        while self.bytes_sent <= 0:
            bytes = cur.send_partial(self.unitsize)
            self.bytes_sent += bytes
            self.measure.update_rate(bytes)
            if bytes == 0 or cur.backlogged():
                # cur has nothing to send (or its socket is backed up):
                # unlink it from the rotation.
                if self.last is cur:
                    # It was the only entry; rotation is now empty.
                    self.last = None
                    cur.next_upload = None
                    break
                else:
                    self.last.next_upload = cur.next_upload
                    cur.next_upload = None
                    cur = self.last.next_upload
            elif not cur.connection.is_coordinator_con() or not cur.upload.buffer:
                # Advance the rotation past cur. (A coordinator connection
                # with pending data is kept in place and served again.)
                self.last = cur
                cur = cur.next_upload
        else:
            # Budget exhausted with the rotation non-empty: reschedule when
            # enough budget will have accrued, but no later than 5 seconds.
            delay = min(5.0, self.bytes_sent / self.upload_rate)
            self.sched(self.try_send, delay)

    def adjust_sent(self, bytes):
        # Charge bytes sent outside try_send, capped at 3 seconds' worth.
        self.bytes_sent = min(self.bytes_sent + bytes, self.upload_rate * 3)
        self.measure.update_rate(bytes)

    def ping(self, delay):
        """Feed a latency sample into the automatic rate adjustment."""
        if DEBUG:
            print >> sys.stderr, delay
        if not self.autoadjust:
            return
        self.pings.append(delay > PING_BOUNDARY)
        if len(self.pings) < PING_SAMPLES + PING_DISCARDS:
            return
        if DEBUG:
            print >> sys.stderr, 'RateLimiter: cycle'
        # Count slow pings, ignoring the warm-up samples.
        pings = sum(self.pings[PING_DISCARDS:])
        del self.pings[:]
        if pings >= PING_THRESHHOLD:
            # Too many slow pings: assume the uplink is flooded; back off.
            if self.upload_rate == MAX_RATE:
                self.upload_rate = self.measure.get_rate() * ADJUST_DOWN
            else:
                self.upload_rate = min(self.upload_rate,
                                       self.measure.get_rate() * 1.1)
            self.upload_rate = max(int(self.upload_rate * ADJUST_DOWN), 2)
            self.slots = int(sqrt(self.upload_rate * SLOTS_FACTOR))
            self.slotsfunc(self.slots)
            if DEBUG:
                print >> sys.stderr, 'RateLimiter: adjust down to ' + str(
                    self.upload_rate)
            self.lasttime = clock()
            self.bytes_sent = 0
            self.autoadjustup = UP_DELAY_FIRST
        else:
            # Not flooded: ramp back up, but only after a hold-off period.
            if self.upload_rate == MAX_RATE:
                return
            self.autoadjustup -= 1
            if self.autoadjustup:
                return
            self.upload_rate = int(self.upload_rate * ADJUST_UP)
            self.slots = int(sqrt(self.upload_rate * SLOTS_FACTOR))
            self.slotsfunc(self.slots)
            if DEBUG:
                print >> sys.stderr, 'RateLimiter: adjust up to ' + str(
                    self.upload_rate)
            self.lasttime = clock()
            self.bytes_sent = 0
            self.autoadjustup = UP_DELAY_NEXT
class SingleDownload:
    """Downloads piece data from a single peer; one instance per connection."""

    def __init__(self, downloader, connection):
        self.downloader = downloader
        self.connection = connection
        # Whether the peer is choking this client.
        self.choked = True
        # Whether this client is interested in data the peer has.
        self.interested = False
        # The (index, begin, length) tuples requested from the peer.
        self.active_requests = []
        # Measures the download rate from the peer.
        self.measure = Measure(downloader.max_rate_period)
        # The pieces the peer has.
        self.have = Bitfield(downloader.numpieces)
        # The last time this client has gotten data from the peer.
        self.last = 0
        self.example_interest = None

    def disconnected(self):
        self.downloader.downloads.remove(self)
        # Decrement the availability of each piece this peer had.
        for i in xrange(len(self.have)):
            if self.have[i]:
                self.downloader.picker.lost_have(i)
        self._letgo()

    def _letgo(self):
        """Release all outstanding requests and redistribute them to other peers."""
        if not self.active_requests:
            return
        if self.downloader.storage.is_endgame():
            # In endgame mode these blocks are requested from others anyway.
            self.active_requests = []
            return
        # Piece indexes this client was requesting from the peer.
        lost = []
        for index, begin, length in self.active_requests:
            # No longer downloading this block.
            self.downloader.storage.request_lost(index, begin, length)
            if index not in lost:
                lost.append(index)
        self.active_requests = []
        # Other SingleDownload instances that are not choking us.
        ds = [d for d in self.downloader.downloads if not d.choked]
        shuffle(ds)
        for d in ds:
            d._request_more(lost)
        for d in self.downloader.downloads:
            # The instances that are choking us and that we aren't yet
            # interested in.
            if d.choked and not d.interested:
                for l in lost:
                    if d.have[l] and self.downloader.storage.do_I_have_requests(l):
                        # That peer has a piece we just lost a source for, so
                        # become interested in it.
                        d.interested = True
                        d.connection.send_interested()
                        break

    def got_choke(self):
        if not self.choked:
            # The peer choked this client.
            self.choked = True
            self._letgo()

    def got_unchoke(self):
        if self.choked:
            # The peer unchoked this client.
            self.choked = False
            if self.interested:
                # This peer has data we want, so request it.
                self._request_more()

    def is_choked(self):
        return self.choked

    def is_interested(self):
        return self.interested

    def got_piece(self, index, begin, piece):
        """Handle a received block; returns True when the piece is complete."""
        try:
            # This active request to the peer has been fulfilled.
            self.active_requests.remove((index, begin, len(piece)))
        except ValueError:
            # Not a block we asked this peer for; ignore it.
            return False
        if self.downloader.storage.is_endgame():
            # Remove it from the consolidated list of blocks sent to all peers.
            self.downloader.all_requests.remove((index, begin, len(piece)))
        # Update the download-rate measurements.
        self.last = time()
        self.measure.update_rate(len(piece))
        self.downloader.measurefunc(len(piece))
        self.downloader.downmeasure.update_rate(len(piece))
        if not self.downloader.storage.piece_came_in(index, begin, piece):
            # This block completed a piece but the piece failed validation.
            if self.downloader.storage.is_endgame():
                # Re-issue requests for the failed piece to everyone.
                while self.downloader.storage.do_I_have_requests(index):
                    nb, nl = self.downloader.storage.new_request(index)
                    self.downloader.all_requests.append((index, nb, nl))
                for d in self.downloader.downloads:
                    d.fix_download_endgame()
                return False
            # Deprioritize the failed piece, then retry it on unchoked peers.
            self.downloader.picker.bump(index)
            ds = [d for d in self.downloader.downloads if not d.choked]
            shuffle(ds)
            for d in ds:
                d._request_more([index])
            return False
        if self.downloader.storage.do_I_have(index):
            # Notify the picker that this piece is complete.
            self.downloader.picker.complete(index)
        if self.downloader.storage.is_endgame():
            for d in self.downloader.downloads:
                if d is not self and d.interested:
                    if d.choked:
                        # Keep requesting blocks pending with other peers.
                        d.fix_download_endgame()
                    else:
                        # Cancel the duplicate request for this block.
                        try:
                            d.active_requests.remove(
                                (index, begin, len(piece)))
                        except ValueError:
                            # Wasn't requesting this block from that peer.
                            continue
                        d.connection.send_cancel(index, begin, len(piece))
                        d.fix_download_endgame()
        self._request_more()
        if self.downloader.picker.am_I_complete():
            # Download finished: drop peers that are also complete (seeds).
            for d in [i for i in self.downloader.downloads
                      if i.have.numfalse == 0]:
                d.connection.close()
        return self.downloader.storage.do_I_have(index)

    def _want(self, index):
        # Want a piece if the peer has it and blocks of it are still needed.
        return self.have[index] and self.downloader.storage.do_I_have_requests(
            index)

    def _request_more(self, indices=None):
        """Queue more block requests to this peer, up to the backlog limit."""
        assert not self.choked
        # Already at the maximum outstanding requests to this peer.
        if len(self.active_requests) == self.downloader.backlog:
            return
        if self.downloader.storage.is_endgame():
            # Keep requesting blocks pending with other peers.
            self.fix_download_endgame()
            return
        lost_interests = []
        while len(self.active_requests) < self.downloader.backlog:
            if indices is None:
                # No specific indexes given; let the picker choose a piece.
                interest = self.downloader.picker.next(self._want,
                                                       self.have.numfalse == 0)
            else:
                # Pick a piece from one of the given indexes to download.
                interest = None
                for i in indices:
                    if self.have[i] and self.downloader.storage.do_I_have_requests(i):
                        interest = i
                        break
            if interest is None:
                # Nothing we want from this peer; stop.
                break
            if not self.interested:
                # Found a piece we want; declare interest to the peer.
                self.interested = True
                self.connection.send_interested()
            self.example_interest = interest
            # Reserve a block of the piece and request it.
            begin, length = self.downloader.storage.new_request(interest)
            self.downloader.picker.requested(interest, self.have.numfalse == 0)
            self.active_requests.append((interest, begin, length))
            self.connection.send_request(interest, begin, length)
            if not self.downloader.storage.do_I_have_requests(interest):
                # No blocks of this piece left to hand out; other peers whose
                # interest hinged on it may need re-evaluation below.
                lost_interests.append(interest)
        if not self.active_requests and self.interested:
            # Peer has no pieces this client wants, so no longer interested.
            self.interested = False
            self.connection.send_not_interested()
        if lost_interests:
            for d in self.downloader.downloads:
                if d.active_requests or not d.interested:
                    continue
                if d.example_interest is not None and \
                        self.downloader.storage.do_I_have_requests(d.example_interest):
                    # d's recorded reason for interest is still valid.
                    continue
                for lost in lost_interests:
                    if d.have[lost]:
                        break
                else:
                    continue
                # Re-pick what d should be interested in now.
                interest = self.downloader.picker.next(d._want,
                                                       d.have.numfalse == 0)
                if interest is None:
                    d.interested = False
                    d.connection.send_not_interested()
                else:
                    d.example_interest = interest
        if self.downloader.storage.is_endgame():
            # Entering endgame mode: consolidate all outstanding requests and
            # mirror them to every peer.
            self.downloader.all_requests = []
            for d in self.downloader.downloads:
                self.downloader.all_requests.extend(d.active_requests)
            for d in self.downloader.downloads:
                d.fix_download_endgame()

    def fix_download_endgame(self):
        """In endgame mode, request blocks that are pending with other peers."""
        # Blocks this peer has which we're requesting from other peers.
        want = [
            a for a in self.downloader.all_requests
            if self.have[a[0]] and a not in self.active_requests
        ]
        if self.interested and not self.active_requests and not want:
            # Nothing to get from this peer at all; become uninterested.
            self.interested = False
            self.connection.send_not_interested()
            return
        if not self.interested and want:
            self.interested = True
            self.connection.send_interested()
        if self.choked:
            # Peer is choking us, so we can't send any requests yet.
            return
        # Don't exceed the maximum number of outstanding requests.
        shuffle(want)
        del want[self.downloader.backlog - len(self.active_requests):]
        self.active_requests.extend(want)
        for piece, begin, length in want:
            self.connection.send_request(piece, begin, length)

    def got_have(self, index):
        if self.have[index]:
            # Already knew the peer has this piece.
            return
        self.have[index] = True
        # Increase the availability of this piece.
        self.downloader.picker.got_have(index)
        if self.downloader.picker.am_I_complete() and self.have.numfalse == 0:
            # Both sides have every piece, so close.
            self.connection.close()
            return
        if self.downloader.storage.is_endgame():
            # Keep requesting blocks pending with other peers.
            self.fix_download_endgame()
        elif self.downloader.storage.do_I_have_requests(index):
            if not self.choked:
                self._request_more([index])
            else:
                # The peer is choking us; just express interest for now.
                if not self.interested:
                    self.interested = True
                    self.connection.send_interested()

    def got_have_bitfield(self, have):
        # Assign the full bitfield of pieces the peer has.
        self.have = have
        for i in xrange(len(self.have)):
            # Increase the availability of each piece.
            if self.have[i]:
                self.downloader.picker.got_have(i)
        if self.downloader.picker.am_I_complete() and self.have.numfalse == 0:
            # Both sides have every piece, so close.
            self.connection.close()
            return
        if self.downloader.storage.is_endgame():
            for piece, begin, length in self.downloader.all_requests:
                # Endgame: express interest if the peer has any block we are
                # still missing.
                if self.have[piece]:
                    self.interested = True
                    self.connection.send_interested()
                    return
        for i in xrange(len(self.have)):
            # This peer has a piece that we want, so express interest.
            if self.have[i] and self.downloader.storage.do_I_have_requests(i):
                self.interested = True
                self.connection.send_interested()
                return

    def get_rate(self):
        return self.measure.get_rate()

    def is_snubbed(self):
        # Whether significant time has gone by without data from the peer.
        return time() - self.last > self.downloader.snub_time
class Upload:
    """Server side of a peer connection: feeds requested blocks to one peer."""

    def __init__(self, connection, choker, storage, max_slice_length,
                 max_rate_period, fudge):
        self.connection = connection
        self.choker = choker
        self.storage = storage
        self.max_slice_length = max_slice_length
        self.max_rate_period = max_rate_period
        # True while this client is choking the peer.
        self.choked = True
        # True while the peer has declared interest in our data.
        self.interested = False
        # Outstanding (index, begin, length) requests from the peer.
        self.buffer = []
        self.measure = Measure(max_rate_period, fudge)
        # Let the peer know which pieces we already hold.
        if storage.do_I_have_anything():
            connection.send_bitfield(storage.get_have_list())

    def got_not_interested(self):
        # Peer withdrew its interest: forget anything it still had queued.
        if not self.interested:
            return
        self.interested = False
        del self.buffer[:]
        self.choker.not_interested(self.connection)

    def got_interested(self):
        if self.interested:
            return
        self.interested = True
        self.choker.interested(self.connection)

    def flushed(self):
        # Drain queued requests for as long as the transport will take them.
        while self.buffer and self.connection.is_flushed():
            index, begin, length = self.buffer.pop(0)
            piece = self.storage.get_piece(index, begin, length)
            if piece is None:
                # Peer asked for data we cannot serve: drop the connection.
                self.connection.close()
                return
            self.measure.update_rate(len(piece))
            self.connection.send_piece(index, begin, piece)

    def got_request(self, index, begin, length):
        if not self.interested or length > self.max_slice_length:
            # Protocol violation: a request before 'interested', or one that
            # is larger than we allow.
            self.connection.close()
        elif not self.choked:
            # Queue the request and try to satisfy it right away.
            self.buffer.append((index, begin, length))
            self.flushed()

    def got_cancel(self, index, begin, length):
        # Peer retracted a request; ignore if unknown or already sent.
        try:
            self.buffer.remove((index, begin, length))
        except ValueError:
            pass

    def choke(self):
        if self.choked:
            return
        self.choked = True
        # A choke invalidates everything the peer had queued.
        del self.buffer[:]
        # Tell the peer it is now choked.
        self.connection.send_choke()

    def unchoke(self):
        if not self.choked:
            return
        self.choked = False
        # Tell the peer it may request data again.
        self.connection.send_unchoke()

    def is_choked(self):
        return self.choked

    def is_interested(self):
        return self.interested

    def has_queries(self):
        return len(self.buffer) > 0

    def get_rate(self):
        return self.measure.get_rate()