class KRateLimiter: # special rate limiter that drops entries that have been sitting in the queue for longer than self.age seconds # by default we toss anything that has less than 5 seconds to live def __init__(self, transport, rate, call_later, rlcount, rate_period, age=(KRPC_TIMEOUT - 5)): self.q = [] self.transport = transport self.rate = rate self.curr = 0 self.running = False self.age = age self.last = 0 self.call_later = call_later self.rlcount = rlcount self.measure = Measure(rate_period) self.sent=self.dropped=0 if self.rate == 0: self.rate = 1e10 def sendto(self, s, i, addr): self.q.append((time(), (s, i, addr))) if not self.running: self.run(check=True) def run(self, check=False): t = time() self.expire(t) self.curr -= (t - self.last) * self.rate self.last = t if check: self.curr = max(self.curr, 0 - self.rate) shuffle(self.q) while self.q and self.curr <= 0: x, tup = self.q.pop() size = len(tup[0]) self.curr += size try: self.transport.sendto(*tup) self.sent+=1 self.rlcount(size) self.measure.update_rate(size) except: if tup[2][1] != 0: print ">>> sendto exception", tup print_exc() self.q.sort() if self.q or self.curr > 0: self.running = True # sleep for at least a half second self.call_later(max(self.curr / self.rate, 0.5), self.run) else: self.running = False def expire(self, t=time()): if self.q: expire_time = t - self.age while self.q and self.q[0][0] < expire_time: self.q.pop(0) self.dropped+=1
class SingleDownload(object):
    """Download-side state machine for one peer connection.

    Tracks choke/interest state, the peer's piece bitfield and the set of
    outstanding block requests, coordinating with the shared Downloader,
    picker and storage objects.
    """

    def __init__(self, downloader, connection):
        self.downloader = downloader
        self.connection = connection
        # the remote peer starts out choking us; we start out uninterested
        self.choked = True
        self.interested = False
        # outstanding (index, begin, length) block requests
        self.active_requests = []
        self.measure = Measure(downloader.config['max_rate_period'])
        self.peermeasure = Measure(max(downloader.storage.piece_size / 10000, 20))
        self.have = Bitfield(downloader.numpieces)
        self.last = 0                  # time the last block arrived
        self.example_interest = None   # a piece we are known to want
        self.backlog = 2               # current request-pipeline depth
        self.guard = BadDataGuard(self)

    def _backlog(self):
        # scale the request pipeline with the measured download rate
        backlog = 2 + int(4 * self.measure.get_rate() / self.downloader.chunksize)
        if backlog > 50:
            backlog = max(50, int(.075 * backlog))
        self.backlog = backlog
        return backlog

    def disconnected(self):
        """Undo this peer's contribution to the global state on disconnect."""
        self.downloader.lost_peer(self)
        for i in xrange(len(self.have)):
            if self.have[i]:
                self.downloader.picker.lost_have(i)
        self._letgo()
        self.guard.download = None

    def _letgo(self):
        """Release outstanding requests so other peers can pick them up."""
        if not self.active_requests:
            return
        if self.downloader.storage.endgame:
            # in endgame every request is already duplicated elsewhere
            self.active_requests = []
            return
        lost = []
        for index, begin, length in self.active_requests:
            self.downloader.storage.request_lost(index, begin, length)
            if index not in lost:
                lost.append(index)
        self.active_requests = []
        # give unchoked peers, in random order, a chance at the lost pieces
        ds = [d for d in self.downloader.downloads if not d.choked]
        shuffle(ds)
        for d in ds:
            d._request_more(lost)
        # choked, uninterested peers may now have something we want
        for d in self.downloader.downloads:
            if d.choked and not d.interested:
                for l in lost:
                    if d.have[l] and self.downloader.storage.do_I_have_requests(l):
                        d.interested = True
                        d.connection.send_interested()
                        break

    def got_choke(self):
        if not self.choked:
            self.choked = True
            self._letgo()

    def got_unchoke(self):
        if self.choked:
            self.choked = False
            if self.interested:
                self._request_more()

    def got_piece(self, index, begin, piece):
        """Handle a received block; return whether the whole piece is done."""
        try:
            self.active_requests.remove((index, begin, len(piece)))
        except ValueError:
            # a block we did not request (or already gave up on)
            self.downloader.discarded_bytes += len(piece)
            return False
        if self.downloader.storage.endgame:
            self.downloader.all_requests.remove((index, begin, len(piece)))
        self.last = bttime()
        self.measure.update_rate(len(piece))
        self.downloader.measurefunc(len(piece))
        self.downloader.downmeasure.update_rate(len(piece))
        if not self.downloader.storage.piece_came_in(index, begin, piece, self.guard):
            # hash check failed: re-issue the requests for this piece
            if self.downloader.storage.endgame:
                while self.downloader.storage.do_I_have_requests(index):
                    nb, nl = self.downloader.storage.new_request(index)
                    self.downloader.all_requests.append((index, nb, nl))
                for d in self.downloader.downloads:
                    d.fix_download_endgame()
                return False
            ds = [d for d in self.downloader.downloads if not d.choked]
            shuffle(ds)
            for d in ds:
                d._request_more([index])
            return False
        if self.downloader.storage.do_I_have(index):
            self.downloader.picker.complete(index)
        if self.downloader.storage.endgame:
            # cancel duplicates of this block held by other peers
            for d in self.downloader.downloads:
                if d is not self and d.interested:
                    if d.choked:
                        d.fix_download_endgame()
                    else:
                        try:
                            d.active_requests.remove((index, begin, len(piece)))
                        except ValueError:
                            continue
                        d.connection.send_cancel(index, begin, len(piece))
                        d.fix_download_endgame()
        self._request_more()
        if self.downloader.picker.am_I_complete():
            # finished: drop connections to other seeds
            for d in [i for i in self.downloader.downloads if i.have.numfalse == 0]:
                d.connection.close()
        return self.downloader.storage.do_I_have(index)

    def _want(self, index):
        # we want a piece the peer has and for which requests remain
        return self.have[index] and self.downloader.storage.do_I_have_requests(index)

    def _request_more(self, indices = None):
        """Fill the request pipeline up to the backlog.

        When ``indices`` is given, picking is restricted to those pieces.
        """
        assert not self.choked
        if len(self.active_requests) >= self._backlog():
            return
        if self.downloader.storage.endgame:
            self.fix_download_endgame()
            return
        lost_interests = []
        while len(self.active_requests) < self.backlog:
            if indices is None:
                interest = self.downloader.picker.next(self._want, self.have.numfalse == 0)
            else:
                interest = None
                for i in indices:
                    if self.have[i] and self.downloader.storage.do_I_have_requests(i):
                        interest = i
                        break
            if interest is None:
                break
            if not self.interested:
                self.interested = True
                self.connection.send_interested()
            self.example_interest = interest
            self.downloader.picker.requested(interest, self.have.numfalse == 0)
            # request as many blocks of this piece as the pipeline allows
            while len(self.active_requests) < (self.backlog-2) * 5 + 2:
                begin, length = self.downloader.storage.new_request(interest)
                self.active_requests.append((interest, begin, length))
                self.connection.send_request(interest, begin, length)
                if not self.downloader.storage.do_I_have_requests(interest):
                    # piece fully requested: it is no longer "interesting"
                    lost_interests.append(interest)
                    break
        if not self.active_requests and self.interested:
            self.interested = False
            self.connection.send_not_interested()
        if lost_interests:
            # fully-requested pieces may leave other peers with nothing to
            # offer; re-evaluate their interest
            for d in self.downloader.downloads:
                if d.active_requests or not d.interested:
                    continue
                if d.example_interest is not None and self.downloader.storage.do_I_have_requests(d.example_interest):
                    continue
                for lost in lost_interests:
                    if d.have[lost]:
                        break
                else:
                    continue
                interest = self.downloader.picker.next(d._want, d.have.numfalse == 0)
                if interest is None:
                    d.interested = False
                    d.connection.send_not_interested()
                else:
                    d.example_interest = interest
        if self.downloader.storage.endgame:
            # just entered endgame: replicate every outstanding request
            self.downloader.all_requests = []
            for d in self.downloader.downloads:
                self.downloader.all_requests.extend(d.active_requests)
            for d in self.downloader.downloads:
                d.fix_download_endgame()

    def fix_download_endgame(self):
        """Endgame mode: request any outstanding block this peer can supply."""
        want = [a for a in self.downloader.all_requests if self.have[a[0]] and a not in self.active_requests]
        if self.interested and not self.active_requests and not want:
            self.interested = False
            self.connection.send_not_interested()
            return
        if not self.interested and want:
            self.interested = True
            self.connection.send_interested()
        if self.choked or len(self.active_requests) >= self._backlog():
            return
        shuffle(want)
        # keep only as many as fit in the remaining backlog
        del want[self.backlog - len(self.active_requests):]
        self.active_requests.extend(want)
        for piece, begin, length in want:
            self.connection.send_request(piece, begin, length)

    def got_have(self, index):
        if self.have[index]:
            return
        if index == self.downloader.numpieces-1:
            # the final piece may be shorter than piece_size
            self.peermeasure.update_rate(self.downloader.storage.total_length-
                (self.downloader.numpieces-1)*self.downloader.storage.piece_size)
        else:
            self.peermeasure.update_rate(self.downloader.storage.piece_size)
        self.have[index] = True
        self.downloader.picker.got_have(index)
        if self.downloader.picker.am_I_complete() and self.have.numfalse == 0:
            # two seeds have nothing to exchange
            self.connection.close()
            return
        if self.downloader.storage.endgame:
            self.fix_download_endgame()
        elif self.downloader.storage.do_I_have_requests(index):
            if not self.choked:
                self._request_more([index])
            else:
                if not self.interested:
                    self.interested = True
                    self.connection.send_interested()

    def got_have_bitfield(self, have):
        if self.downloader.picker.am_I_complete() and have.numfalse == 0:
            self.connection.close()
            return
        self.have = have
        for i in xrange(len(self.have)):
            if self.have[i]:
                self.downloader.picker.got_have(i)
        if self.downloader.storage.endgame:
            for piece, begin, length in self.downloader.all_requests:
                if self.have[piece]:
                    self.interested = True
                    self.connection.send_interested()
                    return
        for i in xrange(len(self.have)):
            if self.have[i] and self.downloader.storage.do_I_have_requests(i):
                self.interested = True
                self.connection.send_interested()
                return

    def get_rate(self):
        return self.measure.get_rate()

    def is_snubbed(self):
        # the peer is snubbing us if it has not delivered a block recently
        return bttime() - self.last > self.downloader.snub_time
class KRateLimiter: # special rate limiter that drops entries that have been sitting in the queue for longer than self.age seconds # by default we toss anything that has less than 5 seconds to live def __init__(self, transport, rate, call_later, rlcount, rate_period, age=(KRPC_TIMEOUT - 5)): self.q = [] self.transport = transport self.rate = rate self.curr = 0 self.running = False self.age = age self.last = 0 self.call_later = call_later self.rlcount = rlcount self.measure = Measure(rate_period) self.sent = self.dropped = 0 if self.rate == 0: self.rate = 1e10 def sendto(self, s, i, addr): self.q.append((time(), (s, i, addr))) if not self.running: self.run(check=True) def run(self, check=False): t = time() self.expire(t) self.curr -= (t - self.last) * self.rate self.last = t if check: self.curr = max(self.curr, 0 - self.rate) shuffle(self.q) while self.q and self.curr <= 0: x, tup = self.q.pop() size = len(tup[0]) self.curr += size try: self.transport.sendto(*tup) self.sent += 1 self.rlcount(size) self.measure.update_rate(size) except: if tup[2][1] != 0: print ">>> sendto exception", tup print_exc() self.q.sort() if self.q or self.curr > 0: self.running = True # sleep for at least a half second self.call_later(max(self.curr / self.rate, 0.5), self.run) else: self.running = False def expire(self, t=time()): if self.q: expire_time = t - self.age while self.q and self.q[0][0] < expire_time: self.q.pop(0) self.dropped += 1
class Download(object): """Implements BitTorrent protocol semantics for downloading over a single connection. See Upload for the protocol semantics in the upload direction. See Connector for the protocol syntax implementation.""" def __init__(self, multidownload, connector): self.multidownload = multidownload self.connector = connector self.choked = True self.interested = False self.prefer_full = False self.active_requests = set() self.expecting_reject = set() self.intro_size = self.multidownload.chunksize * 4 # just a guess self.measure = Measure(multidownload.config['max_rate_period']) self.peermeasure = Measure( max(multidownload.storage.piece_size / 10000, 20)) self.have = Bitfield(multidownload.numpieces) self.last = 0 self.example_interest = None self.guard = BadDataGuard(self) self.suggested_pieces = [] self.allowed_fast_pieces = [] self._useful_received_listeners = set() self._raw_received_listeners = set() self.add_useful_received_listener(self.measure.update_rate) self.total_bytes = 0 self.add_useful_received_listener(self.accumulate_total) self.payment_key_status ={} #hash with retries of sending payment key: key=(idx,offset,len) val ("statustring",retries) #todo move to multidownload, and later to seperate package self.peer_certificate = None self.own_certificate_is_sent_to_peer = False def accumulate_total(self, x): self.total_bytes += x def add_useful_received_listener(self, listener): # "useful received bytes are used in measuring goodput. 
self._useful_received_listeners.add(listener) def remove_useful_received_listener(self, listener): self._useful_received_listeners.remove(listener) def fire_useful_received_listeners(self, bytes): for f in self._useful_received_listeners: f(bytes) def add_raw_received_listener(self, listener): self._raw_received_listeners.add(listener) def remove_raw_received_listener(self, listener): self._raw_received_listeners.remove(listener) def fire_raw_received_listeners(self, bytes): for f in self._raw_received_listeners: f(bytes) def _backlog(self): # Dave's suggestion: # backlog = 2 + thruput delay product in chunks. # Assume one-way download propagation delay is always less than 200ms. # backlog = 2 + int(0.2 * self.measure.get_rate() / # self.multidownload.chunksize # Then eliminate the cap of 50 and the 0.075*backlog. backlog = 2 + int(4 * self.measure.get_rate() / self.multidownload.chunksize) if self.total_bytes < self.intro_size: # optimistic backlog to get things started backlog = max(10, backlog) if backlog > 50: backlog = max(50, int(.075 * backlog)) if self.multidownload.rm.endgame: # OPTIONAL: zero pipelining during endgame #b = 1 pass return backlog def disconnected(self): self.multidownload.lost_peer(self) if self.have.numfalse == 0: self.multidownload.lost_have_all() else: # arg, slow count = 0 target = len(self.have) - self.have.numfalse for i in xrange(len(self.have)): if count == target: break if self.have[i]: self.multidownload.lost_have(i) count += 1 self._letgo() self.guard.download = None def _letgo(self): if not self.active_requests: return if self.multidownload.rm.endgame: self.active_requests.clear() return lost = [] for index, begin, length in self.active_requests: self.multidownload.rm.request_lost(index, begin, length) self.multidownload.active_requests_remove(index) if index not in lost: lost.append(index) self.active_requests.clear() ds = [d for d in self.multidownload.downloads if not d.choked] random.shuffle(ds) for d in ds: 
d._request_more(lost) for d in self.multidownload.downloads: if d.choked and not d.interested: for l in lost: if d._want(l): d.interested = True d.connector.send_interested() break def got_choke(self): if not self.choked: self.choked = True if not self.connector.uses_fast_extension: self._letgo() def got_unchoke(self): if self.choked: self.choked = False if self.interested: self._request_more() def got_cert(self, cert): """ checks certificate and stores it into an object from which the public public key can be read @param cert: a DER encoded X509 certificate string @return: the certificate object or None in case of an invalid certificate """ if self.peer_certificate and not cert: return self.peer_certificate print "before parsing der certficate in got_cert length= %d" % len(cert) peer_certificate = self.multidownload.pk_tools.parseDERCert_tls(cert) print "after parsing cert = %s" % str(peer_certificate) if peer_certificate == self.peer_certificate and self.peer_certificate is not None : #maybe also check if not none print "peer certificate already present so skipping check lencer = %d == len self.cert %d" % (len(peer_certificate),len(self.peer_certificate)) return peer_certificate else: cert_ok = self.multidownload.pk_tools.validate_in_mem_certificate(peer_certificate) if cert_ok: print "peer certificate validated" self.peer_certificate = peer_certificate return peer_certificate else: print "peer certificate NOT OK!!!" return None def got_mp_piece(self, index, begin, piece, sig): """ check if the certificate and the signature of the received piece message are OK if they are OK process piece with got_piece, otherwise send a empty keyreward message @param index: index of the piece (piece number), a piece is a block of which the has is in the meta file @param begin: offset of the subpiece is the piece, this is currently always zero, this enables to account a single piece hashcheck failing to single peer. 
@param piece: The piece itself @param sig: the signature of the piece , created by encrypting a zero padded sha-1 hash. @return: nothing """ if self.peer_certificate and self.peer_certificate.publicKey: sig_ok = self.multidownload.pk_tools.check_sha_signature_tls(self.peer_certificate.publicKey,sig,piece) print "after checking signature sig_OK is %d" % sig_ok if sig_ok: print "signature ok in got_mp_piece" self.got_piece(index,begin,piece) return else: print "signature not OK in got_mp_piece" #TODO maybe also increment retries else: print "no peer certificate or certificate.publickey" print "cert= %s\n\n\n publicKey= %s" % (str(self.peer_certificate),str(self.peer_certificate.publicKey)) self.send_key_reward(index,begin,len(piece),False) def got_piece(self, index, begin, piece): req = (index, begin, len(piece)) if req not in self.active_requests: self.multidownload.discarded_bytes += len(piece) if self.connector.uses_fast_extension: # getting a piece we sent a cancel for # is just like receiving a reject self.got_reject_request(*req) return self.active_requests.remove(req) # we still give the peer credit in endgame, since we did request # the piece (it was in active_requests) self.fire_useful_received_listeners(len(piece)) if self.multidownload.rm.endgame: if req not in self.multidownload.all_requests: self.multidownload.discarded_bytes += len(piece) return self.multidownload.all_requests.remove(req) for d in self.multidownload.downloads: if d.interested: if not d.choked and req in d.active_requests: d.connector.send_cancel(*req) d.active_requests.remove(req) if d.connector.uses_fast_extension: d.expecting_reject.add(req) d.fix_download_endgame() else: self._request_more() self.last = bttime() df = self.multidownload.storage.write(index, begin, piece, self.guard) df.addCallback(self._got_piece, index) df.addErrback(self.multidownload.errorfunc) def _got_piece(self, hashchecked, index): if hashchecked: self.multidownload.hashchecked(index) def _want(self, index): 
return (self.have[index] and self.multidownload.rm.want_requests(index)) def send_request(self, index, begin, length): piece_size = self.multidownload.storage.piece_size if begin + length > piece_size: raise ValueError("Issuing request that exceeds piece size: " "(%d + %d == %d) > %d" % (begin, length, begin + length, piece_size)) self.multidownload.active_requests_add(index) self.active_requests.add((index, begin, length)) if self.multidownload.micropayments: msg_to_sign = pack("!iii",index,begin,length) sig = self.multidownload.pk_tools.get_signature_tls(self.multidownload.private_key, msg_to_sign) if self.own_certificate_is_sent_to_peer: cert_to_send = "" else: cert_to_send = self.multidownload.certificate.writeBytes().tostring() self.own_certificate_is_sent_to_peer = True self.connector.upload.own_certificate_is_sent_to_peer = True self.connector.send_mp_request(index, begin, length,sig,cert_to_send) else: self.connector.send_request(index, begin, length) def _request_more(self, indices = []): if self.choked: self._request_when_choked() return #log( "_request_more.active_requests=%s" % self.active_requests ) b = self._backlog() if len(self.active_requests) >= b: return if self.multidownload.rm.endgame: self.fix_download_endgame() return self.suggested_pieces = [i for i in self.suggested_pieces if not self.multidownload.storage.do_I_have(i)] lost_interests = [] while len(self.active_requests) < b: if not indices: interest = self.multidownload.picker.next(self.have, self.multidownload.rm.active_requests, self.multidownload.rm.fully_active, self.suggested_pieces) else: interest = None for i in indices: if self._want(i): interest = i break if interest is None: break if not self.interested: self.interested = True self.connector.send_interested() # an example interest created by from_behind is preferable if self.example_interest is None: self.example_interest = interest # request as many chunks of interesting piece as fit in backlog. 
while len(self.active_requests) < b: begin, length = self.multidownload.rm.new_request(interest, self.prefer_full) self.send_request(interest, begin, length) if not self.multidownload.rm.want_requests(interest): lost_interests.append(interest) break if not self.active_requests and self.interested: self.interested = False self.connector.send_not_interested() self._check_lost_interests(lost_interests) self.multidownload.check_enter_endgame() def _check_lost_interests(self, lost_interests): """ Notify other downloads that these pieces are no longer interesting. @param lost_interests: list of pieces that have been fully requested. """ if not lost_interests: return for d in self.multidownload.downloads: if d.active_requests or not d.interested: continue if (d.example_interest is not None and self.multidownload.rm.want_requests(d.example_interest)): continue # any() does not exist until python 2.5 #if not any([d.have[lost] for lost in lost_interests]): # continue for lost in lost_interests: if d.have[lost]: break else: continue interest = self.multidownload.picker.from_behind(d.have, self.multidownload.rm.fully_active) if interest is None: d.interested = False d.connector.send_not_interested() else: d.example_interest = interest def _request_when_choked(self): self.allowed_fast_pieces = [i for i in self.allowed_fast_pieces if not self.multidownload.storage.do_I_have(i)] if not self.allowed_fast_pieces: return fast = list(self.allowed_fast_pieces) b = self._backlog() lost_interests = [] while len(self.active_requests) < b: while fast: piece = fast.pop() if self._want(piece): break else: break # no unrequested pieces among allowed fast. # request chunks until no more chunks or no more room in backlog. 
while len(self.active_requests) < b: begin, length = self.multidownload.rm.new_request(piece, self.prefer_full) self.send_request(piece, begin, length) if not self.multidownload.rm.want_requests(piece): lost_interests.append(piece) break self._check_lost_interests(lost_interests) self.multidownload.check_enter_endgame() def fix_download_endgame(self): want = [] for a in self.multidownload.all_requests: if not self.have[a[0]]: continue if a in self.active_requests: continue want.append(a) if self.interested and not self.active_requests and not want: self.interested = False self.connector.send_not_interested() return if not self.interested and want: self.interested = True self.connector.send_interested() if self.choked: return random.shuffle(want) for req in want[:self._backlog() - len(self.active_requests)]: self.send_request(*req) def got_have(self, index): if self.have[index]: return if index == self.multidownload.numpieces-1: self.peermeasure.update_rate(self.multidownload.storage.total_length- (self.multidownload.numpieces-1)*self.multidownload.storage.piece_size) else: self.peermeasure.update_rate(self.multidownload.storage.piece_size) self.have[index] = True self.multidownload.got_have(index) if (self.multidownload.storage.get_amount_left() == 0 and self.have.numfalse == 0): self.connector.close() return if self.multidownload.rm.endgame: self.fix_download_endgame() elif self.multidownload.rm.want_requests(index): self._request_more([index]) # call _request_more whether choked. 
if self.choked and not self.interested: self.interested = True self.connector.send_interested() def got_have_bitfield(self, have): if have.numfalse == 0: self._got_have_all(have) return self.have = have # arg, slow count = 0 target = len(self.have) - self.have.numfalse for i in xrange(len(self.have)): if count == target: break if self.have[i]: self.multidownload.got_have(i) count += 1 if self.multidownload.rm.endgame: for piece, begin, length in self.multidownload.all_requests: if self.have[piece]: self.interested = True self.connector.send_interested() return for piece in self.multidownload.rm.iter_want(): if self.have[piece]: self.interested = True self.connector.send_interested() return def _got_have_all(self, have=None): if self.multidownload.storage.get_amount_left() == 0: self.connector.close() return if have is None: # bleh n = self.multidownload.numpieces rlen, extra = divmod(n, 8) if extra: extra = chr((0xFF << (8 - extra)) & 0xFF) else: extra = '' s = (chr(0xFF) * rlen) + extra have = Bitfield(n, s) self.have = have self.multidownload.got_have_all() if self.multidownload.rm.endgame: for piece, begin, length in self.multidownload.all_requests: self.interested = True self.connector.send_interested() return for i in self.multidownload.rm.iter_want(): self.interested = True self.connector.send_interested() return def get_rate(self): return self.measure.get_rate() def is_snubbed(self): return bttime() - self.last > self.multidownload.snub_time def got_have_none(self): pass # currently no action is taken when have_none is received. # The picker already assumes the local peer has none of the # pieces until got_have is called. def got_have_all(self): assert self.connector.uses_fast_extension self._got_have_all() def got_suggest_piece(self, piece): assert self.connector.uses_fast_extension if not self.multidownload.storage.do_I_have(piece): self.suggested_pieces.append(piece) self._request_more() # try to request more. Just returns if choked. 
def got_allowed_fast(self,piece): """Upon receiving this message, the multidownload knows that it is allowed to download the specified piece even when choked.""" #log( "got_allowed_fast %d" % piece ) assert self.connector.uses_fast_extension if not self.multidownload.storage.do_I_have(piece): if piece not in self.allowed_fast_pieces: self.allowed_fast_pieces.append(piece) random.shuffle(self.allowed_fast_pieces) # O(n) but n is small. self._request_more() # will try to request. Handles cases like # whether neighbor has "allowed fast" piece. def got_reject_request(self, piece, begin, length): assert self.connector.uses_fast_extension req = (piece, begin, length) if req not in self.expecting_reject: if req not in self.active_requests: self.connector.protocol_violation("Reject received for " "piece not pending") self.connector.close() return self.active_requests.remove(req) else: self.expecting_reject.remove(req) if self.multidownload.rm.endgame: return self.multidownload.rm.request_lost(*req) if not self.choked: self._request_more() ds = [d for d in self.multidownload.downloads if not d.choked] random.shuffle(ds) for d in ds: d._request_more([piece]) for d in self.multidownload.downloads: if d.choked and not d.interested: if d._want(piece): d.interested = True d.connector.send_interested() break def got_key_reward_response(self,index, begin, length, result): """ Process a payment key response message. If the response indicates succesful reception of the key update status to "done". If the response indicates failure (invalid key or no key recvd) resend key. The keyreward message is also sent as a reply to a piece request if the downloader has too many outstanding unpaid pieces. @param index: index of the piece (piece number) the reward is for. A piece is a block of which the has is in the meta file. @param begin: offset of the subpiece is the piece the reward is for. This is currently always zero, it enables to account a single piece hashcheck failing to single peer. 
@param length: The length of the piece the reward is for. @param result: boolean indicating if the uploader validated the sent key @return: nothing """ print("received reward for piece %d %d %d" % (index,begin,length)) if not (index,begin,length) in self.payment_key_status: print("received reward for unsent piece %d %d %d" % (index,begin,length)) return (status,retries,old_good) = self.payment_key_status[(index,begin,length)] if status == "done": print "received key reward response for already done piece" #we already received this one return elif status == None: #something weird happened, received unwanted key print("received unwanted key_reward resonse idx=%06d begin=%06d len=%06d key=%s" % (index,begin,len)) return elif status == "waiting": (old_status,old_retries, good) = self.payment_key_status[(index,begin,length)] if result != 0: #success print "received positive key reward response for %d %d %d" % (index,begin,length) self.payment_key_status[(index,begin,length)] = ("done",retries, good) #elif retries > MAX_REWARD_RETRIES: #permanent failure # log("failure, too many retries") ## self.payment_key_status[ (index,begin,length)] = ("failed",retries,good) # self.multidownload.ban(self.ip) # return else: #temporary failure #TODO add check for validity of response self.send_key_reward(index,begin,length,good) retries+=1 self.payment_key_status[ (index,begin,length)] = ("waiting",retries,good) else: print("bad status for key_reward resonse idx=%06d begin=%06d len=%06d key=%s" % (index,begin,len)) def send_key_reward(self,index,begin,length,good): """ Send a key reward message. The message may or may not contain the keyreward. If the message is send as response to corrupt piece, no keyreward is included. @param index: index of the piece (piece number) the reward is for. A piece is a block of which the has is in the meta file. @param begin: offset of the subpiece is the piece the reward is for. 
This is currently always zero, it enables to account a single piece hashcheck failing to single peer. @param length: The length of the piece the reward is for. @param good: boolean indicating if the key reward has to be included. @return: nothing """ print "sending key reward for piece %d %d %d result= %d" % (index,begin,length,good) if (index,begin,length) not in self.payment_key_status: print "first time send reward for piece %d %d %d result= %d" % (index,begin,length,good) self.payment_key_status[ (index,begin,length)] = ("waiting", 0,good) (status,retries,good) = self.payment_key_status[ (index,begin,length)] if status == "done": print "received double key reward response %d %d %d" % (index,begin,length) #we already received the reward for this one return if good and (index,begin,length) in self.multidownload.key_rewards: #only send a reward if we already received the keylist from tracker. print("sending key reward ") key = self.multidownload.key_rewards[(index,begin,length)] print "encrypting key:"+key key = self.multidownload.pk_tools.encrypt_piece_tls(self.peer_certificate.publicKey, key) print "encrypted key hex %s" % key.encode('hex') else: key = ""; print "sending empty reward" self.connector.send_key_reward(index, begin, length,key)
class Download(object):
    """Implements BitTorrent protocol semantics for downloading over a single
       connection.  See Upload for the protocol semantics in the upload
       direction.  See Connector for the protocol syntax implementation."""

    def __init__(self, multidownload, connector):
        self.multidownload = multidownload
        self.connector = connector
        self.choked = True        # the remote peer is choking us
        self.interested = False   # we are interested in the remote peer
        self.prefer_full = False
        self.active_requests = set()    # (index, begin, length) outstanding
        self.expecting_reject = set()   # cancelled reqs awaiting fast-ext reject
        self.intro_size = self.multidownload.chunksize * 4 # just a guess
        self.measure = Measure(multidownload.config['max_rate_period'])
        self.peermeasure = Measure(
            max(multidownload.storage.piece_size / 10000, 20))
        self.have = Bitfield(multidownload.numpieces)
        self.last = 0             # bttime() of the last piece received
        self.example_interest = None
        self.guard = BadDataGuard(self)
        self.suggested_pieces = []      # fast-ext SUGGEST_PIECE indices
        self.allowed_fast_pieces = []   # fast-ext ALLOWED_FAST indices
        self._useful_received_listeners = set()
        self._raw_received_listeners = set()
        self.add_useful_received_listener(self.measure.update_rate)
        self.total_bytes = 0
        self.add_useful_received_listener(self.accumulate_total)

    def accumulate_total(self, x):
        """Accumulate the running total of useful bytes received."""
        self.total_bytes += x

    def add_useful_received_listener(self, listener):
        # "useful" received bytes are used in measuring goodput.
        self._useful_received_listeners.add(listener)

    def remove_useful_received_listener(self, listener):
        self._useful_received_listeners.remove(listener)

    def fire_useful_received_listeners(self, bytes):
        """Notify all goodput listeners of 'bytes' useful bytes received."""
        for f in self._useful_received_listeners:
            f(bytes)

    def add_raw_received_listener(self, listener):
        self._raw_received_listeners.add(listener)

    def remove_raw_received_listener(self, listener):
        self._raw_received_listeners.remove(listener)

    def fire_raw_received_listeners(self, bytes):
        """Notify all raw-throughput listeners of 'bytes' bytes received."""
        for f in self._raw_received_listeners:
            f(bytes)

    def _backlog(self):
        """Return how many requests may be pipelined to this peer."""
        # Dave's suggestion:
        # backlog = 2 + thruput delay product in chunks.
        # Assume one-way download propagation delay is always less than 200ms.
        # backlog = 2 + int(0.2 * self.measure.get_rate() /
        #                   self.multidownload.chunksize
        # Then eliminate the cap of 50 and the 0.075*backlog.
        backlog = 2 + int(4 * self.measure.get_rate() /
                          self.multidownload.chunksize)
        if self.total_bytes < self.intro_size:
            # optimistic backlog to get things started
            backlog = max(10, backlog)
        if backlog > 50:
            backlog = max(50, int(.075 * backlog))
        if self.multidownload.rm.endgame:
            # OPTIONAL: zero pipelining during endgame
            #b = 1
            pass
        return backlog

    def disconnected(self):
        """Undo this peer's contribution to piece availability counts and
           return its outstanding requests to the pool."""
        self.multidownload.lost_peer(self)
        if self.have.numfalse == 0:
            self.multidownload.lost_have_all()
        else:
            # arg, slow
            count = 0
            target = len(self.have) - self.have.numfalse
            for i in xrange(len(self.have)):
                if count == target:
                    break
                if self.have[i]:
                    self.multidownload.lost_have(i)
                    count += 1
        self._letgo()
        self.guard.download = None

    def _letgo(self):
        """Release all outstanding requests and hand the affected pieces to
           other unchoked downloads."""
        if not self.active_requests:
            return
        if self.multidownload.rm.endgame:
            self.active_requests.clear()
            return
        lost = []
        for index, begin, length in self.active_requests:
            self.multidownload.rm.request_lost(index, begin, length)
            self.multidownload.active_requests_remove(index)
            if index not in lost:
                lost.append(index)
        self.active_requests.clear()
        ds = [d for d in self.multidownload.downloads if not d.choked]
        random.shuffle(ds)
        for d in ds:
            d._request_more(lost)
        # choked-and-uninterested peers may become interesting now that the
        # lost pieces need requesting again.
        for d in self.multidownload.downloads:
            if d.choked and not d.interested:
                for l in lost:
                    if d._want(l):
                        d.interested = True
                        d.connector.send_interested()
                        break

    def got_choke(self):
        if not self.choked:
            self.choked = True
            if not self.connector.uses_fast_extension:
                self._letgo()

    def got_unchoke(self):
        if self.choked:
            self.choked = False
            if self.interested:
                self._request_more()

    def got_piece(self, index, begin, piece):
        """Handle a received block; credit goodput, maintain endgame
           bookkeeping, and hand the data to storage for hashchecking."""
        req = (index, begin, len(piece))
        if req not in self.active_requests:
            self.multidownload.discarded_bytes += len(piece)
            if self.connector.uses_fast_extension:
                # getting a piece we sent a cancel for
                # is just like receiving a reject
                self.got_reject_request(*req)
            return
        self.active_requests.remove(req)
        # we still give the peer credit in endgame, since we did request
        # the piece (it was in active_requests)
        self.fire_useful_received_listeners(len(piece))
        if self.multidownload.rm.endgame:
            if req not in self.multidownload.all_requests:
                self.multidownload.discarded_bytes += len(piece)
                return
            self.multidownload.all_requests.remove(req)
            # cancel the duplicate endgame requests sent to other peers.
            for d in self.multidownload.downloads:
                if d.interested:
                    if not d.choked and req in d.active_requests:
                        d.connector.send_cancel(*req)
                        d.active_requests.remove(req)
                        if d.connector.uses_fast_extension:
                            d.expecting_reject.add(req)
                    d.fix_download_endgame()
        else:
            self._request_more()
        self.last = bttime()
        df = self.multidownload.storage.write(index, begin, piece, self.guard)
        df.addCallback(self._got_piece, index)
        df.addErrback(self.multidownload.errorfunc)

    def _got_piece(self, hashchecked, index):
        # storage.write callback: hashchecked is truthy once the whole
        # piece passed its hash check.
        if hashchecked:
            self.multidownload.hashchecked(index)

    def _want(self, index):
        """True when the peer has 'index' and requests are still wanted."""
        return (self.have[index] and
                self.multidownload.rm.want_requests(index))

    def send_request(self, index, begin, length):
        """Record and transmit a single block request.

        @raise ValueError: if the request extends past the piece boundary.
        """
        piece_size = self.multidownload.storage.piece_size
        if begin + length > piece_size:
            raise ValueError("Issuing request that exceeds piece size: "
                             "(%d + %d == %d) > %d" %
                             (begin, length, begin + length, piece_size))
        self.multidownload.active_requests_add(index)
        self.active_requests.add((index, begin, length))
        self.connector.send_request(index, begin, length)

    def _request_more(self, indices=None):
        """Issue requests to this peer until the backlog is full.

        @param indices: optional list of piece indices to restrict the
           search to; by default the picker chooses.
        """
        # BUGFIX(review): the default used to be the mutable 'indices = []';
        # replaced with a None sentinel (behavior unchanged -- the default
        # was never mutated -- but the sentinel form is safe and idiomatic).
        if indices is None:
            indices = []
        if self.choked:
            self._request_when_choked()
            return
        #log( "_request_more.active_requests=%s" % self.active_requests )
        b = self._backlog()
        if len(self.active_requests) >= b:
            return
        if self.multidownload.rm.endgame:
            self.fix_download_endgame()
            return
        self.suggested_pieces = [i for i in self.suggested_pieces
                                 if not self.multidownload.storage.do_I_have(i)]
        lost_interests = []
        while len(self.active_requests) < b:
            if not indices:
                interest = self.multidownload.picker.next(self.have,
                    self.multidownload.rm.active_requests,
                    self.multidownload.rm.fully_active,
                    self.suggested_pieces)
            else:
                interest = None
                for i in indices:
                    if self._want(i):
                        interest = i
                        break
            if interest is None:
                break
            if not self.interested:
                self.interested = True
                self.connector.send_interested()
            # an example interest created by from_behind is preferable
            if self.example_interest is None:
                self.example_interest = interest
            # request as many chunks of interesting piece as fit in backlog.
            while len(self.active_requests) < b:
                begin, length = self.multidownload.rm.new_request(interest,
                    self.prefer_full)
                self.send_request(interest, begin, length)
                if not self.multidownload.rm.want_requests(interest):
                    lost_interests.append(interest)
                    break
        if not self.active_requests and self.interested:
            self.interested = False
            self.connector.send_not_interested()
        self._check_lost_interests(lost_interests)
        self.multidownload.check_enter_endgame()

    def _check_lost_interests(self, lost_interests):
        """
           Notify other downloads that these pieces are no longer interesting.

           @param lost_interests: list of pieces that have been fully
              requested.
        """
        if not lost_interests:
            return
        for d in self.multidownload.downloads:
            if d.active_requests or not d.interested:
                continue
            if (d.example_interest is not None and
                self.multidownload.rm.want_requests(d.example_interest)):
                continue
            # any() does not exist until python 2.5
            #if not any([d.have[lost] for lost in lost_interests]):
            #    continue
            for lost in lost_interests:
                if d.have[lost]:
                    break
            else:
                continue
            interest = self.multidownload.picker.from_behind(d.have,
                self.multidownload.rm.fully_active)
            if interest is None:
                d.interested = False
                d.connector.send_not_interested()
            else:
                d.example_interest = interest

    def _request_when_choked(self):
        """Request only 'allowed fast' pieces, which the fast extension
           permits downloading even while choked."""
        self.allowed_fast_pieces = [i for i in self.allowed_fast_pieces
                                    if not self.multidownload.storage.do_I_have(i)]
        if not self.allowed_fast_pieces:
            return
        fast = list(self.allowed_fast_pieces)
        b = self._backlog()
        lost_interests = []
        while len(self.active_requests) < b:
            while fast:
                piece = fast.pop()
                if self._want(piece):
                    break
            else:
                break # no unrequested pieces among allowed fast.
            # request chunks until no more chunks or no more room in backlog.
            while len(self.active_requests) < b:
                begin, length = self.multidownload.rm.new_request(piece,
                    self.prefer_full)
                self.send_request(piece, begin, length)
                if not self.multidownload.rm.want_requests(piece):
                    lost_interests.append(piece)
                    break
        self._check_lost_interests(lost_interests)
        self.multidownload.check_enter_endgame()

    def fix_download_endgame(self):
        """Recompute interest and issue duplicate requests during endgame."""
        want = []
        for a in self.multidownload.all_requests:
            if not self.have[a[0]]:
                continue
            if a in self.active_requests:
                continue
            want.append(a)
        if self.interested and not self.active_requests and not want:
            self.interested = False
            self.connector.send_not_interested()
            return
        if not self.interested and want:
            self.interested = True
            self.connector.send_interested()
        if self.choked:
            return
        random.shuffle(want)
        for req in want[:self._backlog() - len(self.active_requests)]:
            self.send_request(*req)

    def got_have(self, index):
        """Update availability for a HAVE message and react (close if both
           sides are done, or request from the peer if the piece is wanted)."""
        if self.have[index]:
            return
        if index == self.multidownload.numpieces-1:
            # the final piece may be shorter than piece_size.
            self.peermeasure.update_rate(self.multidownload.storage.total_length-
              (self.multidownload.numpieces-1)*self.multidownload.storage.piece_size)
        else:
            self.peermeasure.update_rate(self.multidownload.storage.piece_size)
        self.have[index] = True
        self.multidownload.got_have(index)
        if (self.multidownload.storage.get_amount_left() == 0 and
            self.have.numfalse == 0):
            # both sides are complete; nothing left to exchange.
            self.connector.close()
            return
        if self.multidownload.rm.endgame:
            self.fix_download_endgame()
        elif self.multidownload.rm.want_requests(index):
            self._request_more([index]) # call _request_more whether choked.
            if self.choked and not self.interested:
                self.interested = True
                self.connector.send_interested()

    def got_have_bitfield(self, have):
        """Install the peer's initial bitfield and express interest if it
           holds anything we still want."""
        if have.numfalse == 0:
            self._got_have_all(have)
            return
        self.have = have
        # arg, slow
        count = 0
        target = len(self.have) - self.have.numfalse
        for i in xrange(len(self.have)):
            if count == target:
                break
            if self.have[i]:
                self.multidownload.got_have(i)
                count += 1
        if self.multidownload.rm.endgame:
            for piece, begin, length in self.multidownload.all_requests:
                if self.have[piece]:
                    self.interested = True
                    self.connector.send_interested()
                    return
        for piece in self.multidownload.rm.iter_want():
            if self.have[piece]:
                self.interested = True
                self.connector.send_interested()
                return

    def _got_have_all(self, have=None):
        """Handle a seed: either close (we are also done) or mark every
           piece as available and express interest when warranted."""
        if self.multidownload.storage.get_amount_left() == 0:
            self.connector.close()
            return
        if have is None:
            # bleh -- build an all-ones bitfield by hand.
            n = self.multidownload.numpieces
            rlen, extra = divmod(n, 8)
            if extra:
                extra = chr((0xFF << (8 - extra)) & 0xFF)
            else:
                extra = ''
            s = (chr(0xFF) * rlen) + extra
            have = Bitfield(n, s)
        self.have = have
        self.multidownload.got_have_all()
        if self.multidownload.rm.endgame:
            # any outstanding endgame request makes this peer interesting.
            for piece, begin, length in self.multidownload.all_requests:
                self.interested = True
                self.connector.send_interested()
                return
        for i in self.multidownload.rm.iter_want():
            self.interested = True
            self.connector.send_interested()
            return

    def get_rate(self):
        return self.measure.get_rate()

    def is_snubbed(self):
        """True when the peer has not delivered a block recently."""
        return bttime() - self.last > self.multidownload.snub_time

    def got_have_none(self):
        pass # currently no action is taken when have_none is received.
             # The picker already assumes the local peer has none of the
             # pieces until got_have is called.

    def got_have_all(self):
        assert self.connector.uses_fast_extension
        self._got_have_all()

    def got_suggest_piece(self, piece):
        assert self.connector.uses_fast_extension
        if not self.multidownload.storage.do_I_have(piece):
            self.suggested_pieces.append(piece)
            self._request_more() # try to request more. Just returns if choked.

    def got_allowed_fast(self,piece):
        """Upon receiving this message, the multidownload knows that it is
           allowed to download the specified piece even when choked."""
        #log( "got_allowed_fast %d" % piece )
        assert self.connector.uses_fast_extension
        if not self.multidownload.storage.do_I_have(piece):
            if piece not in self.allowed_fast_pieces:
                self.allowed_fast_pieces.append(piece)
                random.shuffle(self.allowed_fast_pieces) # O(n) but n is small.
            self._request_more() # will try to request.  Handles cases like
                                 # whether neighbor has "allowed fast" piece.

    def got_reject_request(self, piece, begin, length):
        """Handle a fast-extension REJECT: drop the request and offer the
           piece to other downloads."""
        assert self.connector.uses_fast_extension
        req = (piece, begin, length)
        if req not in self.expecting_reject:
            if req not in self.active_requests:
                self.connector.protocol_violation("Reject received for "
                    "piece not pending")
                self.connector.close()
                return
            self.active_requests.remove(req)
        else:
            self.expecting_reject.remove(req)
        if self.multidownload.rm.endgame:
            return
        self.multidownload.rm.request_lost(*req)
        if not self.choked:
            self._request_more()
        ds = [d for d in self.multidownload.downloads if not d.choked]
        random.shuffle(ds)
        for d in ds:
            d._request_more([piece])
        for d in self.multidownload.downloads:
            if d.choked and not d.interested:
                if d._want(piece):
                    d.interested = True
                    d.connector.send_interested()
                    break
class Download(object):
    """Implements BitTorrent protocol semantics for downloading over a single
       connection.  See Upload for the protocol semantics in the upload
       direction.  See Connector for the protocol syntax implementation."""

    def __init__(self, multidownload, connector):
        self.multidownload = multidownload
        self.connector = connector
        self.choked = True        # the remote peer is choking us
        self.interested = False   # we are interested in the remote peer
        self.prefer_full = False
        self.active_requests = set()    # (index, begin, length) outstanding
        self.expecting_reject = set()   # cancelled reqs awaiting fast-ext reject
        self.intro_size = self.multidownload.chunksize * 4 # just a guess
        self.measure = Measure(multidownload.config['max_rate_period'])
        self.peermeasure = Measure(
            max(multidownload.storage.piece_size / 10000, 20))
        self.have = Bitfield(multidownload.numpieces)
        self.last = 0             # bttime() of the last piece received
        self.example_interest = None
        self.guard = BadDataGuard(self)
        self.suggested_pieces = []      # fast-ext SUGGEST_PIECE indices
        self.allowed_fast_pieces = []   # fast-ext ALLOWED_FAST indices
        self._useful_received_listeners = set()
        self._raw_received_listeners = set()
        self.add_useful_received_listener(self.measure.update_rate)
        self.total_bytes = 0
        self.add_useful_received_listener(self.accumulate_total)

    def accumulate_total(self, x):
        """Accumulate the running total of useful bytes received."""
        self.total_bytes += x

    def add_useful_received_listener(self, listener):
        # "useful" received bytes are used in measuring goodput.
        self._useful_received_listeners.add(listener)

    def remove_useful_received_listener(self, listener):
        self._useful_received_listeners.remove(listener)

    def fire_useful_received_listeners(self, bytes):
        """Notify all goodput listeners of 'bytes' useful bytes received."""
        for f in self._useful_received_listeners:
            f(bytes)

    def add_raw_received_listener(self, listener):
        self._raw_received_listeners.add(listener)

    def remove_raw_received_listener(self, listener):
        self._raw_received_listeners.remove(listener)

    def fire_raw_received_listeners(self, bytes):
        """Notify all raw-throughput listeners of 'bytes' bytes received."""
        for f in self._raw_received_listeners:
            f(bytes)

    def _backlog(self):
        """Return how many requests may be pipelined to this peer."""
        # Dave's suggestion:
        # backlog = 2 + thruput delay product in chunks.
        # Assume one-way download propagation delay is always less than 200ms.
        # backlog = 2 + int(0.2 * self.measure.get_rate() /
        #                   self.multidownload.chunksize
        # Then eliminate the cap of 50 and the 0.075*backlog.
        backlog = 2 + int(
            4 * self.measure.get_rate() / self.multidownload.chunksize)
        if self.total_bytes < self.intro_size:
            # optimistic backlog to get things started
            backlog = max(10, backlog)
        if backlog > 50:
            backlog = max(50, int(.075 * backlog))
        if self.multidownload.rm.endgame:
            # OPTIONAL: zero pipelining during endgame
            #b = 1
            pass
        return backlog

    def disconnected(self):
        """Undo this peer's contribution to piece availability counts and
           return its outstanding requests to the pool."""
        self.multidownload.lost_peer(self)
        if self.have.numfalse == 0:
            self.multidownload.lost_have_all()
        else:
            # arg, slow
            count = 0
            target = len(self.have) - self.have.numfalse
            for i in xrange(len(self.have)):
                if count == target:
                    break
                if self.have[i]:
                    self.multidownload.lost_have(i)
                    count += 1
        self._letgo()
        self.guard.download = None

    def _letgo(self):
        """Release all outstanding requests and hand the affected pieces to
           other unchoked downloads."""
        if not self.active_requests:
            return
        if self.multidownload.rm.endgame:
            self.active_requests.clear()
            return
        lost = []
        for index, begin, length in self.active_requests:
            self.multidownload.rm.request_lost(index, begin, length)
            self.multidownload.active_requests_remove(index)
            if index not in lost:
                lost.append(index)
        self.active_requests.clear()
        ds = [d for d in self.multidownload.downloads if not d.choked]
        random.shuffle(ds)
        for d in ds:
            d._request_more(lost)
        # choked-and-uninterested peers may become interesting now that the
        # lost pieces need requesting again.
        for d in self.multidownload.downloads:
            if d.choked and not d.interested:
                for l in lost:
                    if d._want(l):
                        d.interested = True
                        d.connector.send_interested()
                        break

    def got_choke(self):
        if not self.choked:
            self.choked = True
            # BUGFIX(review): this comment's continuation line had lost its
            # '#' marker, leaving a bare statement that broke the file.
            # ugly. instead, it should move all the requests to
            # expecting_reject
            if not self.connector.uses_fast_extension:
                self._letgo()

    def got_unchoke(self):
        if self.choked:
            self.choked = False
            if self.interested:
                self._request_more()

    def got_piece(self, index, begin, piece):
        """Handle a received block; credit goodput, maintain endgame
           bookkeeping, and hand the data to storage for hashchecking."""
        req = (index, begin, len(piece))
        if req not in self.active_requests:
            self.multidownload.discarded_bytes += len(piece)
            if self.connector.uses_fast_extension:
                # getting a piece we sent a cancel for
                # is just like receiving a reject
                self.got_reject_request(*req)
            return
        self.active_requests.remove(req)
        # we still give the peer credit in endgame, since we did request
        # the piece (it was in active_requests)
        self.fire_useful_received_listeners(len(piece))
        if self.multidownload.rm.endgame:
            if req not in self.multidownload.all_requests:
                self.multidownload.discarded_bytes += len(piece)
                return
            self.multidownload.all_requests.remove(req)
            # cancel the duplicate endgame requests sent to other peers.
            for d in self.multidownload.downloads:
                if d.interested:
                    if not d.choked and req in d.active_requests:
                        d.connector.send_cancel(*req)
                        d.active_requests.remove(req)
                        if d.connector.uses_fast_extension:
                            d.expecting_reject.add(req)
                    d.fix_download_endgame()
        else:
            self._request_more()
        self.last = bttime()
        df = self.multidownload.storage.write(index, begin, piece, self.guard)
        df.addCallback(self._got_piece, index)
        df.addErrback(self.multidownload.errorfunc)

    def _got_piece(self, hashchecked, index):
        # storage.write callback: hashchecked is truthy once the whole
        # piece passed its hash check.
        if hashchecked:
            self.multidownload.hashchecked(index)

    def _want(self, index):
        """True when the peer has 'index' and requests are still wanted."""
        return (self.have[index] and
                self.multidownload.rm.want_requests(index))

    def send_request(self, index, begin, length):
        """Record and transmit a single block request.

        @raise ValueError: if the request extends past the piece boundary.
        """
        piece_size = self.multidownload.storage.piece_size
        if begin + length > piece_size:
            raise ValueError("Issuing request that exceeds piece size: "
                             "(%d + %d == %d) > %d" %
                             (begin, length, begin + length, piece_size))
        self.multidownload.active_requests_add(index)
        self.active_requests.add((index, begin, length))
        self.connector.send_request(index, begin, length)

    def _request_more(self, indices=None):
        """Issue requests to this peer until the backlog is full.

        @param indices: optional list of piece indices to restrict the
           search to; by default the picker chooses.
        """
        # BUGFIX(review): the default used to be the mutable 'indices=[]';
        # replaced with a None sentinel (behavior unchanged -- the default
        # was never mutated -- but the sentinel form is safe and idiomatic).
        if indices is None:
            indices = []
        if self.choked:
            self._request_when_choked()
            return
        #log( "_request_more.active_requests=%s" % self.active_requests )
        b = self._backlog()
        if len(self.active_requests) >= b:
            return
        if self.multidownload.rm.endgame:
            self.fix_download_endgame()
            return
        self.suggested_pieces = [
            i for i in self.suggested_pieces
            if not self.multidownload.storage.do_I_have(i)
        ]
        lost_interests = []
        while len(self.active_requests) < b:
            if not indices:
                interest = self.multidownload.picker.next(
                    self.have, self.multidownload.rm.active_requests,
                    self.multidownload.rm.fully_active, self.suggested_pieces)
            else:
                interest = None
                for i in indices:
                    if self._want(i):
                        interest = i
                        break
            if interest is None:
                break
            if not self.interested:
                self.interested = True
                self.connector.send_interested()
            # an example interest created by from_behind is preferable
            if self.example_interest is None:
                self.example_interest = interest
            # request as many chunks of interesting piece as fit in backlog.
            while len(self.active_requests) < b:
                begin, length = self.multidownload.rm.new_request(
                    interest, self.prefer_full)
                self.send_request(interest, begin, length)
                if not self.multidownload.rm.want_requests(interest):
                    lost_interests.append(interest)
                    break
        if not self.active_requests and self.interested:
            self.interested = False
            self.connector.send_not_interested()
        self._check_lost_interests(lost_interests)
        self.multidownload.check_enter_endgame()

    def _check_lost_interests(self, lost_interests):
        """
           Notify other downloads that these pieces are no longer interesting.

           @param lost_interests: list of pieces that have been fully
              requested.
        """
        if not lost_interests:
            return
        for d in self.multidownload.downloads:
            if d.active_requests or not d.interested:
                continue
            if (d.example_interest is not None and
                self.multidownload.rm.want_requests(d.example_interest)):
                continue
            # any() does not exist until python 2.5
            #if not any([d.have[lost] for lost in lost_interests]):
            #    continue
            for lost in lost_interests:
                if d.have[lost]:
                    break
            else:
                continue
            interest = self.multidownload.picker.from_behind(
                d.have, self.multidownload.rm.fully_active)
            if interest is None:
                d.interested = False
                d.connector.send_not_interested()
            else:
                d.example_interest = interest

    def _request_when_choked(self):
        """Request only 'allowed fast' pieces, which the fast extension
           permits downloading even while choked."""
        self.allowed_fast_pieces = [
            i for i in self.allowed_fast_pieces
            if not self.multidownload.storage.do_I_have(i)
        ]
        if not self.allowed_fast_pieces:
            return
        fast = list(self.allowed_fast_pieces)
        b = self._backlog()
        lost_interests = []
        while len(self.active_requests) < b:
            while fast:
                piece = fast.pop()
                if self._want(piece):
                    break
            else:
                break # no unrequested pieces among allowed fast.
            # request chunks until no more chunks or no more room in backlog.
            while len(self.active_requests) < b:
                begin, length = self.multidownload.rm.new_request(
                    piece, self.prefer_full)
                self.send_request(piece, begin, length)
                if not self.multidownload.rm.want_requests(piece):
                    lost_interests.append(piece)
                    break
        self._check_lost_interests(lost_interests)
        self.multidownload.check_enter_endgame()

    def fix_download_endgame(self):
        """Recompute interest and issue duplicate requests during endgame."""
        want = []
        for a in self.multidownload.all_requests:
            if not self.have[a[0]]:
                continue
            if a in self.active_requests:
                continue
            want.append(a)
        if self.interested and not self.active_requests and not want:
            self.interested = False
            self.connector.send_not_interested()
            return
        if not self.interested and want:
            self.interested = True
            self.connector.send_interested()
        if self.choked:
            return
        random.shuffle(want)
        for req in want[:self._backlog() - len(self.active_requests)]:
            self.send_request(*req)

    def got_have(self, index):
        """Update availability for a HAVE message and react (close if both
           sides are done, or request from the peer if the piece is wanted)."""
        if self.have[index]:
            return
        if index == self.multidownload.numpieces - 1:
            # the final piece may be shorter than piece_size.
            self.peermeasure.update_rate(
                self.multidownload.storage.total_length -
                (self.multidownload.numpieces - 1) *
                self.multidownload.storage.piece_size)
        else:
            self.peermeasure.update_rate(self.multidownload.storage.piece_size)
        self.have[index] = True
        self.multidownload.got_have(index)
        if (self.multidownload.storage.get_amount_left() == 0 and
            self.have.numfalse == 0):
            # both sides are complete; nothing left to exchange.
            self.connector.close()
            return
        if self.multidownload.rm.endgame:
            self.fix_download_endgame()
        elif self.multidownload.rm.want_requests(index):
            self._request_more([index]) # call _request_more whether choked.
            if self.choked and not self.interested:
                self.interested = True
                self.connector.send_interested()

    def got_have_bitfield(self, have):
        """Install the peer's initial bitfield and express interest if it
           holds anything we still want."""
        if have.numfalse == 0:
            self._got_have_all(have)
            return
        self.have = have
        # arg, slow
        count = 0
        target = len(self.have) - self.have.numfalse
        for i in xrange(len(self.have)):
            if count == target:
                break
            if self.have[i]:
                self.multidownload.got_have(i)
                count += 1
        if self.multidownload.rm.endgame:
            for piece, begin, length in self.multidownload.all_requests:
                if self.have[piece]:
                    self.interested = True
                    self.connector.send_interested()
                    return
        for piece in self.multidownload.rm.iter_want():
            if self.have[piece]:
                self.interested = True
                self.connector.send_interested()
                return

    def _got_have_all(self, have=None):
        """Handle a seed: either close (we are also done) or mark every
           piece as available and express interest when warranted."""
        if self.multidownload.storage.get_amount_left() == 0:
            self.connector.close()
            return
        if have is None:
            # bleh -- build an all-ones bitfield by hand.
            n = self.multidownload.numpieces
            rlen, extra = divmod(n, 8)
            if extra:
                extra = chr((0xFF << (8 - extra)) & 0xFF)
            else:
                extra = ''
            s = (chr(0xFF) * rlen) + extra
            have = Bitfield(n, s)
        self.have = have
        self.multidownload.got_have_all()
        if self.multidownload.rm.endgame:
            # any outstanding endgame request makes this peer interesting.
            for piece, begin, length in self.multidownload.all_requests:
                self.interested = True
                self.connector.send_interested()
                return
        for i in self.multidownload.rm.iter_want():
            self.interested = True
            self.connector.send_interested()
            return

    def get_rate(self):
        return self.measure.get_rate()

    def is_snubbed(self):
        """True when the peer has not delivered a block recently."""
        return bttime() - self.last > self.multidownload.snub_time

    def got_have_none(self):
        pass # currently no action is taken when have_none is received.
             # The picker already assumes the local peer has none of the
             # pieces until got_have is called.

    def got_have_all(self):
        assert self.connector.uses_fast_extension
        self._got_have_all()

    def got_suggest_piece(self, piece):
        assert self.connector.uses_fast_extension
        if not self.multidownload.storage.do_I_have(piece):
            self.suggested_pieces.append(piece)
            self._request_more() # try to request more. Just returns if choked.

    def got_allowed_fast(self, piece):
        """Upon receiving this message, the multidownload knows that it is
           allowed to download the specified piece even when choked."""
        #log( "got_allowed_fast %d" % piece )
        assert self.connector.uses_fast_extension
        if not self.multidownload.storage.do_I_have(piece):
            if piece not in self.allowed_fast_pieces:
                self.allowed_fast_pieces.append(piece)
                random.shuffle(
                    self.allowed_fast_pieces) # O(n) but n is small.
            self._request_more() # will try to request.  Handles cases like
                                 # whether neighbor has "allowed fast" piece.

    def got_reject_request(self, piece, begin, length):
        """Handle a fast-extension REJECT: drop the request and offer the
           piece to other downloads."""
        assert self.connector.uses_fast_extension
        req = (piece, begin, length)
        if req not in self.expecting_reject:
            if req not in self.active_requests:
                self.connector.protocol_violation("Reject received for "
                    "piece not pending")
                self.connector.close()
                return
            self.active_requests.remove(req)
        else:
            self.expecting_reject.remove(req)
        if self.multidownload.rm.endgame:
            return
        self.multidownload.rm.request_lost(*req)
        if not self.choked:
            self._request_more()
        ds = [d for d in self.multidownload.downloads if not d.choked]
        random.shuffle(ds)
        for d in ds:
            d._request_more([piece])
        for d in self.multidownload.downloads:
            if d.choked and not d.interested:
                if d._want(piece):
                    d.interested = True
                    d.connector.send_interested()
                    break
class Upload(object):
    """Upload side of a single peer connection: tracks the peer's interest,
       our choke state, and the queue of block requests waiting to be sent."""

    def __init__(self, connection, ratelimiter, totalup, totalup2, choker,
                 storage, max_slice_length, max_rate_period):
        self.connection = connection
        self.ratelimiter = ratelimiter
        self.choker = choker
        self.storage = storage
        self.totalup = totalup
        self.totalup2 = totalup2
        self.max_slice_length = max_slice_length
        self.max_rate_period = max_rate_period
        self.choked = True
        self.interested = False
        self.unchoke_time = None
        self.buffer = []                    # queued (index, begin, length)
        self.measure = Measure(max_rate_period)
        if storage.do_I_have_anything():
            connection.send_bitfield(storage.get_have_list())

    def got_not_interested(self):
        """Peer withdrew interest: clear the pending queue and tell the choker."""
        if not self.interested:
            return
        self.interested = False
        del self.buffer[:]
        self.choker.not_interested(self.connection)

    def got_interested(self):
        """Peer declared interest in our data; inform the choker."""
        if self.interested:
            return
        self.interested = True
        self.choker.interested(self.connection)

    def get_upload_chunk(self):
        """Pop the oldest queued request and fetch its block from storage.

           Returns an (index, begin, piece) tuple, or None when the queue is
           empty or the block could not be read (the connection is closed in
           the latter case)."""
        if not self.buffer:
            return None
        index, begin, length = self.buffer.pop(0)
        piece = self.storage.get_piece(index, begin, length)
        if piece is None:
            self.connection.close()
            return None
        amount = len(piece)
        for meter in (self.measure, self.totalup, self.totalup2):
            meter.update_rate(amount)
        return (index, begin, piece)

    def got_request(self, index, begin, length):
        """Queue a block request from the peer; oversized or uninvited
           requests terminate the connection."""
        if length > self.max_slice_length or not self.interested:
            self.connection.close()
            return
        if self.connection.choke_sent:
            return
        self.buffer.append((index, begin, length))
        if (self.connection.next_upload is None
                and self.connection.connection.is_flushed()):
            self.ratelimiter.queue(self.connection)

    def got_cancel(self, index, begin, length):
        """Drop a queued request the peer no longer wants (no-op if absent)."""
        req = (index, begin, length)
        if req in self.buffer:
            self.buffer.remove(req)

    def choke(self):
        """Choke the peer and notify it, unless already choked."""
        if self.choked:
            return
        self.choked = True
        self.connection.send_choke()

    def sent_choke(self):
        """Called once the choke has gone out; flush the request queue."""
        assert self.choked
        self.buffer[:] = []

    def unchoke(self, time):
        """Unchoke the peer at 'time' and notify it, unless already unchoked."""
        if not self.choked:
            return
        self.choked = False
        self.unchoke_time = time
        self.connection.send_unchoke()

    def has_queries(self):
        """True when any block requests are queued for this peer."""
        return bool(self.buffer)

    def get_rate(self):
        """Current upload rate to this peer."""
        return self.measure.get_rate()
class Download(object):
    """Implements BitTorrent protocol semantics for downloading over a single
       connection.  See Upload for the protocol semantics in the upload
       direction.  See Connector.Connection for the protocol syntax
       implementation.  This variant supports the fast extension
       (allowed-fast / suggest / reject / have-all / have-none messages)."""

    def __init__(self, multidownload, connection):
        self.multidownload = multidownload
        self.connection = connection
        # Protocol state: connections start choked and uninterested.
        self.choked = True
        self.interested = False
        self.prefer_full = False
        # Outstanding (index, begin, length) requests on this connection.
        self.active_requests = set()
        # Download-rate meters: measure is bytes/s, peermeasure estimates the
        # peer's piece-completion rate from HAVE messages.
        self.measure = Measure(multidownload.config['max_rate_period'])
        self.peermeasure = Measure(
            max(multidownload.storage.piece_size / 10000, 20))
        # Which pieces the remote peer claims to have.
        self.have = Bitfield(multidownload.numpieces)
        # Time the last piece arrived; used by is_snubbed().
        self.last = 0
        self.example_interest = None
        self.guard = BadDataGuard(self)
        # Fast-extension bookkeeping.
        self.suggested_pieces = []
        self.allowed_fast_pieces = []

    def _backlog(self):
        """Return the request pipeline depth, scaled with download rate and
        capped so a fast peer cannot monopolize the request queue."""
        backlog = 2 + int(4 * self.measure.get_rate() /
                          self.multidownload.chunksize)
        if backlog > 50:
            backlog = max(50, int(.075 * backlog))
        return backlog

    def disconnected(self):
        """Tear down this download: withdraw the peer's HAVE counts from the
        picker and release outstanding requests."""
        self.multidownload.lost_peer(self)
        if self.have.numfalse == 0:
            self.multidownload.lost_have_all()
        else:
            # arg, slow: walk the bitfield, stopping once all set bits seen.
            count = 0
            target = len(self.have) - self.have.numfalse
            for i in xrange(len(self.have)):
                if count == target:
                    break
                if self.have[i]:
                    self.multidownload.lost_have(i)
                    count += 1
        self._letgo()
        self.guard.download = None

    def _letgo(self):
        """Release all outstanding requests and offer the affected pieces to
        other (unchoked first, then choked) peers."""
        if not self.active_requests:
            return
        if self.multidownload.storage.endgame:
            # Endgame: every chunk is multiply requested; nothing to hand off.
            self.active_requests.clear()
            return
        lost = []
        for index, begin, length in self.active_requests:
            self.multidownload.storage.request_lost(index, begin, length)
            self.multidownload.active_requests.remove(index)
            if index not in lost:
                lost.append(index)
        self.active_requests.clear()
        ds = [d for d in self.multidownload.downloads if not d.choked]
        random.shuffle(ds)
        for d in ds:
            d._request_more(lost)
        # Wake choked, uninterested peers that could serve a lost piece.
        for d in self.multidownload.downloads:
            if d.choked and not d.interested:
                for l in lost:
                    if d.have[l] and \
                       self.multidownload.storage.want_requests(l):
                        d.interested = True
                        d.connection.send_interested()
                        break

    def got_choke(self):
        if not self.choked:
            self.choked = True
            # With the fast extension, outstanding requests stay pending
            # until explicitly rejected, so only non-fast peers let go here.
            if not self.connection.uses_fast_extension:
                self._letgo()

    def got_unchoke(self):
        if self.choked:
            self.choked = False
            if self.interested:
                self._request_more()

    def got_piece(self, index, begin, piece):
        """Handle an arriving block: account for it, cancel duplicates in
        endgame, refill the pipeline, and write the data to storage."""
        req = (index, begin, len(piece))
        if req not in self.active_requests:
            # Unsolicited data; with the fast extension this is a violation.
            self.multidownload.discarded_bytes += len(piece)
            if self.connection.uses_fast_extension:
                self.connection.close()
            return
        self.active_requests.remove(req)
        if self.multidownload.storage.endgame:
            if req not in self.multidownload.all_requests:
                # Another peer delivered this chunk first.
                self.multidownload.discarded_bytes += len(piece)
                return
            self.multidownload.all_requests.remove(req)
            # Cancel the duplicate request on every other interested peer.
            for d in self.multidownload.downloads:
                if d.interested:
                    if not d.choked:
                        if req in d.active_requests:
                            d.connection.send_cancel(*req)
                            # Fast-extension peers answer a cancel with a
                            # reject, which removes the request then.
                            if not self.connection.uses_fast_extension:
                                d.active_requests.remove(req)
                    d.fix_download_endgame()
        else:
            self._request_more()
        self.last = bttime()
        self.update_rate(len(piece))
        # Asynchronous write; hash check result arrives via _got_piece.
        df = self.multidownload.storage.write(index, begin, piece, self.guard)
        df.addCallback(self._got_piece, index)

    def _got_piece(self, hashchecked, index):
        """Deferred callback: notify the multidownload when a full piece
        passed its hash check."""
        if hashchecked:
            self.multidownload.hashchecked(index)

    def _want(self, index):
        """True if the peer has `index` and it still has requestable chunks."""
        return self.have[index] and \
               self.multidownload.storage.want_requests(index)

    def _request_more(self, indices = []):
        """Fill the request pipeline up to the backlog.

        `indices`, when given, restricts picking to those pieces (used for
        strict-priority handoff).  Note: the mutable default is safe here —
        the body only reads `indices`, never mutates it.
        """
        if self.choked:
            self._request_when_choked()
            return
        #log( "_request_more.active_requests=%s" % self.active_requests )
        b = self._backlog()
        if len(self.active_requests) >= b:
            return
        if self.multidownload.storage.endgame:
            self.fix_download_endgame()
            return
        # Drop suggestions for pieces we have since completed.
        self.suggested_pieces = [i for i in self.suggested_pieces
                                 if not self.multidownload.storage.do_I_have(i)]
        lost_interests = []
        while len(self.active_requests) < b:
            if not indices:
                interest = self.multidownload.picker.next(self.have,
                    self.multidownload.active_requests,
                    self.multidownload.storage.full_pieces,
                    self.suggested_pieces)
            else:
                interest = None
                for i in indices:
                    if self.have[i] and \
                       self.multidownload.storage.want_requests(i):
                        interest = i
                        break
            if interest is None:
                break
            if not self.interested:
                self.interested = True
                self.connection.send_interested()
            # an example interest created by from_behind is preferable
            if self.example_interest is None:
                self.example_interest = interest
            # request as many chunks of interesting piece as fit in backlog.
            while len(self.active_requests) < b:
                begin, length = self.multidownload.storage.new_request(
                    interest, self.prefer_full)
                self.multidownload.active_requests_add(interest)
                self.active_requests.add((interest, begin, length))
                self.connection.send_request(interest, begin, length)
                if not self.multidownload.storage.want_requests(interest):
                    # Piece fully requested; others may lose interest in it.
                    lost_interests.append(interest)
                    break
        if not self.active_requests and self.interested:
            self.interested = False
            self.connection.send_not_interested()
        self._check_lost_interests(lost_interests)
        if self.multidownload.storage.endgame:
            # Entering endgame: snapshot every outstanding request so all
            # peers can race for the remaining chunks.
            self.multidownload.all_requests = set()
            for d in self.multidownload.downloads:
                self.multidownload.all_requests.update(d.active_requests)
            for d in self.multidownload.downloads:
                d.fix_download_endgame()

    def _check_lost_interests(self, lost_interests):
        """
        Notify other downloads that these pieces are no longer interesting.

        @param lost_interests: list of pieces that have been fully requested.
        """
        if not lost_interests:
            return
        for d in self.multidownload.downloads:
            if d.active_requests or not d.interested:
                continue
            if d.example_interest is not None and not \
               self.multidownload.storage.have[d.example_interest] and \
               self.multidownload.storage.want_requests(d.example_interest):
                # d still has a valid reason to stay interested.
                continue
            # any() does not exist until python 2.5
            #if not any([d.have[lost] for lost in lost_interests]):
            #    continue
            for lost in lost_interests:
                if d.have[lost]:
                    break
            else:
                continue
            # NOTE(review): this passes self.have while deciding for peer d;
            # the parallel SingleDownload versions use d's own view — confirm
            # whether d.have was intended here.
            interest = self.multidownload.picker.from_behind(self.have,
                                   self.multidownload.storage.full_pieces)
            if interest is None:
                d.interested = False
                d.connection.send_not_interested()
            else:
                d.example_interest = interest

    def _request_when_choked(self):
        """While choked, request chunks only from the peer's allowed-fast
        pieces (fast extension)."""
        # Drop allowed-fast pieces we have since completed.
        self.allowed_fast_pieces = [i for i in self.allowed_fast_pieces
            if not self.multidownload.storage.do_I_have(i)]
        if not self.allowed_fast_pieces:
            return
        fast = list(self.allowed_fast_pieces)
        b = self._backlog()
        lost_interests = []
        while len(self.active_requests) < b:
            while fast:
                piece = fast.pop()
                if self.have[piece] \
                   and self.multidownload.storage.want_requests(piece):
                    break
            else:
                break # no unrequested pieces among allowed fast.
            # request chunks until no more chunks or no more room in backlog.
            while len(self.active_requests) < b:
                begin, length = self.multidownload.storage.new_request(
                    piece, self.prefer_full)
                self.multidownload.active_requests_add(piece)
                self.active_requests.add((piece, begin, length))
                self.connection.send_request(piece, begin, length)
                if not self.multidownload.storage.want_requests(piece):
                    lost_interests.append(piece)
                    break
        self._check_lost_interests(lost_interests)
        if self.multidownload.storage.endgame:
            self.multidownload.all_requests = set()
            for d in self.multidownload.downloads:
                self.multidownload.all_requests.update(d.active_requests)
            for d in self.multidownload.downloads:
                d.fix_download_endgame()

    def fix_download_endgame(self):
        """Endgame: (re-)request any globally-outstanding chunks this peer
        can serve, and keep the interested flag consistent."""
        want = []
        for a in self.multidownload.all_requests:
            if not self.have[a[0]]:
                continue
            if a in self.active_requests:
                continue
            want.append(a)
        if self.interested and not self.active_requests and not want:
            self.interested = False
            self.connection.send_not_interested()
            return
        if not self.interested and want:
            self.interested = True
            self.connection.send_interested()
        if self.choked:
            return
        random.shuffle(want)
        for req in want[:self._backlog() - len(self.active_requests)]:
            self.active_requests.add(req)
            self.connection.send_request(*req)

    def got_have(self, index):
        """Handle a HAVE message: update the peer's bitfield, the picker,
        and possibly start requesting the new piece."""
        if self.have[index]:
            return
        # The final piece may be shorter than piece_size; account exactly.
        if index == self.multidownload.numpieces-1:
            self.peermeasure.update_rate(
                self.multidownload.storage.total_length -
                (self.multidownload.numpieces-1) *
                self.multidownload.storage.piece_size)
        else:
            self.peermeasure.update_rate(self.multidownload.storage.piece_size)
        self.have[index] = True
        self.multidownload.got_have(index)
        if self.multidownload.storage.get_amount_left() == 0 and \
           self.have.numfalse == 0:
            # Both sides are seeds: nothing to exchange.
            self.connection.close()
            return
        if self.multidownload.storage.endgame:
            self.fix_download_endgame()
        elif self.multidownload.storage.want_requests(index):
            self._request_more([index]) # call _request_more whether choked.
            if self.choked:
                if not self.interested:
                    self.interested = True
                    self.connection.send_interested()

    def got_have_bitfield(self, have):
        """Handle the initial bitfield: register HAVEs with the picker and
        declare interest if the peer has anything we want."""
        if have.numfalse == 0:
            self._got_have_all(have)
            return
        self.have = have
        # arg, slow: walk the bitfield, stopping once all set bits seen.
        count = 0
        target = len(self.have) - self.have.numfalse
        for i in xrange(len(self.have)):
            if count == target:
                break
            if self.have[i]:
                self.multidownload.got_have(i)
                count += 1
        if self.multidownload.storage.endgame:
            for piece, begin, length in self.multidownload.all_requests:
                if self.have[piece]:
                    self.interested = True
                    self.connection.send_interested()
                    return
        for piece in self.multidownload.storage.iter_want():
            if self.have[piece]:
                self.interested = True
                self.connection.send_interested()
                return

    def _got_have_all(self, have=None):
        """The peer is a seed (full bitfield or fast-extension HAVE_ALL)."""
        if self.multidownload.storage.get_amount_left() == 0:
            # We are a seed too; drop the connection.
            self.connection.close()
            return
        if have is None:
            # bleh: synthesize an all-ones bitfield for HAVE_ALL.
            n = self.multidownload.numpieces
            rlen, extra = divmod(n, 8)
            if extra:
                extra = chr((0xFF << (8 - extra)) & 0xFF)
            else:
                extra = ''
            s = (chr(0xFF) * rlen) + extra
            have = Bitfield(n, s)
        self.have = have
        self.multidownload.got_have_all()
        if self.multidownload.storage.endgame:
            # A seed can serve any outstanding request; be interested if
            # there is at least one.
            for piece, begin, length in self.multidownload.all_requests:
                self.interested = True
                self.connection.send_interested()
                return
        # Interested if we still want at least one piece.
        for i in self.multidownload.storage.iter_want():
            self.interested = True
            self.connection.send_interested()
            return

    def update_rate(self, amount):
        """Fold `amount` downloaded bytes into per-peer and global meters."""
        self.measure.update_rate(amount)
        self.multidownload.update_rate(amount)

    def get_rate(self):
        return self.measure.get_rate()

    def is_snubbed(self):
        """True if no piece has arrived within the snub timeout."""
        return bttime() - self.last > self.multidownload.snub_time

    def got_have_none(self):
        pass # currently no action is taken when have_none is received.
             # The picker already assumes the local peer has none of the
             # pieces until got_have is called.

    def got_have_all(self):
        """Fast-extension HAVE_ALL: the peer declares itself a seed."""
        assert self.connection.uses_fast_extension
        self._got_have_all()

    def got_suggest_piece(self, piece):
        """Fast-extension SUGGEST: remember the hinted piece for the picker."""
        assert self.connection.uses_fast_extension
        if not self.multidownload.storage.do_I_have(piece):
            self.suggested_pieces.append(piece)
        self._request_more() # try to request more.  Just returns if choked.

    def got_allowed_fast(self,piece):
        """Upon receiving this message, the multidownload knows that it is
        allowed to download the specified piece even when choked."""
        #log( "got_allowed_fast %d" % piece )
        assert self.connection.uses_fast_extension
        if not self.multidownload.storage.do_I_have(piece):
            if piece not in self.allowed_fast_pieces:
                self.allowed_fast_pieces.append(piece)
                random.shuffle(self.allowed_fast_pieces) # O(n) but n is small.
            self._request_more() # will try to request.  Handles cases like
                                 # whether neighbor has "allowed fast" piece.

    def got_reject_request(self, piece, begin, length):
        """Fast-extension REJECT: the peer refuses to serve the request;
        release the chunk and offer the piece to other peers."""
        if not self.connection.uses_fast_extension:
            # REJECT is only legal when the fast extension was negotiated.
            self.connection.close()
            return
        req = (piece, begin, length)
        if req not in self.active_requests:
            self.connection.close()
            return
        self.active_requests.remove(req)
        if self.multidownload.storage.endgame:
            return
        self.multidownload.storage.request_lost(*req)
        if not self.choked:
            self._request_more()
        ds = [d for d in self.multidownload.downloads if not d.choked]
        random.shuffle(ds)
        for d in ds:
            d._request_more([piece])
        # Wake at most one choked, uninterested peer that wants this piece.
        for d in self.multidownload.downloads:
            if d.choked and not d.interested:
                if d.have[piece] and \
                   self.multidownload.storage.want_requests(piece):
                    d.interested = True
                    d.connection.send_interested()
                    break
class SingleDownload(object):
    """Download-side protocol state for a single peer connection (pre-fast-
    extension variant): tracks the peer's bitfield, choke/interest state,
    and the pipeline of outstanding block requests."""

    def __init__(self, downloader, connection):
        self.downloader = downloader
        self.connection = connection
        # Protocol state: connections start choked and uninterested.
        self.choked = True
        self.interested = False
        # Outstanding (index, begin, length) requests, in request order.
        self.active_requests = []
        self.measure = Measure(downloader.config['max_rate_period'])
        self.peermeasure = Measure(max(downloader.storage.piece_size / 10000, 20))
        # Which pieces the remote peer claims to have.
        self.have = Bitfield(downloader.numpieces)
        # Time the last piece arrived; used by is_snubbed().
        self.last = 0
        self.example_interest = None
        self.backlog = 2
        self.guard = BadDataGuard(self)

    def _backlog(self):
        """Recompute and cache the pipeline depth from the current rate,
        capped so a fast peer cannot monopolize the queue."""
        backlog = 2 + int(4 * self.measure.get_rate() / self.downloader.chunksize)
        if backlog > 50:
            backlog = max(50, int(.075 * backlog))
        self.backlog = backlog
        return backlog

    def disconnected(self):
        """Withdraw this peer's HAVEs from the picker and release requests."""
        self.downloader.lost_peer(self)
        for i in xrange(len(self.have)):
            if self.have[i]:
                self.downloader.picker.lost_have(i)
        self._letgo()
        self.guard.download = None

    def _letgo(self):
        """Release all outstanding requests and offer the affected pieces to
        other peers (unchoked first, then wake choked uninterested ones)."""
        if not self.active_requests:
            return
        if self.downloader.storage.endgame:
            # Endgame: chunks are multiply requested; nothing to hand off.
            self.active_requests = []
            return
        lost = []
        for index, begin, length in self.active_requests:
            self.downloader.storage.request_lost(index, begin, length)
            if index not in lost:
                lost.append(index)
        self.active_requests = []
        ds = [d for d in self.downloader.downloads if not d.choked]
        shuffle(ds)
        for d in ds:
            d._request_more(lost)
        for d in self.downloader.downloads:
            if d.choked and not d.interested:
                for l in lost:
                    if d.have[l] and self.downloader.storage.do_I_have_requests(l):
                        d.interested = True
                        d.connection.send_interested()
                        break

    def got_choke(self):
        if not self.choked:
            self.choked = True
            self._letgo()

    def got_unchoke(self):
        if self.choked:
            self.choked = False
            if self.interested:
                self._request_more()

    def got_piece(self, index, begin, piece):
        """Handle an arriving block.

        Returns True when the block completes a whole piece (caller uses
        this to decide when to send HAVE), False otherwise.
        """
        try:
            self.active_requests.remove((index, begin, len(piece)))
        except ValueError:
            # Block we did not request (e.g. already cancelled): discard.
            self.downloader.discarded_bytes += len(piece)
            return False
        if self.downloader.storage.endgame:
            self.downloader.all_requests.remove((index, begin, len(piece)))
        self.last = time()
        self.measure.update_rate(len(piece))
        self.downloader.measurefunc(len(piece))
        self.downloader.downmeasure.update_rate(len(piece))
        if not self.downloader.storage.piece_came_in(index, begin, piece,
                                                     self.guard):
            # Piece failed its hash check: its chunks became requestable again.
            if self.downloader.storage.endgame:
                while self.downloader.storage.do_I_have_requests(index):
                    nb, nl = self.downloader.storage.new_request(index)
                    self.downloader.all_requests.append((index, nb, nl))
                for d in self.downloader.downloads:
                    d.fix_download_endgame()
                return False
            ds = [d for d in self.downloader.downloads if not d.choked]
            shuffle(ds)
            for d in ds:
                d._request_more([index])
            return False
        if self.downloader.storage.do_I_have(index):
            self.downloader.picker.complete(index)
        if self.downloader.storage.endgame:
            # Cancel the duplicate request on every other interested peer.
            for d in self.downloader.downloads:
                if d is not self and d.interested:
                    if d.choked:
                        d.fix_download_endgame()
                    else:
                        try:
                            d.active_requests.remove((index, begin, len(piece)))
                        except ValueError:
                            continue
                        d.connection.send_cancel(index, begin, len(piece))
                        d.fix_download_endgame()
        self._request_more()
        if self.downloader.picker.am_I_complete():
            # Became a seed: drop connections to other seeds.
            for d in [i for i in self.downloader.downloads
                      if i.have.numfalse == 0]:
                d.connection.close()
        return self.downloader.storage.do_I_have(index)

    def _want(self, index):
        """True if the peer has `index` and it has requestable chunks left."""
        return self.have[index] and self.downloader.storage.do_I_have_requests(index)

    def _request_more(self, indices = None):
        """Fill the request pipeline up to the backlog.

        `indices` restricts picking to those pieces (strict-priority handoff
        from _letgo); None means let the picker choose freely.
        """
        assert not self.choked
        if len(self.active_requests) >= self._backlog():
            return
        if self.downloader.storage.endgame:
            self.fix_download_endgame()
            return
        lost_interests = []
        while len(self.active_requests) < self.backlog:
            if indices is None:
                interest = self.downloader.picker.next(self._want,
                                                       self.have.numfalse == 0)
            else:
                interest = None
                for i in indices:
                    if self.have[i] and self.downloader.storage.do_I_have_requests(i):
                        interest = i
                        break
            if interest is None:
                break
            if not self.interested:
                self.interested = True
                self.connection.send_interested()
            self.example_interest = interest
            self.downloader.picker.requested(interest, self.have.numfalse == 0)
            # Queue several chunks of the chosen piece per picker decision.
            while len(self.active_requests) < (self.backlog-2) * 5 + 2:
                begin, length = self.downloader.storage.new_request(interest)
                self.active_requests.append((interest, begin, length))
                self.connection.send_request(interest, begin, length)
                if not self.downloader.storage.do_I_have_requests(interest):
                    # Piece fully requested; others may lose interest in it.
                    lost_interests.append(interest)
                    break
        if not self.active_requests and self.interested:
            self.interested = False
            self.connection.send_not_interested()
        if lost_interests:
            # Re-evaluate interest on idle peers whose only known reason to
            # be interested may have been one of the fully-requested pieces.
            for d in self.downloader.downloads:
                if d.active_requests or not d.interested:
                    continue
                if d.example_interest is not None and self.downloader.storage.do_I_have_requests(d.example_interest):
                    continue
                for lost in lost_interests:
                    if d.have[lost]:
                        break
                else:
                    continue
                interest = self.downloader.picker.next(d._want,
                                                       d.have.numfalse == 0)
                if interest is None:
                    d.interested = False
                    d.connection.send_not_interested()
                else:
                    d.example_interest = interest
        if self.downloader.storage.endgame:
            # Entering endgame: snapshot every outstanding request so all
            # peers can race for the remaining chunks.
            self.downloader.all_requests = []
            for d in self.downloader.downloads:
                self.downloader.all_requests.extend(d.active_requests)
            for d in self.downloader.downloads:
                d.fix_download_endgame()

    def fix_download_endgame(self):
        """Endgame: request any globally-outstanding chunks this peer can
        serve, keeping the interested flag consistent."""
        want = [a for a in self.downloader.all_requests
                if self.have[a[0]] and a not in self.active_requests]
        if self.interested and not self.active_requests and not want:
            self.interested = False
            self.connection.send_not_interested()
            return
        if not self.interested and want:
            self.interested = True
            self.connection.send_interested()
        if self.choked or len(self.active_requests) >= self._backlog():
            return
        shuffle(want)
        del want[self.backlog - len(self.active_requests):]
        self.active_requests.extend(want)
        for piece, begin, length in want:
            self.connection.send_request(piece, begin, length)

    def got_have(self, index):
        """Handle a HAVE message: update bitfield and picker, then request
        or declare interest as appropriate."""
        if self.have[index]:
            return
        # The final piece may be shorter than piece_size; account exactly.
        if index == self.downloader.numpieces-1:
            self.peermeasure.update_rate(self.downloader.storage.total_length-
                (self.downloader.numpieces-1)*self.downloader.storage.piece_size)
        else:
            self.peermeasure.update_rate(self.downloader.storage.piece_size)
        self.have[index] = True
        self.downloader.picker.got_have(index)
        if self.downloader.picker.am_I_complete() and self.have.numfalse == 0:
            # Both sides are seeds: nothing to exchange.
            self.connection.close()
            return
        if self.downloader.storage.endgame:
            self.fix_download_endgame()
        elif self.downloader.storage.do_I_have_requests(index):
            if not self.choked:
                self._request_more([index])
            else:
                if not self.interested:
                    self.interested = True
                    self.connection.send_interested()

    def got_have_bitfield(self, have):
        """Handle the initial bitfield: register HAVEs with the picker and
        declare interest if the peer has anything we want."""
        self.have = have
        for i in xrange(len(self.have)):
            if self.have[i]:
                self.downloader.picker.got_have(i)
        if self.downloader.picker.am_I_complete() and self.have.numfalse == 0:
            self.connection.close()
            return
        if self.downloader.storage.endgame:
            for piece, begin, length in self.downloader.all_requests:
                if self.have[piece]:
                    self.interested = True
                    self.connection.send_interested()
                    return
        for i in xrange(len(self.have)):
            if self.have[i] and self.downloader.storage.do_I_have_requests(i):
                self.interested = True
                self.connection.send_interested()
                return

    def get_rate(self):
        return self.measure.get_rate()

    def is_snubbed(self):
        """True if no piece has arrived within the snub timeout."""
        return time() - self.last > self.downloader.snub_time
class DownloadPeer:
    """Download-side protocol state for one peer connection (variant with
    pause/super-seeding support and clock()-based snub detection)."""

    def __init__(self, downloader, connection):
        self.downloader = downloader
        self.connection = connection
        # Protocol state: connections start choked and uninterested.
        self.choked = True
        self.interested = False
        # Outstanding (index, begin, length) requests, in request order.
        self.active_requests = []
        self.measure = Measure(downloader.max_rate_period)
        self.peermeasure = Measure(downloader.max_rate_period)
        self.have = Bitfield(downloader.numpieces)
        # last: last piece arrival; last2: last unchoke/interest activity.
        # Start far in the past so a fresh connection is not "snubbed".
        self.last = -1000
        self.last2 = -1000
        self.example_interest = None
        # self.backlog = 2
        self.backlog = 8
        self.ip = connection.get_ip()
        self.guard = BadDataGuard(self)

    def _backlog(self, just_unchoked):
        """Recompute and cache the pipeline depth from the current rate.

        `just_unchoked` is accepted for interface compatibility; the active
        formula below no longer uses it (see the retained alternative).
        """
        # self.backlog = min(
        #     2+int(4*self.measure.get_rate()/self.downloader.chunksize),
        #     (2*just_unchoked)+self.downloader.queue_limit() )
        # if self.backlog > 50:
        #     self.backlog = max(50, self.backlog * 0.075)
        # return self.backlog
        self.backlog = 4 + int(8 * self.measure.get_rate() /
                               self.downloader.chunksize)
        return self.backlog

    def disconnected(self):
        """Withdraw this peer's HAVEs from the picker, remember disconnected
        seeds during endgame, and release outstanding requests."""
        self.downloader.lost_peer(self)
        if self.have.complete():
            self.downloader.picker.lost_seed()
        else:
            for i in xrange(len(self.have)):
                if self.have[i]:
                    self.downloader.picker.lost_have(i)
        if self.have.complete() and self.downloader.storage.is_endgame():
            self.downloader.add_disconnected_seed(
                self.connection.get_readable_id())
        self._letgo()
        self.guard.download = None

    def _letgo(self):
        """Release all outstanding requests and offer the affected pieces to
        other peers (skipped while paused)."""
        if self.downloader.queued_out.has_key(self):
            del self.downloader.queued_out[self]
        if not self.active_requests:
            return
        if self.downloader.endgamemode:
            # Endgame: chunks are multiply requested; nothing to hand off.
            self.active_requests = []
            return
        # Use a dict to deduplicate the affected piece indices.
        lost = {}
        for index, begin, length in self.active_requests:
            self.downloader.storage.request_lost(index, begin, length)
            lost[index] = 1
        lost = lost.keys()
        self.active_requests = []
        if self.downloader.paused:
            return
        ds = [d for d in self.downloader.downloads if not d.choked]
        shuffle(ds)
        for d in ds:
            d._request_more()
        for d in self.downloader.downloads:
            if d.choked and not d.interested:
                for l in lost:
                    if d.have[l] and self.downloader.storage.do_I_have_requests(
                            l):
                        d.send_interested()
                        break

    def got_choke(self):
        if not self.choked:
            self.choked = True
            self._letgo()

    def got_unchoke(self):
        if self.choked:
            self.choked = False
            if self.interested:
                self._request_more(new_unchoke=True)
            self.last2 = clock()

    def is_choked(self):
        return self.choked

    def is_interested(self):
        return self.interested

    def send_interested(self):
        if not self.interested:
            self.interested = True
            self.connection.send_interested()
            if not self.choked:
                self.last2 = clock()

    def send_not_interested(self):
        if self.interested:
            self.interested = False
            self.connection.send_not_interested()

    def got_piece(self, index, begin, piece):
        """Handle an arriving block.

        Returns True when the block completes a whole piece (caller uses
        this to decide when to send HAVE), False otherwise.
        """
        length = len(piece)
        try:
            self.active_requests.remove((index, begin, length))
        except ValueError:
            # Block we did not request (e.g. already cancelled): discard.
            self.downloader.discarded += length
            return False
        if self.downloader.endgamemode:
            self.downloader.all_requests.remove((index, begin, length))
        self.last = clock()
        self.last2 = clock()
        self.measure.update_rate(length)
        self.downloader.measurefunc(length)
        if not self.downloader.storage.piece_came_in(index, begin, piece,
                                                     self.guard):
            self.downloader.piece_flunked(index)
            return False
        if self.downloader.storage.do_I_have(index):
            self.downloader.picker.complete(index)
        if self.downloader.endgamemode:
            # Cancel the duplicate request on every other interested peer.
            for d in self.downloader.downloads:
                if d is not self:
                    if d.interested:
                        if d.choked:
                            assert not d.active_requests
                            d.fix_download_endgame()
                        else:
                            try:
                                d.active_requests.remove(
                                    (index, begin, length))
                            except ValueError:
                                continue
                            d.connection.send_cancel(index, begin, length)
                            d.fix_download_endgame()
                    else:
                        assert not d.active_requests
        self._request_more()
        self.downloader.check_complete(index)
        return self.downloader.storage.do_I_have(index)

    def _request_more(self, new_unchoke=False):
        """Fill the request pipeline up to the backlog; switch to endgame
        handling when appropriate."""
        assert not self.choked
        if self.downloader.endgamemode:
            self.fix_download_endgame(new_unchoke)
            return
        if self.downloader.paused:
            return
        if len(self.active_requests) >= self._backlog(new_unchoke):
            if not (self.active_requests or self.backlog):
                # Nothing pending and zero backlog: park in the queued-out
                # set so the downloader revisits this peer later.
                self.downloader.queued_out[self] = 1
            return
        lost_interests = []
        while len(self.active_requests) < self.backlog:
            interest = self.downloader.picker.next(
                self.have,
                self.downloader.storage.do_I_have_requests,
                self.downloader.too_many_partials())
            if interest is None:
                break
            self.example_interest = interest
            self.send_interested()
            loop = True
            # Queue chunks of the chosen piece until the backlog is full or
            # the piece is fully requested.
            while len(self.active_requests) < self.backlog and loop:
                begin, length = self.downloader.storage.new_request(interest)
                self.downloader.picker.requested(interest)
                self.active_requests.append((interest, begin, length))
                self.connection.send_request(interest, begin, length)
                self.downloader.chunk_requested(length)
                if not self.downloader.storage.do_I_have_requests(interest):
                    loop = False
                    lost_interests.append(interest)
        if not self.active_requests:
            self.send_not_interested()
        if lost_interests:
            # Re-evaluate interest on idle peers whose only known reason to
            # be interested may have been one of the fully-requested pieces.
            for d in self.downloader.downloads:
                if d.active_requests or not d.interested:
                    continue
                if d.example_interest is not None and self.downloader.storage.do_I_have_requests(
                        d.example_interest):
                    continue
                for lost in lost_interests:
                    if d.have[lost]:
                        break
                else:
                    continue
                interest = self.downloader.picker.next(
                    d.have,
                    self.downloader.storage.do_I_have_requests,
                    self.downloader.too_many_partials())
                if interest is None:
                    d.send_not_interested()
                else:
                    d.example_interest = interest
        if self.downloader.storage.is_endgame():
            self.downloader.start_endgame()

    def fix_download_endgame(self, new_unchoke=False):
        """Endgame: request any globally-outstanding chunks this peer can
        serve, keeping the interested flag consistent."""
        if self.downloader.paused:
            return
        if len(self.active_requests) >= self._backlog(new_unchoke):
            if not (self.active_requests or self.backlog) and not self.choked:
                self.downloader.queued_out[self] = 1
            return
        want = [
            a for a in self.downloader.all_requests
            if self.have[a[0]] and a not in self.active_requests
        ]
        if not (self.active_requests or want):
            self.send_not_interested()
            return
        if want:
            self.send_interested()
        if self.choked:
            return
        shuffle(want)
        del want[self.backlog - len(self.active_requests):]
        self.active_requests.extend(want)
        for piece, begin, length in want:
            self.connection.send_request(piece, begin, length)
            self.downloader.chunk_requested(length)

    def got_have(self, index):
        """Handle a HAVE message.  Returns True when the peer is now a seed."""
        # The final piece may be shorter than piece_length; account exactly.
        if index == self.downloader.numpieces - 1:
            self.downloader.totalmeasure.update_rate(
                self.downloader.storage.total_length -
                (self.downloader.numpieces - 1) *
                self.downloader.storage.piece_length)
            self.peermeasure.update_rate(self.downloader.storage.total_length -
                (self.downloader.numpieces - 1) *
                self.downloader.storage.piece_length)
        else:
            self.downloader.totalmeasure.update_rate(
                self.downloader.storage.piece_length)
            self.peermeasure.update_rate(self.downloader.storage.piece_length)
        if not self.have[index]:
            self.have[index] = True
            self.downloader.picker.got_have(index)
            if self.have.complete():
                self.downloader.picker.became_seed()
                if self.downloader.storage.am_I_complete():
                    # Seed-to-seed: remember it and drop the connection.
                    self.downloader.add_disconnected_seed(
                        self.connection.get_readable_id())
                    self.connection.close()
            elif self.downloader.endgamemode:
                self.fix_download_endgame()
            elif (not self.downloader.paused
                  and not self.downloader.picker.is_blocked(index)
                  and self.downloader.storage.do_I_have_requests(index)):
                if not self.choked:
                    self._request_more()
                else:
                    self.send_interested()
        return self.have.complete()

    def _check_interests(self):
        """Declare interest if the peer has any requestable, unblocked piece."""
        if self.interested or self.downloader.paused:
            return
        for i in xrange(len(self.have)):
            if (self.have[i] and not self.downloader.picker.is_blocked(i)
                    and (self.downloader.endgamemode
                         or self.downloader.storage.do_I_have_requests(i))):
                self.send_interested()
                return

    def got_have_bitfield(self, have):
        """Handle the initial bitfield.  Returns True when the peer is a seed,
        False when the seed-to-seed connection was closed."""
        if self.downloader.storage.am_I_complete() and have.complete():
            if self.downloader.super_seeding:
                self.connection.send_bitfield(
                    have.tostring()) # be nice, show you're a seed too
            self.connection.close()
            self.downloader.add_disconnected_seed(
                self.connection.get_readable_id())
            return False
        self.have = have
        if have.complete():
            self.downloader.picker.got_seed()
        else:
            # NOTE(review): `temp = 4` looks like a leftover; it is never
            # read — confirm and remove upstream.
            temp = 4
            for i in xrange(len(have)):
                if have[i]:
                    self.downloader.picker.got_have(i)
        if self.downloader.endgamemode and not self.downloader.paused:
            for piece, begin, length in self.downloader.all_requests:
                if self.have[piece]:
                    self.send_interested()
                    break
        else:
            self._check_interests()
        return have.complete()

    def get_rate(self):
        return self.measure.get_rate()

    def get_peer_completion(self):
        """Fraction of the torrent the peer has, in [0.0, 1.0]."""
        if len(self.have) > 0:
            return float(len(self.have) - self.have.numfalse) / float(
                len(self.have))
        else:
            return 1.0

    def is_snubbed(self):
        """True if no piece has arrived within the snub timeout.  Also
        cancels outstanding requests on a silently-stalled unchoked peer."""
        if (self.interested and not self.choked
                and clock() - self.last2 > self.downloader.snub_time):
            for index, begin, length in self.active_requests:
                self.connection.send_cancel(index, begin, length)
            self.got_choke() # treat it just like a choke
        return clock() - self.last > self.downloader.snub_time
class SingleDownload(object):
    """Download-side protocol state for one peer connection; this variant
    additionally emits measurement records through a log collector."""

    def __init__(self, downloader, connection, logcollector):
        self.downloader = downloader
        self.connection = connection
        # Protocol state: connections start choked and uninterested.
        self.choked = True
        self.interested = False
        # Outstanding (index, begin, length) requests, in request order.
        self.active_requests = []
        self.measure = Measure(downloader.config['max_rate_period'])
        self.peermeasure = Measure(max(downloader.storage.piece_size / 10000, 20))
        # initialize a bitfield of length 'numpieces'
        self.have = Bitfield(downloader.numpieces)
        self.last = 0
        self.example_interest = None
        self.backlog = 2
        self.guard = BadDataGuard(self)
        self.logcollector=logcollector

    def _backlog(self):
        """Recompute and cache the pipeline depth from the current rate,
        capped so a fast peer cannot monopolize the queue."""
        backlog = 2 + int(4 * self.measure.get_rate() / self.downloader.chunksize)
        if backlog > 50:
            backlog = max(50, int(.075 * backlog))
        self.backlog = backlog
        return backlog

    def disconnected(self):
        """Withdraw this peer's HAVEs from the picker and release requests."""
        self.downloader.lost_peer(self)
        for i in xrange(len(self.have)):
            if self.have[i]:
                self.downloader.picker.lost_have(i)
        self._letgo()
        self.guard.download = None

    def _letgo(self):
        """Release all outstanding requests and offer the affected pieces to
        other peers (unchoked first, then wake choked uninterested ones)."""
        if not self.active_requests:
            return
        if self.downloader.storage.endgame:
            self.active_requests = []
            return
        lost = []
        for index, begin, length in self.active_requests:
            self.downloader.storage.request_lost(index, begin, length)
            if index not in lost:
                lost.append(index)
        self.active_requests = []
        ds = [d for d in self.downloader.downloads if not d.choked]
        shuffle(ds)
        for d in ds:
            d._request_more(lost)
        for d in self.downloader.downloads:
            if d.choked and not d.interested:
                for l in lost:
                    if d.have[l] and self.downloader.storage.do_I_have_requests(l):
                        d.interested = True
                        d.connection.send_interested()
                        break

    def got_choke(self):
        if not self.choked:
            self.logcollector.log(None, 'R C ' + str(self.connection.ip))
            self.choked = True
            self._letgo()

    def got_unchoke(self):
        if self.choked:
            self.logcollector.log(None, 'R UC ' + str(self.connection.ip))
            self.choked = False
            if self.interested:
                self._request_more()

    #this method returns True if the block received completes a piece,
    #false, otherwise. The result of this method is used to decide when
    #to send the HAVE message.
    def got_piece(self, index, begin, piece):
        try:
            #the received block was not requested if it is not in active_requests.
            #It is discarded
            self.active_requests.remove((index, begin, len(piece)))
        except ValueError:
            self.downloader.discarded_bytes += len(piece)
            return False
        #count all the received packet that are requested.
        self.logcollector.log(None, 'R P ' + str(self.connection.ip) + ' i '
                              + str(index) + ' b ' + str(begin))
        if self.downloader.storage.endgame:
            self.downloader.all_requests.remove((index, begin, len(piece)))
        self.last = bttime()
        self.measure.update_rate(len(piece))
        self.downloader.measurefunc(len(piece))
        self.downloader.downmeasure.update_rate(len(piece))
        if not self.downloader.storage.piece_came_in(index, begin, piece,
                                                     self.guard):
            # Piece failed its hash check: its chunks became requestable again.
            if self.downloader.storage.endgame:
                while self.downloader.storage.do_I_have_requests(index):
                    nb, nl = self.downloader.storage.new_request(index)
                    self.downloader.all_requests.append((index, nb, nl))
                for d in self.downloader.downloads:
                    d.fix_download_endgame()
                return False
            ds = [d for d in self.downloader.downloads if not d.choked]
            shuffle(ds)
            for d in ds:
                d._request_more([index])
            return False
        if self.downloader.storage.do_I_have(index):
            self.downloader.picker.complete(index)
        if self.downloader.storage.endgame:
            # Cancel the duplicate request on every other interested peer.
            for d in self.downloader.downloads:
                if d is not self and d.interested:
                    if d.choked:
                        d.fix_download_endgame()
                    else:
                        try:
                            d.active_requests.remove((index, begin, len(piece)))
                        except ValueError:
                            continue
                        d.connection.send_cancel(index, begin, len(piece))
                        d.fix_download_endgame()
        self._request_more()
        if self.downloader.picker.am_I_complete():
            # Became a seed: drop connections to other seeds.
            for d in [i for i in self.downloader.downloads
                      if i.have.numfalse == 0]:
                self.logcollector.log(None, 'CON C ' + str(d.connection.ip)
                                      + ' S')
                d.connection.close()
        return self.downloader.storage.do_I_have(index)

    #return true if the remote peer has the piece and
    #there are blocks that can be requested for this piece.
    def _want(self, index):
        return self.have[index] and self.downloader.storage.do_I_have_requests(index)

    #indices is used for the strict priority. When a connection is lost, the pending pieces
    #are requested. This is done in _letgo.
    def _request_more(self, indices = None):
        assert not self.choked
        if len(self.active_requests) >= self._backlog():
            return
        if self.downloader.storage.endgame:
            self.fix_download_endgame()
            return
        lost_interests = []
        while len(self.active_requests) < self.backlog:
            if indices is None:
                interest = self.downloader.picker.next(self._want,
                                                       self.have.numfalse == 0)
            else:
                interest = None
                for i in indices:
                    if self.have[i] and self.downloader.storage.do_I_have_requests(i):
                        interest = i
                        break
            if interest is None:
                break
            if not self.interested:
                self.interested = True
                self.connection.send_interested()
            self.example_interest = interest
            self.downloader.picker.requested(interest, self.have.numfalse == 0)
            # Queue several chunks of the chosen piece per picker decision.
            while len(self.active_requests) < (self.backlog-2) * 5 + 2:
                begin, length = self.downloader.storage.new_request(interest)
                self.active_requests.append((interest, begin, length))
                self.connection.send_request(interest, begin, length)
                if not self.downloader.storage.do_I_have_requests(interest):
                    lost_interests.append(interest)
                    break
        if not self.active_requests and self.interested:
            self.interested = False
            self.connection.send_not_interested()
        if lost_interests:
            # Re-evaluate interest on idle peers whose only known reason to
            # be interested may have been one of the fully-requested pieces.
            for d in self.downloader.downloads:
                if d.active_requests or not d.interested:
                    continue
                if d.example_interest is not None and self.downloader.storage.do_I_have_requests(d.example_interest):
                    continue
                for lost in lost_interests:
                    if d.have[lost]:
                        break
                else:
                    continue
                interest = self.downloader.picker.next(d._want,
                                                       d.have.numfalse == 0)
                if interest is None:
                    d.interested = False
                    d.connection.send_not_interested()
                else:
                    d.example_interest = interest
        if self.downloader.storage.endgame:
            # Entering endgame: snapshot every outstanding request so all
            # peers can race for the remaining chunks.
            self.downloader.all_requests = []
            for d in self.downloader.downloads:
                self.downloader.all_requests.extend(d.active_requests)
            for d in self.downloader.downloads:
                d.fix_download_endgame()

    def fix_download_endgame(self):
        """Endgame: request any globally-outstanding chunks this peer can
        serve, keeping the interested flag consistent."""
        want = [a for a in self.downloader.all_requests
                if self.have[a[0]] and a not in self.active_requests]
        if self.interested and not self.active_requests and not want:
            self.interested = False
            self.connection.send_not_interested()
            return
        if not self.interested and want:
            self.interested = True
            self.connection.send_interested()
        if self.choked or len(self.active_requests) >= self._backlog():
            return
        shuffle(want)
        del want[self.backlog - len(self.active_requests):]
        self.active_requests.extend(want)
        for piece, begin, length in want:
            self.connection.send_request(piece, begin, length)

    def got_have(self, index):
        if self.have[index]:
            return
        self.logcollector.log(None, 'R H ' + str(self.connection.ip) + ' i '
                              + str(index))
        #Update the download rate. For each piece received, it uses the piece size
        #to compute the download rate over the last period of time.
        #As the last piece can have a smaller size than piece_size,
        #the exact size of the last piece is considered.
        if index == self.downloader.numpieces-1:
            self.peermeasure.update_rate(self.downloader.storage.total_length-
                (self.downloader.numpieces-1)*self.downloader.storage.piece_size)
        else:
            self.peermeasure.update_rate(self.downloader.storage.piece_size)
        self.have[index] = True
        self.downloader.picker.got_have(index)
        if self.downloader.picker.am_I_complete() and self.have.numfalse == 0:
            # Both sides are seeds: nothing to exchange.
            self.logcollector.log(None, 'CON C ' + str(self.connection.ip)
                                  + ' S')
            self.connection.close()
            return
        if self.downloader.storage.endgame:
            self.fix_download_endgame()
        elif self.downloader.storage.do_I_have_requests(index):
            if not self.choked:
                self._request_more([index])
            else:
                if not self.interested:
                    self.interested = True
                    self.connection.send_interested()

    #initial bitfield
    def got_have_bitfield(self, have):
        if have.numfalse == 0:
            self.logcollector.log(None, 'P ' + str(self.connection.ip) + ' S')
        if self.downloader.picker.am_I_complete() and have.numfalse == 0:
            # Seed-to-seed connection: close it.
            self.logcollector.log(None, 'CON C ' + str(self.connection.ip)
                                  + ' S')
            self.connection.close()
            return
        self.have = have
        #the string bitfield is just used for logging.
        bitfield=''
        for i in xrange(len(self.have)):
            if self.have[i]:
                bitfield += str(i) + ' '
                self.downloader.picker.got_have(i)
        self.logcollector.log(None, 'R BF ' + str(self.connection.ip) + ' '
                              + bitfield)
        #receive bitfield while in endgame mode. In this case, it sends INTERESTED
        #to the remote peer as soon as the local peer does not have
        #at least one piece advertised by the remote peer.
        if self.downloader.storage.endgame:
            for piece, begin, length in self.downloader.all_requests:
                if self.have[piece]:
                    self.interested = True
                    self.connection.send_interested()
                    return
        for i in xrange(len(self.have)):
            if self.have[i] and self.downloader.storage.do_I_have_requests(i):
                self.interested = True
                self.connection.send_interested()
                return

    def get_rate(self):
        return self.measure.get_rate()

    #self.last is the time since the last piece was received.
#once a peer is snubbed, the only way to change this state is to #be optimistically unchoked. Indeed, to be unsnubbed, self.last #must be updated. However, it is only updated in got_piece(). def is_snubbed(self): return bttime() - self.last > self.downloader.snub_time
class DownloadPeer:
    """Download side of one peer connection: tracks the peer's bitfield,
    choke/interest state, and the pipeline of outstanding block requests."""

    def __init__(self, downloader, connection):
        self.downloader = downloader
        self.connection = connection
        self.choked = True
        self.interested = False
        # Outstanding (index, begin, length) block requests sent to this peer.
        self.active_requests = []
        self.measure = Measure(downloader.max_rate_period)
        self.peermeasure = Measure(downloader.max_rate_period)
        self.have = Bitfield(downloader.numpieces)
        # Timestamps (see is_snubbed); initialized far in the past.
        self.last = -1000
        self.last2 = -1000
        self.example_interest = None
        # self.backlog = 2
        self.backlog = 8
        self.ip = connection.get_ip()
        self.guard = BadDataGuard(self)

    def _backlog(self, just_unchoked):
        """Recompute and return the request-pipeline depth, scaled by the
        current download rate from this peer."""
        # NOTE(review): earlier rate-capped formula kept for reference:
        # self.backlog = min(
        #     2+int(4*self.measure.get_rate()/self.downloader.chunksize),
        #     (2*just_unchoked)+self.downloader.queue_limit() )
        # if self.backlog > 50:
        #     self.backlog = max(50, self.backlog * 0.075)
        # return self.backlog
        self.backlog = 4+int(8*self.measure.get_rate()/self.downloader.chunksize)
        return self.backlog

    def disconnected(self):
        """Tear down peer state and return its pending requests to the pool."""
        self.downloader.lost_peer(self)
        if self.have.complete():
            self.downloader.picker.lost_seed()
        else:
            for i in xrange(len(self.have)):
                if self.have[i]:
                    self.downloader.picker.lost_have(i)
        if self.have.complete() and self.downloader.storage.is_endgame():
            self.downloader.add_disconnected_seed(self.connection.get_readable_id())
        self._letgo()
        self.guard.download = None

    def _letgo(self):
        """Release this peer's outstanding requests and hand the affected
        pieces to other connections."""
        if self.downloader.queued_out.has_key(self):
            del self.downloader.queued_out[self]
        if not self.active_requests:
            return
        if self.downloader.endgamemode:
            # In endgame every request is duplicated elsewhere; just drop ours.
            self.active_requests = []
            return
        lost = {}  # used as a set of piece indices
        for index, begin, length in self.active_requests:
            self.downloader.storage.request_lost(index, begin, length)
            lost[index] = 1
        lost = lost.keys()
        self.active_requests = []
        if self.downloader.paused:
            return
        # Let unchoked peers pick up the freed blocks, in random order.
        ds = [d for d in self.downloader.downloads if not d.choked]
        shuffle(ds)
        for d in ds:
            d._request_more()
        # Choked, uninterested peers may now have something worth wanting.
        for d in self.downloader.downloads:
            if d.choked and not d.interested:
                for l in lost:
                    if d.have[l] and self.downloader.storage.do_I_have_requests(l):
                        d.send_interested()
                        break

    def got_choke(self):
        if not self.choked:
            self.choked = True
            self._letgo()

    def got_unchoke(self):
        if self.choked:
            self.choked = False
            if self.interested:
                self._request_more(new_unchoke = True)
            self.last2 = clock()

    def is_choked(self):
        return self.choked

    def is_interested(self):
        return self.interested

    def send_interested(self):
        if not self.interested:
            self.interested = True
            self.connection.send_interested()
            if not self.choked:
                self.last2 = clock()

    def send_not_interested(self):
        if self.interested:
            self.interested = False
            self.connection.send_not_interested()

    def got_piece(self, index, begin, piece):
        """Handle an incoming block.

        Returns True iff the block completed a piece we now fully have.
        Unsolicited blocks are counted as discarded and rejected.
        """
        length = len(piece)
        try:
            self.active_requests.remove((index, begin, length))
        except ValueError:
            # We never asked for this block (or already cancelled it).
            self.downloader.discarded += length
            return False
        if self.downloader.endgamemode:
            self.downloader.all_requests.remove((index, begin, length))
        self.last = clock()
        self.last2 = clock()
        self.measure.update_rate(length)
        self.downloader.measurefunc(length)
        if not self.downloader.storage.piece_came_in(index, begin, piece, self.guard):
            # Block failed storage/hash checks.
            self.downloader.piece_flunked(index)
            return False
        if self.downloader.storage.do_I_have(index):
            self.downloader.picker.complete(index)
        if self.downloader.endgamemode:
            # Cancel the duplicate requests for this block on every other peer.
            for d in self.downloader.downloads:
                if d is not self:
                    if d.interested:
                        if d.choked:
                            assert not d.active_requests
                            d.fix_download_endgame()
                        else:
                            try:
                                d.active_requests.remove((index, begin, length))
                            except ValueError:
                                continue
                            d.connection.send_cancel(index, begin, length)
                            d.fix_download_endgame()
                    else:
                        assert not d.active_requests
        self._request_more()
        self.downloader.check_complete(index)
        return self.downloader.storage.do_I_have(index)

    def _request_more(self, new_unchoke = False):
        """Top up this peer's request pipeline; must be called unchoked."""
        assert not self.choked
        if self.downloader.endgamemode:
            self.fix_download_endgame(new_unchoke)
            return
        if self.downloader.paused:
            return
        if len(self.active_requests) >= self._backlog(new_unchoke):
            if not (self.active_requests or self.backlog):
                self.downloader.queued_out[self] = 1
            return
        # Pieces that ran out of requestable blocks while queuing.
        lost_interests = []
        while len(self.active_requests) < self.backlog:
            interest = self.downloader.picker.next(self.have,
                                self.downloader.storage.do_I_have_requests,
                                self.downloader.too_many_partials())
            if interest is None:
                break
            self.example_interest = interest
            self.send_interested()
            loop = True
            while len(self.active_requests) < self.backlog and loop:
                begin, length = self.downloader.storage.new_request(interest)
                self.downloader.picker.requested(interest)
                self.active_requests.append((interest, begin, length))
                self.connection.send_request(interest, begin, length)
                self.downloader.chunk_requested(length)
                if not self.downloader.storage.do_I_have_requests(interest):
                    loop = False
                    lost_interests.append(interest)
        if not self.active_requests:
            self.send_not_interested()
        if lost_interests:
            # Re-evaluate idle interested peers whose only interest may have
            # been one of the exhausted pieces.
            for d in self.downloader.downloads:
                if d.active_requests or not d.interested:
                    continue
                if d.example_interest is not None and self.downloader.storage.do_I_have_requests(d.example_interest):
                    continue
                for lost in lost_interests:
                    if d.have[lost]:
                        break
                else:
                    continue
                interest = self.downloader.picker.next(d.have,
                                    self.downloader.storage.do_I_have_requests,
                                    self.downloader.too_many_partials())
                if interest is None:
                    d.send_not_interested()
                else:
                    d.example_interest = interest
        if self.downloader.storage.is_endgame():
            self.downloader.start_endgame()

    def fix_download_endgame(self, new_unchoke = False):
        """Endgame: mirror any still-outstanding blocks this peer can serve."""
        if self.downloader.paused:
            return
        if len(self.active_requests) >= self._backlog(new_unchoke):
            if not (self.active_requests or self.backlog) and not self.choked:
                self.downloader.queued_out[self] = 1
            return
        want = [a for a in self.downloader.all_requests if self.have[a[0]] and a not in self.active_requests]
        if not (self.active_requests or want):
            self.send_not_interested()
            return
        if want:
            self.send_interested()
        if self.choked:
            return
        shuffle(want)
        del want[self.backlog - len(self.active_requests):]
        self.active_requests.extend(want)
        for piece, begin, length in want:
            self.connection.send_request(piece, begin, length)
            self.downloader.chunk_requested(length)

    def got_have(self, index):
        """Handle a HAVE message; returns True if the peer is now a seed."""
        # Rate bookkeeping uses the real size of the (possibly short) last piece.
        if index == self.downloader.numpieces-1:
            self.downloader.totalmeasure.update_rate(self.downloader.storage.total_length-(self.downloader.numpieces-1)*self.downloader.storage.piece_length)
            self.peermeasure.update_rate(self.downloader.storage.total_length-(self.downloader.numpieces-1)*self.downloader.storage.piece_length)
        else:
            self.downloader.totalmeasure.update_rate(self.downloader.storage.piece_length)
            self.peermeasure.update_rate(self.downloader.storage.piece_length)
        if not self.have[index]:
            self.have[index] = True
            self.downloader.picker.got_have(index)
            if self.have.complete():
                self.downloader.picker.became_seed()
                if self.downloader.storage.am_I_complete():
                    # Seed talking to a seed: disconnect.
                    self.downloader.add_disconnected_seed(self.connection.get_readable_id())
                    self.connection.close()
            elif self.downloader.endgamemode:
                self.fix_download_endgame()
            elif ( not self.downloader.paused
                   and not self.downloader.picker.is_blocked(index)
                   and self.downloader.storage.do_I_have_requests(index) ):
                if not self.choked:
                    self._request_more()
                else:
                    self.send_interested()
        return self.have.complete()

    def _check_interests(self):
        # Become interested if the peer has any non-blocked piece we can use.
        if self.interested or self.downloader.paused:
            return
        for i in xrange(len(self.have)):
            if ( self.have[i] and not self.downloader.picker.is_blocked(i)
                 and ( self.downloader.endgamemode
                       or self.downloader.storage.do_I_have_requests(i) ) ):
                self.send_interested()
                return

    def got_have_bitfield(self, have):
        """Handle the peer's initial BITFIELD; returns True if it is a seed."""
        if self.downloader.storage.am_I_complete() and have.complete():
            if self.downloader.super_seeding:
                self.connection.send_bitfield(have.tostring()) # be nice, show you're a seed too
            self.connection.close()
            self.downloader.add_disconnected_seed(self.connection.get_readable_id())
            return False
        self.have = have
        if have.complete():
            self.downloader.picker.got_seed()
        else:
            temp = 4  # NOTE(review): unused local; looks like leftover debris.
            for i in xrange(len(have)):
                if have[i]:
                    self.downloader.picker.got_have(i)
        # NOTE(review): indentation reconstructed -- assumes the else below
        # pairs with this if, matching the non-endgame interest check in
        # similar classes in this file; confirm against upstream.
        if self.downloader.endgamemode and not self.downloader.paused:
            for piece, begin, length in self.downloader.all_requests:
                if self.have[piece]:
                    self.send_interested()
                    break
        else:
            self._check_interests()
        return have.complete()

    def get_rate(self):
        return self.measure.get_rate()

    def get_peer_completion(self):
        # Fraction of pieces the peer advertises; 1.0 for a zero-piece torrent.
        if len(self.have) > 0:
            return float(len(self.have)-self.have.numfalse)/float(len(self.have))
        else:
            return 1.0

    def is_snubbed(self):
        """True when the peer has not delivered a block within snub_time;
        also pre-emptively cancels and self-chokes a stalled pipeline."""
        if ( self.interested and not self.choked
             and clock() - self.last2 > self.downloader.snub_time ):
            for index, begin, length in self.active_requests:
                self.connection.send_cancel(index, begin, length)
            self.got_choke()        # treat it just like a choke
        return clock() - self.last > self.downloader.snub_time
class UploadPeer:
    """Upload side of one peer connection, with optional super-seeding and
    buffered piece reads."""

    def __init__(self, connection, ratelimiter, totalup, choker, storage,
                 picker, config):
        self.connection = connection
        self.ratelimiter = ratelimiter
        self.totalup = totalup
        self.choker = choker
        self.storage = storage
        self.picker = picker
        self.config = config
        self.max_slice_length = config['max_slice_length']
        self.choked = True
        # cleared == True means a choke has been fully sent; requests arriving
        # in that window are ignored (see got_request / choke_sent).
        self.cleared = True
        self.interested = False
        self.super_seeding = False
        # Queued (index, begin, length) requests not yet sent.
        self.buffer = []
        self.measure = Measure(config['max_rate_period'],
                               config['upload_rate_fudge'])
        self.was_ever_interested = False
        if storage.get_amount_left() == 0:
            if choker.super_seed:
                self.super_seeding = True   # flag, and don't send bitfield
                self.seed_have_list = []    # set from piecepicker
                self.skipped_count = 0
            else:
                if config['breakup_seed_bitfield']:
                    # Advertise gradually: partial bitfield plus HAVE messages.
                    bitfield, msgs = storage.get_have_list_cloaked()
                    connection.send_bitfield(bitfield)
                    for have in msgs:
                        connection.send_have(have)
                else:
                    connection.send_bitfield(storage.get_have_list())
        else:
            if storage.do_I_have_anything():
                connection.send_bitfield(storage.get_have_list())
        # Whole-piece read cache used when config['buffer_reads'] is on.
        self.piecedl = None
        self.piecebuf = None

    def got_not_interested(self):
        if self.interested:
            self.interested = False
            del self.buffer[:]
            self.piecedl = None
            if self.piecebuf:
                self.piecebuf.release()
            self.piecebuf = None
            self.choker.not_interested(self.connection)

    def got_interested(self):
        if not self.interested:
            self.interested = True
            self.was_ever_interested = True
            self.choker.interested(self.connection)

    def get_upload_chunk(self):
        """Pop the oldest queued request and return (index, begin, data), or
        None when choked/idle; closes the connection on bad reads."""
        if self.choked or not self.buffer:
            return None
        index, begin, length = self.buffer.pop(0)
        if self.config['buffer_reads']:
            if index != self.piecedl:
                # Cache miss: load the whole piece once and slice from it.
                if self.piecebuf:
                    self.piecebuf.release()
                self.piecedl = index
                self.piecebuf = self.storage.get_piece(index, 0, -1)
            piece = None
            if self.piecebuf:
                piece = self.piecebuf[begin:begin + length]
            # fails if storage.get_piece returns None or if out of range
            if not piece or len(piece) != length:
                self.connection.close()
                return None
        else:
            if self.piecebuf:
                self.piecebuf.release()
                self.piecedl = None
            piece = self.storage.get_piece(index, begin, length)
            if piece is None:
                self.connection.close()
                return None
        self.measure.update_rate(len(piece))
        self.totalup.update_rate(len(piece))
        return (index, begin, piece)

    def got_request(self, index, begin, length):
        """Queue a REQUEST; protocol violations close the connection.
        Super-seeding peers may only request pieces we chose for them."""
        if ((self.super_seeding and not index in self.seed_have_list)
                or not self.interested or length > self.max_slice_length):
            self.connection.close()
            return
        if not self.cleared:
            self.buffer.append((index, begin, length))
        if not self.choked and self.connection.next_upload is None:
            self.ratelimiter.queue(self.connection)

    def got_cancel(self, index, begin, length):
        # Cancelling an unknown block is harmless.
        try:
            self.buffer.remove((index, begin, length))
        except ValueError:
            pass

    def choke(self):
        if not self.choked:
            self.choked = True
            self.connection.send_choke()
        self.piecedl = None
        if self.piecebuf:
            self.piecebuf.release()
            self.piecebuf = None

    def choke_sent(self):
        # CHOKE is on the wire: queued requests are void.
        del self.buffer[:]
        self.cleared = True

    def unchoke(self):
        if self.choked:
            self.choked = False
            self.cleared = False
            self.connection.send_unchoke()

    def disconnected(self):
        if self.piecebuf:
            self.piecebuf.release()
            self.piecebuf = None

    def is_choked(self):
        return self.choked

    def is_interested(self):
        return self.interested

    def has_queries(self):
        return not self.choked and len(self.buffer) > 0

    def get_rate(self):
        return self.measure.get_rate()
class Upload(object):
    """Upload side of one peer connection, instrumented for logging and for
    the PFS/EPFS piece-scheduling experiment (per-piece upload and request
    counters shared with the choker)."""

    def __init__(self, connection, ratelimiter, totalup, totalup2, choker,
                 storage, max_slice_length, max_rate_period, logcollector):
        self.connection = connection
        self.ratelimiter = ratelimiter
        self.totalup = totalup
        self.totalup2 = totalup2
        self.choker = choker
        self.storage = storage
        self.max_slice_length = max_slice_length
        self.max_rate_period = max_rate_period
        self.choked = True
        self.unchoke_time = None
        self.interested = False
        #the list buffer contains tuples (index, begin, lenght) for each
        #block requested by the remote peer. A non empty buffer means that
        #there is data to send to the remote peer already requested by the
        #remote peer.
        self.buffer = []
        #PFS begin
        self.config = choker.config
        self.I = {} # I[piece id] = block uploaded count in the piece id
        self.r = {} # r[piece_id] = block requested count in the piece id
        #PFS end
        self.measure = Measure(max_rate_period)
        #send the bittfield of the peer the first time it connects to the peers.
        if storage.do_I_have_anything():
            connection.send_bitfield(storage.get_have_list())
        self.logcollector = logcollector

    def got_not_interested(self):
        if self.interested:
            self.logcollector.log(None, 'R NI ' + str(self.connection.ip))
            self.interested = False
            del self.buffer[:]
            self.choker.not_interested(self.connection)

    def got_interested(self):
        if not self.interested:
            self.logcollector.log(None, 'R I ' + str(self.connection.ip))
            self.interested = True
            self.choker.interested(self.connection)

    def get_upload_chunk(self):
        """Pop the oldest queued request and return (index, begin, data);
        updates PFS upload counters once we are a seed (choker.done())."""
        if not self.buffer:
            return None
        #buffer.pop(0) return the element with index 0 and remove
        #this element from buffer.
        index, begin, length = self.buffer.pop(0)
        #PFS begin
        if self.choker.done():
            # Per-connection and global upload counters for this piece.
            if index in self.I:
                self.I[index] += 1
            else:
                self.I[index] = 1
            if index in self.choker.I:
                self.choker.I[index] += 1
            else:
                self.choker.I[index] = 1
            self.logcollector.log(None, 'PFS ' + str(self.connection.ip) + \
                                  ' theta(' + str(index) + ') ' + str(self.choker.I[index]))
            if index not in self.choker.theta:
                self.choker.theta[index] = 1.0
        #PFS end
        piece = self.storage.get_piece(index, begin, length)
        if piece is None:
            self.logcollector.log(None, 'CON C ' + str(self.connection.ip) + ' E 1')
            self.connection.close()
            return None
        self.measure.update_rate(len(piece))
        self.totalup.update_rate(len(piece))
        self.totalup2.update_rate(len(piece))
        return (index, begin, piece)

    def got_request(self, index, begin, length):
        """Queue a REQUEST; closes the connection on protocol violations
        (peer never interested, or oversized slice)."""
        if not self.interested or length > self.max_slice_length:
            self.logcollector.log(None, 'CON C ' + str(self.connection.ip) + ' E 2')
            self.connection.close()
            return
        self.logcollector.log(None, 'R R ' + str(self.connection.ip) + ' i ' + str(index) + ' b ' + str(begin) + \
                              ' l ' + str(length))
        if not self.connection.choke_sent:
            self.buffer.append((index, begin, length))
            if self.connection.next_upload is None and \
                   self.connection.connection.is_flushed():
                self.ratelimiter.queue(self.connection)
        # EPFS begin
        if self.choker.done():
            # update vector of requests {r1,...}
            self.PFS_update_r(index)
        # EPFS end

    # EPFS step 5: Seed updates his data structure when receiving REQUEST from leechers
    def PFS_update_r(self, index):
        """Record a request for piece `index` in the per-connection and
        shared request-count vectors; no-op (False) under plain BT scheduling."""
        if self.config['scheduling_algorithm'] == 'BT':
            return False
        if self.choker.tm_first_req == 0:
            self.choker.tm_first_req = bttime()
        if index in self.r:
            self.r[index] += 1
        else:
            self.r[index] = 1
        if index in self.choker.r:
            self.choker.r[index] += 1.0
        else:
            self.choker.r[index] = 1.0
        self.logcollector.log(None, 'PFS ' + str(self.connection.ip) + \
                              ' r[' + str(index) + '] ' + str(self.choker.r[index]))
        return True
    # EPFS end

    def got_cancel(self, index, begin, length):
        # Cancelling a block that is not queued is harmless.
        try:
            self.buffer.remove((index, begin, length))
        except ValueError:
            pass

    def choke(self):
        if not self.choked:
            self.choked = True
            self.connection.send_choke()

    def sent_choke(self):
        # CHOKE is on the wire: queued requests are void.
        assert self.choked
        del self.buffer[:]

    def unchoke(self, time):
        if self.choked:
            self.choked = False
            self.unchoke_time = time
            self.connection.send_unchoke()

    def has_queries(self):
        return len(self.buffer) > 0

    def get_rate(self):
        return self.measure.get_rate()
class Upload(object): def __init__(self, connection, ratelimiter, totalup, totalup2, choker, storage, max_slice_length, max_rate_period): self.connection = connection self.ratelimiter = ratelimiter self.totalup = totalup self.totalup2 = totalup2 self.choker = choker self.storage = storage self.max_slice_length = max_slice_length self.max_rate_period = max_rate_period self.choked = True self.unchoke_time = None self.interested = False self.buffer = [] self.measure = Measure(max_rate_period) if storage.do_I_have_anything(): connection.send_bitfield(storage.get_have_list()) def got_not_interested(self): if self.interested: self.interested = False del self.buffer[:] self.choker.not_interested(self.connection) def got_interested(self): if not self.interested: self.interested = True self.choker.interested(self.connection) def get_upload_chunk(self): if not self.buffer: return None index, begin, length = self.buffer.pop(0) if 'BF2-0-0' in self.connection.id: piece = self.storage.get_DF_piece(index, begin, length) else: piece = self.storage.get_piece(index, begin, length) if piece is None: self.connection.close() return None return (index, begin, piece) def update_rate(self, bytes): self.measure.update_rate(bytes) self.totalup.update_rate(bytes) self.totalup2.update_rate(bytes) def got_request(self, index, begin, length): if not self.interested or length > self.max_slice_length: self.connection.close() return if not self.connection.choke_sent: print 'IN reqst: (piece %d[%d:%d] - SN: %d)' % ( index, begin, begin + length, begin / length) self.buffer.append((index, begin, length)) if self.connection.next_upload is None and \ self.connection.connection.is_flushed(): self.ratelimiter.queue(self.connection, self.connection.encoder.context.rlgroup) def got_cancel(self, index, begin, length): try: self.buffer.remove((index, begin, length)) except ValueError: pass def choke(self): if not self.choked: self.choked = True self.connection.send_choke() def sent_choke(self): assert 
self.choked del self.buffer[:] def unchoke(self, time): if self.choked: self.choked = False self.unchoke_time = time self.connection.send_unchoke() def has_queries(self): return len(self.buffer) > 0 def get_rate(self): return self.measure.get_rate()
class Upload(object):
    """Upload over a single connection."""
    def __init__(self, connection, ratelimiter, totalup, choker, storage,
                 max_slice_length, max_rate_period, num_fast, torrent):
        assert isinstance(connection, BitTorrent.Connector.Connection)
        assert isinstance(torrent, BitTorrent.Torrent.Torrent)
        self.connection = connection
        self.ratelimiter = ratelimiter
        self.totalup = totalup
        self.torrent = torrent
        self.choker = choker
        self.num_fast = num_fast
        self.storage = storage
        self.max_slice_length = max_slice_length
        self.max_rate_period = max_rate_period
        self.choked = True
        self.unchoke_time = None
        self.interested = False
        self.buffer = []    # contains piece data about to be sent.
        self.measure = Measure(max_rate_period)
        self.allowed_fast_pieces = []
        if connection.uses_fast_extension:
            # BEP 6: HAVE ALL / HAVE NONE replace the bitfield at the extremes.
            if storage.get_amount_left() == 0:
                connection.send_have_all()
            elif storage.do_I_have_anything():
                connection.send_bitfield(storage.get_have_list())
            else:
                connection.send_have_none()
            self._send_allowed_fast_list()
        elif storage.do_I_have_anything():
            connection.send_bitfield(storage.get_have_list())

    def _send_allowed_fast_list(self):
        """Computes and sends the 'allowed fast' set. """
        self.allowed_fast_pieces = _compute_allowed_fast_list(
            self.torrent.infohash, self.connection.ip, self.num_fast,
            self.storage.get_num_pieces())
        for index in self.allowed_fast_pieces:
            self.connection.send_allowed_fast(index)

    def got_not_interested(self):
        if self.interested:
            self.interested = False
            self.choker.not_interested(self.connection)

    def got_interested(self):
        if not self.interested:
            self.interested = True
            self.choker.interested(self.connection)

    def get_upload_chunk(self, index, begin, length):
        """Asynchronously read one block; returns a Deferred that fires
        with (index, begin, data), or None after closing on failure."""
        df = self.storage.read(index, begin, length)
        def fail(e):
            log( "get_upload_chunk failed", exc_info=e )
            self.connection.close()
            return None
        # NOTE(review): shadows the update_rate method below, and the
        # fail("Piece is None") call passes a string where fail() expects
        # an exception-like object for exc_info -- confirm intent.
        def update_rate(piece):
            # piece is actual data.
            if piece is None:
                return fail("Piece is None")
            return (index, begin, piece)
        df.addCallback(update_rate)
        df.addErrback(fail)
        return df

    def update_rate(self, bytes):
        self.measure.update_rate(bytes)
        self.totalup.update_rate(bytes)

    def got_request(self, index, begin, length):
        """Handle a REQUEST: serve it if unchoked or if the piece is in
        the peer's allowed-fast set; otherwise reject (fast extension)."""
        if not self.interested or length > self.max_slice_length:
            self.connection.close()
            return
        if index in self.allowed_fast_pieces or not self.connection.choke_sent:
            df = self.get_upload_chunk(index, begin, length)
            def got_piece(piece):
                # 3rd elem in tuple is piece data.
                if self.connection.closed or piece is None:
                    return
                index, begin, piece = piece # piece changes from tuple to data.
                if self.choked:
                    # Choked after the request arrived: only allowed-fast
                    # pieces may still be served.
                    if not self.connection.uses_fast_extension:
                        return
                    if index not in self.allowed_fast_pieces:
                        self.connection.send_reject_request(
                            index, begin, len(piece))
                        return
                self.buffer.append(((index, begin, len(piece)), piece))
                if self.connection.next_upload is None and \
                       self.connection.connection.is_flushed():
                    self.ratelimiter.queue(self.connection)
            df.addCallback(got_piece)
        elif self.connection.uses_fast_extension:
            self.connection.send_reject_request( index, begin, length )

    def got_cancel(self, index, begin, length):
        req = (index, begin, length)
        for pos, (r, p) in enumerate(self.buffer):
            if r == req:
                del self.buffer[pos]
                if self.connection.uses_fast_extension:
                    self.connection.send_reject_request(*req)
                break

    def choke(self):
        if not self.choked:
            self.choked = True
            self.connection.send_choke()

    def sent_choke(self):
        assert self.choked
        if self.connection.uses_fast_extension:
            # Keep allowed-fast blocks queued; explicitly reject the rest.
            b2 = []
            for r in self.buffer:
                ((index,begin,length),piecedata) = r
                if index not in self.allowed_fast_pieces:
                    self.connection.send_reject_request( index, begin, length )
                else:
                    b2.append(r)
            self.buffer = b2
        else:
            del self.buffer[:]

    def unchoke(self, time):
        if self.choked:
            self.choked = False
            self.unchoke_time = time
            self.connection.send_unchoke()

    def has_queries(self):
        return len(self.buffer) > 0

    def get_rate(self):
        return self.measure.get_rate()
class UploadPeer:
    """Upload side of one peer connection, with optional super-seeding and
    buffered piece reads.

    NOTE(review): near-duplicate of the UploadPeer class defined earlier in
    this file; in one module the later definition shadows the earlier one.
    """

    def __init__(self, connection, ratelimiter, totalup, choker, storage,
                 picker, config):
        self.connection = connection
        self.ratelimiter = ratelimiter
        self.totalup = totalup
        self.choker = choker
        self.storage = storage
        self.picker = picker
        self.config = config
        self.max_slice_length = config['max_slice_length']
        self.choked = True
        # cleared == True means a choke has been fully sent; requests arriving
        # in that window are ignored (see got_request / choke_sent).
        self.cleared = True
        self.interested = False
        self.super_seeding = False
        # Queued (index, begin, length) requests not yet sent.
        self.buffer = []
        self.measure = Measure(config['max_rate_period'],
                               config['upload_rate_fudge'])
        self.was_ever_interested = False
        if storage.get_amount_left() == 0:
            if choker.super_seed:
                self.super_seeding = True   # flag, and don't send bitfield
                self.seed_have_list = []    # set from piecepicker
                self.skipped_count = 0
            else:
                if config['breakup_seed_bitfield']:
                    # Advertise gradually: partial bitfield plus HAVE messages.
                    bitfield, msgs = storage.get_have_list_cloaked()
                    connection.send_bitfield(bitfield)
                    for have in msgs:
                        connection.send_have(have)
                else:
                    connection.send_bitfield(storage.get_have_list())
        else:
            if storage.do_I_have_anything():
                connection.send_bitfield(storage.get_have_list())
        # Whole-piece read cache used when config['buffer_reads'] is on.
        self.piecedl = None
        self.piecebuf = None

    def got_not_interested(self):
        if self.interested:
            self.interested = False
            del self.buffer[:]
            self.piecedl = None
            if self.piecebuf:
                self.piecebuf.release()
            self.piecebuf = None
            self.choker.not_interested(self.connection)

    def got_interested(self):
        if not self.interested:
            self.interested = True
            self.was_ever_interested = True
            self.choker.interested(self.connection)

    def get_upload_chunk(self):
        """Pop the oldest queued request and return (index, begin, data), or
        None when choked/idle; closes the connection on bad reads."""
        if self.choked or not self.buffer:
            return None
        index, begin, length = self.buffer.pop(0)
        if self.config['buffer_reads']:
            if index != self.piecedl:
                # Cache miss: load the whole piece once and slice from it.
                if self.piecebuf:
                    self.piecebuf.release()
                self.piecedl = index
                self.piecebuf = self.storage.get_piece(index, 0, -1)
            piece = None
            if self.piecebuf:
                piece = self.piecebuf[begin:begin+length]
            # fails if storage.get_piece returns None or if out of range
            if not piece or len(piece) != length:
                self.connection.close()
                return None
        else:
            if self.piecebuf:
                self.piecebuf.release()
                self.piecedl = None
            piece = self.storage.get_piece(index, begin, length)
            if piece is None:
                self.connection.close()
                return None
        self.measure.update_rate(len(piece))
        self.totalup.update_rate(len(piece))
        return (index, begin, piece)

    def got_request(self, index, begin, length):
        """Queue a REQUEST; protocol violations close the connection.
        Super-seeding peers may only request pieces we chose for them."""
        if ( (self.super_seeding and not index in self.seed_have_list)
                 or not self.interested or length > self.max_slice_length ):
            self.connection.close()
            return
        if not self.cleared:
            self.buffer.append((index, begin, length))
        if not self.choked and self.connection.next_upload is None:
            self.ratelimiter.queue(self.connection)

    def got_cancel(self, index, begin, length):
        # Cancelling an unknown block is harmless.
        try:
            self.buffer.remove((index, begin, length))
        except ValueError:
            pass

    def choke(self):
        if not self.choked:
            self.choked = True
            self.connection.send_choke()
        self.piecedl = None
        if self.piecebuf:
            self.piecebuf.release()
            self.piecebuf = None

    def choke_sent(self):
        # CHOKE is on the wire: queued requests are void.
        del self.buffer[:]
        self.cleared = True

    def unchoke(self):
        if self.choked:
            self.choked = False
            self.cleared = False
            self.connection.send_unchoke()

    def disconnected(self):
        if self.piecebuf:
            self.piecebuf.release()
            self.piecebuf = None

    def is_choked(self):
        return self.choked

    def is_interested(self):
        return self.interested

    def has_queries(self):
        return not self.choked and len(self.buffer) > 0

    def get_rate(self):
        return self.measure.get_rate()